Commit c66ac9db authored by Nicholas Bellinger, committed by James Bottomley

[SCSI] target: Add LIO target core v4.0.0-rc6

LIO target is a full featured in-kernel target framework with the
following feature set:

High-performance, non-blocking, multithreaded architecture with SIMD
support.

Advanced SCSI feature set:

    * Persistent Reservations (PRs)
    * Asymmetric Logical Unit Assignment (ALUA)
    * Protocol and intra-nexus multiplexing, load-balancing and failover (MC/S)
    * Full Error Recovery (ERL=0,1,2)
    * Active/active task migration and session continuation (ERL=2)
    * Thin LUN provisioning (UNMAP and WRITE_SAMExx)

Multiprotocol target plugins

Storage media independence:

    * Virtualization of all storage media; transparent mapping of IO to LUNs
    * No hard limits on number of LUNs per Target; maximum LUN size ~750 TB
    * Backstores: SATA, SAS, SCSI, BluRay, DVD, FLASH, USB, ramdisk, etc.

Standards compliance:

    * Full compliance with IETF (RFC 3720)
    * Full implementation of SPC-4 PRs and ALUA

Significant code cleanups done by Christoph Hellwig.

[jejb: fix up for new block bdev exclusive interface. Minor fixes from
 Randy Dunlap and Dan Carpenter.]
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
#!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
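# Print a fatal error message and abort the generator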
def tcm_mod_err(msg):
print msg
sys.exit(1)
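# Create drivers/target/<fabric_mod_name>/ for the new module; returns 1 if it already exists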
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
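# Emit <fabric_mod_name>_base.h with the nacl/tpg/lport struct definitions for an FC fabric skeleton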
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
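# Dispatch to the protocol-specific <fabric_mod_name>_base.h builder above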
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
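# Emit <fabric_mod_name>_configfs.c: the configfs make/drop callers, the fabric ops table and module init/exit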
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include <" + fabric_mod_name + "_base.h>\n"
buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!(se_nacl_new))\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!(tpg)) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!(" + fabric_mod_port + ")) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd_to_pool = " + fabric_mod_name + "_release_cmd,\n"
buf += " .release_cmd_direct = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .new_cmd_failure = " + fabric_mod_name + "_new_cmd_failure,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " .pack_lun = " + fabric_mod_name + "_pack_lun,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (!(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return -ENOMEM;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!(" + fabric_mod_name + "_fabric_configfs))\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "#ifdef MODULE\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
buf += "#endif\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
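# Scan include/target/target_core_fabric_ops.h and collect every function pointer declared in struct target_core_fabric_ops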
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0;
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1;
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
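# Emit <fabric_mod_name>_fabric.c/.h with a stub (NOP or protocol-dispatching) implementation for every fabric op collected above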
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_transport.h>\n"
buf += "#include <target/target_core_fabric_ops.h>\n"
buf += "#include <target/target_core_fabric_lib.h>\n"
buf += "#include <target/target_core_device.h>\n"
buf += "#include <target/target_core_tpg.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <" + fabric_mod_name + "_base.h>\n"
buf += "#include <" + fabric_mod_name + "_fabric.h>\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!(nacl)) {\n"
buf += " printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('release_cmd_to_pool', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('new_cmd_failure\)\(', fo):
buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
if re.search('pack_lun\)\(', fo):
buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
buf += "{\n"
buf += " WARN_ON(lun >= 256);\n"
buf += " /* Caller wants this byte-swapped */\n"
buf += " return cpu_to_le64((lun & 0xff) << 8);\n"
buf += "}\n\n"
bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
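# Emit the per-module Kbuild file linking <fabric_mod_name>_fabric.o and <fabric_mod_name>_configfs.o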
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kbuild"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/include/ -I$(srctree)/drivers/scsi/ -I$(srctree)/include/scsi/ -I$(srctree)/drivers/target/" + fabric_mod_name + "\n\n"
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
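# Emit the per-module Kconfig entry for CONFIG_<FABRIC_MOD_NAME>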
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
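# Append the new obj-$(CONFIG_...) line to drivers/target/Kbuild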
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Kbuild"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
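# Append the new Kconfig source line to drivers/target/Kconfig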
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
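# Generate the complete fabric module skeleton, then optionally hook it into drivers/target/Kbuild and Kconfig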
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kbuild..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
>>>>>>>>>> The TCM v4 fabric module script generator <<<<<<<<<<
Greetings all,
This document is intended to be a mini-HOWTO for using the tcm_mod_builder.py
script to generate a brand new functional TCM v4 fabric .ko module of your very own,
that, once built, can be immediately loaded to start accessing the new TCM/ConfigFS
fabric skeleton, simply by using:
modprobe $TCM_NEW_MOD
mkdir -p /sys/kernel/config/target/$TCM_NEW_MOD
This script will create a new drivers/target/$TCM_NEW_MOD/, and will do the following:
*) Generate new API callers for drivers/target/target_core_fabric_configfs.c logic:
->make_nodeacl(), ->drop_nodeacl(), ->make_tpg(), ->drop_tpg(),
->make_wwn(), ->drop_wwn(). These are created in $TCM_NEW_MOD/$TCM_NEW_MOD_configfs.c
*) Generate basic infrastructure for loading/unloading LKMs and TCM/ConfigFS fabric module
using a skeleton struct target_core_fabric_ops API template.
*) Based on user defined T10 Proto_Ident for the new fabric module being built,
the TransportID / Initiator and Target WWPN related handlers for
SPC-3 persistent reservation are automatically generated in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
using drivers/target/target_core_fabric_lib.c logic.
*) NOP API calls for all other Data I/O path and fabric dependent attribute logic
in $TCM_NEW_MOD/$TCM_NEW_MOD_fabric.c
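For orientation, here is a condensed, hand-written sketch of what those generated
pieces look like for the tcm_nab5000 example used later in this HOWTO (only a few
of the generated callbacks are shown; the real files are emitted by the script):

    /* tcm_nab5000_fabric.c: NOP data-path stub emitted for each fabric op */
    int tcm_nab5000_queue_data_in(struct se_cmd *se_cmd)
    {
        return 0;
    }

    /* tcm_nab5000_configfs.c: skeleton ops table wiring the stubs and the
     * configfs make/drop callers into the target core */
    static struct target_core_fabric_ops tcm_nab5000_ops = {
        .get_fabric_name     = tcm_nab5000_get_fabric_name,
        .queue_data_in       = tcm_nab5000_queue_data_in,
        .fabric_make_wwn     = tcm_nab5000_make_tport,
        .fabric_make_tpg     = tcm_nab5000_make_tpg,
        .fabric_make_nodeacl = tcm_nab5000_make_nodeacl,
        /* ... remaining callbacks as generated ... */
    };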
tcm_mod_builder.py depends upon the mandatory '-p $PROTO_IDENT' and '-m
$FABRIC_MOD_name' parameters, and actually running the script looks like:
target:/mnt/sdb/lio-core-2.6.git/Documentation/target# python tcm_mod_builder.py -p iSCSI -m tcm_nab5000
tcm_dir: /mnt/sdb/lio-core-2.6.git/Documentation/target/../../
Set fabric_mod_name: tcm_nab5000
Set fabric_mod_dir:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
Using proto_ident: iSCSI
Creating fabric_mod_dir:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_base.h
Using tcm_mod_scan_fabric_ops:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../include/target/target_core_fabric_ops.h
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.c
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_fabric.h
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/tcm_nab5000_configfs.c
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kbuild
Writing file:
/mnt/sdb/lio-core-2.6.git/Documentation/target/../../drivers/target/tcm_nab5000/Kconfig
Would you like to add tcm_nab5000 to drivers/target/Kbuild..? [yes,no]: yes
Would you like to add tcm_nab5000 to drivers/target/Kconfig..? [yes,no]: yes
At the end of tcm_mod_builder.py, the script will ask to add the following
line to drivers/target/Kbuild:
obj-$(CONFIG_TCM_NAB5000) += tcm_nab5000/
and the same for drivers/target/Kconfig:
source "drivers/target/tcm_nab5000/Kconfig"
*) Run 'make menuconfig' and select the new CONFIG_TCM_NAB5000 item:
<M> TCM_NAB5000 fabric module
*) Build using 'make modules'; once completed you will have:
target:/mnt/sdb/lio-core-2.6.git# ls -la drivers/target/tcm_nab5000/
total 1348
drwxr-xr-x 2 root root 4096 2010-10-05 03:23 .
drwxr-xr-x 9 root root 4096 2010-10-05 03:22 ..
-rw-r--r-- 1 root root 282 2010-10-05 03:22 Kbuild
-rw-r--r-- 1 root root 171 2010-10-05 03:22 Kconfig
-rw-r--r-- 1 root root 49 2010-10-05 03:23 modules.order
-rw-r--r-- 1 root root 738 2010-10-05 03:22 tcm_nab5000_base.h
-rw-r--r-- 1 root root 9096 2010-10-05 03:22 tcm_nab5000_configfs.c
-rw-r--r-- 1 root root 191200 2010-10-05 03:23 tcm_nab5000_configfs.o
-rw-r--r-- 1 root root 40504 2010-10-05 03:23 .tcm_nab5000_configfs.o.cmd
-rw-r--r-- 1 root root 5414 2010-10-05 03:22 tcm_nab5000_fabric.c
-rw-r--r-- 1 root root 2016 2010-10-05 03:22 tcm_nab5000_fabric.h
-rw-r--r-- 1 root root 190932 2010-10-05 03:23 tcm_nab5000_fabric.o
-rw-r--r-- 1 root root 40713 2010-10-05 03:23 .tcm_nab5000_fabric.o.cmd
-rw-r--r-- 1 root root 401861 2010-10-05 03:23 tcm_nab5000.ko
-rw-r--r-- 1 root root 265 2010-10-05 03:23 .tcm_nab5000.ko.cmd
-rw-r--r-- 1 root root 459 2010-10-05 03:23 tcm_nab5000.mod.c
-rw-r--r-- 1 root root 23896 2010-10-05 03:23 tcm_nab5000.mod.o
-rw-r--r-- 1 root root 22655 2010-10-05 03:23 .tcm_nab5000.mod.o.cmd
-rw-r--r-- 1 root root 379022 2010-10-05 03:23 tcm_nab5000.o
-rw-r--r-- 1 root root 211 2010-10-05 03:23 .tcm_nab5000.o.cmd
*) Load the new module, create a lun_0 configfs group, and add a new TCM Core
IBLOCK backstore symlink to the port:
target:/mnt/sdb/lio-core-2.6.git# insmod drivers/target/tcm_nab5000.ko
target:/mnt/sdb/lio-core-2.6.git# mkdir -p /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0
target:/mnt/sdb/lio-core-2.6.git# cd /sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0/
target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# ln -s /sys/kernel/config/target/core/iblock_0/lvm_test0 nab5000_port
target:/sys/kernel/config/target/nab5000/iqn.foo/tpgt_1/lun/lun_0# cd -
target:/mnt/sdb/lio-core-2.6.git# tree /sys/kernel/config/target/nab5000/
/sys/kernel/config/target/nab5000/
|-- discovery_auth
|-- iqn.foo
| `-- tpgt_1
| |-- acls
| |-- attrib
| |-- lun
| | `-- lun_0
| | |-- alua_tg_pt_gp
| | |-- alua_tg_pt_offline
| | |-- alua_tg_pt_status
| | |-- alua_tg_pt_write_md
| | `-- nab5000_port -> ../../../../../../target/core/iblock_0/lvm_test0
| |-- np
| `-- param
`-- version
target:/mnt/sdb/lio-core-2.6.git# lsmod
Module Size Used by
tcm_nab5000 3935 4
iscsi_target_mod 193211 0
target_core_stgt 8090 0
target_core_pscsi 11122 1
target_core_file 9172 2
target_core_iblock 9280 1
target_core_mod 228575 31
tcm_nab5000,iscsi_target_mod,target_core_stgt,target_core_pscsi,target_core_file,target_core_iblock
libfc 73681 0
scsi_debug 56265 0
scsi_tgt 8666 1 target_core_stgt
configfs 20644 2 target_core_mod
----------------------------------------------------------------------
Future TODO items:
*) Add more T10 proto_idents
*) Make tcm_mod_dump_fabric_ops() smarter and generate function pointer
defs directly from include/target/target_core_fabric_ops.h:struct target_core_fabric_ops
structure members.
October 5th, 2010
Nicholas A. Bellinger <nab@linux-iscsi.org>
@@ -26,6 +26,8 @@ source "drivers/ata/Kconfig"
source "drivers/md/Kconfig"
source "drivers/target/Kconfig"
source "drivers/message/fusion/Kconfig"
source "drivers/firewire/Kconfig"
......
@@ -46,6 +46,7 @@ obj-y += macintosh/
obj-$(CONFIG_IDE) += ide/
obj-$(CONFIG_SCSI) += scsi/
obj-$(CONFIG_ATA) += ata/
obj-$(CONFIG_TARGET_CORE) += target/
obj-$(CONFIG_MTD) += mtd/
obj-$(CONFIG_SPI) += spi/
obj-y += net/
......
menuconfig TARGET_CORE
tristate "Generic Target Core Mod (TCM) and ConfigFS Infrastructure"
depends on SCSI && BLOCK
select CONFIGFS_FS
default n
help
Say Y or M here to enable the TCM Storage Engine and ConfigFS enabled
control path for target_core_mod. This includes built-in TCM RAMDISK
subsystem logic for virtual LUN 0 access
if TARGET_CORE
config TCM_IBLOCK
tristate "TCM/IBLOCK Subsystem Plugin for Linux/BLOCK"
help
Say Y here to enable the TCM/IBLOCK subsystem plugin for non-buffered
access to Linux/Block devices using BIO
config TCM_FILEIO
tristate "TCM/FILEIO Subsystem Plugin for Linux/VFS"
help
Say Y here to enable the TCM/FILEIO subsystem plugin for buffered
access to Linux/VFS struct file or struct block_device
config TCM_PSCSI
tristate "TCM/pSCSI Subsystem Plugin for Linux/SCSI"
help
Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
passthrough access to Linux/SCSI device
endif
EXTRA_CFLAGS += -I$(srctree)/drivers/target/ -I$(srctree)/drivers/scsi/
target_core_mod-y := target_core_configfs.o \
target_core_device.o \
target_core_fabric_configfs.o \
target_core_fabric_lib.o \
target_core_hba.o \
target_core_pr.o \
target_core_alua.o \
target_core_scdb.o \
target_core_tmr.o \
target_core_tpg.o \
target_core_transport.o \
target_core_cdb.o \
target_core_ua.o \
target_core_rd.o \
target_core_mib.o
obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
# Subsystem modules
obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
/*******************************************************************************
* Filename: target_core_alua.c
*
* This file contains SPC-3 compliant asymmetric logical unit assignment (ALUA)
*
* Copyright (c) 2009-2010 Rising Tide Systems
* Copyright (c) 2009-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_ua.h"
static int core_alua_check_transition(int state, int *primary);
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port, int explict, int offline);
/*
* REPORT_TARGET_PORT_GROUPS
*
* See spc4r17 section 6.27
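*
* The parameter data returned below is a four byte RETURN DATA LENGTH
* header followed by one eight byte descriptor per target port group,
* each trailed by a four byte entry carrying the RELATIVE TARGET PORT
* IDENTIFIER of every port in that group.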
*/
int core_emulate_report_target_port_groups(struct se_cmd *cmd)
{
struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
struct se_port *port;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first
Target port group descriptor */
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
tg_pt_gp_list) {
/*
* PREF: Preferred target port bit, determine if this
* bit should be set for port group.
*/
if (tg_pt_gp->tg_pt_gp_pref)
buf[off] = 0x80;
/*
* Set the ASYMMETRIC ACCESS State
*/
buf[off++] |= (atomic_read(
&tg_pt_gp->tg_pt_gp_alua_access_state) & 0xff);
/*
* Set supported ASYMMETRIC ACCESS State bits
*/
buf[off] = 0x80; /* T_SUP */
buf[off] |= 0x40; /* O_SUP */
buf[off] |= 0x8; /* U_SUP */
buf[off] |= 0x4; /* S_SUP */
buf[off] |= 0x2; /* AN_SUP */
buf[off++] |= 0x1; /* AO_SUP */
/*
* TARGET PORT GROUP
*/
buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff);
buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff);
off++; /* Skip over Reserved */
/*
* STATUS CODE
*/
buf[off++] = (tg_pt_gp->tg_pt_gp_alua_access_status & 0xff);
/*
* Vendor Specific field
*/
buf[off++] = 0x00;
/*
* TARGET PORT COUNT
*/
buf[off++] = (tg_pt_gp->tg_pt_gp_members & 0xff);
rd_len += 8;
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
tg_pt_gp_mem_list) {
port = tg_pt_gp_mem->tg_pt;
/*
* Start Target Port descriptor format
*
* See spc4r17 section 6.2.7 Table 247
*/
off += 2; /* Skip over Obsolete */
/*
* Set RELATIVE TARGET PORT IDENTIFIER
*/
buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
buf[off++] = (port->sep_rtpi & 0xff);
rd_len += 4;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
/*
* Set the RETURN DATA LENGTH in the header of the DataIN Payload
*/
buf[0] = ((rd_len >> 24) & 0xff);
buf[1] = ((rd_len >> 16) & 0xff);
buf[2] = ((rd_len >> 8) & 0xff);
buf[3] = (rd_len & 0xff);
return 0;
}
/*
* SET_TARGET_PORT_GROUPS for explict ALUA operation.
*
* See spc4r17 section 6.35
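*
* The parameter list walked below is a four byte reserved header
* followed by four byte descriptors: the low nibble of byte 0 carries
* the requested ASYMMETRIC ACCESS STATE, and bytes 2-3 carry either a
* TARGET PORT GROUP id (for primary states) or a RELATIVE TARGET PORT
* IDENTIFIER (for secondary states).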
*/
int core_emulate_set_target_port_groups(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
struct se_subsystem_dev *su_dev = SE_DEV(cmd)->se_sub_dev;
struct se_port *port, *l_port = SE_LUN(cmd)->lun_sep;
struct se_node_acl *nacl = SE_SESS(cmd)->se_node_acl;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *l_tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *l_tg_pt_gp_mem;
unsigned char *buf = (unsigned char *)T_TASK(cmd)->t_task_buf;
unsigned char *ptr = &buf[4]; /* Skip over RESERVED area in header */
u32 len = 4; /* Skip over RESERVED area in header */
int alua_access_state, primary = 0, rc;
u16 tg_pt_id, rtpi;
if (!(l_port))
return PYX_TRANSPORT_LU_COMM_FAILURE;
/*
* Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
* for the local tg_pt_gp.
*/
l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
if (!(l_tg_pt_gp_mem)) {
printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
if (!(l_tg_pt_gp)) {
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
if (!(rc)) {
printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
" while TPGS_EXPLICT_ALUA is disabled\n");
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
while (len < cmd->data_length) {
alua_access_state = (ptr[0] & 0x0f);
/*
* Check the received ALUA access state, and determine if
* the state is a primary or secondary target port asymmetric
* access state.
*/
rc = core_alua_check_transition(alua_access_state, &primary);
if (rc != 0) {
/*
* If the SET TARGET PORT GROUPS attempts to establish
* an invalid combination of target port asymmetric
* access states or attempts to establish an
* unsupported target port asymmetric access state,
* then the command shall be terminated with CHECK
* CONDITION status, with the sense key set to ILLEGAL
* REQUEST, and the additional sense code set to INVALID
* FIELD IN PARAMETER LIST.
*/
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
rc = -1;
/*
* If the ASYMMETRIC ACCESS STATE field (see table 267)
* specifies a primary target port asymmetric access state,
* then the TARGET PORT GROUP OR TARGET PORT field specifies
* a primary target port group for which the primary target
* port asymmetric access state shall be changed. If the
* ASYMMETRIC ACCESS STATE field specifies a secondary target
* port asymmetric access state, then the TARGET PORT GROUP OR
* TARGET PORT field specifies the relative target port
* identifier (see 3.1.120) of the target port for which the
* secondary target port asymmetric access state shall be
* changed.
*/
if (primary) {
tg_pt_id = ((ptr[2] << 8) & 0xff00);
tg_pt_id |= (ptr[3] & 0xff);
/*
* Locate the matching target port group ID from
* the global tg_pt_gp list
*/
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
&T10_ALUA(su_dev)->tg_pt_gps_list,
tg_pt_gp_list) {
if (!(tg_pt_gp->tg_pt_gp_valid_id))
continue;
if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
continue;
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
rc = core_alua_do_port_transition(tg_pt_gp,
dev, l_port, nacl,
alua_access_state, 1);
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
break;
}
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
/*
* If no matching target port group ID can be located,
* throw an exception with ASCQ: INVALID_PARAMETER_LIST
*/
if (rc != 0)
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
} else {
/*
* Extract the RELATIVE TARGET PORT IDENTIFIER to identify
* the Target Port in question for the incoming
* SET_TARGET_PORT_GROUPS op.
*/
rtpi = ((ptr[2] << 8) & 0xff00);
rtpi |= (ptr[3] & 0xff);
/*
* Locate the matching relative target port identifier
* for the struct se_device storage object.
*/
spin_lock(&dev->se_port_lock);
list_for_each_entry(port, &dev->dev_sep_list,
sep_list) {
if (port->sep_rtpi != rtpi)
continue;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
spin_unlock(&dev->se_port_lock);
rc = core_alua_set_tg_pt_secondary_state(
tg_pt_gp_mem, port, 1, 1);
spin_lock(&dev->se_port_lock);
break;
}
spin_unlock(&dev->se_port_lock);
/*
* If no matching relative target port identifier can
* be located, throw an exception with ASCQ:
* INVALID_PARAMETER_LIST
*/
if (rc != 0)
return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
}
ptr += 4;
len += 4;
}
return 0;
}
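/*
* Per primary ALUA access state CDB filtering used by core_alua_state_check()
* below for SPC3_ALUA_EMULATED devices.  Each helper decides whether the
* incoming CDB may be processed in the current access state, and sets
* *alua_ascq when the command must be failed with check condition/not ready.
*/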
static inline int core_alua_state_nonoptimized(
struct se_cmd *cmd,
unsigned char *cdb,
int nonop_delay_msecs,
u8 *alua_ascq)
{
/*
* Set SCF_ALUA_NON_OPTIMIZED here, this value will be checked
* later to determine if processing of this cmd needs to be
* temporarily delayed for the Active/NonOptimized primary access state.
*/
cmd->se_cmd_flags |= SCF_ALUA_NON_OPTIMIZED;
cmd->alua_nonop_delay = nonop_delay_msecs;
return 0;
}
static inline int core_alua_state_standby(
struct se_cmd *cmd,
unsigned char *cdb,
u8 *alua_ascq)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_STANDBY as defined by
* spc4r17 section 5.9.2.4.4
*/
switch (cdb[0]) {
case INQUIRY:
case LOG_SELECT:
case LOG_SENSE:
case MODE_SELECT:
case MODE_SENSE:
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
case MAINTENANCE_IN:
switch (cdb[1]) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
return 1;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
return 1;
}
case REQUEST_SENSE:
case PERSISTENT_RESERVE_IN:
case PERSISTENT_RESERVE_OUT:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_TG_PT_STANDBY;
return 1;
}
return 0;
}
static inline int core_alua_state_unavailable(
struct se_cmd *cmd,
unsigned char *cdb,
u8 *alua_ascq)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_UNAVAILABLE as defined by
* spc4r17 section 5.9.2.4.5
*/
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case MAINTENANCE_IN:
switch (cdb[1]) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
return 1;
}
case MAINTENANCE_OUT:
switch (cdb[1]) {
case MO_SET_TARGET_PGS:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
return 1;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE;
return 1;
}
return 0;
}
static inline int core_alua_state_transition(
struct se_cmd *cmd,
unsigned char *cdb,
u8 *alua_ascq)
{
/*
* Allowed CDBs for ALUA_ACCESS_STATE_TRANSITION as defined by
* spc4r17 section 5.9.2.5
*/
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case MAINTENANCE_IN:
switch (cdb[1]) {
case MI_REPORT_TARGET_PGS:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
return 1;
}
case REQUEST_SENSE:
case READ_BUFFER:
case WRITE_BUFFER:
return 0;
default:
*alua_ascq = ASCQ_04H_ALUA_STATE_TRANSITION;
return 1;
}
return 0;
}
/*
* Used for alua_type SPC_ALUA_PASSTHROUGH and SPC2_ALUA_DISABLED
* in transport_cmd_sequencer(). This function is assigned to
* struct t10_alua *->state_check() in core_setup_alua()
*/
static int core_alua_state_check_nop(
struct se_cmd *cmd,
unsigned char *cdb,
u8 *alua_ascq)
{
return 0;
}
/*
* Used for alua_type SPC3_ALUA_EMULATED in transport_cmd_sequencer().
* This function is assigned to struct t10_alua *->state_check() in
* core_setup_alua()
*
* Also, this function can return three different return codes to
* signal transport_generic_cmd_sequencer()
*
* return 1: Used to signal LUN not accessible, and check condition/not ready
* return 0: Used to signal success
* return -1: Used to signal failure, and invalid cdb field
*/
static int core_alua_state_check(
struct se_cmd *cmd,
unsigned char *cdb,
u8 *alua_ascq)
{
struct se_lun *lun = SE_LUN(cmd);
struct se_port *port = lun->lun_sep;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
int out_alua_state, nonop_delay_msecs;
if (!(port))
return 0;
/*
* First, check for a struct se_port specific secondary ALUA target port
* access state: OFFLINE
*/
if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
printk(KERN_INFO "ALUA: Got secondary offline status for local"
" target port\n");
return 1;
}
/*
* Second, obtain the struct t10_alua_tg_pt_gp_member pointer to the
* ALUA target port group, to obtain current ALUA access state.
* Otherwise look for the underlying struct se_device association with
* a ALUA logical unit group.
*/
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
out_alua_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
/*
* Process ALUA_ACCESS_STATE_ACTIVE_OPTMIZED in a separate conditional
* statement so the compiler knows explicitly to check this case first.
* For the Optimized ALUA access state case, we want to process the
* incoming fabric cmd ASAP.
*/
if (out_alua_state == ALUA_ACCESS_STATE_ACTIVE_OPTMIZED)
return 0;
switch (out_alua_state) {
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return core_alua_state_nonoptimized(cmd, cdb,
nonop_delay_msecs, alua_ascq);
case ALUA_ACCESS_STATE_STANDBY:
return core_alua_state_standby(cmd, cdb, alua_ascq);
case ALUA_ACCESS_STATE_UNAVAILABLE:
return core_alua_state_unavailable(cmd, cdb, alua_ascq);
case ALUA_ACCESS_STATE_TRANSITION:
return core_alua_state_transition(cmd, cdb, alua_ascq);
/*
* OFFLINE is a secondary ALUA target port group access state, that is
* handled above with struct se_port->sep_tg_pt_secondary_offline=1
*/
case ALUA_ACCESS_STATE_OFFLINE:
default:
printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
out_alua_state);
return -1;
}
return 0;
}
/*
* Check implicit and explicit ALUA state change requests.
*/
static int core_alua_check_transition(int state, int *primary)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
case ALUA_ACCESS_STATE_STANDBY:
case ALUA_ACCESS_STATE_UNAVAILABLE:
/*
* OPTIMIZED, NON-OPTIMIZED, STANDBY and UNAVAILABLE are
* defined as primary target port asymmetric access states.
*/
*primary = 1;
break;
case ALUA_ACCESS_STATE_OFFLINE:
/*
* OFFLINE state is defined as a secondary target port
* asymmetric access state.
*/
*primary = 0;
break;
default:
printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
return -1;
}
return 0;
}
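/*
* Return a human readable string for an ALUA primary/secondary access state,
* used in kernel log messages and configfs output.
*/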
static char *core_alua_dump_state(int state)
{
switch (state) {
case ALUA_ACCESS_STATE_ACTIVE_OPTMIZED:
return "Active/Optimized";
case ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED:
return "Active/NonOptimized";
case ALUA_ACCESS_STATE_STANDBY:
return "Standby";
case ALUA_ACCESS_STATE_UNAVAILABLE:
return "Unavailable";
case ALUA_ACCESS_STATE_OFFLINE:
return "Offline";
default:
return "Unknown";
}
return NULL;
}
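/*
* Return a human readable string for an ALUA access status value.
*/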
char *core_alua_dump_status(int status)
{
switch (status) {
case ALUA_STATUS_NONE:
return "None";
case ALUA_STATUS_ALTERED_BY_EXPLICT_STPG:
return "Altered by Explict STPG";
case ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA:
return "Altered by Implict ALUA";
default:
return "Unknown";
}
return NULL;
}
/*
* Used by fabric modules to determine when we need to delay processing
* for the Active/NonOptimized paths.
*/
int core_alua_check_nonop_delay(
struct se_cmd *cmd)
{
if (!(cmd->se_cmd_flags & SCF_ALUA_NON_OPTIMIZED))
return 0;
if (in_interrupt())
return 0;
/*
* The ALUA Active/NonOptimized access state delay can be disabled
* via configfs with a value of zero
*/
if (!(cmd->alua_nonop_delay))
return 0;
/*
* struct se_cmd->alua_nonop_delay gets set by a target port group
* defined interval in core_alua_state_nonoptimized()
*/
msleep_interruptible(cmd->alua_nonop_delay);
return 0;
}
EXPORT_SYMBOL(core_alua_check_nonop_delay);
/*
* Called with tg_pt_gp->tg_pt_gp_md_mutex or port->sep_tg_pt_md_mutex held
*/
static int core_alua_write_tpg_metadata(
const char *path,
unsigned char *md_buf,
u32 md_buf_len)
{
mm_segment_t old_fs;
struct file *file;
struct iovec iov[1];
int flags = O_RDWR | O_CREAT | O_TRUNC, ret;
memset(iov, 0, sizeof(struct iovec));
file = filp_open(path, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
path);
return -ENODEV;
}
iov[0].iov_base = &md_buf[0];
iov[0].iov_len = md_buf_len;
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_writev(file, &iov[0], 1, &file->f_pos);
set_fs(old_fs);
if (ret < 0) {
printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
filp_close(file, NULL);
return -EIO;
}
filp_close(file, NULL);
return 0;
}
/*
* Called with tg_pt_gp->tg_pt_gp_md_mutex held
*/
static int core_alua_update_tpg_primary_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp,
int primary_state,
unsigned char *md_buf)
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
struct t10_wwn *wwn = &su_dev->t10_wwn;
char path[ALUA_METADATA_PATH_LEN];
int len;
memset(path, 0, ALUA_METADATA_PATH_LEN);
len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
"tg_pt_gp_id=%hu\n"
"alua_access_state=0x%02x\n"
"alua_access_status=0x%02x\n",
tg_pt_gp->tg_pt_gp_id, primary_state,
tg_pt_gp->tg_pt_gp_alua_access_status);
snprintf(path, ALUA_METADATA_PATH_LEN,
"/var/target/alua/tpgs_%s/%s", &wwn->unit_serial[0],
config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item));
return core_alua_write_tpg_metadata(path, md_buf, len);
}
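/*
* Perform a primary access state transition for a single target port group:
* set the intermediate TRANSITION state, establish UNIT ATTENTION conditions
* for the affected I_T nexuses, optionally write out the primary ALUA
* metadata, and finally set the requested new access state.
*/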
static int core_alua_do_transition_tg_pt(
struct t10_alua_tg_pt_gp *tg_pt_gp,
struct se_port *l_port,
struct se_node_acl *nacl,
unsigned char *md_buf,
int new_state,
int explict)
{
struct se_dev_entry *se_deve;
struct se_lun_acl *lacl;
struct se_port *port;
struct t10_alua_tg_pt_gp_member *mem;
int old_state = 0;
/*
* Save the old primary ALUA access state, and set the current state
* to ALUA_ACCESS_STATE_TRANSITION.
*/
old_state = atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state);
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_TRANSITION);
tg_pt_gp->tg_pt_gp_alua_access_status = (explict) ?
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
/*
* Check for the optional ALUA primary state transition delay
*/
if (tg_pt_gp->tg_pt_gp_trans_delay_msecs != 0)
msleep_interruptible(tg_pt_gp->tg_pt_gp_trans_delay_msecs);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(mem, &tg_pt_gp->tg_pt_gp_mem_list,
tg_pt_gp_mem_list) {
port = mem->tg_pt;
/*
* After an implicit target port asymmetric access state
* change, a device server shall establish a unit attention
* condition for the initiator port associated with every I_T
* nexus with the additional sense code set to ASYMMETRIC
* ACCESS STATE CHANGED.
*
* After an explicit target port asymmetric access state
* change, a device server shall establish a unit attention
* condition with the additional sense code set to ASYMMETRIC
* ACCESS STATE CHANGED for the initiator port associated with
* every I_T nexus other than the I_T nexus on which the SET
* TARGET PORT GROUPS command was received.
*/
atomic_inc(&mem->tg_pt_gp_mem_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
spin_lock_bh(&port->sep_alua_lock);
list_for_each_entry(se_deve, &port->sep_alua_list,
alua_port_list) {
lacl = se_deve->se_lun_acl;
/*
* se_deve->se_lun_acl pointer may be NULL for an
* entry created without explicit Node+MappedLUN ACLs
*/
if (!(lacl))
continue;
if (explict &&
(nacl != NULL) && (nacl == lacl->se_lun_nacl) &&
(l_port != NULL) && (l_port == port))
continue;
core_scsi3_ua_allocate(lacl->se_lun_nacl,
se_deve->mapped_lun, 0x2A,
ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED);
}
spin_unlock_bh(&port->sep_alua_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
atomic_dec(&mem->tg_pt_gp_mem_ref_cnt);
smp_mb__after_atomic_dec();
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/*
* Update the ALUA metadata buf that has been allocated in
* core_alua_do_port_transition(), this metadata will be written
* to struct file.
*
* Note that there is the case where we do not want to update the
* metadata when the saved metadata is being parsed in userspace
* when setting the existing port access state and access status.
*
* Also note that the failure to write out the ALUA metadata to
* struct file does NOT affect the actual ALUA transition.
*/
if (tg_pt_gp->tg_pt_gp_write_metadata) {
mutex_lock(&tg_pt_gp->tg_pt_gp_md_mutex);
core_alua_update_tpg_primary_metadata(tg_pt_gp,
new_state, md_buf);
mutex_unlock(&tg_pt_gp->tg_pt_gp_md_mutex);
}
/*
* Set the current primary ALUA access state to the requested new state
*/
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
" from primary access state %s to %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
core_alua_dump_state(new_state));
return 0;
}
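/*
* Entry point for primary access state changes, both explicit
* (SET_TARGET_PORT_GROUPS) and implicit.  Walks the LU group associated
* with *l_dev and transitions every target port group whose ID matches
* *l_tg_pt_gp, so all logical units in the same LU group observe the same
* primary access state change.
*/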
int core_alua_do_port_transition(
struct t10_alua_tg_pt_gp *l_tg_pt_gp,
struct se_device *l_dev,
struct se_port *l_port,
struct se_node_acl *l_nacl,
int new_state,
int explict)
{
struct se_device *dev;
struct se_port *port;
struct se_subsystem_dev *su_dev;
struct se_node_acl *nacl;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem, *local_lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
unsigned char *md_buf;
int primary;
if (core_alua_check_transition(new_state, &primary) != 0)
return -EINVAL;
md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
if (!(md_buf)) {
printk("Unable to allocate buf for ALUA metadata\n");
return -ENOMEM;
}
local_lu_gp_mem = l_dev->dev_alua_lu_gp_mem;
spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
lu_gp = local_lu_gp_mem->lu_gp;
atomic_inc(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
/*
* For storage objects that are members of the 'default_lu_gp',
* we only do transition on the passed *l_tg_pt_gp, and not
* on all of the matching target port groups IDs in default_lu_gp.
*/
if (!(lu_gp->lu_gp_id)) {
/*
* core_alua_do_transition_tg_pt() will always return
* success.
*/
core_alua_do_transition_tg_pt(l_tg_pt_gp, l_port, l_nacl,
md_buf, new_state, explict);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
kfree(md_buf);
return 0;
}
/*
* For all other LU groups aside from 'default_lu_gp', walk all of
* the associated storage objects looking for a matching target port
* group ID from the local target port group.
*/
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list,
lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
su_dev = dev->se_sub_dev;
atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&lu_gp->lu_gp_lock);
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp,
&T10_ALUA(su_dev)->tg_pt_gps_list,
tg_pt_gp_list) {
if (!(tg_pt_gp->tg_pt_gp_valid_id))
continue;
/*
* If the target port asymmetric access state is changed
* for any target port group accessible via a logical unit
* within a LU group, the target port asymmetric access
* states for the same target port group accessible via
* other logical units in that LU group will also change.
*/
if (l_tg_pt_gp->tg_pt_gp_id != tg_pt_gp->tg_pt_gp_id)
continue;
if (l_tg_pt_gp == tg_pt_gp) {
port = l_port;
nacl = l_nacl;
} else {
port = NULL;
nacl = NULL;
}
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_inc();
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
/*
* core_alua_do_transition_tg_pt() will always return
* success.
*/
core_alua_do_transition_tg_pt(tg_pt_gp, port,
nacl, md_buf, new_state, explict);
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
smp_mb__after_atomic_dec();
}
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
spin_lock(&lu_gp->lu_gp_lock);
atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt);
smp_mb__after_atomic_dec();
}
spin_unlock(&lu_gp->lu_gp_lock);
printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
" Group IDs: %hu %s transition to primary state: %s\n",
config_item_name(&lu_gp->lu_gp_group.cg_item),
l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
core_alua_dump_state(new_state));
atomic_dec(&lu_gp->lu_gp_ref_cnt);
smp_mb__after_atomic_dec();
kfree(md_buf);
return 0;
}
/*
* Called with port->sep_tg_pt_md_mutex held
*/
static int core_alua_update_tpg_secondary_metadata(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port,
unsigned char *md_buf,
u32 md_buf_len)
{
struct se_portal_group *se_tpg = port->sep_tpg;
char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
int len;
memset(path, 0, ALUA_METADATA_PATH_LEN);
memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
len = snprintf(wwn, ALUA_SECONDARY_METADATA_WWN_LEN, "%s",
TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg));
if (TPG_TFO(se_tpg)->tpg_get_tag != NULL)
snprintf(wwn+len, ALUA_SECONDARY_METADATA_WWN_LEN-len, "+%hu",
TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
len = snprintf(md_buf, md_buf_len, "alua_tg_pt_offline=%d\n"
"alua_tg_pt_status=0x%02x\n",
atomic_read(&port->sep_tg_pt_secondary_offline),
port->sep_tg_pt_secondary_stat);
snprintf(path, ALUA_METADATA_PATH_LEN, "/var/target/alua/%s/%s/lun_%u",
TPG_TFO(se_tpg)->get_fabric_name(), wwn,
port->sep_lun->unpacked_lun);
return core_alua_write_tpg_metadata(path, md_buf, len);
}
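/*
* Set or clear the secondary (OFFLINE) ALUA access state on a single
* struct se_port, and optionally write out the secondary ALUA metadata.
*/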
static int core_alua_set_tg_pt_secondary_state(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct se_port *port,
int explict,
int offline)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
unsigned char *md_buf;
u32 md_buf_len;
int trans_delay_msecs;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (!(tg_pt_gp)) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_ERR "Unable to complete secondary state"
" transition\n");
return -1;
}
trans_delay_msecs = tg_pt_gp->tg_pt_gp_trans_delay_msecs;
/*
* Set the secondary ALUA target port access state to OFFLINE
* or release the previously secondary state for struct se_port
*/
if (offline)
atomic_set(&port->sep_tg_pt_secondary_offline, 1);
else
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
md_buf_len = tg_pt_gp->tg_pt_gp_md_buf_len;
port->sep_tg_pt_secondary_stat = (explict) ?
ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
" to secondary access state: %s\n", (explict) ? "explict" :
"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
/*
* Do the optional transition delay after we set the secondary
* ALUA access state.
*/
if (trans_delay_msecs != 0)
msleep_interruptible(trans_delay_msecs);
/*
* See if we need to update the ALUA fabric port metadata for
* secondary state and status
*/
if (port->sep_tg_pt_secondary_write_md) {
md_buf = kzalloc(md_buf_len, GFP_KERNEL);
if (!(md_buf)) {
printk(KERN_ERR "Unable to allocate md_buf for"
" secondary ALUA access metadata\n");
return -1;
}
mutex_lock(&port->sep_tg_pt_md_mutex);
core_alua_update_tpg_secondary_metadata(tg_pt_gp_mem, port,
md_buf, md_buf_len);
mutex_unlock(&port->sep_tg_pt_md_mutex);
kfree(md_buf);
}
return 0;
}
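/*
* Allocate and initialize a struct t10_alua_lu_gp.  When def_group is set,
* an ID is assigned immediately from the global LU group counter.
*/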
struct t10_alua_lu_gp *
core_alua_allocate_lu_gp(const char *name, int def_group)
{
struct t10_alua_lu_gp *lu_gp;
lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
if (!(lu_gp)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp->lu_gp_list);
INIT_LIST_HEAD(&lu_gp->lu_gp_mem_list);
spin_lock_init(&lu_gp->lu_gp_lock);
atomic_set(&lu_gp->lu_gp_ref_cnt, 0);
if (def_group) {
lu_gp->lu_gp_id = se_global->alua_lu_gps_counter++;
lu_gp->lu_gp_valid_id = 1;
se_global->alua_lu_gps_count++;
}
return lu_gp;
}
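/*
* Assign a unique ID to a logical unit group and add it to the global list
* of LU groups.  A lu_gp_id of zero requests the next available ID.
*/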
int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *lu_gp, u16 lu_gp_id)
{
struct t10_alua_lu_gp *lu_gp_tmp;
u16 lu_gp_id_tmp;
/*
* The lu_gp->lu_gp_id may only be set once.
*/
if (lu_gp->lu_gp_valid_id) {
printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
" ignoring request\n");
return -1;
}
spin_lock(&se_global->lu_gps_lock);
if (se_global->alua_lu_gps_count == 0x0000ffff) {
printk(KERN_ERR "Maximum ALUA se_global->alua_lu_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&se_global->lu_gps_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
return -1;
}
again:
lu_gp_id_tmp = (lu_gp_id != 0) ? lu_gp_id :
se_global->alua_lu_gps_counter++;
list_for_each_entry(lu_gp_tmp, &se_global->g_lu_gps_list, lu_gp_list) {
if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
if (!(lu_gp_id))
goto again;
printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
" already exists, ignoring request\n",
lu_gp_id);
spin_unlock(&se_global->lu_gps_lock);
return -1;
}
}
lu_gp->lu_gp_id = lu_gp_id_tmp;
lu_gp->lu_gp_valid_id = 1;
list_add_tail(&lu_gp->lu_gp_list, &se_global->g_lu_gps_list);
se_global->alua_lu_gps_count++;
spin_unlock(&se_global->lu_gps_lock);
return 0;
}
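/*
* Allocate the member structure that associates a struct se_device with a
* logical unit group.
*/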
static struct t10_alua_lu_gp_member *
core_alua_allocate_lu_gp_mem(struct se_device *dev)
{
struct t10_alua_lu_gp_member *lu_gp_mem;
lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
if (!(lu_gp_mem)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
spin_lock_init(&lu_gp_mem->lu_gp_mem_lock);
atomic_set(&lu_gp_mem->lu_gp_mem_ref_cnt, 0);
lu_gp_mem->lu_gp_mem_dev = dev;
dev->dev_alua_lu_gp_mem = lu_gp_mem;
return lu_gp_mem;
}
void core_alua_free_lu_gp(struct t10_alua_lu_gp *lu_gp)
{
struct t10_alua_lu_gp_member *lu_gp_mem, *lu_gp_mem_tmp;
/*
* Once we have reached this point, config_item_put() has
* already been called from target_core_alua_drop_lu_gp().
*
* Here, we remove the *lu_gp from the global list so that
* no associations can be made while we are releasing
* struct t10_alua_lu_gp.
*/
spin_lock(&se_global->lu_gps_lock);
atomic_set(&lu_gp->lu_gp_shutdown, 1);
list_del(&lu_gp->lu_gp_list);
se_global->alua_lu_gps_count--;
spin_unlock(&se_global->lu_gps_lock);
/*
* Allow struct t10_alua_lu_gp * referenced by core_alua_get_lu_gp_by_name()
* in target_core_configfs.c:target_core_store_alua_lu_gp() to be
* released with core_alua_put_lu_gp_from_name()
*/
while (atomic_read(&lu_gp->lu_gp_ref_cnt))
cpu_relax();
/*
* Release reference to struct t10_alua_lu_gp * from all associated
* struct se_device.
*/
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry_safe(lu_gp_mem, lu_gp_mem_tmp,
&lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
lu_gp->lu_gp_members--;
lu_gp_mem->lu_gp_assoc = 0;
}
spin_unlock(&lu_gp->lu_gp_lock);
/*
* lu_gp_mem is associated with a single
* struct se_device->dev_alua_lu_gp_mem, and is released when
* struct se_device is released via core_alua_free_lu_gp_mem().
*
* If the passed lu_gp does NOT match the default_lu_gp, assume
* we want to re-associate a given lu_gp_mem with default_lu_gp.
*/
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
if (lu_gp != se_global->default_lu_gp)
__core_alua_attach_lu_gp_mem(lu_gp_mem,
se_global->default_lu_gp);
else
lu_gp_mem->lu_gp = NULL;
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
spin_lock(&lu_gp->lu_gp_lock);
}
spin_unlock(&lu_gp->lu_gp_lock);
kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
}
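/*
* Release the LU group member for a struct se_device, dropping any remaining
* logical unit group association before the member is freed.
*/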
void core_alua_free_lu_gp_mem(struct se_device *dev)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua *alua = T10_ALUA(su_dev);
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
if (alua->alua_type != SPC3_ALUA_EMULATED)
return;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!(lu_gp_mem))
return;
while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
cpu_relax();
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if ((lu_gp)) {
spin_lock(&lu_gp->lu_gp_lock);
if (lu_gp_mem->lu_gp_assoc) {
list_del(&lu_gp_mem->lu_gp_mem_list);
lu_gp->lu_gp_members--;
lu_gp_mem->lu_gp_assoc = 0;
}
spin_unlock(&lu_gp->lu_gp_lock);
lu_gp_mem->lu_gp = NULL;
}
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
kmem_cache_free(t10_alua_lu_gp_mem_cache, lu_gp_mem);
}
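/*
* Look up a logical unit group by its configfs item name and take a
* reference that is later dropped via core_alua_put_lu_gp_from_name().
*/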
struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *name)
{
struct t10_alua_lu_gp *lu_gp;
struct config_item *ci;
spin_lock(&se_global->lu_gps_lock);
list_for_each_entry(lu_gp, &se_global->g_lu_gps_list, lu_gp_list) {
if (!(lu_gp->lu_gp_valid_id))
continue;
ci = &lu_gp->lu_gp_group.cg_item;
if (!(strcmp(config_item_name(ci), name))) {
atomic_inc(&lu_gp->lu_gp_ref_cnt);
spin_unlock(&se_global->lu_gps_lock);
return lu_gp;
}
}
spin_unlock(&se_global->lu_gps_lock);
return NULL;
}
void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *lu_gp)
{
spin_lock(&se_global->lu_gps_lock);
atomic_dec(&lu_gp->lu_gp_ref_cnt);
spin_unlock(&se_global->lu_gps_lock);
}
/*
* Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
*/
void __core_alua_attach_lu_gp_mem(
struct t10_alua_lu_gp_member *lu_gp_mem,
struct t10_alua_lu_gp *lu_gp)
{
spin_lock(&lu_gp->lu_gp_lock);
lu_gp_mem->lu_gp = lu_gp;
lu_gp_mem->lu_gp_assoc = 1;
list_add_tail(&lu_gp_mem->lu_gp_mem_list, &lu_gp->lu_gp_mem_list);
lu_gp->lu_gp_members++;
spin_unlock(&lu_gp->lu_gp_lock);
}
/*
* Called with struct t10_alua_lu_gp_member->lu_gp_mem_lock
*/
void __core_alua_drop_lu_gp_mem(
struct t10_alua_lu_gp_member *lu_gp_mem,
struct t10_alua_lu_gp *lu_gp)
{
spin_lock(&lu_gp->lu_gp_lock);
list_del(&lu_gp_mem->lu_gp_mem_list);
lu_gp_mem->lu_gp = NULL;
lu_gp_mem->lu_gp_assoc = 0;
lu_gp->lu_gp_members--;
spin_unlock(&lu_gp->lu_gp_lock);
}
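/*
* Allocate and initialize a struct t10_alua_tg_pt_gp for the passed
* struct se_subsystem_dev, defaulting to Active/Optimized with both
* explicit and implicit ALUA enabled.
*/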
struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct se_subsystem_dev *su_dev,
const char *name,
int def_group)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
if (!(tg_pt_gp)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
return NULL;
}
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_mem_list);
mutex_init(&tg_pt_gp->tg_pt_gp_md_mutex);
spin_lock_init(&tg_pt_gp->tg_pt_gp_lock);
atomic_set(&tg_pt_gp->tg_pt_gp_ref_cnt, 0);
tg_pt_gp->tg_pt_gp_su_dev = su_dev;
tg_pt_gp->tg_pt_gp_md_buf_len = ALUA_MD_BUF_LEN;
atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state,
ALUA_ACCESS_STATE_ACTIVE_OPTMIZED);
/*
* Enable both explicit and implicit ALUA support by default
*/
tg_pt_gp->tg_pt_gp_alua_access_type =
TPGS_EXPLICT_ALUA | TPGS_IMPLICT_ALUA;
/*
* Set the default Active/NonOptimized Delay in milliseconds
*/
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = ALUA_DEFAULT_NONOP_DELAY_MSECS;
tg_pt_gp->tg_pt_gp_trans_delay_msecs = ALUA_DEFAULT_TRANS_DELAY_MSECS;
if (def_group) {
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
tg_pt_gp->tg_pt_gp_id =
T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
tg_pt_gp->tg_pt_gp_valid_id = 1;
T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
&T10_ALUA(su_dev)->tg_pt_gps_list);
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
}
return tg_pt_gp;
}
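/*
* Assign a unique ID to a target port group and add it to the per device
* tg_pt_gps_list.  A tg_pt_gp_id of zero requests the next available ID.
*/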
int core_alua_set_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
u16 tg_pt_gp_id)
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp_tmp;
u16 tg_pt_gp_id_tmp;
/*
* The tg_pt_gp->tg_pt_gp_id may only be set once.
*/
if (tg_pt_gp->tg_pt_gp_valid_id) {
printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
" ignoring request\n");
return -1;
}
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
if (T10_ALUA(su_dev)->alua_tg_pt_gps_count == 0x0000ffff) {
printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
" 0x0000ffff reached\n");
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
return -1;
}
again:
tg_pt_gp_id_tmp = (tg_pt_gp_id != 0) ? tg_pt_gp_id :
T10_ALUA(su_dev)->alua_tg_pt_gps_counter++;
list_for_each_entry(tg_pt_gp_tmp, &T10_ALUA(su_dev)->tg_pt_gps_list,
tg_pt_gp_list) {
if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
if (!(tg_pt_gp_id))
goto again;
printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
" exists, ignoring request\n", tg_pt_gp_id);
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
return -1;
}
}
tg_pt_gp->tg_pt_gp_id = tg_pt_gp_id_tmp;
tg_pt_gp->tg_pt_gp_valid_id = 1;
list_add_tail(&tg_pt_gp->tg_pt_gp_list,
&T10_ALUA(su_dev)->tg_pt_gps_list);
T10_ALUA(su_dev)->alua_tg_pt_gps_count++;
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
return 0;
}
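/*
* Allocate the member structure that associates a struct se_port with a
* target port group.
*/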
struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *port)
{
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
GFP_KERNEL);
if (!(tg_pt_gp_mem)) {
printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
spin_lock_init(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
atomic_set(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt, 0);
tg_pt_gp_mem->tg_pt = port;
port->sep_alua_tg_pt_gp_mem = tg_pt_gp_mem;
atomic_set(&port->sep_tg_pt_gp_active, 1);
return tg_pt_gp_mem;
}
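/*
* Release a target port group, moving any remaining member ports back to
* the per device default_tg_pt_gp when applicable.
*/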
void core_alua_free_tg_pt_gp(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem, *tg_pt_gp_mem_tmp;
/*
* Once we have reached this point, config_item_put() has already
* been called from target_core_alua_drop_tg_pt_gp().
*
* Here we remove *tg_pt_gp from the global list so that
* no associations *OR* explicit ALUA via SET_TARGET_PORT_GROUPS
* can be made while we are releasing struct t10_alua_tg_pt_gp.
*/
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
list_del(&tg_pt_gp->tg_pt_gp_list);
T10_ALUA(su_dev)->alua_tg_pt_gps_counter--;
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
/*
* Allow a struct t10_alua_tg_pt_gp_member * referenced by
* core_alua_get_tg_pt_gp_by_name() in
* target_core_configfs.c:target_core_store_alua_tg_pt_gp()
* to be released with core_alua_put_tg_pt_gp_from_name().
*/
while (atomic_read(&tg_pt_gp->tg_pt_gp_ref_cnt))
cpu_relax();
/*
* Release reference to struct t10_alua_tg_pt_gp from all associated
* struct se_port.
*/
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry_safe(tg_pt_gp_mem, tg_pt_gp_mem_tmp,
&tg_pt_gp->tg_pt_gp_mem_list, tg_pt_gp_mem_list) {
if (tg_pt_gp_mem->tg_pt_gp_assoc) {
list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
tg_pt_gp->tg_pt_gp_members--;
tg_pt_gp_mem->tg_pt_gp_assoc = 0;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
/*
* tg_pt_gp_mem is associated with a single
* se_port->sep_alua_tg_pt_gp_mem, and is released via
* core_alua_free_tg_pt_gp_mem().
*
* If the passed tg_pt_gp does NOT match the default_tg_pt_gp,
* assume we want to re-associate a given tg_pt_gp_mem with
* default_tg_pt_gp.
*/
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
if (tg_pt_gp != T10_ALUA(su_dev)->default_tg_pt_gp) {
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
T10_ALUA(su_dev)->default_tg_pt_gp);
} else
tg_pt_gp_mem->tg_pt_gp = NULL;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
}
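/*
* Release the target port group member for a struct se_port, dropping any
* remaining target port group association before the member is freed.
*/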
void core_alua_free_tg_pt_gp_mem(struct se_port *port)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct t10_alua *alua = T10_ALUA(su_dev);
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
if (alua->alua_type != SPC3_ALUA_EMULATED)
return;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem))
return;
while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
cpu_relax();
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if ((tg_pt_gp)) {
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
if (tg_pt_gp_mem->tg_pt_gp_assoc) {
list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
tg_pt_gp->tg_pt_gp_members--;
tg_pt_gp_mem->tg_pt_gp_assoc = 0;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
tg_pt_gp_mem->tg_pt_gp = NULL;
}
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
kmem_cache_free(t10_alua_tg_pt_gp_mem_cache, tg_pt_gp_mem);
}
static struct t10_alua_tg_pt_gp *core_alua_get_tg_pt_gp_by_name(
struct se_subsystem_dev *su_dev,
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct config_item *ci;
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
list_for_each_entry(tg_pt_gp, &T10_ALUA(su_dev)->tg_pt_gps_list,
tg_pt_gp_list) {
if (!(tg_pt_gp->tg_pt_gp_valid_id))
continue;
ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
if (!(strcmp(config_item_name(ci), name))) {
atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
return tg_pt_gp;
}
}
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
return NULL;
}
static void core_alua_put_tg_pt_gp_from_name(
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
spin_lock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
spin_unlock(&T10_ALUA(su_dev)->tg_pt_gps_lock);
}
/*
* Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
*/
void __core_alua_attach_tg_pt_gp_mem(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
tg_pt_gp_mem->tg_pt_gp = tg_pt_gp;
tg_pt_gp_mem->tg_pt_gp_assoc = 1;
list_add_tail(&tg_pt_gp_mem->tg_pt_gp_mem_list,
&tg_pt_gp->tg_pt_gp_mem_list);
tg_pt_gp->tg_pt_gp_members++;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
/*
* Called with struct t10_alua_tg_pt_gp_member->tg_pt_gp_mem_lock held
*/
static void __core_alua_drop_tg_pt_gp_mem(
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem,
struct t10_alua_tg_pt_gp *tg_pt_gp)
{
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
tg_pt_gp_mem->tg_pt_gp = NULL;
tg_pt_gp_mem->tg_pt_gp_assoc = 0;
tg_pt_gp->tg_pt_gp_members--;
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
}
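/*
* configfs: show the current target port group membership and ALUA access
* state/status for a struct se_port.
*/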
ssize_t core_alua_show_tg_pt_gp_info(struct se_port *port, char *page)
{
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct config_item *tg_pt_ci;
struct t10_alua *alua = T10_ALUA(su_dev);
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0;
if (alua->alua_type != SPC3_ALUA_EMULATED)
return len;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem))
return len;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if ((tg_pt_gp)) {
tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
" %hu\nTG Port Primary Access State: %s\nTG Port "
"Primary Access Status: %s\nTG Port Secondary Access"
" State: %s\nTG Port Secondary Access Status: %s\n",
config_item_name(tg_pt_ci), tg_pt_gp->tg_pt_gp_id,
core_alua_dump_state(atomic_read(
&tg_pt_gp->tg_pt_gp_alua_access_state)),
core_alua_dump_status(
tg_pt_gp->tg_pt_gp_alua_access_status),
(atomic_read(&port->sep_tg_pt_secondary_offline)) ?
"Offline" : "None",
core_alua_dump_status(port->sep_tg_pt_secondary_stat));
}
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return len;
}
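/*
* configfs: move a struct se_port into the target port group named in the
* incoming buffer, or back to default_tg_pt_gp when "NULL" is written.
*/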
ssize_t core_alua_store_tg_pt_gp_info(
struct se_port *port,
const char *page,
size_t count)
{
struct se_portal_group *tpg;
struct se_lun *lun;
struct se_subsystem_dev *su_dev = port->sep_lun->lun_se_dev->se_sub_dev;
struct t10_alua_tg_pt_gp *tg_pt_gp = NULL, *tg_pt_gp_new = NULL;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
int move = 0;
tpg = port->sep_tpg;
lun = port->sep_lun;
if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
" %s/tpgt_%hu/%s\n", TPG_TFO(tpg)->tpg_get_wwn(tpg),
TPG_TFO(tpg)->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
return -EINVAL;
}
if (count > TG_PT_GROUP_NAME_BUF) {
printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
return -EINVAL;
}
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
memcpy(buf, page, count);
/*
* Any ALUA target port group alias besides "NULL" means we will be
* making a new group association.
*/
if (strcmp(strstrip(buf), "NULL")) {
/*
* core_alua_get_tg_pt_gp_by_name() will increment reference to
* struct t10_alua_tg_pt_gp. This reference is released with
* core_alua_put_tg_pt_gp_from_name() below.
*/
tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
strstrip(buf));
if (!(tg_pt_gp_new))
return -ENODEV;
}
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem)) {
if (tg_pt_gp_new)
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
return -EINVAL;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if ((tg_pt_gp)) {
/*
* Clearing an existing tg_pt_gp association, and replacing
* with the default_tg_pt_gp.
*/
if (!(tg_pt_gp_new)) {
printk(KERN_INFO "Target_Core_ConfigFS: Moving"
" %s/tpgt_%hu/%s from ALUA Target Port Group:"
" alua/%s, ID: %hu back to"
" default_tg_pt_gp\n",
TPG_TFO(tpg)->tpg_get_wwn(tpg),
TPG_TFO(tpg)->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(
&tg_pt_gp->tg_pt_gp_group.cg_item),
tg_pt_gp->tg_pt_gp_id);
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
T10_ALUA(su_dev)->default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
return count;
}
/*
* Removing existing association of tg_pt_gp_mem with tg_pt_gp
*/
__core_alua_drop_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp);
move = 1;
}
/*
* Associate tg_pt_gp_mem with tg_pt_gp_new.
*/
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
" Target Port Group: alua/%s, ID: %hu\n", (move) ?
"Moving" : "Adding", TPG_TFO(tpg)->tpg_get_wwn(tpg),
TPG_TFO(tpg)->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item),
config_item_name(&tg_pt_gp_new->tg_pt_gp_group.cg_item),
tg_pt_gp_new->tg_pt_gp_id);
core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
return count;
}
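/*
* The remaining show/store helpers back the per target port group and per
* struct se_lun ALUA attributes exposed through configfs.
*/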
ssize_t core_alua_show_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
if ((tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA) &&
(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA))
return sprintf(page, "Implict and Explict\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)
return sprintf(page, "Implict\n");
else if (tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA)
return sprintf(page, "Explict\n");
else
return sprintf(page, "None\n");
}
ssize_t core_alua_store_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_access_type\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
printk(KERN_ERR "Illegal value for alua_access_type:"
" %lu\n", tmp);
return -EINVAL;
}
if (tmp == 3)
tg_pt_gp->tg_pt_gp_alua_access_type =
TPGS_IMPLICT_ALUA | TPGS_EXPLICT_ALUA;
else if (tmp == 2)
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_EXPLICT_ALUA;
else if (tmp == 1)
tg_pt_gp->tg_pt_gp_alua_access_type = TPGS_IMPLICT_ALUA;
else
tg_pt_gp->tg_pt_gp_alua_access_type = 0;
return count;
}
ssize_t core_alua_show_nonop_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_nonop_delay_msecs);
}
ssize_t core_alua_store_nonop_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_NONOP_DELAY_MSECS);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_nonop_delay_msecs = (int)tmp;
return count;
}
ssize_t core_alua_show_trans_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_trans_delay_msecs);
}
ssize_t core_alua_store_trans_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
return -EINVAL;
}
if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
ALUA_MAX_TRANS_DELAY_MSECS);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_trans_delay_msecs = (int)tmp;
return count;
}
ssize_t core_alua_show_preferred_bit(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_pref);
}
ssize_t core_alua_store_preferred_bit(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract preferred ALUA value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_pref = (int)tmp;
return count;
}
ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
{
if (!(lun->lun_sep))
return -ENODEV;
return sprintf(page, "%d\n",
atomic_read(&lun->lun_sep->sep_tg_pt_secondary_offline));
}
ssize_t core_alua_store_offline_bit(
struct se_lun *lun,
const char *page,
size_t count)
{
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned long tmp;
int ret;
if (!(lun->lun_sep))
return -ENODEV;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
tmp);
return -EINVAL;
}
tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
if (!(tg_pt_gp_mem)) {
printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
return -EINVAL;
}
ret = core_alua_set_tg_pt_secondary_state(tg_pt_gp_mem,
lun->lun_sep, 0, (int)tmp);
if (ret < 0)
return -EINVAL;
return count;
}
ssize_t core_alua_show_secondary_status(
struct se_lun *lun,
char *page)
{
return sprintf(page, "%d\n", lun->lun_sep->sep_tg_pt_secondary_stat);
}
ssize_t core_alua_store_secondary_status(
struct se_lun *lun,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
return -EINVAL;
}
if ((tmp != ALUA_STATUS_NONE) &&
(tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
tmp);
return -EINVAL;
}
lun->lun_sep->sep_tg_pt_secondary_stat = (int)tmp;
return count;
}
ssize_t core_alua_show_secondary_write_metadata(
struct se_lun *lun,
char *page)
{
return sprintf(page, "%d\n",
lun->lun_sep->sep_tg_pt_secondary_write_md);
}
ssize_t core_alua_store_secondary_write_metadata(
struct se_lun *lun,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
" %lu\n", tmp);
return -EINVAL;
}
lun->lun_sep->sep_tg_pt_secondary_write_md = (int)tmp;
return count;
}
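/*
* Called during struct se_device setup to select the ALUA operating mode
* (SPC_ALUA_PASSTHROUGH, SPC2_ALUA_DISABLED or SPC3_ALUA_EMULATED) and to
* wire up the matching alua_state_check() handler.
*/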
int core_setup_alua(struct se_device *dev, int force_pt)
{
struct se_subsystem_dev *su_dev = dev->se_sub_dev;
struct t10_alua *alua = T10_ALUA(su_dev);
struct t10_alua_lu_gp_member *lu_gp_mem;
/*
* If this device is from Target_Core_Mod/pSCSI, use the ALUA logic
* of the Underlying SCSI hardware. In Linux/SCSI terms, this can
* cause a problem because libata and some SATA RAID HBAs appear
* under Linux/SCSI, but emulate SCSI logic themselves.
*/
if (((TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) &&
!(DEV_ATTRIB(dev)->emulate_alua)) || force_pt) {
alua->alua_type = SPC_ALUA_PASSTHROUGH;
alua->alua_state_check = &core_alua_state_check_nop;
printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
" emulation\n", TRANSPORT(dev)->name);
return 0;
}
/*
* If SPC-3 or above is reported by real or emulated struct se_device,
* use emulated ALUA.
*/
if (TRANSPORT(dev)->get_device_rev(dev) >= SCSI_3) {
printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
" device\n", TRANSPORT(dev)->name);
/*
* Associate this struct se_device with the default ALUA
* LUN Group.
*/
lu_gp_mem = core_alua_allocate_lu_gp_mem(dev);
if (IS_ERR(lu_gp_mem) || !lu_gp_mem)
return -1;
alua->alua_type = SPC3_ALUA_EMULATED;
alua->alua_state_check = &core_alua_state_check;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
__core_alua_attach_lu_gp_mem(lu_gp_mem,
se_global->default_lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
" core/alua/lu_gps/default_lu_gp\n",
TRANSPORT(dev)->name);
} else {
alua->alua_type = SPC2_ALUA_DISABLED;
alua->alua_state_check = &core_alua_state_check_nop;
printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
" device\n", TRANSPORT(dev)->name);
}
return 0;
}
#ifndef TARGET_CORE_ALUA_H
#define TARGET_CORE_ALUA_H
/*
* INQUIRY response data, TPGS Field
*
* from spc4r17 section 6.4.2 Table 135
*/
#define TPGS_NO_ALUA 0x00
#define TPGS_IMPLICT_ALUA 0x10
#define TPGS_EXPLICT_ALUA 0x20
/*
* ASYMMETRIC ACCESS STATE field
*
* from spc4r17 section 6.27 Table 245
*/
#define ALUA_ACCESS_STATE_ACTIVE_OPTMIZED 0x0
#define ALUA_ACCESS_STATE_ACTIVE_NON_OPTIMIZED 0x1
#define ALUA_ACCESS_STATE_STANDBY 0x2
#define ALUA_ACCESS_STATE_UNAVAILABLE 0x3
#define ALUA_ACCESS_STATE_OFFLINE 0xe
#define ALUA_ACCESS_STATE_TRANSITION 0xf
/*
* REPORT_TARGET_PORT_GROUP STATUS CODE
*
* from spc4r17 section 6.27 Table 246
*/
#define ALUA_STATUS_NONE 0x00
#define ALUA_STATUS_ALTERED_BY_EXPLICT_STPG 0x01
#define ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA 0x02
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
*/
#define ASCQ_04H_ALUA_STATE_TRANSITION 0x0a
#define ASCQ_04H_ALUA_TG_PT_STANDBY 0x0b
#define ASCQ_04H_ALUA_TG_PT_UNAVAILABLE 0x0c
#define ASCQ_04H_ALUA_OFFLINE 0x12
/*
* Used as the default for Active/NonOptimized delay (in milliseconds)
* This can also be changed via configfs on a per target port group basis.
*/
#define ALUA_DEFAULT_NONOP_DELAY_MSECS 100
#define ALUA_MAX_NONOP_DELAY_MSECS 10000 /* 10 seconds */
/*
* Used for the implicit and explicit ALUA transition delay, which is disabled
* by default and is intended for debugging client side ALUA code.
*/
#define ALUA_DEFAULT_TRANS_DELAY_MSECS 0
#define ALUA_MAX_TRANS_DELAY_MSECS 30000 /* 30 seconds */
/*
* Used by core_alua_update_tpg_primary_metadata() and
* core_alua_update_tpg_secondary_metadata()
*/
#define ALUA_METADATA_PATH_LEN 512
/*
* Used by core_alua_update_tpg_secondary_metadata()
*/
#define ALUA_SECONDARY_METADATA_WWN_LEN 256
extern struct kmem_cache *t10_alua_lu_gp_cache;
extern struct kmem_cache *t10_alua_lu_gp_mem_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_cache;
extern struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
extern int core_emulate_report_target_port_groups(struct se_cmd *);
extern int core_emulate_set_target_port_groups(struct se_cmd *);
extern int core_alua_check_nonop_delay(struct se_cmd *);
extern int core_alua_do_port_transition(struct t10_alua_tg_pt_gp *,
struct se_device *, struct se_port *,
struct se_node_acl *, int, int);
extern char *core_alua_dump_status(int);
extern struct t10_alua_lu_gp *core_alua_allocate_lu_gp(const char *, int);
extern int core_alua_set_lu_gp_id(struct t10_alua_lu_gp *, u16);
extern void core_alua_free_lu_gp(struct t10_alua_lu_gp *);
extern void core_alua_free_lu_gp_mem(struct se_device *);
extern struct t10_alua_lu_gp *core_alua_get_lu_gp_by_name(const char *);
extern void core_alua_put_lu_gp_from_name(struct t10_alua_lu_gp *);
extern void __core_alua_attach_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void __core_alua_drop_lu_gp_mem(struct t10_alua_lu_gp_member *,
struct t10_alua_lu_gp *);
extern void core_alua_drop_lu_gp_dev(struct se_device *);
extern struct t10_alua_tg_pt_gp *core_alua_allocate_tg_pt_gp(
struct se_subsystem_dev *, const char *, int);
extern int core_alua_set_tg_pt_gp_id(struct t10_alua_tg_pt_gp *, u16);
extern struct t10_alua_tg_pt_gp_member *core_alua_allocate_tg_pt_gp_mem(
struct se_port *);
extern void core_alua_free_tg_pt_gp(struct t10_alua_tg_pt_gp *);
extern void core_alua_free_tg_pt_gp_mem(struct se_port *);
extern void __core_alua_attach_tg_pt_gp_mem(struct t10_alua_tg_pt_gp_member *,
struct t10_alua_tg_pt_gp *);
extern ssize_t core_alua_show_tg_pt_gp_info(struct se_port *, char *);
extern ssize_t core_alua_store_tg_pt_gp_info(struct se_port *, const char *,
size_t);
extern ssize_t core_alua_show_access_type(struct t10_alua_tg_pt_gp *, char *);
extern ssize_t core_alua_store_access_type(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_nonop_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_trans_delay_msecs(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_preferred_bit(struct t10_alua_tg_pt_gp *,
char *);
extern ssize_t core_alua_store_preferred_bit(struct t10_alua_tg_pt_gp *,
const char *, size_t);
extern ssize_t core_alua_show_offline_bit(struct se_lun *, char *);
extern ssize_t core_alua_store_offline_bit(struct se_lun *, const char *,
size_t);
extern ssize_t core_alua_show_secondary_status(struct se_lun *, char *);
extern ssize_t core_alua_store_secondary_status(struct se_lun *,
const char *, size_t);
extern ssize_t core_alua_show_secondary_write_metadata(struct se_lun *,
char *);
extern ssize_t core_alua_store_secondary_write_metadata(struct se_lun *,
const char *, size_t);
extern int core_setup_alua(struct se_device *, int);
#endif /* TARGET_CORE_ALUA_H */
/*
* CDB emulation for non-READ/WRITE commands.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_ua.h"
static void
target_fill_alua_data(struct se_port *port, unsigned char *buf)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
/*
* Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
*/
buf[5] = 0x80;
/*
* Set TPGS field for explicit and/or implicit ALUA access type
* and operation.
*
* See spc4r17 section 6.4.2 Table 135
*/
if (!port)
return;
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
return;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (tg_pt_gp)
buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}
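/*
* Emulate the standard INQUIRY (EVPD=0) response payload, including the
* SCCS and TPGS bits when SPC3_ALUA_EMULATED is active.
*/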
static int
target_emulate_inquiry_std(struct se_cmd *cmd)
{
struct se_lun *lun = SE_LUN(cmd);
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf;
/*
* Make sure we at least have 6 bytes of INQUIRY response
* payload going back for EVPD=0
*/
if (cmd->data_length < 6) {
printk(KERN_ERR "SCSI Inquiry payload length: %u"
" too small for EVPD=0\n", cmd->data_length);
return -1;
}
buf[0] = dev->transport->get_device_type(dev);
if (buf[0] == TYPE_TAPE)
buf[1] = 0x80;
buf[2] = dev->transport->get_device_rev(dev);
/*
* Enable SCCS and TPGS fields for Emulated ALUA
*/
if (T10_ALUA(dev->se_sub_dev)->alua_type == SPC3_ALUA_EMULATED)
target_fill_alua_data(lun->lun_sep, buf);
if (cmd->data_length < 8) {
buf[4] = 1; /* Set additional length to 1 */
return 0;
}
buf[7] = 0x32; /* Sync=1 and CmdQue=1 */
/*
* Do not include vendor, product, revision info in INQUIRY
* response payload for cdbs with a small allocation length.
*/
if (cmd->data_length < 36) {
buf[4] = 3; /* Set additional length to 3 */
return 0;
}
snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
snprintf((unsigned char *)&buf[16], 16, "%s",
&DEV_T10_WWN(dev)->model[0]);
snprintf((unsigned char *)&buf[32], 4, "%s",
&DEV_T10_WWN(dev)->revision[0]);
buf[4] = 31; /* Set additional length to 31 */
return 0;
}
/* supported vital product data pages */
static int
target_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
buf[1] = 0x00;
if (cmd->data_length < 8)
return 0;
buf[4] = 0x0;
/*
* Only report the INQUIRY EVPD=1 pages after a valid NAA
* Registered Extended LUN WWN has been set via ConfigFS
* during device creation/restart.
*/
if (SE_DEV(cmd)->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
buf[3] = 3;
buf[5] = 0x80;
buf[6] = 0x83;
buf[7] = 0x86;
}
return 0;
}
/* unit serial number */
static int
target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
u16 len = 0;
buf[1] = 0x80;
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
u32 unit_serial_len;
unit_serial_len =
strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if (((len + 4) + unit_serial_len) > cmd->data_length) {
len += unit_serial_len;
buf[2] = ((len >> 8) & 0xff);
buf[3] = (len & 0xff);
return 0;
}
len += sprintf((unsigned char *)&buf[4], "%s",
&DEV_T10_WWN(dev)->unit_serial[0]);
len++; /* Extra Byte for NULL Terminator */
buf[3] = len;
}
return 0;
}
/*
* Device identification VPD, for a complete list of
* DESIGNATOR TYPEs see spc4r17 Table 459.
*/
static int
target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
struct se_lun *lun = SE_LUN(cmd);
struct se_port *port = NULL;
struct se_portal_group *tpg = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
unsigned char binary, binary_new;
unsigned char *prod = &DEV_T10_WWN(dev)->model[0];
u32 prod_len;
u32 unit_serial_len, off = 0;
int i;
u16 len = 0, id_len;
buf[1] = 0x83;
off = 4;
/*
* NAA IEEE Registered Extended Assigned designator format, see
* spc4r17 section 7.7.3.6.5
*
* We depend upon a target_core_mod/ConfigFS provided
* /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
* value in order to return the NAA id.
*/
if (!(dev->se_sub_dev->su_dev_flags & SDF_EMULATED_VPD_UNIT_SERIAL))
goto check_t10_vend_desc;
if (off + 20 > cmd->data_length)
goto check_t10_vend_desc;
/* CODE SET == Binary */
buf[off++] = 0x1;
/* Set ASSOCIATION == addressed logical unit: 00b */
buf[off] = 0x00;
/* Identifier/Designator type == NAA identifier */
buf[off++] = 0x3;
off++;
/* Identifier/Designator length */
buf[off++] = 0x10;
/*
* Start NAA IEEE Registered Extended Identifier/Designator
*/
buf[off++] = (0x6 << 4);
/*
* Use OpenFabrics IEEE Company ID: 00 14 05
*/
buf[off++] = 0x01;
buf[off++] = 0x40;
buf[off] = (0x5 << 4);
/*
* Return ConfigFS Unit Serial Number information for
* VENDOR_SPECIFIC_IDENTIFIER and
 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
*/
binary = transport_asciihex_to_binaryhex(
&DEV_T10_WWN(dev)->unit_serial[0]);
buf[off++] |= (binary & 0xf0) >> 4;
for (i = 0; i < 24; i += 2) {
binary_new = transport_asciihex_to_binaryhex(
&DEV_T10_WWN(dev)->unit_serial[i+2]);
buf[off] = (binary & 0x0f) << 4;
buf[off++] |= (binary_new & 0xf0) >> 4;
binary = binary_new;
}
len = 20;
off = (len + 4);
check_t10_vend_desc:
/*
* T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
*/
id_len = 8; /* For Vendor field */
prod_len = 4; /* For VPD Header */
prod_len += 8; /* For Vendor field */
prod_len += strlen(prod);
prod_len++; /* For : */
if (dev->se_sub_dev->su_dev_flags &
SDF_EMULATED_VPD_UNIT_SERIAL) {
unit_serial_len =
strlen(&DEV_T10_WWN(dev)->unit_serial[0]);
unit_serial_len++; /* For NULL Terminator */
if ((len + (id_len + 4) +
(prod_len + unit_serial_len)) >
cmd->data_length) {
len += (prod_len + unit_serial_len);
goto check_port;
}
id_len += sprintf((unsigned char *)&buf[off+12],
"%s:%s", prod,
&DEV_T10_WWN(dev)->unit_serial[0]);
}
buf[off] = 0x2; /* ASCII */
buf[off+1] = 0x1; /* T10 Vendor ID */
buf[off+2] = 0x0;
memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
/* Extra Byte for NULL Terminator */
id_len++;
/* Identifier Length */
buf[off+3] = id_len;
/* Header size for Designation descriptor */
len += (id_len + 4);
off += (id_len + 4);
/*
* struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
*/
check_port:
port = lun->lun_sep;
if (port) {
struct t10_alua_lu_gp *lu_gp;
u32 padding, scsi_name_len;
u16 lu_gp_id = 0;
u16 tg_pt_gp_id = 0;
u16 tpgt;
tpg = port->sep_tpg;
/*
 * Relative target port identifier, see spc4r17
* section 7.7.3.7
*
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
if (((len + 4) + 8) > cmd->data_length) {
len += 8;
goto check_tpgi;
}
buf[off] =
(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Relative target port identifier */
buf[off++] |= 0x4;
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
/* Skip over Obsolete field in RTPI payload
* in Table 472 */
off += 2;
buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
buf[off++] = (port->sep_rtpi & 0xff);
len += 8; /* Header size + Designation descriptor */
/*
* Target port group identifier, see spc4r17
* section 7.7.3.8
*
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
check_tpgi:
if (T10_ALUA(dev->se_sub_dev)->alua_type !=
SPC3_ALUA_EMULATED)
goto check_scsi_name;
if (((len + 4) + 8) > cmd->data_length) {
len += 8;
goto check_lu_gp;
}
tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
if (!tg_pt_gp_mem)
goto check_lu_gp;
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
if (!(tg_pt_gp)) {
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
goto check_lu_gp;
}
tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
buf[off] =
(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x1; /* CODE SET == Binary */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == Target port group identifier */
buf[off++] |= 0x5;
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
buf[off++] = (tg_pt_gp_id & 0xff);
len += 8; /* Header size + Designation descriptor */
/*
* Logical Unit Group identifier, see spc4r17
* section 7.7.3.8
*/
check_lu_gp:
if (((len + 4) + 8) > cmd->data_length) {
len += 8;
goto check_scsi_name;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!(lu_gp_mem))
goto check_scsi_name;
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if (!(lu_gp)) {
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
goto check_scsi_name;
}
lu_gp_id = lu_gp->lu_gp_id;
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
buf[off++] |= 0x1; /* CODE SET == Binary */
/* DESIGNATOR TYPE == Logical Unit Group identifier */
buf[off++] |= 0x6;
off++; /* Skip over Reserved */
buf[off++] = 4; /* DESIGNATOR LENGTH */
off += 2; /* Skip over Reserved Field */
buf[off++] = ((lu_gp_id >> 8) & 0xff);
buf[off++] = (lu_gp_id & 0xff);
len += 8; /* Header size + Designation descriptor */
/*
* SCSI name string designator, see spc4r17
* section 7.7.3.11
*
* Get the PROTOCOL IDENTIFIER as defined by spc4r17
* section 7.5.1 Table 362
*/
check_scsi_name:
scsi_name_len = strlen(TPG_TFO(tpg)->tpg_get_wwn(tpg));
/* UTF-8 ",t,0x<16-bit TPGT>" + NULL Terminator */
scsi_name_len += 10;
/* Check for 4-byte padding */
padding = ((-scsi_name_len) & 3);
if (padding != 0)
scsi_name_len += padding;
/* Header size + Designation descriptor */
scsi_name_len += 4;
if (((len + 4) + scsi_name_len) > cmd->data_length) {
len += scsi_name_len;
goto set_len;
}
buf[off] =
(TPG_TFO(tpg)->get_fabric_proto_ident(tpg) << 4);
buf[off++] |= 0x3; /* CODE SET == UTF-8 */
buf[off] = 0x80; /* Set PIV=1 */
/* Set ASSOCIATION == target port: 01b */
buf[off] |= 0x10;
/* DESIGNATOR TYPE == SCSI name string */
buf[off++] |= 0x8;
off += 2; /* Skip over Reserved and length */
/*
 * SCSI name string identifier containing $FABRIC_MOD
 * dependent information. For LIO-Target and iSCSI
 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
 * UTF-8 encoding.
*/
tpgt = TPG_TFO(tpg)->tpg_get_tag(tpg);
scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
TPG_TFO(tpg)->tpg_get_wwn(tpg), tpgt);
scsi_name_len += 1 /* Include NULL terminator */;
/*
* The null-terminated, null-padded (see 4.4.2) SCSI
* NAME STRING field contains a UTF-8 format string.
* The number of bytes in the SCSI NAME STRING field
* (i.e., the value in the DESIGNATOR LENGTH field)
* shall be no larger than 256 and shall be a multiple
* of four.
*/
if (padding)
scsi_name_len += padding;
buf[off-1] = scsi_name_len;
off += scsi_name_len;
/* Header size + Designation descriptor */
len += (scsi_name_len + 4);
}
set_len:
buf[2] = ((len >> 8) & 0xff);
buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
return 0;
}
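/*
 * Illustrative sketch, not part of this driver: the EVPD 0x83 code above
 * builds the 16-byte NAA IEEE Registered Extended designator by packing
 * ASCII hex digits of the ConfigFS vpd_unit_serial two per byte (offset by
 * one nibble to make room for the leading 6h/company-ID bytes).  A minimal,
 * standalone version of that hex-to-nibble packing might look like the
 * following; hex_to_nibble() and pack_hex_serial() are hypothetical names,
 * the driver itself uses transport_asciihex_to_binaryhex().
 */
#if 0	/* example only */
static unsigned char hex_to_nibble(unsigned char c)
{
	if (c >= '0' && c <= '9')
		return c - '0';
	if (c >= 'a' && c <= 'f')
		return c - 'a' + 10;
	if (c >= 'A' && c <= 'F')
		return c - 'A' + 10;
	return 0;
}

/* Pack 2 * n ASCII hex digits from ascii[] into n binary bytes in out[] */
static void pack_hex_serial(const unsigned char *ascii, unsigned char *out,
			    int n)
{
	int i;

	for (i = 0; i < n; i++)
		out[i] = (hex_to_nibble(ascii[2 * i]) << 4) |
			  hex_to_nibble(ascii[2 * i + 1]);
}
#endif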
/* Extended INQUIRY Data VPD Page */
static int
target_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
if (cmd->data_length < 60)
return 0;
buf[1] = 0x86;
buf[2] = 0x3c;
/* Set HEADSUP, ORDSUP, SIMPSUP */
buf[5] = 0x07;
/* If WriteCache emulation is enabled, set V_SUP */
if (DEV_ATTRIB(SE_DEV(cmd))->emulate_write_cache > 0)
buf[6] = 0x01;
return 0;
}
/* Block Limits VPD page */
static int
target_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
int have_tp = 0;
/*
* Following sbc3r22 section 6.5.3 Block Limits VPD page, when
 * emulate_tpu=1 or emulate_tpws=1 we expect a
* different page length for Thin Provisioning.
*/
if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
have_tp = 1;
if (cmd->data_length < (0x10 + 4)) {
printk(KERN_INFO "Received data_length: %u"
" too small for EVPD 0xb0\n",
cmd->data_length);
return -1;
}
if (have_tp && cmd->data_length < (0x3c + 4)) {
printk(KERN_INFO "Received data_length: %u"
" too small for TPE=1 EVPD 0xb0\n",
cmd->data_length);
have_tp = 0;
}
buf[0] = dev->transport->get_device_type(dev);
buf[1] = 0xb0;
buf[3] = have_tp ? 0x3c : 0x10;
/*
* Set OPTIMAL TRANSFER LENGTH GRANULARITY
*/
put_unaligned_be16(1, &buf[6]);
/*
* Set MAXIMUM TRANSFER LENGTH
*/
put_unaligned_be32(DEV_ATTRIB(dev)->max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
*/
put_unaligned_be32(DEV_ATTRIB(dev)->optimal_sectors, &buf[12]);
/*
* Exit now if we don't support TP or the initiator sent a too
* short buffer.
*/
if (!have_tp || cmd->data_length < (0x3c + 4))
return 0;
/*
* Set MAXIMUM UNMAP LBA COUNT
*/
put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_lba_count, &buf[20]);
/*
* Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
*/
put_unaligned_be32(DEV_ATTRIB(dev)->max_unmap_block_desc_count,
&buf[24]);
/*
* Set OPTIMAL UNMAP GRANULARITY
*/
put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity, &buf[28]);
/*
* UNMAP GRANULARITY ALIGNMENT
*/
put_unaligned_be32(DEV_ATTRIB(dev)->unmap_granularity_alignment,
&buf[32]);
if (DEV_ATTRIB(dev)->unmap_granularity_alignment != 0)
buf[32] |= 0x80; /* Set the UGAVALID bit */
return 0;
}
/* Thin Provisioning VPD */
static int
target_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = SE_DEV(cmd);
/*
* From sbc3r22 section 6.5.4 Thin Provisioning VPD page:
*
* The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
* zero, then the page length shall be set to 0004h. If the DP bit
* is set to one, then the page length shall be set to the value
* defined in table 162.
*/
buf[0] = dev->transport->get_device_type(dev);
buf[1] = 0xb2;
/*
* Set Hardcoded length mentioned above for DP=0
*/
put_unaligned_be16(0x0004, &buf[2]);
/*
* The THRESHOLD EXPONENT field indicates the threshold set size in
* LBAs as a power of 2 (i.e., the threshold set size is equal to
 * 2^(threshold exponent)).
*
* Note that this is currently set to 0x00 as mkp says it will be
* changing again. We can enable this once it has settled in T10
* and is actually used by Linux/SCSI ML code.
*/
buf[4] = 0x00;
/*
* A TPU bit set to one indicates that the device server supports
* the UNMAP command (see 5.25). A TPU bit set to zero indicates
* that the device server does not support the UNMAP command.
*/
if (DEV_ATTRIB(dev)->emulate_tpu != 0)
buf[5] = 0x80;
/*
* A TPWS bit set to one indicates that the device server supports
* the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
* A TPWS bit set to zero indicates that the device server does not
* support the use of the WRITE SAME (16) command to unmap LBAs.
*/
if (DEV_ATTRIB(dev)->emulate_tpws != 0)
buf[5] |= 0x40;
return 0;
}
static int
target_emulate_inquiry(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf;
unsigned char *cdb = cmd->t_task->t_task_cdb;
if (!(cdb[1] & 0x1))
return target_emulate_inquiry_std(cmd);
/*
 * Make sure we at least have 4 bytes of INQUIRY response
 * payload for page 0x00 going back for EVPD=1. Note that 0x80
 * and 0x83 check for enough payload data length and jump to
 * the set_len: label when there is not enough INQUIRY EVPD
 * payload length left for the next outgoing EVPD descriptor.
*/
if (cmd->data_length < 4) {
printk(KERN_ERR "SCSI Inquiry payload length: %u"
" too small for EVPD=1\n", cmd->data_length);
return -1;
}
buf[0] = dev->transport->get_device_type(dev);
switch (cdb[2]) {
case 0x00:
return target_emulate_evpd_00(cmd, buf);
case 0x80:
return target_emulate_evpd_80(cmd, buf);
case 0x83:
return target_emulate_evpd_83(cmd, buf);
case 0x86:
return target_emulate_evpd_86(cmd, buf);
case 0xb0:
return target_emulate_evpd_b0(cmd, buf);
case 0xb2:
return target_emulate_evpd_b2(cmd, buf);
default:
printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
return -1;
}
return 0;
}
static int
target_emulate_readcapacity(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf;
u32 blocks = dev->transport->get_blocks(dev);
buf[0] = (blocks >> 24) & 0xff;
buf[1] = (blocks >> 16) & 0xff;
buf[2] = (blocks >> 8) & 0xff;
buf[3] = blocks & 0xff;
buf[4] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
buf[5] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
buf[6] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
buf[7] = DEV_ATTRIB(dev)->block_size & 0xff;
/*
* Set max 32-bit blocks to signal SERVICE ACTION READ_CAPACITY_16
*/
if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
put_unaligned_be32(0xFFFFFFFF, &buf[0]);
return 0;
}
static int
target_emulate_readcapacity_16(struct se_cmd *cmd)
{
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf;
unsigned long long blocks = dev->transport->get_blocks(dev);
buf[0] = (blocks >> 56) & 0xff;
buf[1] = (blocks >> 48) & 0xff;
buf[2] = (blocks >> 40) & 0xff;
buf[3] = (blocks >> 32) & 0xff;
buf[4] = (blocks >> 24) & 0xff;
buf[5] = (blocks >> 16) & 0xff;
buf[6] = (blocks >> 8) & 0xff;
buf[7] = blocks & 0xff;
buf[8] = (DEV_ATTRIB(dev)->block_size >> 24) & 0xff;
buf[9] = (DEV_ATTRIB(dev)->block_size >> 16) & 0xff;
buf[10] = (DEV_ATTRIB(dev)->block_size >> 8) & 0xff;
buf[11] = DEV_ATTRIB(dev)->block_size & 0xff;
/*
* Set Thin Provisioning Enable bit following sbc3r22 in section
* READ CAPACITY (16) byte 14 if emulate_tpu or emulate_tpws is enabled.
*/
if (DEV_ATTRIB(dev)->emulate_tpu || DEV_ATTRIB(dev)->emulate_tpws)
buf[14] = 0x80;
return 0;
}
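/*
 * Illustrative sketch, not part of this driver: how a consumer of the
 * READ CAPACITY (16) parameter data built above would pull the fields back
 * out.  get_unaligned_be64()/get_unaligned_be32() are the standard kernel
 * unaligned helpers; the function and parameter names are local to this
 * example.
 */
#if 0	/* example only */
static void example_parse_readcap16(const unsigned char *buf,
				    unsigned long long *blocks,
				    unsigned int *block_size,
				    int *thin_provisioned)
{
	/* Bytes 0-7: the 64-bit block value written above (big-endian) */
	*blocks = get_unaligned_be64(&buf[0]);
	/* Bytes 8-11: LOGICAL BLOCK LENGTH IN BYTES (big-endian) */
	*block_size = get_unaligned_be32(&buf[8]);
	/* Byte 14, bit 7: TPE, set when emulate_tpu or emulate_tpws is on */
	*thin_provisioned = !!(buf[14] & 0x80);
}
#endif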
static int
target_modesense_rwrecovery(unsigned char *p)
{
p[0] = 0x01;
p[1] = 0x0a;
return 12;
}
static int
target_modesense_control(struct se_device *dev, unsigned char *p)
{
p[0] = 0x0a;
p[1] = 0x0a;
p[2] = 2;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
* Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b
*
* 00b: The logical unit shall clear any unit attention condition
* reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
 * status and shall not establish a unit attention condition when a
 * command is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT
* status.
*
* 10b: The logical unit shall not clear any unit attention condition
* reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
* status and shall not establish a unit attention condition when
* a command is completed with BUSY, TASK SET FULL, or RESERVATION
* CONFLICT status.
*
 * 11b: The logical unit shall not clear any unit attention condition
* reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION
* status and shall establish a unit attention condition for the
* initiator port associated with the I_T nexus on which the BUSY,
* TASK SET FULL, or RESERVATION CONFLICT status is being returned.
* Depending on the status, the additional sense code shall be set to
* PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS
* RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE
* command, a unit attention condition shall be established only once
* for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless
 * of the number of commands completed with one of those status codes.
*/
p[4] = (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 2) ? 0x30 :
(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00;
/*
* From spc4r17, section 7.4.6 Control mode Page
*
* Task Aborted Status (TAS) bit set to zero.
*
* A task aborted status (TAS) bit set to zero specifies that aborted
* tasks shall be terminated by the device server without any response
* to the application client. A TAS bit set to one specifies that tasks
* aborted by the actions of an I_T nexus other than the I_T nexus on
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
p[5] = (DEV_ATTRIB(dev)->emulate_tas) ? 0x40 : 0x00;
p[8] = 0xff;
p[9] = 0xff;
p[11] = 30;
return 12;
}
static int
target_modesense_caching(struct se_device *dev, unsigned char *p)
{
p[0] = 0x08;
p[1] = 0x12;
if (DEV_ATTRIB(dev)->emulate_write_cache > 0)
p[2] = 0x04; /* Write Cache Enable */
p[12] = 0x20; /* Disabled Read Ahead */
return 20;
}
static void
target_modesense_write_protect(unsigned char *buf, int type)
{
/*
* I believe that the WP bit (bit 7) in the mode header is the same for
 * all device types.
*/
switch (type) {
case TYPE_DISK:
case TYPE_TAPE:
default:
buf[0] |= 0x80; /* WP bit */
break;
}
}
static void
target_modesense_dpofua(unsigned char *buf, int type)
{
switch (type) {
case TYPE_DISK:
buf[0] |= 0x10; /* DPOFUA bit */
break;
default:
break;
}
}
static int
target_emulate_modesense(struct se_cmd *cmd, int ten)
{
struct se_device *dev = SE_DEV(cmd);
char *cdb = cmd->t_task->t_task_cdb;
unsigned char *rbuf = cmd->t_task->t_task_buf;
int type = dev->transport->get_device_type(dev);
int offset = (ten) ? 8 : 4;
int length = 0;
unsigned char buf[SE_MODE_PAGE_BUF];
memset(buf, 0, SE_MODE_PAGE_BUF);
switch (cdb[2] & 0x3f) {
case 0x01:
length = target_modesense_rwrecovery(&buf[offset]);
break;
case 0x08:
length = target_modesense_caching(dev, &buf[offset]);
break;
case 0x0a:
length = target_modesense_control(dev, &buf[offset]);
break;
case 0x3f:
length = target_modesense_rwrecovery(&buf[offset]);
length += target_modesense_caching(dev, &buf[offset+length]);
length += target_modesense_control(dev, &buf[offset+length]);
break;
default:
printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
cdb[2] & 0x3f);
return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
}
offset += length;
if (ten) {
offset -= 2;
buf[0] = (offset >> 8) & 0xff;
buf[1] = offset & 0xff;
if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[3], type);
if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
(DEV_ATTRIB(dev)->emulate_fua_write > 0))
target_modesense_dpofua(&buf[3], type);
if ((offset + 2) > cmd->data_length)
offset = cmd->data_length;
} else {
offset -= 1;
buf[0] = offset & 0xff;
if ((SE_LUN(cmd)->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
(cmd->se_deve &&
(cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
target_modesense_write_protect(&buf[2], type);
if ((DEV_ATTRIB(dev)->emulate_write_cache > 0) &&
(DEV_ATTRIB(dev)->emulate_fua_write > 0))
target_modesense_dpofua(&buf[2], type);
if ((offset + 1) > cmd->data_length)
offset = cmd->data_length;
}
memcpy(rbuf, buf, offset);
return 0;
}
static int
target_emulate_request_sense(struct se_cmd *cmd)
{
unsigned char *cdb = cmd->t_task->t_task_cdb;
unsigned char *buf = cmd->t_task->t_task_buf;
u8 ua_asc = 0, ua_ascq = 0;
if (cdb[1] & 0x01) {
printk(KERN_ERR "REQUEST_SENSE description emulation not"
" supported\n");
return PYX_TRANSPORT_INVALID_CDB_FIELD;
}
if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
/*
* CURRENT ERROR, UNIT ATTENTION
*/
buf[0] = 0x70;
buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
/*
* Make sure request data length is enough for additional
* sense data.
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
return 0;
}
/*
* The Additional Sense Code (ASC) from the UNIT ATTENTION
*/
buf[SPC_ASC_KEY_OFFSET] = ua_asc;
buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
buf[7] = 0x0A;
} else {
/*
* CURRENT ERROR, NO SENSE
*/
buf[0] = 0x70;
buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
/*
* Make sure request data length is enough for additional
* sense data.
*/
if (cmd->data_length <= 18) {
buf[7] = 0x00;
return 0;
}
/*
* NO ADDITIONAL SENSE INFORMATION
*/
buf[SPC_ASC_KEY_OFFSET] = 0x00;
buf[7] = 0x0A;
}
return 0;
}
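/*
 * Illustrative sketch, not part of this driver: decoding the fixed-format
 * sense data built above on the receiving side.  The offsets used here
 * (sense key in the low nibble of byte 2, ASC/ASCQ in bytes 12/13) are the
 * standard SPC fixed-format positions, which the SPC_*_OFFSET constants
 * above are assumed to correspond to; the function name is local to this
 * example.
 */
#if 0	/* example only */
static void example_parse_fixed_sense(const unsigned char *sense,
				      u8 *key, u8 *asc, u8 *ascq)
{
	/* Byte 0: 0x70 == current error, fixed format */
	*key  = sense[2] & 0x0f;
	*asc  = sense[12];
	*ascq = sense[13];
}
#endif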
/*
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
static int
target_emulate_unmap(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
unsigned char *buf = cmd->t_task->t_task_buf, *ptr = NULL;
unsigned char *cdb = &cmd->t_task->t_task_cdb[0];
sector_t lba;
unsigned int size = cmd->data_length, range;
int ret, offset;
unsigned short dl, bd_dl;
/* First UNMAP block descriptor starts at 8 byte offset */
offset = 8;
size -= 8;
dl = get_unaligned_be16(&cdb[0]);
bd_dl = get_unaligned_be16(&cdb[2]);
ptr = &buf[offset];
printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
while (size) {
lba = get_unaligned_be64(&ptr[0]);
range = get_unaligned_be32(&ptr[8]);
printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
(unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
ret);
return -1;
}
ptr += 16;
size -= 16;
}
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
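/*
 * Illustrative sketch, not part of this driver: the UNMAP parameter list
 * that the loop above walks is an 8-byte header followed by 16-byte block
 * descriptors, each holding a big-endian 8-byte LBA, a 4-byte block count
 * and 4 reserved bytes.  A minimal builder for a single-descriptor list,
 * assuming the standard put_unaligned_be*() helpers, might look like this;
 * the function name is purely for the example.
 */
#if 0	/* example only */
static void example_build_unmap_param_list(unsigned char *buf,
					   unsigned long long lba,
					   unsigned int nr_blocks)
{
	/* UNMAP DATA LENGTH: bytes following this field (6 + 16) */
	put_unaligned_be16(6 + 16, &buf[0]);
	/* UNMAP BLOCK DESCRIPTOR DATA LENGTH: one 16-byte descriptor */
	put_unaligned_be16(16, &buf[2]);
	/* bytes 4-7 of the header are reserved */

	/* First block descriptor starts at byte 8 */
	put_unaligned_be64(lba, &buf[8]);
	put_unaligned_be32(nr_blocks, &buf[16]);
	/* bytes 20-23 (descriptor bytes 12-15) are reserved */
}
#endif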
/*
* Used for TCM/IBLOCK and TCM/FILEIO for block/blk-lib.c level discard support.
* Note this is not used for TCM/pSCSI passthrough
*/
static int
target_emulate_write_same(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
sector_t lba = cmd->t_task->t_task_lba;
unsigned int range;
int ret;
range = (cmd->data_length / DEV_ATTRIB(dev)->block_size);
printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %u\n",
(unsigned long long)lba, range);
ret = dev->transport->do_discard(dev, lba, range);
if (ret < 0) {
printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
return -1;
}
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return 0;
}
int
transport_emulate_control_cdb(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = SE_DEV(cmd);
unsigned short service_action;
int ret = 0;
switch (cmd->t_task->t_task_cdb[0]) {
case INQUIRY:
ret = target_emulate_inquiry(cmd);
break;
case READ_CAPACITY:
ret = target_emulate_readcapacity(cmd);
break;
case MODE_SENSE:
ret = target_emulate_modesense(cmd, 0);
break;
case MODE_SENSE_10:
ret = target_emulate_modesense(cmd, 1);
break;
case SERVICE_ACTION_IN:
switch (cmd->t_task->t_task_cdb[1] & 0x1f) {
case SAI_READ_CAPACITY_16:
ret = target_emulate_readcapacity_16(cmd);
break;
default:
printk(KERN_ERR "Unsupported SA: 0x%02x\n",
cmd->t_task->t_task_cdb[1] & 0x1f);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
break;
case REQUEST_SENSE:
ret = target_emulate_request_sense(cmd);
break;
case UNMAP:
if (!dev->transport->do_discard) {
printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_unmap(task);
break;
case WRITE_SAME_16:
if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_write_same(task);
break;
case VARIABLE_LENGTH_CMD:
service_action =
get_unaligned_be16(&cmd->t_task->t_task_cdb[8]);
switch (service_action) {
case WRITE_SAME_32:
if (!dev->transport->do_discard) {
printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
" supported for: %s\n",
dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
ret = target_emulate_write_same(task);
break;
default:
printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
" 0x%02x\n", service_action);
break;
}
break;
case SYNCHRONIZE_CACHE:
case 0x91: /* SYNCHRONIZE_CACHE_16: */
if (!dev->transport->do_sync_cache) {
printk(KERN_ERR
"SYNCHRONIZE_CACHE emulation not supported"
" for: %s\n", dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
dev->transport->do_sync_cache(task);
break;
case ALLOW_MEDIUM_REMOVAL:
case ERASE:
case REZERO_UNIT:
case SEEK_10:
case SPACE:
case START_STOP:
case TEST_UNIT_READY:
case VERIFY:
case WRITE_FILEMARKS:
break;
default:
printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
cmd->t_task->t_task_cdb[0], dev->transport->name);
return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
}
if (ret < 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/*******************************************************************************
* Filename: target_core_configfs.c
*
* This file contains ConfigFS logic for the Generic Target Engine project.
*
* Copyright (c) 2008-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* based on configfs Copyright (C) 2005 Oracle. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <linux/proc_fs.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_rd.h"
static struct list_head g_tf_list;
static struct mutex g_tf_lock;
struct target_core_configfs_attribute {
struct configfs_attribute attr;
ssize_t (*show)(void *, char *);
ssize_t (*store)(void *, const char *, size_t);
};
static inline struct se_hba *
item_to_hba(struct config_item *item)
{
return container_of(to_config_group(item), struct se_hba, hba_group);
}
/*
* Attributes for /sys/kernel/config/target/
*/
static ssize_t target_core_attr_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
return sprintf(page, "Target Engine Core ConfigFS Infrastructure %s"
" on %s/%s on "UTS_RELEASE"\n", TARGET_CORE_CONFIGFS_VERSION,
utsname()->sysname, utsname()->machine);
}
static struct configfs_item_operations target_core_fabric_item_ops = {
.show_attribute = target_core_attr_show,
};
static struct configfs_attribute target_core_item_attr_version = {
.ca_owner = THIS_MODULE,
.ca_name = "version",
.ca_mode = S_IRUGO,
};
static struct target_fabric_configfs *target_core_get_fabric(
const char *name)
{
struct target_fabric_configfs *tf;
if (!(name))
return NULL;
mutex_lock(&g_tf_lock);
list_for_each_entry(tf, &g_tf_list, tf_list) {
if (!(strcmp(tf->tf_name, name))) {
atomic_inc(&tf->tf_access_cnt);
mutex_unlock(&g_tf_lock);
return tf;
}
}
mutex_unlock(&g_tf_lock);
return NULL;
}
/*
* Called from struct target_core_group_ops->make_group()
*/
static struct config_group *target_core_register_fabric(
struct config_group *group,
const char *name)
{
struct target_fabric_configfs *tf;
int ret;
printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
" %s\n", group, name);
/*
* Ensure that TCM subsystem plugins are loaded at this point for
* using the RAMDISK_DR virtual LUN 0 and all other struct se_port
* LUN symlinks.
*/
if (transport_subsystem_check_init() < 0)
return ERR_PTR(-EINVAL);
/*
 * Below are some hardcoded request_module() calls to automatically
 * load fabric modules when the following is called:
 *
 * mkdir -p /sys/kernel/config/target/$MODULE_NAME
 *
 * Note that this does not limit which TCM fabric module can be
 * registered, but simply provides auto-loading logic for mkdir(2)
 * system calls naming known TCM fabric modules.
*/
if (!(strncmp(name, "iscsi", 5))) {
/*
* Automatically load the LIO Target fabric module when the
* following is called:
*
* mkdir -p $CONFIGFS/target/iscsi
*/
ret = request_module("iscsi_target_mod");
if (ret < 0) {
printk(KERN_ERR "request_module() failed for"
" iscsi_target_mod.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
} else if (!(strncmp(name, "loopback", 8))) {
/*
* Automatically load the tcm_loop fabric module when the
* following is called:
*
* mkdir -p $CONFIGFS/target/loopback
*/
ret = request_module("tcm_loop");
if (ret < 0) {
printk(KERN_ERR "request_module() failed for"
" tcm_loop.ko: %d\n", ret);
return ERR_PTR(-EINVAL);
}
}
tf = target_core_get_fabric(name);
if (!(tf)) {
printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
name);
return ERR_PTR(-EINVAL);
}
printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
" %s\n", tf->tf_name);
/*
 * On a successful target_core_get_fabric() lookup, the returned
* struct target_fabric_configfs *tf will contain a usage reference.
*/
printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
&TF_CIT_TMPL(tf)->tfc_wwn_cit);
tf->tf_group.default_groups = tf->tf_default_groups;
tf->tf_group.default_groups[0] = &tf->tf_disc_group;
tf->tf_group.default_groups[1] = NULL;
config_group_init_type_name(&tf->tf_group, name,
&TF_CIT_TMPL(tf)->tfc_wwn_cit);
config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
&TF_CIT_TMPL(tf)->tfc_discovery_cit);
printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
" %s\n", tf->tf_group.cg_item.ci_name);
/*
* Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
*/
tf->tf_ops.tf_subsys = tf->tf_subsys;
tf->tf_fabric = &tf->tf_group.cg_item;
printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
" for %s\n", name);
return &tf->tf_group;
}
/*
* Called from struct target_core_group_ops->drop_item()
*/
static void target_core_deregister_fabric(
struct config_group *group,
struct config_item *item)
{
struct target_fabric_configfs *tf = container_of(
to_config_group(item), struct target_fabric_configfs, tf_group);
struct config_group *tf_group;
struct config_item *df_item;
int i;
printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
" tf list\n", config_item_name(item));
printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
" %s\n", tf->tf_name);
atomic_dec(&tf->tf_access_cnt);
printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
" tf->tf_fabric for %s\n", tf->tf_name);
tf->tf_fabric = NULL;
printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
" %s\n", config_item_name(item));
tf_group = &tf->tf_group;
for (i = 0; tf_group->default_groups[i]; i++) {
df_item = &tf_group->default_groups[i]->cg_item;
tf_group->default_groups[i] = NULL;
config_item_put(df_item);
}
config_item_put(item);
}
static struct configfs_group_operations target_core_fabric_group_ops = {
.make_group = &target_core_register_fabric,
.drop_item = &target_core_deregister_fabric,
};
/*
 * All item attributes appearing in /sys/kernel/config/target/ appear here.
*/
static struct configfs_attribute *target_core_fabric_item_attrs[] = {
&target_core_item_attr_version,
NULL,
};
/*
* Provides Fabrics Groups and Item Attributes for /sys/kernel/config/target/
*/
static struct config_item_type target_core_fabrics_item = {
.ct_item_ops = &target_core_fabric_item_ops,
.ct_group_ops = &target_core_fabric_group_ops,
.ct_attrs = target_core_fabric_item_attrs,
.ct_owner = THIS_MODULE,
};
static struct configfs_subsystem target_core_fabrics = {
.su_group = {
.cg_item = {
.ci_namebuf = "target",
.ci_type = &target_core_fabrics_item,
},
},
};
static struct configfs_subsystem *target_core_subsystem[] = {
&target_core_fabrics,
NULL,
};
/*##############################################################################
// Start functions called by external Target Fabrics Modules
//############################################################################*/
/*
* First function called by fabric modules to:
*
* 1) Allocate a struct target_fabric_configfs and save the *fabric_cit pointer.
* 2) Add struct target_fabric_configfs to g_tf_list
* 3) Return struct target_fabric_configfs to fabric module to be passed
* into target_fabric_configfs_register().
*/
struct target_fabric_configfs *target_fabric_configfs_init(
struct module *fabric_mod,
const char *name)
{
struct target_fabric_configfs *tf;
if (!(fabric_mod)) {
printk(KERN_ERR "Missing struct module *fabric_mod pointer\n");
return NULL;
}
if (!(name)) {
printk(KERN_ERR "Unable to locate passed fabric name\n");
return NULL;
}
if (strlen(name) > TARGET_FABRIC_NAME_SIZE) {
printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
"_NAME_SIZE\n", name);
return NULL;
}
tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
if (!(tf))
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&tf->tf_list);
atomic_set(&tf->tf_access_cnt, 0);
/*
* Setup the default generic struct config_item_type's (cits) in
* struct target_fabric_configfs->tf_cit_tmpl
*/
tf->tf_module = fabric_mod;
target_fabric_setup_cits(tf);
tf->tf_subsys = target_core_subsystem[0];
snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", name);
mutex_lock(&g_tf_lock);
list_add_tail(&tf->tf_list, &g_tf_list);
mutex_unlock(&g_tf_lock);
printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
">>>>>>>>>>>>>>\n");
printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
" %s\n", tf, tf->tf_name);
return tf;
}
EXPORT_SYMBOL(target_fabric_configfs_init);
/*
 * Called by fabric plugins after a FAILED target_fabric_configfs_register() call.
*/
void target_fabric_configfs_free(
struct target_fabric_configfs *tf)
{
mutex_lock(&g_tf_lock);
list_del(&tf->tf_list);
mutex_unlock(&g_tf_lock);
kfree(tf);
}
EXPORT_SYMBOL(target_fabric_configfs_free);
/*
* Perform a sanity check of the passed tf->tf_ops before completing
* TCM fabric module registration.
*/
static int target_fabric_tf_ops_check(
struct target_fabric_configfs *tf)
{
struct target_core_fabric_ops *tfo = &tf->tf_ops;
if (!(tfo->get_fabric_name)) {
printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
return -EINVAL;
}
if (!(tfo->get_fabric_proto_ident)) {
printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
return -EINVAL;
}
if (!(tfo->tpg_get_wwn)) {
printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
return -EINVAL;
}
if (!(tfo->tpg_get_tag)) {
printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
return -EINVAL;
}
if (!(tfo->tpg_get_default_depth)) {
printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
return -EINVAL;
}
if (!(tfo->tpg_get_pr_transport_id)) {
printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
return -EINVAL;
}
if (!(tfo->tpg_get_pr_transport_id_len)) {
printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
return -EINVAL;
}
if (!(tfo->tpg_check_demo_mode)) {
printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
return -EINVAL;
}
if (!(tfo->tpg_check_demo_mode_cache)) {
printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
return -EINVAL;
}
if (!(tfo->tpg_check_demo_mode_write_protect)) {
printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
return -EINVAL;
}
if (!(tfo->tpg_check_prod_mode_write_protect)) {
printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
return -EINVAL;
}
if (!(tfo->tpg_alloc_fabric_acl)) {
printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
return -EINVAL;
}
if (!(tfo->tpg_release_fabric_acl)) {
printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
return -EINVAL;
}
if (!(tfo->tpg_get_inst_index)) {
printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
return -EINVAL;
}
if (!(tfo->release_cmd_to_pool)) {
printk(KERN_ERR "Missing tfo->release_cmd_to_pool()\n");
return -EINVAL;
}
if (!(tfo->release_cmd_direct)) {
printk(KERN_ERR "Missing tfo->release_cmd_direct()\n");
return -EINVAL;
}
if (!(tfo->shutdown_session)) {
printk(KERN_ERR "Missing tfo->shutdown_session()\n");
return -EINVAL;
}
if (!(tfo->close_session)) {
printk(KERN_ERR "Missing tfo->close_session()\n");
return -EINVAL;
}
if (!(tfo->stop_session)) {
printk(KERN_ERR "Missing tfo->stop_session()\n");
return -EINVAL;
}
if (!(tfo->fall_back_to_erl0)) {
printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
return -EINVAL;
}
if (!(tfo->sess_logged_in)) {
printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
return -EINVAL;
}
if (!(tfo->sess_get_index)) {
printk(KERN_ERR "Missing tfo->sess_get_index()\n");
return -EINVAL;
}
if (!(tfo->write_pending)) {
printk(KERN_ERR "Missing tfo->write_pending()\n");
return -EINVAL;
}
if (!(tfo->write_pending_status)) {
printk(KERN_ERR "Missing tfo->write_pending_status()\n");
return -EINVAL;
}
if (!(tfo->set_default_node_attributes)) {
printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
return -EINVAL;
}
if (!(tfo->get_task_tag)) {
printk(KERN_ERR "Missing tfo->get_task_tag()\n");
return -EINVAL;
}
if (!(tfo->get_cmd_state)) {
printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
return -EINVAL;
}
if (!(tfo->new_cmd_failure)) {
printk(KERN_ERR "Missing tfo->new_cmd_failure()\n");
return -EINVAL;
}
if (!(tfo->queue_data_in)) {
printk(KERN_ERR "Missing tfo->queue_data_in()\n");
return -EINVAL;
}
if (!(tfo->queue_status)) {
printk(KERN_ERR "Missing tfo->queue_status()\n");
return -EINVAL;
}
if (!(tfo->queue_tm_rsp)) {
printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
return -EINVAL;
}
if (!(tfo->set_fabric_sense_len)) {
printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
return -EINVAL;
}
if (!(tfo->get_fabric_sense_len)) {
printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
return -EINVAL;
}
if (!(tfo->is_state_remove)) {
printk(KERN_ERR "Missing tfo->is_state_remove()\n");
return -EINVAL;
}
if (!(tfo->pack_lun)) {
printk(KERN_ERR "Missing tfo->pack_lun()\n");
return -EINVAL;
}
/*
* We at least require tfo->fabric_make_wwn(), tfo->fabric_drop_wwn()
* tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
* target_core_fabric_configfs.c WWN+TPG group context code.
*/
if (!(tfo->fabric_make_wwn)) {
printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
return -EINVAL;
}
if (!(tfo->fabric_drop_wwn)) {
printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
return -EINVAL;
}
if (!(tfo->fabric_make_tpg)) {
printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
return -EINVAL;
}
if (!(tfo->fabric_drop_tpg)) {
printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
return -EINVAL;
}
return 0;
}
/*
 * Called 2nd from the fabric module with the struct target_fabric_configfs *
 * returned from target_fabric_configfs_init().
 *
 * Upon a successful registration, the new fabric's struct config_item is
 * returned. Also, a pointer to this struct is set in the passed
 * struct target_fabric_configfs.
*/
int target_fabric_configfs_register(
struct target_fabric_configfs *tf)
{
struct config_group *su_group;
int ret;
if (!(tf)) {
printk(KERN_ERR "Unable to locate target_fabric_configfs"
" pointer\n");
return -EINVAL;
}
if (!(tf->tf_subsys)) {
printk(KERN_ERR "Unable to target struct config_subsystem"
" pointer\n");
return -EINVAL;
}
su_group = &tf->tf_subsys->su_group;
if (!(su_group)) {
printk(KERN_ERR "Unable to locate target struct config_group"
" pointer\n");
return -EINVAL;
}
ret = target_fabric_tf_ops_check(tf);
if (ret < 0)
return ret;
printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
">>>>>>>>>>\n");
return 0;
}
EXPORT_SYMBOL(target_fabric_configfs_register);
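/*
 * Illustrative sketch, not part of this file: the expected calling sequence
 * for a fabric module using the API exported above.  It allocates the
 * template with target_fabric_configfs_init(), fills in the tf_ops callbacks
 * that target_fabric_tf_ops_check() requires, and then registers; on failure
 * the template is released with target_fabric_configfs_free().  All of the
 * demo_fabric_* names are hypothetical.
 */
#if 0	/* example only */
static struct target_fabric_configfs *demo_fabric_cfs;

static int demo_fabric_register_configfs(void)
{
	struct target_fabric_configfs *tf;
	int ret;

	tf = target_fabric_configfs_init(THIS_MODULE, "demo_fabric");
	/* init can return NULL or ERR_PTR() depending on the failure */
	if (IS_ERR_OR_NULL(tf))
		return -ENOMEM;
	/*
	 * Fill in the mandatory struct target_core_fabric_ops callbacks
	 * checked by target_fabric_tf_ops_check(), for example:
	 */
	tf->tf_ops.get_fabric_name = &demo_fabric_get_fabric_name;
	tf->tf_ops.tpg_get_wwn = &demo_fabric_get_fabric_wwn;
	tf->tf_ops.tpg_get_tag = &demo_fabric_get_tag;
	/* ... remaining tf_ops assignments ... */

	ret = target_fabric_configfs_register(tf);
	if (ret < 0) {
		target_fabric_configfs_free(tf);
		return ret;
	}
	demo_fabric_cfs = tf;
	return 0;
}
#endif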
void target_fabric_configfs_deregister(
struct target_fabric_configfs *tf)
{
struct config_group *su_group;
struct configfs_subsystem *su;
if (!(tf)) {
printk(KERN_ERR "Unable to locate passed target_fabric_"
"configfs\n");
return;
}
su = tf->tf_subsys;
if (!(su)) {
printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
" pointer\n");
return;
}
su_group = &tf->tf_subsys->su_group;
if (!(su_group)) {
printk(KERN_ERR "Unable to locate target struct config_group"
" pointer\n");
return;
}
printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
">>>>>>>>>>>>\n");
mutex_lock(&g_tf_lock);
if (atomic_read(&tf->tf_access_cnt)) {
mutex_unlock(&g_tf_lock);
printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
tf->tf_name);
BUG();
}
list_del(&tf->tf_list);
mutex_unlock(&g_tf_lock);
printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
" %s\n", tf->tf_name);
tf->tf_module = NULL;
tf->tf_subsys = NULL;
kfree(tf);
printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
">>>>>\n");
return;
}
EXPORT_SYMBOL(target_fabric_configfs_deregister);
/*##############################################################################
// Stop functions called by external Target Fabrics Modules
//############################################################################*/
/* Start functions for struct config_item_type target_core_dev_attrib_cit */
#define DEF_DEV_ATTRIB_SHOW(_name) \
static ssize_t target_core_dev_show_attr_##_name( \
struct se_dev_attrib *da, \
char *page) \
{ \
struct se_device *dev; \
struct se_subsystem_dev *se_dev = da->da_sub_dev; \
ssize_t rb; \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
if (!(dev)) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
rb = snprintf(page, PAGE_SIZE, "%u\n", (u32)DEV_ATTRIB(dev)->_name); \
spin_unlock(&se_dev->se_dev_lock); \
\
return rb; \
}
#define DEF_DEV_ATTRIB_STORE(_name) \
static ssize_t target_core_dev_store_attr_##_name( \
struct se_dev_attrib *da, \
const char *page, \
size_t count) \
{ \
struct se_device *dev; \
struct se_subsystem_dev *se_dev = da->da_sub_dev; \
unsigned long val; \
int ret; \
\
spin_lock(&se_dev->se_dev_lock); \
dev = se_dev->se_dev_ptr; \
if (!(dev)) { \
spin_unlock(&se_dev->se_dev_lock); \
return -ENODEV; \
} \
ret = strict_strtoul(page, 0, &val); \
if (ret < 0) { \
spin_unlock(&se_dev->se_dev_lock); \
printk(KERN_ERR "strict_strtoul() failed with" \
" ret: %d\n", ret); \
return -EINVAL; \
} \
ret = se_dev_set_##_name(dev, (u32)val); \
spin_unlock(&se_dev->se_dev_lock); \
\
return (!ret) ? count : -EINVAL; \
}
#define DEF_DEV_ATTRIB(_name) \
DEF_DEV_ATTRIB_SHOW(_name); \
DEF_DEV_ATTRIB_STORE(_name);
#define DEF_DEV_ATTRIB_RO(_name) \
DEF_DEV_ATTRIB_SHOW(_name);
CONFIGFS_EATTR_STRUCT(target_core_dev_attrib, se_dev_attrib);
#define SE_DEV_ATTR(_name, _mode) \
static struct target_core_dev_attrib_attribute \
target_core_dev_attrib_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_core_dev_show_attr_##_name, \
target_core_dev_store_attr_##_name);
#define SE_DEV_ATTR_RO(_name); \
static struct target_core_dev_attrib_attribute \
target_core_dev_attrib_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_show_attr_##_name);
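/*
 * Illustrative sketch, not part of the build: roughly what one
 * DEF_DEV_ATTRIB()/SE_DEV_ATTR() pair below expands to, shown here for an
 * attribute named block_size with the generated function bodies elided to
 * prototypes.
 */
#if 0	/* example only */
static ssize_t target_core_dev_show_attr_block_size(
	struct se_dev_attrib *da, char *page);
static ssize_t target_core_dev_store_attr_block_size(
	struct se_dev_attrib *da, const char *page, size_t count);

static struct target_core_dev_attrib_attribute
			target_core_dev_attrib_block_size =
	__CONFIGFS_EATTR(block_size, S_IRUGO | S_IWUSR,
			target_core_dev_show_attr_block_size,
			target_core_dev_store_attr_block_size);
#endif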
DEF_DEV_ATTRIB(emulate_dpo);
SE_DEV_ATTR(emulate_dpo, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_fua_write);
SE_DEV_ATTR(emulate_fua_write, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_fua_read);
SE_DEV_ATTR(emulate_fua_read, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_write_cache);
SE_DEV_ATTR(emulate_write_cache, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_ua_intlck_ctrl);
SE_DEV_ATTR(emulate_ua_intlck_ctrl, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_tas);
SE_DEV_ATTR(emulate_tas, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_tpu);
SE_DEV_ATTR(emulate_tpu, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(emulate_tpws);
SE_DEV_ATTR(emulate_tpws, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(enforce_pr_isids);
SE_DEV_ATTR(enforce_pr_isids, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB_RO(hw_block_size);
SE_DEV_ATTR_RO(hw_block_size);
DEF_DEV_ATTRIB(block_size);
SE_DEV_ATTR(block_size, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB_RO(hw_max_sectors);
SE_DEV_ATTR_RO(hw_max_sectors);
DEF_DEV_ATTRIB(max_sectors);
SE_DEV_ATTR(max_sectors, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(optimal_sectors);
SE_DEV_ATTR(optimal_sectors, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB_RO(hw_queue_depth);
SE_DEV_ATTR_RO(hw_queue_depth);
DEF_DEV_ATTRIB(queue_depth);
SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(task_timeout);
SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(max_unmap_lba_count);
SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(max_unmap_block_desc_count);
SE_DEV_ATTR(max_unmap_block_desc_count, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(unmap_granularity);
SE_DEV_ATTR(unmap_granularity, S_IRUGO | S_IWUSR);
DEF_DEV_ATTRIB(unmap_granularity_alignment);
SE_DEV_ATTR(unmap_granularity_alignment, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_dev_attrib, se_dev_attrib, da_group);
static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
&target_core_dev_attrib_emulate_dpo.attr,
&target_core_dev_attrib_emulate_fua_write.attr,
&target_core_dev_attrib_emulate_fua_read.attr,
&target_core_dev_attrib_emulate_write_cache.attr,
&target_core_dev_attrib_emulate_ua_intlck_ctrl.attr,
&target_core_dev_attrib_emulate_tas.attr,
&target_core_dev_attrib_emulate_tpu.attr,
&target_core_dev_attrib_emulate_tpws.attr,
&target_core_dev_attrib_enforce_pr_isids.attr,
&target_core_dev_attrib_hw_block_size.attr,
&target_core_dev_attrib_block_size.attr,
&target_core_dev_attrib_hw_max_sectors.attr,
&target_core_dev_attrib_max_sectors.attr,
&target_core_dev_attrib_optimal_sectors.attr,
&target_core_dev_attrib_hw_queue_depth.attr,
&target_core_dev_attrib_queue_depth.attr,
&target_core_dev_attrib_task_timeout.attr,
&target_core_dev_attrib_max_unmap_lba_count.attr,
&target_core_dev_attrib_max_unmap_block_desc_count.attr,
&target_core_dev_attrib_unmap_granularity.attr,
&target_core_dev_attrib_unmap_granularity_alignment.attr,
NULL,
};
static struct configfs_item_operations target_core_dev_attrib_ops = {
.show_attribute = target_core_dev_attrib_attr_show,
.store_attribute = target_core_dev_attrib_attr_store,
};
static struct config_item_type target_core_dev_attrib_cit = {
.ct_item_ops = &target_core_dev_attrib_ops,
.ct_attrs = target_core_dev_attrib_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_dev_attrib_cit */
/* Start functions for struct config_item_type target_core_dev_wwn_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_wwn, t10_wwn);
#define SE_DEV_WWN_ATTR(_name, _mode) \
static struct target_core_dev_wwn_attribute target_core_dev_wwn_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_core_dev_wwn_show_attr_##_name, \
target_core_dev_wwn_store_attr_##_name);
#define SE_DEV_WWN_ATTR_RO(_name); \
do { \
static struct target_core_dev_wwn_attribute \
target_core_dev_wwn_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_wwn_show_attr_##_name); \
} while (0);
/*
* VPD page 0x80 Unit serial
*/
static ssize_t target_core_dev_wwn_show_attr_vpd_unit_serial(
struct t10_wwn *t10_wwn,
char *page)
{
struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
struct se_device *dev;
dev = se_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
&t10_wwn->unit_serial[0]);
}
static ssize_t target_core_dev_wwn_store_attr_vpd_unit_serial(
struct t10_wwn *t10_wwn,
const char *page,
size_t count)
{
struct se_subsystem_dev *su_dev = t10_wwn->t10_sub_dev;
struct se_device *dev;
unsigned char buf[INQUIRY_VPD_SERIAL_LEN];
/*
* If Linux/SCSI subsystem_api_t plugin got a VPD Unit Serial
* from the struct scsi_device level firmware, do not allow
* VPD Unit Serial to be emulated.
*
* Note this struct scsi_device could also be emulating VPD
* information from its drivers/scsi LLD. But for now we assume
* it is doing 'the right thing' wrt a world wide unique
* VPD Unit Serial Number that OS dependent multipath can depend on.
*/
if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
" Unit Serial, ignoring request\n");
return -EOPNOTSUPP;
}
if ((strlen(page) + 1) > INQUIRY_VPD_SERIAL_LEN) {
printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
return -EOVERFLOW;
}
/*
* Check to see if any active $FABRIC_MOD exports exist. If they
* do exist, fail here as changing this information on the fly
* (underneath the initiator side OS dependent multipath code)
* could cause negative effects.
*/
dev = su_dev->se_dev_ptr;
if ((dev)) {
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "Unable to set VPD Unit Serial while"
" active %d $FABRIC_MOD exports exist\n",
atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
}
/*
* This currently assumes ASCII encoding for emulated VPD Unit Serial.
*
* Also, strip any newline added from the userspace
* echo $UUID > $TARGET/$HBA/$STORAGE_OBJECT/wwn/vpd_unit_serial
*/
memset(buf, 0, INQUIRY_VPD_SERIAL_LEN);
snprintf(buf, INQUIRY_VPD_SERIAL_LEN, "%s", page);
snprintf(su_dev->t10_wwn.unit_serial, INQUIRY_VPD_SERIAL_LEN,
"%s", strstrip(buf));
su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
" %s\n", su_dev->t10_wwn.unit_serial);
return count;
}
SE_DEV_WWN_ATTR(vpd_unit_serial, S_IRUGO | S_IWUSR);
/*
* VPD page 0x83 Protocol Identifier
*/
static ssize_t target_core_dev_wwn_show_attr_vpd_protocol_identifier(
struct t10_wwn *t10_wwn,
char *page)
{
struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev;
struct se_device *dev;
struct t10_vpd *vpd;
unsigned char buf[VPD_TMP_BUF_SIZE];
ssize_t len = 0;
dev = se_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
memset(buf, 0, VPD_TMP_BUF_SIZE);
spin_lock(&t10_wwn->t10_vpd_lock);
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
if (!(vpd->protocol_identifier_set))
continue;
transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
if ((len + strlen(buf) > PAGE_SIZE))
break;
len += sprintf(page+len, "%s", buf);
}
spin_unlock(&t10_wwn->t10_vpd_lock);
return len;
}
static ssize_t target_core_dev_wwn_store_attr_vpd_protocol_identifier(
struct t10_wwn *t10_wwn,
const char *page,
size_t count)
{
return -ENOSYS;
}
SE_DEV_WWN_ATTR(vpd_protocol_identifier, S_IRUGO | S_IWUSR);
/*
* Generic wrapper for dumping VPD identifiers by association.
*/
#define DEF_DEV_WWN_ASSOC_SHOW(_name, _assoc) \
static ssize_t target_core_dev_wwn_show_attr_##_name( \
struct t10_wwn *t10_wwn, \
char *page) \
{ \
struct se_subsystem_dev *se_dev = t10_wwn->t10_sub_dev; \
struct se_device *dev; \
struct t10_vpd *vpd; \
unsigned char buf[VPD_TMP_BUF_SIZE]; \
ssize_t len = 0; \
\
dev = se_dev->se_dev_ptr; \
if (!(dev)) \
return -ENODEV; \
\
spin_lock(&t10_wwn->t10_vpd_lock); \
list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) { \
if (vpd->association != _assoc) \
continue; \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE); \
if ((len + strlen(buf) > PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
if ((len + strlen(buf) > PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
\
memset(buf, 0, VPD_TMP_BUF_SIZE); \
transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
if ((len + strlen(buf) > PAGE_SIZE)) \
break; \
len += sprintf(page+len, "%s", buf); \
} \
spin_unlock(&t10_wwn->t10_vpd_lock); \
\
return len; \
}
/*
 * VPD page 0x83 Association: Logical Unit
*/
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_logical_unit, 0x00);
static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_logical_unit(
struct t10_wwn *t10_wwn,
const char *page,
size_t count)
{
return -ENOSYS;
}
SE_DEV_WWN_ATTR(vpd_assoc_logical_unit, S_IRUGO | S_IWUSR);
/*
* VPD page 0x83 Association: Target Port
*/
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_target_port, 0x10);
static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_target_port(
struct t10_wwn *t10_wwn,
const char *page,
size_t count)
{
return -ENOSYS;
}
SE_DEV_WWN_ATTR(vpd_assoc_target_port, S_IRUGO | S_IWUSR);
/*
* VPD page 0x83 Association: SCSI Target Device
*/
DEF_DEV_WWN_ASSOC_SHOW(vpd_assoc_scsi_target_device, 0x20);
static ssize_t target_core_dev_wwn_store_attr_vpd_assoc_scsi_target_device(
struct t10_wwn *t10_wwn,
const char *page,
size_t count)
{
return -ENOSYS;
}
SE_DEV_WWN_ATTR(vpd_assoc_scsi_target_device, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_dev_wwn, t10_wwn, t10_wwn_group);
static struct configfs_attribute *target_core_dev_wwn_attrs[] = {
&target_core_dev_wwn_vpd_unit_serial.attr,
&target_core_dev_wwn_vpd_protocol_identifier.attr,
&target_core_dev_wwn_vpd_assoc_logical_unit.attr,
&target_core_dev_wwn_vpd_assoc_target_port.attr,
&target_core_dev_wwn_vpd_assoc_scsi_target_device.attr,
NULL,
};
static struct configfs_item_operations target_core_dev_wwn_ops = {
.show_attribute = target_core_dev_wwn_attr_show,
.store_attribute = target_core_dev_wwn_attr_store,
};
static struct config_item_type target_core_dev_wwn_cit = {
.ct_item_ops = &target_core_dev_wwn_ops,
.ct_attrs = target_core_dev_wwn_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_dev_wwn_cit */
/* Start functions for struct config_item_type target_core_dev_pr_cit */
CONFIGFS_EATTR_STRUCT(target_core_dev_pr, se_subsystem_dev);
#define SE_DEV_PR_ATTR(_name, _mode) \
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_core_dev_pr_show_attr_##_name, \
target_core_dev_pr_store_attr_##_name);
#define SE_DEV_PR_ATTR_RO(_name); \
static struct target_core_dev_pr_attribute target_core_dev_pr_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_dev_pr_show_attr_##_name);
/*
* res_holder
*/
static ssize_t target_core_dev_pr_show_spc3_res(
struct se_device *dev,
char *page,
ssize_t *len)
{
struct se_node_acl *se_nacl;
struct t10_pr_registration *pr_reg;
char i_buf[PR_REG_ISID_ID_LEN];
int prf_isid;
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!(pr_reg)) {
*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
}
se_nacl = pr_reg->pr_reg_nacl;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
*len += sprintf(page + *len, "SPC-3 Reservation: %s Initiator: %s%s\n",
TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
se_nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
spin_unlock(&dev->dev_reservation_lock);
return *len;
}
static ssize_t target_core_dev_pr_show_spc2_res(
struct se_device *dev,
char *page,
ssize_t *len)
{
struct se_node_acl *se_nacl;
spin_lock(&dev->dev_reservation_lock);
se_nacl = dev->dev_reserved_node_acl;
if (!(se_nacl)) {
*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return *len;
}
*len += sprintf(page + *len, "SPC-2 Reservation: %s Initiator: %s\n",
TPG_TFO(se_nacl->se_tpg)->get_fabric_name(),
se_nacl->initiatorname);
spin_unlock(&dev->dev_reservation_lock);
return *len;
}
static ssize_t target_core_dev_pr_show_attr_res_holder(
struct se_subsystem_dev *su_dev,
char *page)
{
ssize_t len = 0;
if (!(su_dev->se_dev_ptr))
return -ENODEV;
switch (T10_RES(su_dev)->res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
target_core_dev_pr_show_spc3_res(su_dev->se_dev_ptr,
page, &len);
break;
case SPC2_RESERVATIONS:
target_core_dev_pr_show_spc2_res(su_dev->se_dev_ptr,
page, &len);
break;
case SPC_PASSTHROUGH:
len += sprintf(page+len, "Passthrough\n");
break;
default:
len += sprintf(page+len, "Unknown\n");
break;
}
return len;
}
SE_DEV_PR_ATTR_RO(res_holder);
/*
* res_pr_all_tgt_pts
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_all_tgt_pts(
struct se_subsystem_dev *su_dev,
char *page)
{
struct se_device *dev;
struct t10_pr_registration *pr_reg;
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!(pr_reg)) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
}
/*
 * See All Target Ports (ALL_TG_PT) bit in spc4r17, section 6.14.3
 * Basic PERSISTENT RESERVE OUT parameter list, page 290
*/
if (pr_reg->pr_reg_all_tg_pt)
len = sprintf(page, "SPC-3 Reservation: All Target"
" Ports registration\n");
else
len = sprintf(page, "SPC-3 Reservation: Single"
" Target Port registration\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_all_tgt_pts);
/*
* res_pr_generation
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_generation(
struct se_subsystem_dev *su_dev,
char *page)
{
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "0x%08x\n", T10_RES(su_dev)->pr_generation);
}
SE_DEV_PR_ATTR_RO(res_pr_generation);
/*
* res_pr_holder_tg_port
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_holder_tg_port(
struct se_subsystem_dev *su_dev,
char *page)
{
struct se_device *dev;
struct se_node_acl *se_nacl;
struct se_lun *lun;
struct se_portal_group *se_tpg;
struct t10_pr_registration *pr_reg;
struct target_core_fabric_ops *tfo;
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!(pr_reg)) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
}
se_nacl = pr_reg->pr_reg_nacl;
se_tpg = se_nacl->se_tpg;
lun = pr_reg->pr_reg_tg_pt_lun;
tfo = TPG_TFO(se_tpg);
len += sprintf(page+len, "SPC-3 Reservation: %s"
" Target Node Endpoint: %s\n", tfo->get_fabric_name(),
tfo->tpg_get_wwn(se_tpg));
len += sprintf(page+len, "SPC-3 Reservation: Relative Port"
" Identifer Tag: %hu %s Portal Group Tag: %hu"
" %s Logical Unit: %u\n", lun->lun_sep->sep_rtpi,
tfo->get_fabric_name(), tfo->tpg_get_tag(se_tpg),
tfo->get_fabric_name(), lun->unpacked_lun);
spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_holder_tg_port);
/*
* res_pr_registered_i_pts
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_registered_i_pts(
struct se_subsystem_dev *su_dev,
char *page)
{
struct target_core_fabric_ops *tfo;
struct t10_pr_registration *pr_reg;
unsigned char buf[384];
char i_buf[PR_REG_ISID_ID_LEN];
ssize_t len = 0;
int reg_count = 0, prf_isid;
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
len += sprintf(page+len, "SPC-3 PR Registrations:\n");
spin_lock(&T10_RES(su_dev)->registration_lock);
list_for_each_entry(pr_reg, &T10_RES(su_dev)->registration_list,
pr_reg_list) {
memset(buf, 0, 384);
memset(i_buf, 0, PR_REG_ISID_ID_LEN);
tfo = pr_reg->pr_reg_nacl->se_tpg->se_tpg_tfo;
prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
PR_REG_ISID_ID_LEN);
sprintf(buf, "%s Node: %s%s Key: 0x%016Lx PRgen: 0x%08x\n",
tfo->get_fabric_name(),
pr_reg->pr_reg_nacl->initiatorname, (prf_isid) ?
&i_buf[0] : "", pr_reg->pr_res_key,
pr_reg->pr_res_generation);
if ((len + strlen(buf) > PAGE_SIZE))
break;
len += sprintf(page+len, "%s", buf);
reg_count++;
}
spin_unlock(&T10_RES(su_dev)->registration_lock);
if (!(reg_count))
len += sprintf(page+len, "None\n");
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_registered_i_pts);
/*
* res_pr_type
*/
static ssize_t target_core_dev_pr_show_attr_res_pr_type(
struct se_subsystem_dev *su_dev,
char *page)
{
struct se_device *dev;
struct t10_pr_registration *pr_reg;
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return len;
spin_lock(&dev->dev_reservation_lock);
pr_reg = dev->dev_pr_res_holder;
if (!(pr_reg)) {
len = sprintf(page, "No SPC-3 Reservation holder\n");
spin_unlock(&dev->dev_reservation_lock);
return len;
}
len = sprintf(page, "SPC-3 Reservation Type: %s\n",
core_scsi3_pr_dump_type(pr_reg->pr_res_type));
spin_unlock(&dev->dev_reservation_lock);
return len;
}
SE_DEV_PR_ATTR_RO(res_pr_type);
/*
* res_type
*/
static ssize_t target_core_dev_pr_show_attr_res_type(
struct se_subsystem_dev *su_dev,
char *page)
{
ssize_t len = 0;
if (!(su_dev->se_dev_ptr))
return -ENODEV;
switch (T10_RES(su_dev)->res_type) {
case SPC3_PERSISTENT_RESERVATIONS:
len = sprintf(page, "SPC3_PERSISTENT_RESERVATIONS\n");
break;
case SPC2_RESERVATIONS:
len = sprintf(page, "SPC2_RESERVATIONS\n");
break;
case SPC_PASSTHROUGH:
len = sprintf(page, "SPC_PASSTHROUGH\n");
break;
default:
len = sprintf(page, "UNKNOWN\n");
break;
}
return len;
}
SE_DEV_PR_ATTR_RO(res_type);
/*
* res_aptpl_active
*/
static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
struct se_subsystem_dev *su_dev,
char *page)
{
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "APTPL Bit Status: %s\n",
(T10_RES(su_dev)->pr_aptpl_active) ? "Activated" : "Disabled");
}
SE_DEV_PR_ATTR_RO(res_aptpl_active);
/*
* res_aptpl_metadata
*/
static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
struct se_subsystem_dev *su_dev,
char *page)
{
if (!(su_dev->se_dev_ptr))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
return sprintf(page, "Ready to process PR APTPL metadata..\n");
}
enum {
Opt_initiator_fabric, Opt_initiator_node, Opt_initiator_sid,
Opt_sa_res_key, Opt_res_holder, Opt_res_type, Opt_res_scope,
Opt_res_all_tg_pt, Opt_mapped_lun, Opt_target_fabric,
Opt_target_node, Opt_tpgt, Opt_port_rtpi, Opt_target_lun, Opt_err
};
static match_table_t tokens = {
{Opt_initiator_fabric, "initiator_fabric=%s"},
{Opt_initiator_node, "initiator_node=%s"},
{Opt_initiator_sid, "initiator_sid=%s"},
{Opt_sa_res_key, "sa_res_key=%s"},
{Opt_res_holder, "res_holder=%d"},
{Opt_res_type, "res_type=%d"},
{Opt_res_scope, "res_scope=%d"},
{Opt_res_all_tg_pt, "res_all_tg_pt=%d"},
{Opt_mapped_lun, "mapped_lun=%d"},
{Opt_target_fabric, "target_fabric=%s"},
{Opt_target_node, "target_node=%s"},
{Opt_tpgt, "tpgt=%d"},
{Opt_port_rtpi, "port_rtpi=%d"},
{Opt_target_lun, "target_lun=%d"},
{Opt_err, NULL}
};
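/*
 * Example res_aptpl_metadata line (hypothetical values, keys taken from the
 * token table above); the store handler below parses it as a single
 * comma-separated string:
 *
 *   initiator_fabric=iSCSI,initiator_node=iqn.1994-05.com.example:i1,
 *   sa_res_key=0x1234abcd,res_holder=1,res_type=1,mapped_lun=0,
 *   target_fabric=iSCSI,target_node=iqn.2003-01.org.example:t1,tpgt=1,
 *   target_lun=0
 */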
static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
struct se_subsystem_dev *su_dev,
const char *page,
size_t count)
{
struct se_device *dev;
unsigned char *i_fabric, *t_fabric, *i_port = NULL, *t_port = NULL;
unsigned char *isid = NULL;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
unsigned long long tmp_ll;
u64 sa_res_key = 0;
u32 mapped_lun = 0, target_lun = 0;
int ret = -1, res_holder = 0, all_tg_pt = 0, arg, token;
u16 port_rtpi = 0, tpgt = 0;
u8 type = 0, scope;
dev = su_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
if (T10_RES(su_dev)->res_type != SPC3_PERSISTENT_RESERVATIONS)
return 0;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_INFO "Unable to process APTPL metadata while"
" active fabric exports exist\n");
return -EINVAL;
}
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_initiator_fabric:
i_fabric = match_strdup(&args[0]);
break;
case Opt_initiator_node:
i_port = match_strdup(&args[0]);
if (strlen(i_port) > PR_APTPL_MAX_IPORT_LEN) {
printk(KERN_ERR "APTPL metadata initiator_node="
" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
PR_APTPL_MAX_IPORT_LEN);
ret = -EINVAL;
break;
}
break;
case Opt_initiator_sid:
isid = match_strdup(&args[0]);
if (strlen(isid) > PR_REG_ISID_LEN) {
printk(KERN_ERR "APTPL metadata initiator_isid"
"= exceeds PR_REG_ISID_LEN: %d\n",
PR_REG_ISID_LEN);
ret = -EINVAL;
break;
}
break;
case Opt_sa_res_key:
arg_p = match_strdup(&args[0]);
ret = strict_strtoull(arg_p, 0, &tmp_ll);
if (ret < 0) {
printk(KERN_ERR "strict_strtoull() failed for"
" sa_res_key=\n");
goto out;
}
sa_res_key = (u64)tmp_ll;
break;
/*
* PR APTPL Metadata for Reservation
*/
case Opt_res_holder:
match_int(args, &arg);
res_holder = arg;
break;
case Opt_res_type:
match_int(args, &arg);
type = (u8)arg;
break;
case Opt_res_scope:
match_int(args, &arg);
scope = (u8)arg;
break;
case Opt_res_all_tg_pt:
match_int(args, &arg);
all_tg_pt = (int)arg;
break;
case Opt_mapped_lun:
match_int(args, &arg);
mapped_lun = (u32)arg;
break;
/*
* PR APTPL Metadata for Target Port
*/
case Opt_target_fabric:
t_fabric = match_strdup(&args[0]);
break;
case Opt_target_node:
t_port = match_strdup(&args[0]);
if (strlen(t_port) > PR_APTPL_MAX_TPORT_LEN) {
printk(KERN_ERR "APTPL metadata target_node="
" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
PR_APTPL_MAX_TPORT_LEN);
ret = -EINVAL;
break;
}
break;
case Opt_tpgt:
match_int(args, &arg);
tpgt = (u16)arg;
break;
case Opt_port_rtpi:
match_int(args, &arg);
port_rtpi = (u16)arg;
break;
case Opt_target_lun:
match_int(args, &arg);
target_lun = (u32)arg;
break;
default:
break;
}
}
if (!(i_port) || !(t_port) || !(sa_res_key)) {
printk(KERN_ERR "Illegal parameters for APTPL registration\n");
ret = -EINVAL;
goto out;
}
if (res_holder && !(type)) {
printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
" holder\n", type);
ret = -EINVAL;
goto out;
}
ret = core_scsi3_alloc_aptpl_registration(T10_RES(su_dev), sa_res_key,
i_port, isid, mapped_lun, t_port, tpgt, target_lun,
res_holder, all_tg_pt, type);
out:
kfree(orig);
return (ret == 0) ? count : ret;
}
SE_DEV_PR_ATTR(res_aptpl_metadata, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_core_dev_pr, se_subsystem_dev, se_dev_pr_group);
static struct configfs_attribute *target_core_dev_pr_attrs[] = {
&target_core_dev_pr_res_holder.attr,
&target_core_dev_pr_res_pr_all_tgt_pts.attr,
&target_core_dev_pr_res_pr_generation.attr,
&target_core_dev_pr_res_pr_holder_tg_port.attr,
&target_core_dev_pr_res_pr_registered_i_pts.attr,
&target_core_dev_pr_res_pr_type.attr,
&target_core_dev_pr_res_type.attr,
&target_core_dev_pr_res_aptpl_active.attr,
&target_core_dev_pr_res_aptpl_metadata.attr,
NULL,
};
static struct configfs_item_operations target_core_dev_pr_ops = {
.show_attribute = target_core_dev_pr_attr_show,
.store_attribute = target_core_dev_pr_attr_store,
};
static struct config_item_type target_core_dev_pr_cit = {
.ct_item_ops = &target_core_dev_pr_ops,
.ct_attrs = target_core_dev_pr_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_dev_pr_cit */
/* Start functions for struct config_item_type target_core_dev_cit */
static ssize_t target_core_show_dev_info(void *p, char *page)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
int bl = 0;
ssize_t read_bytes = 0;
if (!(se_dev->se_dev_ptr))
return -ENODEV;
transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
read_bytes += bl;
read_bytes += t->show_configfs_dev_params(hba, se_dev, page+read_bytes);
return read_bytes;
}
static struct target_core_configfs_attribute target_core_attr_dev_info = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "info",
.ca_mode = S_IRUGO },
.show = target_core_show_dev_info,
.store = NULL,
};
static ssize_t target_core_store_dev_control(
void *p,
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
if (!(se_dev->se_dev_su_ptr)) {
printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se"
"_dev_su_ptr\n");
return -EINVAL;
}
return t->set_configfs_dev_params(hba, se_dev, page, count);
}
static struct target_core_configfs_attribute target_core_attr_dev_control = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "control",
.ca_mode = S_IWUSR },
.show = NULL,
.store = target_core_store_dev_control,
};
static ssize_t target_core_show_dev_alias(void *p, char *page)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
return 0;
return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_alias);
}
static ssize_t target_core_store_dev_alias(
void *p,
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
if (count > (SE_DEV_ALIAS_LEN-1)) {
printk(KERN_ERR "alias count: %d exceeds"
" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
SE_DEV_ALIAS_LEN-1);
return -EINVAL;
}
se_dev->su_dev_flags |= SDF_USING_ALIAS;
read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
"%s", page);
printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_alias);
return read_bytes;
}
static struct target_core_configfs_attribute target_core_attr_dev_alias = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "alias",
.ca_mode = S_IRUGO | S_IWUSR },
.show = target_core_show_dev_alias,
.store = target_core_store_dev_alias,
};
static ssize_t target_core_show_dev_udev_path(void *p, char *page)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
return 0;
return snprintf(page, PAGE_SIZE, "%s\n", se_dev->se_dev_udev_path);
}
static ssize_t target_core_store_dev_udev_path(
void *p,
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_hba *hba = se_dev->se_dev_hba;
ssize_t read_bytes;
if (count > (SE_UDEV_PATH_LEN-1)) {
printk(KERN_ERR "udev_path count: %d exceeds"
" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
SE_UDEV_PATH_LEN-1);
return -EINVAL;
}
se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
"%s", page);
printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&se_dev->se_dev_group.cg_item),
se_dev->se_dev_udev_path);
return read_bytes;
}
static struct target_core_configfs_attribute target_core_attr_dev_udev_path = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "udev_path",
.ca_mode = S_IRUGO | S_IWUSR },
.show = target_core_show_dev_udev_path,
.store = target_core_store_dev_udev_path,
};
static ssize_t target_core_store_dev_enable(
void *p,
const char *page,
size_t count)
{
struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
struct se_device *dev;
struct se_hba *hba = se_dev->se_dev_hba;
struct se_subsystem_api *t = hba->transport;
char *ptr;
ptr = strstr(page, "1");
if (!(ptr)) {
printk(KERN_ERR "For dev_enable ops, only valid value"
" is \"1\"\n");
return -EINVAL;
}
if ((se_dev->se_dev_ptr)) {
printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
" object\n");
return -EEXIST;
}
if (t->check_configfs_dev_params(hba, se_dev) < 0)
return -EINVAL;
dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
if (!(dev) || IS_ERR(dev))
return -EINVAL;
se_dev->se_dev_ptr = dev;
printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
" %p\n", se_dev->se_dev_ptr);
return count;
}
static struct target_core_configfs_attribute target_core_attr_dev_enable = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "enable",
.ca_mode = S_IWUSR },
.show = NULL,
.store = target_core_store_dev_enable,
};
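/*
 * Usage sketch (path illustrative): after backstore parameters have been set
 * via the "control" attribute, the storage object is brought online with:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/$DEV/enable
 *
 * which calls ->create_virtdevice() and sets se_dev->se_dev_ptr above.
 */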
static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
{
struct se_device *dev;
struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
struct config_item *lu_ci;
struct t10_alua_lu_gp *lu_gp;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0;
dev = su_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED)
return len;
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!(lu_gp_mem)) {
printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if ((lu_gp)) {
lu_ci = &lu_gp->lu_gp_group.cg_item;
len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
config_item_name(lu_ci), lu_gp->lu_gp_id);
}
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
return len;
}
static ssize_t target_core_store_alua_lu_gp(
void *p,
const char *page,
size_t count)
{
struct se_device *dev;
struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
struct se_hba *hba = su_dev->se_dev_hba;
struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
struct t10_alua_lu_gp_member *lu_gp_mem;
unsigned char buf[LU_GROUP_NAME_BUF];
int move = 0;
dev = su_dev->se_dev_ptr;
if (!(dev))
return -ENODEV;
if (T10_ALUA(su_dev)->alua_type != SPC3_ALUA_EMULATED) {
printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item));
return -EINVAL;
}
if (count > LU_GROUP_NAME_BUF) {
printk(KERN_ERR "ALUA LU Group Alias too large!\n");
return -EINVAL;
}
memset(buf, 0, LU_GROUP_NAME_BUF);
memcpy(buf, page, count);
/*
* Any ALUA logical unit alias besides "NULL" means we will be
* making a new group association.
*/
if (strcmp(strstrip(buf), "NULL")) {
/*
* core_alua_get_lu_gp_by_name() will increment reference to
* struct t10_alua_lu_gp. This reference is released with
* core_alua_put_lu_gp_from_name() below.
*/
lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
if (!(lu_gp_new))
return -ENODEV;
}
lu_gp_mem = dev->dev_alua_lu_gp_mem;
if (!(lu_gp_mem)) {
if (lu_gp_new)
core_alua_put_lu_gp_from_name(lu_gp_new);
printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
" pointer\n");
return -EINVAL;
}
spin_lock(&lu_gp_mem->lu_gp_mem_lock);
lu_gp = lu_gp_mem->lu_gp;
if ((lu_gp)) {
/*
* Clearing an existing lu_gp association, and replacing
* with NULL
*/
if (!(lu_gp_new)) {
printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
" %hu\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item),
config_item_name(&lu_gp->lu_gp_group.cg_item),
lu_gp->lu_gp_id);
__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
return count;
}
/*
* Removing existing association of lu_gp_mem with lu_gp
*/
__core_alua_drop_lu_gp_mem(lu_gp_mem, lu_gp);
move = 1;
}
/*
* Associate lu_gp_mem with lu_gp_new.
*/
__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
" core/alua/lu_gps/%s, ID: %hu\n",
(move) ? "Moving" : "Adding",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item),
config_item_name(&lu_gp_new->lu_gp_group.cg_item),
lu_gp_new->lu_gp_id);
core_alua_put_lu_gp_from_name(lu_gp_new);
return count;
}
static struct target_core_configfs_attribute target_core_attr_dev_alua_lu_gp = {
.attr = { .ca_owner = THIS_MODULE,
.ca_name = "alua_lu_gp",
.ca_mode = S_IRUGO | S_IWUSR },
.show = target_core_show_alua_lu_gp,
.store = target_core_store_alua_lu_gp,
};
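/*
 * Usage sketch (group name hypothetical): writing an existing LU group alias
 * associates the device with that group, and writing "NULL" drops any
 * existing association:
 *
 *   echo some_lu_gp > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 *   echo NULL > /sys/kernel/config/target/core/$HBA/$DEV/alua_lu_gp
 */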
static struct configfs_attribute *lio_core_dev_attrs[] = {
&target_core_attr_dev_info.attr,
&target_core_attr_dev_control.attr,
&target_core_attr_dev_alias.attr,
&target_core_attr_dev_udev_path.attr,
&target_core_attr_dev_enable.attr,
&target_core_attr_dev_alua_lu_gp.attr,
NULL,
};
static void target_core_dev_release(struct config_item *item)
{
struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
struct se_subsystem_dev, se_dev_group);
struct config_group *dev_cg;
if (!(se_dev))
return;
dev_cg = &se_dev->se_dev_group;
kfree(dev_cg->default_groups);
}
static ssize_t target_core_dev_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
struct se_subsystem_dev *se_dev = container_of(
to_config_group(item), struct se_subsystem_dev,
se_dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!(tc_attr->show))
return -EINVAL;
return tc_attr->show((void *)se_dev, page);
}
static ssize_t target_core_dev_store(struct config_item *item,
struct configfs_attribute *attr,
const char *page, size_t count)
{
struct se_subsystem_dev *se_dev = container_of(
to_config_group(item), struct se_subsystem_dev,
se_dev_group);
struct target_core_configfs_attribute *tc_attr = container_of(
attr, struct target_core_configfs_attribute, attr);
if (!(tc_attr->store))
return -EINVAL;
return tc_attr->store((void *)se_dev, page, count);
}
static struct configfs_item_operations target_core_dev_item_ops = {
.release = target_core_dev_release,
.show_attribute = target_core_dev_show,
.store_attribute = target_core_dev_store,
};
static struct config_item_type target_core_dev_cit = {
.ct_item_ops = &target_core_dev_item_ops,
.ct_attrs = lio_core_dev_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_dev_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gp_cit */
CONFIGFS_EATTR_STRUCT(target_core_alua_lu_gp, t10_alua_lu_gp);
#define SE_DEV_ALUA_LU_ATTR(_name, _mode) \
static struct target_core_alua_lu_gp_attribute \
target_core_alua_lu_gp_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_core_alua_lu_gp_show_attr_##_name, \
target_core_alua_lu_gp_store_attr_##_name);
#define SE_DEV_ALUA_LU_ATTR_RO(_name) \
static struct target_core_alua_lu_gp_attribute \
target_core_alua_lu_gp_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_alua_lu_gp_show_attr_##_name);
/*
* lu_gp_id
*/
static ssize_t target_core_alua_lu_gp_show_attr_lu_gp_id(
struct t10_alua_lu_gp *lu_gp,
char *page)
{
if (!(lu_gp->lu_gp_valid_id))
return 0;
return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
}
static ssize_t target_core_alua_lu_gp_store_attr_lu_gp_id(
struct t10_alua_lu_gp *lu_gp,
const char *page,
size_t count)
{
struct config_group *alua_lu_gp_cg = &lu_gp->lu_gp_group;
unsigned long lu_gp_id;
int ret;
ret = strict_strtoul(page, 0, &lu_gp_id);
if (ret < 0) {
printk(KERN_ERR "strict_strtoul() returned %d for"
" lu_gp_id\n", ret);
return -EINVAL;
}
if (lu_gp_id > 0x0000ffff) {
printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", lu_gp_id);
return -EINVAL;
}
ret = core_alua_set_lu_gp_id(lu_gp, (u16)lu_gp_id);
if (ret < 0)
return -EINVAL;
printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
" Group: core/alua/lu_gps/%s to ID: %hu\n",
config_item_name(&alua_lu_gp_cg->cg_item),
lu_gp->lu_gp_id);
return count;
}
SE_DEV_ALUA_LU_ATTR(lu_gp_id, S_IRUGO | S_IWUSR);
/*
* members
*/
static ssize_t target_core_alua_lu_gp_show_attr_members(
struct t10_alua_lu_gp *lu_gp,
char *page)
{
struct se_device *dev;
struct se_hba *hba;
struct se_subsystem_dev *su_dev;
struct t10_alua_lu_gp_member *lu_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[LU_GROUP_NAME_BUF];
memset(buf, 0, LU_GROUP_NAME_BUF);
spin_lock(&lu_gp->lu_gp_lock);
list_for_each_entry(lu_gp_mem, &lu_gp->lu_gp_mem_list, lu_gp_mem_list) {
dev = lu_gp_mem->lu_gp_mem_dev;
su_dev = dev->se_sub_dev;
hba = su_dev->se_dev_hba;
cur_len = snprintf(buf, LU_GROUP_NAME_BUF, "%s/%s\n",
config_item_name(&hba->hba_group.cg_item),
config_item_name(&su_dev->se_dev_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
printk(KERN_WARNING "Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
memcpy(page+len, buf, cur_len);
len += cur_len;
}
spin_unlock(&lu_gp->lu_gp_lock);
return len;
}
SE_DEV_ALUA_LU_ATTR_RO(members);
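/*
 * Reading core/alua/lu_gps/$LU_GP/members returns one "$HBA/$DEV" line per
 * associated storage object, as formatted by the show handler above.
 */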
CONFIGFS_EATTR_OPS(target_core_alua_lu_gp, t10_alua_lu_gp, lu_gp_group);
static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = {
&target_core_alua_lu_gp_lu_gp_id.attr,
&target_core_alua_lu_gp_members.attr,
NULL,
};
static struct configfs_item_operations target_core_alua_lu_gp_ops = {
.show_attribute = target_core_alua_lu_gp_attr_show,
.store_attribute = target_core_alua_lu_gp_attr_store,
};
static struct config_item_type target_core_alua_lu_gp_cit = {
.ct_item_ops = &target_core_alua_lu_gp_ops,
.ct_attrs = target_core_alua_lu_gp_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gp_cit */
/* Start functions for struct config_item_type target_core_alua_lu_gps_cit */
static struct config_group *target_core_alua_create_lu_gp(
struct config_group *group,
const char *name)
{
struct t10_alua_lu_gp *lu_gp;
struct config_group *alua_lu_gp_cg = NULL;
struct config_item *alua_lu_gp_ci = NULL;
lu_gp = core_alua_allocate_lu_gp(name, 0);
if (IS_ERR(lu_gp))
return NULL;
alua_lu_gp_cg = &lu_gp->lu_gp_group;
alua_lu_gp_ci = &alua_lu_gp_cg->cg_item;
config_group_init_type_name(alua_lu_gp_cg, name,
&target_core_alua_lu_gp_cit);
printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
" Group: core/alua/lu_gps/%s\n",
config_item_name(alua_lu_gp_ci));
return alua_lu_gp_cg;
}
static void target_core_alua_drop_lu_gp(
struct config_group *group,
struct config_item *item)
{
struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
struct t10_alua_lu_gp, lu_gp_group);
printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
" Group: core/alua/lu_gps/%s, ID: %hu\n",
config_item_name(item), lu_gp->lu_gp_id);
config_item_put(item);
core_alua_free_lu_gp(lu_gp);
}
static struct configfs_group_operations target_core_alua_lu_gps_group_ops = {
.make_group = &target_core_alua_create_lu_gp,
.drop_item = &target_core_alua_drop_lu_gp,
};
static struct config_item_type target_core_alua_lu_gps_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_alua_lu_gps_group_ops,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_lu_gps_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
CONFIGFS_EATTR_STRUCT(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp);
#define SE_DEV_ALUA_TG_PT_ATTR(_name, _mode) \
static struct target_core_alua_tg_pt_gp_attribute \
target_core_alua_tg_pt_gp_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_core_alua_tg_pt_gp_show_attr_##_name, \
target_core_alua_tg_pt_gp_store_attr_##_name);
#define SE_DEV_ALUA_TG_PT_ATTR_RO(_name) \
static struct target_core_alua_tg_pt_gp_attribute \
target_core_alua_tg_pt_gp_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_alua_tg_pt_gp_show_attr_##_name);
/*
* alua_access_state
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_state(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n",
atomic_read(&tg_pt_gp->tg_pt_gp_alua_access_state));
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_state(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
struct se_subsystem_dev *su_dev = tg_pt_gp->tg_pt_gp_su_dev;
unsigned long tmp;
int new_state, ret;
if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
printk(KERN_ERR "Unable to do implict ALUA on non valid"
" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk("Unable to extract new ALUA access state from"
" %s\n", page);
return -EINVAL;
}
new_state = (int)tmp;
if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
printk(KERN_ERR "Unable to process implict configfs ALUA"
" transition while TPGS_IMPLICT_ALUA is diabled\n");
return -EINVAL;
}
ret = core_alua_do_port_transition(tg_pt_gp, su_dev->se_dev_ptr,
NULL, NULL, new_state, 0);
return (!ret) ? count : -EINVAL;
}
SE_DEV_ALUA_TG_PT_ATTR(alua_access_state, S_IRUGO | S_IWUSR);
/*
* alua_access_status
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_status(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%s\n",
core_alua_dump_status(tg_pt_gp->tg_pt_gp_alua_access_status));
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_status(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int new_status, ret;
if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
printk(KERN_ERR "Unable to do set ALUA access status on non"
" valid tg_pt_gp ID: %hu\n",
tg_pt_gp->tg_pt_gp_valid_id);
return -EINVAL;
}
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract new ALUA access status"
" from %s\n", page);
return -EINVAL;
}
new_status = (int)tmp;
if ((new_status != ALUA_STATUS_NONE) &&
(new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
(new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
new_status);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_alua_access_status = new_status;
return count;
}
SE_DEV_ALUA_TG_PT_ATTR(alua_access_status, S_IRUGO | S_IWUSR);
/*
* alua_access_type
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return core_alua_show_access_type(tg_pt_gp, page);
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_access_type(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
return core_alua_store_access_type(tg_pt_gp, page, count);
}
SE_DEV_ALUA_TG_PT_ATTR(alua_access_type, S_IRUGO | S_IWUSR);
/*
* alua_write_metadata
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_alua_write_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return sprintf(page, "%d\n", tg_pt_gp->tg_pt_gp_write_metadata);
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_alua_write_metadata(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
unsigned long tmp;
int ret;
ret = strict_strtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract alua_write_metadata\n");
return -EINVAL;
}
if ((tmp != 0) && (tmp != 1)) {
printk(KERN_ERR "Illegal value for alua_write_metadata:"
" %lu\n", tmp);
return -EINVAL;
}
tg_pt_gp->tg_pt_gp_write_metadata = (int)tmp;
return count;
}
SE_DEV_ALUA_TG_PT_ATTR(alua_write_metadata, S_IRUGO | S_IWUSR);
/*
* nonop_delay_msecs
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_nonop_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return core_alua_show_nonop_delay_msecs(tg_pt_gp, page);
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_nonop_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
return core_alua_store_nonop_delay_msecs(tg_pt_gp, page, count);
}
SE_DEV_ALUA_TG_PT_ATTR(nonop_delay_msecs, S_IRUGO | S_IWUSR);
/*
* trans_delay_msecs
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_trans_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return core_alua_show_trans_delay_msecs(tg_pt_gp, page);
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_trans_delay_msecs(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
return core_alua_store_trans_delay_msecs(tg_pt_gp, page, count);
}
SE_DEV_ALUA_TG_PT_ATTR(trans_delay_msecs, S_IRUGO | S_IWUSR);
/*
* preferred
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_preferred(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
return core_alua_show_preferred_bit(tg_pt_gp, page);
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_preferred(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
return core_alua_store_preferred_bit(tg_pt_gp, page, count);
}
SE_DEV_ALUA_TG_PT_ATTR(preferred, S_IRUGO | S_IWUSR);
/*
* tg_pt_gp_id
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
if (!(tg_pt_gp->tg_pt_gp_valid_id))
return 0;
return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
}
static ssize_t target_core_alua_tg_pt_gp_store_attr_tg_pt_gp_id(
struct t10_alua_tg_pt_gp *tg_pt_gp,
const char *page,
size_t count)
{
struct config_group *alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
unsigned long tg_pt_gp_id;
int ret;
ret = strict_strtoul(page, 0, &tg_pt_gp_id);
if (ret < 0) {
printk(KERN_ERR "strict_strtoul() returned %d for"
" tg_pt_gp_id\n", ret);
return -EINVAL;
}
if (tg_pt_gp_id > 0x0000ffff) {
printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
" 0x0000ffff\n", tg_pt_gp_id);
return -EINVAL;
}
ret = core_alua_set_tg_pt_gp_id(tg_pt_gp, (u16)tg_pt_gp_id);
if (ret < 0)
return -EINVAL;
printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
"core/alua/tg_pt_gps/%s to ID: %hu\n",
config_item_name(&alua_tg_pt_gp_cg->cg_item),
tg_pt_gp->tg_pt_gp_id);
return count;
}
SE_DEV_ALUA_TG_PT_ATTR(tg_pt_gp_id, S_IRUGO | S_IWUSR);
/*
* members
*/
static ssize_t target_core_alua_tg_pt_gp_show_attr_members(
struct t10_alua_tg_pt_gp *tg_pt_gp,
char *page)
{
struct se_port *port;
struct se_portal_group *tpg;
struct se_lun *lun;
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
ssize_t len = 0, cur_len;
unsigned char buf[TG_PT_GROUP_NAME_BUF];
memset(buf, 0, TG_PT_GROUP_NAME_BUF);
spin_lock(&tg_pt_gp->tg_pt_gp_lock);
list_for_each_entry(tg_pt_gp_mem, &tg_pt_gp->tg_pt_gp_mem_list,
tg_pt_gp_mem_list) {
port = tg_pt_gp_mem->tg_pt;
tpg = port->sep_tpg;
lun = port->sep_lun;
cur_len = snprintf(buf, TG_PT_GROUP_NAME_BUF, "%s/%s/tpgt_%hu"
"/%s\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_wwn(tpg),
TPG_TFO(tpg)->tpg_get_tag(tpg),
config_item_name(&lun->lun_group.cg_item));
cur_len++; /* Extra byte for NULL terminator */
if ((cur_len + len) > PAGE_SIZE) {
printk(KERN_WARNING "Ran out of lu_gp_show_attr"
"_members buffer\n");
break;
}
memcpy(page+len, buf, cur_len);
len += cur_len;
}
spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
return len;
}
SE_DEV_ALUA_TG_PT_ATTR_RO(members);
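/*
 * Reading a tg_pt_gp "members" attribute returns one line per member port in
 * the form "$FABRIC/$WWN/tpgt_$TPGT/$LUN", as built by the show handler
 * above.
 */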
CONFIGFS_EATTR_OPS(target_core_alua_tg_pt_gp, t10_alua_tg_pt_gp,
tg_pt_gp_group);
static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = {
&target_core_alua_tg_pt_gp_alua_access_state.attr,
&target_core_alua_tg_pt_gp_alua_access_status.attr,
&target_core_alua_tg_pt_gp_alua_access_type.attr,
&target_core_alua_tg_pt_gp_alua_write_metadata.attr,
&target_core_alua_tg_pt_gp_nonop_delay_msecs.attr,
&target_core_alua_tg_pt_gp_trans_delay_msecs.attr,
&target_core_alua_tg_pt_gp_preferred.attr,
&target_core_alua_tg_pt_gp_tg_pt_gp_id.attr,
&target_core_alua_tg_pt_gp_members.attr,
NULL,
};
static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = {
.show_attribute = target_core_alua_tg_pt_gp_attr_show,
.store_attribute = target_core_alua_tg_pt_gp_attr_store,
};
static struct config_item_type target_core_alua_tg_pt_gp_cit = {
.ct_item_ops = &target_core_alua_tg_pt_gp_ops,
.ct_attrs = target_core_alua_tg_pt_gp_attrs,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_tg_pt_gp_cit */
/* Start functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
static struct config_group *target_core_alua_create_tg_pt_gp(
struct config_group *group,
const char *name)
{
struct t10_alua *alua = container_of(group, struct t10_alua,
alua_tg_pt_gps_group);
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct se_subsystem_dev *su_dev = alua->t10_sub_dev;
struct config_group *alua_tg_pt_gp_cg = NULL;
struct config_item *alua_tg_pt_gp_ci = NULL;
tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
if (!(tg_pt_gp))
return NULL;
alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
alua_tg_pt_gp_ci = &alua_tg_pt_gp_cg->cg_item;
config_group_init_type_name(alua_tg_pt_gp_cg, name,
&target_core_alua_tg_pt_gp_cit);
printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
" Group: alua/tg_pt_gps/%s\n",
config_item_name(alua_tg_pt_gp_ci));
return alua_tg_pt_gp_cg;
}
static void target_core_alua_drop_tg_pt_gp(
struct config_group *group,
struct config_item *item)
{
struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
struct t10_alua_tg_pt_gp, tg_pt_gp_group);
printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
" Group: alua/tg_pt_gps/%s, ID: %hu\n",
config_item_name(item), tg_pt_gp->tg_pt_gp_id);
config_item_put(item);
core_alua_free_tg_pt_gp(tg_pt_gp);
}
static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = {
.make_group = &target_core_alua_create_tg_pt_gp,
.drop_item = &target_core_alua_drop_tg_pt_gp,
};
static struct config_item_type target_core_alua_tg_pt_gps_cit = {
.ct_group_ops = &target_core_alua_tg_pt_gps_group_ops,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_tg_pt_gps_cit */
/* Start functions for struct config_item_type target_core_alua_cit */
/*
* target_core_alua_cit is a ConfigFS group that lives under
* /sys/kernel/config/target/core/alua. The default group
* core/alua/lu_gps is attached to target_core_alua_cit in
* target_core_init_configfs() below; the per-device ALUA Target Port
* Group directories are set up in target_core_make_subdev() below.
*/
static struct config_item_type target_core_alua_cit = {
.ct_item_ops = NULL,
.ct_attrs = NULL,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_alua_cit */
/* Start functions for struct config_item_type target_core_hba_cit */
static struct config_group *target_core_make_subdev(
struct config_group *group,
const char *name)
{
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct se_subsystem_dev *se_dev;
struct se_subsystem_api *t;
struct config_item *hba_ci = &group->cg_item;
struct se_hba *hba = item_to_hba(hba_ci);
struct config_group *dev_cg = NULL, *tg_pt_gp_cg = NULL;
if (mutex_lock_interruptible(&hba->hba_access_mutex))
return NULL;
/*
* Locate the struct se_subsystem_api from parent's struct se_hba.
*/
t = hba->transport;
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
if (!se_dev) {
printk(KERN_ERR "Unable to allocate memory for"
" struct se_subsystem_dev\n");
goto unlock;
}
INIT_LIST_HEAD(&se_dev->g_se_dev_list);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
spin_lock_init(&se_dev->t10_reservation.registration_lock);
spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
se_dev->se_dev_hba = hba;
dev_cg = &se_dev->se_dev_group;
dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 6,
GFP_KERNEL);
if (!(dev_cg->default_groups))
goto out;
/*
* Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
* for ->allocate_virtdevice()
*
* se_dev->se_dev_ptr will be set after ->create_virtdevice()
* has been called successfully in the next level up in the
* configfs tree for device object's struct config_group.
*/
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
if (!(se_dev->se_dev_su_ptr)) {
printk(KERN_ERR "Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
goto out;
}
spin_lock(&se_global->g_device_lock);
list_add_tail(&se_dev->g_se_dev_list, &se_global->g_se_dev_list);
spin_unlock(&se_global->g_device_lock);
config_group_init_type_name(&se_dev->se_dev_group, name,
&target_core_dev_cit);
config_group_init_type_name(&se_dev->se_dev_attrib.da_group, "attrib",
&target_core_dev_attrib_cit);
config_group_init_type_name(&se_dev->se_dev_pr_group, "pr",
&target_core_dev_pr_cit);
config_group_init_type_name(&se_dev->t10_wwn.t10_wwn_group, "wwn",
&target_core_dev_wwn_cit);
config_group_init_type_name(&se_dev->t10_alua.alua_tg_pt_gps_group,
"alua", &target_core_alua_tg_pt_gps_cit);
dev_cg->default_groups[0] = &se_dev->se_dev_attrib.da_group;
dev_cg->default_groups[1] = &se_dev->se_dev_pr_group;
dev_cg->default_groups[2] = &se_dev->t10_wwn.t10_wwn_group;
dev_cg->default_groups[3] = &se_dev->t10_alua.alua_tg_pt_gps_group;
dev_cg->default_groups[4] = NULL;
/*
* Add core/$HBA/$DEV/alua/default_tg_pt_gp
*/
tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
if (!(tg_pt_gp))
goto out;
tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(tg_pt_gp_cg->default_groups)) {
printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
"default_groups\n");
goto out;
}
config_group_init_type_name(&tg_pt_gp->tg_pt_gp_group,
"default_tg_pt_gp", &target_core_alua_tg_pt_gp_cit);
tg_pt_gp_cg->default_groups[0] = &tg_pt_gp->tg_pt_gp_group;
tg_pt_gp_cg->default_groups[1] = NULL;
T10_ALUA(se_dev)->default_tg_pt_gp = tg_pt_gp;
printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
mutex_unlock(&hba->hba_access_mutex);
return &se_dev->se_dev_group;
out:
if (T10_ALUA(se_dev)->default_tg_pt_gp) {
core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
}
if (tg_pt_gp_cg)
kfree(tg_pt_gp_cg->default_groups);
if (dev_cg)
kfree(dev_cg->default_groups);
if (se_dev->se_dev_su_ptr)
t->free_device(se_dev->se_dev_su_ptr);
kfree(se_dev);
unlock:
mutex_unlock(&hba->hba_access_mutex);
return NULL;
}
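/*
 * Resulting ConfigFS layout for a storage object (sketch, following the
 * group names registered above):
 *
 *   /sys/kernel/config/target/core/$HBA/$DEV/
 *   +-- attrib/               (target_core_dev_attrib_cit)
 *   +-- pr/                   (target_core_dev_pr_cit)
 *   +-- wwn/                  (target_core_dev_wwn_cit)
 *   +-- alua/                 (target_core_alua_tg_pt_gps_cit)
 *       +-- default_tg_pt_gp/
 */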
static void target_core_drop_subdev(
struct config_group *group,
struct config_item *item)
{
struct se_subsystem_dev *se_dev = container_of(to_config_group(item),
struct se_subsystem_dev, se_dev_group);
struct se_hba *hba;
struct se_subsystem_api *t;
struct config_item *df_item;
struct config_group *dev_cg, *tg_pt_gp_cg;
int i, ret;
hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item);
if (mutex_lock_interruptible(&hba->hba_access_mutex))
goto out;
t = hba->transport;
spin_lock(&se_global->g_device_lock);
list_del(&se_dev->g_se_dev_list);
spin_unlock(&se_global->g_device_lock);
tg_pt_gp_cg = &T10_ALUA(se_dev)->alua_tg_pt_gps_group;
for (i = 0; tg_pt_gp_cg->default_groups[i]; i++) {
df_item = &tg_pt_gp_cg->default_groups[i]->cg_item;
tg_pt_gp_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
kfree(tg_pt_gp_cg->default_groups);
core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp);
T10_ALUA(se_dev)->default_tg_pt_gp = NULL;
dev_cg = &se_dev->se_dev_group;
for (i = 0; dev_cg->default_groups[i]; i++) {
df_item = &dev_cg->default_groups[i]->cg_item;
dev_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
config_item_put(item);
/*
* This pointer will be set when the storage is enabled with:
* `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
*/
if (se_dev->se_dev_ptr) {
printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
"virtual_device() for se_dev_ptr: %p\n",
se_dev->se_dev_ptr);
ret = se_free_virtual_device(se_dev->se_dev_ptr, hba);
if (ret < 0)
goto hba_out;
} else {
/*
* Release struct se_subsystem_dev->se_dev_su_ptr..
*/
printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
"device() for se_dev_su_ptr: %p\n",
se_dev->se_dev_su_ptr);
t->free_device(se_dev->se_dev_su_ptr);
}
printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
"_dev_t: %p\n", se_dev);
hba_out:
mutex_unlock(&hba->hba_access_mutex);
out:
kfree(se_dev);
}
static struct configfs_group_operations target_core_hba_group_ops = {
.make_group = target_core_make_subdev,
.drop_item = target_core_drop_subdev,
};
CONFIGFS_EATTR_STRUCT(target_core_hba, se_hba);
#define SE_HBA_ATTR(_name, _mode) \
static struct target_core_hba_attribute \
target_core_hba_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_core_hba_show_attr_##_name, \
target_core_hba_store_attr_##_name);
#define SE_HBA_ATTR_RO(_name) \
static struct target_core_hba_attribute \
target_core_hba_##_name = \
__CONFIGFS_EATTR_RO(_name, \
target_core_hba_show_attr_##_name);
static ssize_t target_core_hba_show_attr_hba_info(
struct se_hba *hba,
char *page)
{
return sprintf(page, "HBA Index: %d plugin: %s version: %s\n",
hba->hba_id, hba->transport->name,
TARGET_CORE_CONFIGFS_VERSION);
}
SE_HBA_ATTR_RO(hba_info);
static ssize_t target_core_hba_show_attr_hba_mode(struct se_hba *hba,
char *page)
{
int hba_mode = 0;
if (hba->hba_flags & HBA_FLAGS_PSCSI_MODE)
hba_mode = 1;
return sprintf(page, "%d\n", hba_mode);
}
static ssize_t target_core_hba_store_attr_hba_mode(struct se_hba *hba,
const char *page, size_t count)
{
struct se_subsystem_api *transport = hba->transport;
unsigned long mode_flag;
int ret;
if (transport->pmode_enable_hba == NULL)
return -EINVAL;
ret = strict_strtoul(page, 0, &mode_flag);
if (ret < 0) {
printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
return -EINVAL;
}
spin_lock(&hba->device_lock);
if (!(list_empty(&hba->hba_dev_list))) {
printk(KERN_ERR "Unable to set hba_mode with active devices\n");
spin_unlock(&hba->device_lock);
return -EINVAL;
}
spin_unlock(&hba->device_lock);
ret = transport->pmode_enable_hba(hba, mode_flag);
if (ret < 0)
return -EINVAL;
if (ret > 0)
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
else if (ret == 0)
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
return count;
}
SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR);
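/*
 * Usage sketch (HBA name illustrative): pSCSI passthrough mode can only be
 * toggled while no devices are attached to the HBA:
 *
 *   echo 1 > /sys/kernel/config/target/core/$HBA/hba_mode
 */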
CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group);
static struct configfs_attribute *target_core_hba_attrs[] = {
&target_core_hba_hba_info.attr,
&target_core_hba_hba_mode.attr,
NULL,
};
static struct configfs_item_operations target_core_hba_item_ops = {
.show_attribute = target_core_hba_attr_show,
.store_attribute = target_core_hba_attr_store,
};
static struct config_item_type target_core_hba_cit = {
.ct_item_ops = &target_core_hba_item_ops,
.ct_group_ops = &target_core_hba_group_ops,
.ct_attrs = target_core_hba_attrs,
.ct_owner = THIS_MODULE,
};
static struct config_group *target_core_call_addhbatotarget(
struct config_group *group,
const char *name)
{
char *se_plugin_str, *str, *str2;
struct se_hba *hba;
char buf[TARGET_CORE_NAME_MAX_LEN];
unsigned long plugin_dep_id = 0;
int ret;
memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
if (strlen(name) > TARGET_CORE_NAME_MAX_LEN) {
printk(KERN_ERR "Passed *name strlen(): %d exceeds"
" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
TARGET_CORE_NAME_MAX_LEN);
return ERR_PTR(-ENAMETOOLONG);
}
snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
str = strstr(buf, "_");
if (!(str)) {
printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
return ERR_PTR(-EINVAL);
}
se_plugin_str = buf;
/*
* Special case for subsystem plugins that have "_" in their names.
* Namely rd_direct and rd_mcp..
*/
str2 = strstr(str+1, "_");
if ((str2)) {
*str2 = '\0'; /* Terminate for *se_plugin_str */
str2++; /* Skip to start of plugin dependent ID */
str = str2;
} else {
*str = '\0'; /* Terminate for *se_plugin_str */
str++; /* Skip to start of plugin dependent ID */
}
ret = strict_strtoul(str, 0, &plugin_dep_id);
if (ret < 0) {
printk(KERN_ERR "strict_strtoul() returned %d for"
" plugin_dep_id\n", ret);
return ERR_PTR(-EINVAL);
}
/*
* Load up TCM subsystem plugins if they have not already been loaded.
*/
if (transport_subsystem_check_init() < 0)
return ERR_PTR(-EINVAL);
hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);
if (IS_ERR(hba))
return ERR_CAST(hba);
config_group_init_type_name(&hba->hba_group, name,
&target_core_hba_cit);
return &hba->hba_group;
}
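/*
 * HBA names are parsed as $SUBSYSTEM_PLUGIN_$HOST_ID, e.g. (illustrative)
 * "mkdir /sys/kernel/config/target/core/iblock_0" yields plugin string
 * "iblock" with plugin dependent ID 0, while "rd_mcp_1" yields "rd_mcp"
 * and ID 1 via the double "_" handling above.
 */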
static void target_core_call_delhbafromtarget(
struct config_group *group,
struct config_item *item)
{
struct se_hba *hba = item_to_hba(item);
config_item_put(item);
core_delete_hba(hba);
}
static struct configfs_group_operations target_core_group_ops = {
.make_group = target_core_call_addhbatotarget,
.drop_item = target_core_call_delhbafromtarget,
};
static struct config_item_type target_core_cit = {
.ct_item_ops = NULL,
.ct_group_ops = &target_core_group_ops,
.ct_attrs = NULL,
.ct_owner = THIS_MODULE,
};
/* End functions for struct config_item_type target_core_hba_cit */
static int target_core_init_configfs(void)
{
struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
struct config_group *lu_gp_cg = NULL;
struct configfs_subsystem *subsys;
struct proc_dir_entry *scsi_target_proc = NULL;
struct t10_alua_lu_gp *lu_gp;
int ret;
printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
" Engine: %s on %s/%s on "UTS_RELEASE"\n",
TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
subsys = target_core_subsystem[0];
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
INIT_LIST_HEAD(&g_tf_list);
mutex_init(&g_tf_lock);
init_scsi_index_table();
ret = init_se_global();
if (ret < 0)
return -1;
/*
* Create $CONFIGFS/target/core default group for HBA <-> Storage Object
* and ALUA Logical Unit Group and Target Port Group infrastructure.
*/
target_cg = &subsys->su_group;
target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(target_cg->default_groups)) {
printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
goto out_global;
}
config_group_init_type_name(&se_global->target_core_hbagroup,
"core", &target_core_cit);
target_cg->default_groups[0] = &se_global->target_core_hbagroup;
target_cg->default_groups[1] = NULL;
/*
* Create ALUA infrastructure under /sys/kernel/config/target/core/alua/
*/
hba_cg = &se_global->target_core_hbagroup;
hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(hba_cg->default_groups)) {
printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
goto out_global;
}
config_group_init_type_name(&se_global->alua_group,
"alua", &target_core_alua_cit);
hba_cg->default_groups[0] = &se_global->alua_group;
hba_cg->default_groups[1] = NULL;
/*
* Add ALUA Logical Unit Group and Target Port Group ConfigFS
* groups under /sys/kernel/config/target/core/alua/
*/
alua_cg = &se_global->alua_group;
alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(alua_cg->default_groups)) {
printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
goto out_global;
}
config_group_init_type_name(&se_global->alua_lu_gps_group,
"lu_gps", &target_core_alua_lu_gps_cit);
alua_cg->default_groups[0] = &se_global->alua_lu_gps_group;
alua_cg->default_groups[1] = NULL;
/*
* Add core/alua/lu_gps/default_lu_gp
*/
lu_gp = core_alua_allocate_lu_gp("default_lu_gp", 1);
if (IS_ERR(lu_gp))
goto out_global;
lu_gp_cg = &se_global->alua_lu_gps_group;
lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
GFP_KERNEL);
if (!(lu_gp_cg->default_groups)) {
printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
goto out_global;
}
config_group_init_type_name(&lu_gp->lu_gp_group, "default_lu_gp",
&target_core_alua_lu_gp_cit);
lu_gp_cg->default_groups[0] = &lu_gp->lu_gp_group;
lu_gp_cg->default_groups[1] = NULL;
se_global->default_lu_gp = lu_gp;
/*
* Register the target_core_mod subsystem with configfs.
*/
ret = configfs_register_subsystem(subsys);
if (ret < 0) {
printk(KERN_ERR "Error %d while registering subsystem %s\n",
ret, subsys->su_group.cg_item.ci_namebuf);
goto out_global;
}
printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
/*
* Register built-in RAMDISK subsystem logic for virtual LUN 0
*/
ret = rd_module_init();
if (ret < 0)
goto out;
if (core_dev_setup_virtual_lun0() < 0)
goto out;
scsi_target_proc = proc_mkdir("scsi_target", 0);
if (!(scsi_target_proc)) {
printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n");
goto out;
}
ret = init_scsi_target_mib();
if (ret < 0)
goto out;
return 0;
out:
configfs_unregister_subsystem(subsys);
if (scsi_target_proc)
remove_proc_entry("scsi_target", 0);
core_dev_release_virtual_lun0();
rd_module_exit();
out_global:
if (se_global->default_lu_gp) {
core_alua_free_lu_gp(se_global->default_lu_gp);
se_global->default_lu_gp = NULL;
}
if (lu_gp_cg)
kfree(lu_gp_cg->default_groups);
if (alua_cg)
kfree(alua_cg->default_groups);
if (hba_cg)
kfree(hba_cg->default_groups);
kfree(target_cg->default_groups);
release_se_global();
return -1;
}
static void target_core_exit_configfs(void)
{
struct configfs_subsystem *subsys;
struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
struct config_item *item;
int i;
se_global->in_shutdown = 1;
subsys = target_core_subsystem[0];
lu_gp_cg = &se_global->alua_lu_gps_group;
for (i = 0; lu_gp_cg->default_groups[i]; i++) {
item = &lu_gp_cg->default_groups[i]->cg_item;
lu_gp_cg->default_groups[i] = NULL;
config_item_put(item);
}
kfree(lu_gp_cg->default_groups);
core_alua_free_lu_gp(se_global->default_lu_gp);
se_global->default_lu_gp = NULL;
alua_cg = &se_global->alua_group;
for (i = 0; alua_cg->default_groups[i]; i++) {
item = &alua_cg->default_groups[i]->cg_item;
alua_cg->default_groups[i] = NULL;
config_item_put(item);
}
kfree(alua_cg->default_groups);
hba_cg = &se_global->target_core_hbagroup;
for (i = 0; hba_cg->default_groups[i]; i++) {
item = &hba_cg->default_groups[i]->cg_item;
hba_cg->default_groups[i] = NULL;
config_item_put(item);
}
kfree(hba_cg->default_groups);
for (i = 0; subsys->su_group.default_groups[i]; i++) {
item = &subsys->su_group.default_groups[i]->cg_item;
subsys->su_group.default_groups[i] = NULL;
config_item_put(item);
}
kfree(subsys->su_group.default_groups);
configfs_unregister_subsystem(subsys);
printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
" Infrastructure\n");
remove_scsi_target_mib();
remove_proc_entry("scsi_target", 0);
core_dev_release_virtual_lun0();
rd_module_exit();
release_se_global();
return;
}
MODULE_DESCRIPTION("Target_Core_Mod/ConfigFS");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(target_core_init_configfs);
module_exit(target_core_exit_configfs);
/*******************************************************************************
* Filename: target_core_device.c (based on iscsi_target_device.c)
*
* This file contains the iSCSI Virtual Device and Disk Transport
* agnostic related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
static void se_dev_start(struct se_device *dev);
static void se_dev_stop(struct se_device *dev);
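/*
 * transport_get_lun_for_cmd():
 *
 * Looks up unpacked_lun in the session's struct se_node_acl->device_list,
 * rejects writes to TRANSPORT_LUNFLAGS_READ_ONLY entries, and falls back to
 * the write-protected se_portal_group->tpg_virt_lun0 when no active
 * MappedLUN=0 exists, so that REPORT_LUNS et al can still be serviced.
 */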
int transport_get_lun_for_cmd(
struct se_cmd *se_cmd,
unsigned char *cdb,
u32 unpacked_lun)
{
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
struct se_session *se_sess = SE_SESS(se_cmd);
unsigned long flags;
int read_only = 0;
spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
deve = se_cmd->se_deve =
&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (se_cmd) {
deve->total_cmds++;
deve->total_bytes += se_cmd->data_length;
if (se_cmd->data_direction == DMA_TO_DEVICE) {
if (deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
read_only = 1;
goto out;
}
deve->write_bytes += se_cmd->data_length;
} else if (se_cmd->data_direction ==
DMA_FROM_DEVICE) {
deve->read_bytes += se_cmd->data_length;
}
}
deve->deve_cmds++;
se_lun = se_cmd->se_lun = deve->se_lun;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
out:
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
if (!se_lun) {
if (read_only) {
se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
" Access for 0x%08x\n",
CMD_TFO(se_cmd)->get_fabric_name(),
unpacked_lun);
return -1;
} else {
/*
* Use the se_portal_group->tpg_virt_lun0 to allow for
* REPORT_LUNS, et al to be returned when no active
* MappedLUN=0 exists for this Initiator Port.
*/
if (unpacked_lun != 0) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
CMD_TFO(se_cmd)->get_fabric_name(),
unpacked_lun);
return -1;
}
/*
* Force WRITE PROTECT for virtual LUN 0
*/
if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
(se_cmd->data_direction != DMA_NONE)) {
se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -1;
}
#if 0
printk("TARGET_CORE[%s]: Using virtual LUN0! :-)\n",
CMD_TFO(se_cmd)->get_fabric_name());
#endif
se_lun = se_cmd->se_lun = &se_sess->se_tpg->tpg_virt_lun0;
se_cmd->orig_fe_lun = 0;
se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
}
}
/*
* Determine if the struct se_lun is online.
*/
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -1;
}
{
struct se_device *dev = se_lun->lun_se_dev;
spin_lock(&dev->stats_lock);
dev->num_cmds++;
if (se_cmd->data_direction == DMA_TO_DEVICE)
dev->write_bytes += se_cmd->data_length;
else if (se_cmd->data_direction == DMA_FROM_DEVICE)
dev->read_bytes += se_cmd->data_length;
spin_unlock(&dev->stats_lock);
}
/*
* Add the struct se_cmd to the struct se_lun's cmd list. This list is used
* for tracking state of struct se_cmds during LUN shutdown events.
*/
spin_lock_irqsave(&se_lun->lun_cmd_lock, flags);
list_add_tail(&se_cmd->se_lun_list, &se_lun->lun_cmd_list);
atomic_set(&T_TASK(se_cmd)->transport_lun_active, 1);
#if 0
printk(KERN_INFO "Adding ITT: 0x%08x to LUN LIST[%d]\n",
CMD_TFO(se_cmd)->get_task_tag(se_cmd), se_lun->unpacked_lun);
#endif
spin_unlock_irqrestore(&se_lun->lun_cmd_lock, flags);
return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_cmd);
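/*
 * transport_get_lun_for_tmr():
 *
 * Same device_list lookup as above, but for task management requests; on
 * success the struct se_tmr_req is added to the owning struct
 * se_device->dev_tmr_list.
 */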
int transport_get_lun_for_tmr(
struct se_cmd *se_cmd,
u32 unpacked_lun)
{
struct se_device *dev = NULL;
struct se_dev_entry *deve;
struct se_lun *se_lun = NULL;
struct se_session *se_sess = SE_SESS(se_cmd);
struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
deve = se_cmd->se_deve =
&SE_NODE_ACL(se_sess)->device_list[unpacked_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
se_lun = se_cmd->se_lun = se_tmr->tmr_lun = deve->se_lun;
dev = se_tmr->tmr_dev = se_lun->lun_se_dev;
se_cmd->pr_res_key = deve->pr_res_key;
se_cmd->orig_fe_lun = unpacked_lun;
se_cmd->se_orig_obj_ptr = SE_LUN(se_cmd)->lun_se_dev;
/* se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; */
}
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
if (!se_lun) {
printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
" Access for 0x%08x\n",
CMD_TFO(se_cmd)->get_fabric_name(),
unpacked_lun);
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -1;
}
/*
* Determine if the struct se_lun is online.
*/
/* #warning FIXME: Check for LUN_RESET + UNIT Attention */
if (se_dev_check_online(se_lun->lun_se_dev) != 0) {
se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
return -1;
}
spin_lock(&dev->se_tmr_lock);
list_add_tail(&se_tmr->tmr_list, &dev->dev_tmr_list);
spin_unlock(&dev->se_tmr_lock);
return 0;
}
EXPORT_SYMBOL(transport_get_lun_for_tmr);
/*
* This function is called from core_scsi3_emulate_pro_register_and_move()
* and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_ref_count
* when a matching rtpi is found.
*/
struct se_dev_entry *core_get_se_deve_from_rtpi(
struct se_node_acl *nacl,
u16 rtpi)
{
struct se_dev_entry *deve;
struct se_lun *lun;
struct se_port *port;
struct se_portal_group *tpg = nacl->se_tpg;
u32 i;
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
lun = deve->se_lun;
if (!(lun)) {
printk(KERN_ERR "%s device entries device pointer is"
" NULL, but Initiator has access.\n",
TPG_TFO(tpg)->get_fabric_name());
continue;
}
port = lun->lun_sep;
if (!(port)) {
printk(KERN_ERR "%s device entries device pointer is"
" NULL, but Initiator has access.\n",
TPG_TFO(tpg)->get_fabric_name());
continue;
}
if (port->sep_rtpi != rtpi)
continue;
atomic_inc(&deve->pr_ref_count);
smp_mb__after_atomic_inc();
spin_unlock_irq(&nacl->device_list_lock);
return deve;
}
spin_unlock_irq(&nacl->device_list_lock);
return NULL;
}
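/*
* core_free_device_list_for_node():
*
* Walk the node ACL's device_list, tear down every active MappedLUN
* mapping via core_update_device_list_for_node() and free the
* device_list array itself.
*/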
int core_free_device_list_for_node(
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
struct se_dev_entry *deve;
struct se_lun *lun;
u32 i;
if (!nacl->device_list)
return 0;
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
if (!deve->se_lun) {
printk(KERN_ERR "%s device entries device pointer is"
" NULL, but Initiator has access.\n",
TPG_TFO(tpg)->get_fabric_name());
continue;
}
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
spin_lock_irq(&nacl->device_list_lock);
}
spin_unlock_irq(&nacl->device_list_lock);
kfree(nacl->device_list);
nacl->device_list = NULL;
return 0;
}
void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
spin_lock_irq(&se_nacl->device_list_lock);
deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
deve->deve_cmds--;
spin_unlock_irq(&se_nacl->device_list_lock);
return;
}
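/*
* core_update_device_list_access():
*
* Flip the READ_ONLY/READ_WRITE flags of an existing MappedLUN entry
* under the node ACL's device_list_lock.
*/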
void core_update_device_list_access(
u32 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[mapped_lun];
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
spin_unlock_irq(&nacl->device_list_lock);
return;
}
/* core_update_device_list_for_node():
*
* Setup or teardown the struct se_dev_entry mapping between a
* struct se_node_acl and a struct se_lun, depending upon @enable.
*/
int core_update_device_list_for_node(
struct se_lun *lun,
struct se_lun_acl *lun_acl,
u32 mapped_lun,
u32 lun_access,
struct se_node_acl *nacl,
struct se_portal_group *tpg,
int enable)
{
struct se_port *port = lun->lun_sep;
struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
int trans = 0;
/*
* If the MappedLUN entry is being disabled, the entry in
* port->sep_alua_list must be removed now before clearing the
* struct se_dev_entry pointers below as logic in
* core_alua_do_transition_tg_pt() depends on these being present.
*/
if (!(enable)) {
/*
* deve->se_lun_acl will be NULL for demo-mode created LUNs
* that have not been explicitly converted to MappedLUNs ->
* struct se_lun_acl.
*/
if (!(deve->se_lun_acl))
return 0;
spin_lock_bh(&port->sep_alua_lock);
list_del(&deve->alua_port_list);
spin_unlock_bh(&port->sep_alua_lock);
}
spin_lock_irq(&nacl->device_list_lock);
if (enable) {
/*
* Check if the call is handling a demo mode -> explicit LUN ACL
* transition. This transition must be for the same struct se_lun
* + mapped_lun that was set up in demo mode.
*/
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
if (deve->se_lun_acl != NULL) {
printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
" already set for demo mode -> explict"
" LUN ACL transition\n");
return -1;
}
if (deve->se_lun != lun) {
printk(KERN_ERR "struct se_dev_entry->se_lun does"
" match passed struct se_lun for demo mode"
" -> explict LUN ACL transition\n");
return -1;
}
deve->se_lun_acl = lun_acl;
trans = 1;
} else {
deve->se_lun = lun;
deve->se_lun_acl = lun_acl;
deve->mapped_lun = mapped_lun;
deve->lun_flags |= TRANSPORT_LUNFLAGS_INITIATOR_ACCESS;
}
if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_WRITE;
deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_ONLY;
}
if (trans) {
spin_unlock_irq(&nacl->device_list_lock);
return 0;
}
deve->creation_time = get_jiffies_64();
deve->attach_count++;
spin_unlock_irq(&nacl->device_list_lock);
spin_lock_bh(&port->sep_alua_lock);
list_add_tail(&deve->alua_port_list, &port->sep_alua_list);
spin_unlock_bh(&port->sep_alua_lock);
return 0;
}
/*
* Wait for any in process SPEC_I_PT=1 or REGISTER_AND_MOVE
* PR operation to complete.
*/
spin_unlock_irq(&nacl->device_list_lock);
while (atomic_read(&deve->pr_ref_count) != 0)
cpu_relax();
spin_lock_irq(&nacl->device_list_lock);
/*
* Disable struct se_dev_entry LUN ACL mapping
*/
core_scsi3_ua_release_all(deve);
deve->se_lun = NULL;
deve->se_lun_acl = NULL;
deve->lun_flags = 0;
deve->creation_time = 0;
deve->attach_count--;
spin_unlock_irq(&nacl->device_list_lock);
core_scsi3_free_pr_reg_from_nacl(lun->lun_se_dev, nacl);
return 0;
}
/* core_clear_lun_from_tpg():
*
* Remove the MappedLUN entries referencing @lun from every struct
* se_node_acl within @tpg.
*/
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
struct se_node_acl *nacl;
struct se_dev_entry *deve;
u32 i;
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
spin_unlock_bh(&tpg->acl_node_lock);
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
if (lun != deve->se_lun)
continue;
spin_unlock_irq(&nacl->device_list_lock);
core_update_device_list_for_node(lun, NULL,
deve->mapped_lun, TRANSPORT_LUNFLAGS_NO_ACCESS,
nacl, tpg, 0);
spin_lock_irq(&nacl->device_list_lock);
}
spin_unlock_irq(&nacl->device_list_lock);
spin_lock_bh(&tpg->acl_node_lock);
}
spin_unlock_bh(&tpg->acl_node_lock);
return;
}
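/*
* core_alloc_port():
*
* Allocate a struct se_port and assign the next free RELATIVE TARGET
* PORT IDENTIFIER, skipping zero and any value already in use on
* dev->dev_sep_list so the identifier stays unique across 16-bit wrap.
*/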
static struct se_port *core_alloc_port(struct se_device *dev)
{
struct se_port *port, *port_tmp;
port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
if (!(port)) {
printk(KERN_ERR "Unable to allocate struct se_port\n");
return NULL;
}
INIT_LIST_HEAD(&port->sep_alua_list);
INIT_LIST_HEAD(&port->sep_list);
atomic_set(&port->sep_tg_pt_secondary_offline, 0);
spin_lock_init(&port->sep_alua_lock);
mutex_init(&port->sep_tg_pt_md_mutex);
spin_lock(&dev->se_port_lock);
if (dev->dev_port_count == 0x0000ffff) {
printk(KERN_WARNING "Reached dev->dev_port_count =="
" 0x0000ffff\n");
spin_unlock(&dev->se_port_lock);
return NULL;
}
again:
/*
* Allocate the next RELATIVE TARGET PORT IDENTIFIER for this struct se_device
* Here is the table from spc4r17 section 7.7.3.8.
*
* Table 473 -- RELATIVE TARGET PORT IDENTIFIER field
*
* Code Description
* 0h Reserved
* 1h Relative port 1, historically known as port A
* 2h Relative port 2, historically known as port B
* 3h to FFFFh Relative port 3 through 65 535
*/
port->sep_rtpi = dev->dev_rpti_counter++;
if (!(port->sep_rtpi))
goto again;
list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
/*
* Make sure RELATIVE TARGET PORT IDENTIFIER is unique
* for 16-bit wrap..
*/
if (port->sep_rtpi == port_tmp->sep_rtpi)
goto again;
}
spin_unlock(&dev->se_port_lock);
return port;
}
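/*
* core_export_port():
*
* Link @port to @lun and @dev, and when SPC-3 ALUA emulation is active
* attach the new port to the device's default ALUA target port group.
*/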
static void core_export_port(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_port *port,
struct se_lun *lun)
{
struct se_subsystem_dev *su_dev = SU_DEV(dev);
struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem = NULL;
spin_lock(&dev->se_port_lock);
spin_lock(&lun->lun_sep_lock);
port->sep_tpg = tpg;
port->sep_lun = lun;
lun->lun_sep = port;
spin_unlock(&lun->lun_sep_lock);
list_add_tail(&port->sep_list, &dev->dev_sep_list);
spin_unlock(&dev->se_port_lock);
if (T10_ALUA(su_dev)->alua_type == SPC3_ALUA_EMULATED) {
tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
"_gp_member_t\n");
return;
}
spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
T10_ALUA(su_dev)->default_tg_pt_gp);
spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
" Group: alua/default_tg_pt_gp\n",
TRANSPORT(dev)->name, TPG_TFO(tpg)->get_fabric_name());
}
dev->dev_port_count++;
port->sep_index = port->sep_rtpi; /* RELATIVE TARGET PORT IDENTIFIER */
}
/*
* Called with struct se_device->se_port_lock spinlock held.
*/
static void core_release_port(struct se_device *dev, struct se_port *port)
{
/*
* Wait for any port reference for PR ALL_TG_PT=1 operation
* to complete in __core_scsi3_alloc_registration()
*/
spin_unlock(&dev->se_port_lock);
while (atomic_read(&port->sep_tg_pt_ref_cnt))
cpu_relax();
spin_lock(&dev->se_port_lock);
core_alua_free_tg_pt_gp_mem(port);
list_del(&port->sep_list);
dev->dev_port_count--;
kfree(port);
return;
}
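/*
* core_dev_export():
*
* Allocate a struct se_port, mark @dev started and export it as @lun
* within @tpg.  Returns -1 if port allocation fails.
*/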
int core_dev_export(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_lun *lun)
{
struct se_port *port;
port = core_alloc_port(dev);
if (!(port))
return -1;
lun->lun_se_dev = dev;
se_dev_start(dev);
atomic_inc(&dev->dev_export_obj.obj_access_count);
core_export_port(dev, tpg, port, lun);
return 0;
}
void core_dev_unexport(
struct se_device *dev,
struct se_portal_group *tpg,
struct se_lun *lun)
{
struct se_port *port = lun->lun_sep;
spin_lock(&lun->lun_sep_lock);
if (lun->lun_se_dev == NULL) {
spin_unlock(&lun->lun_sep_lock);
return;
}
spin_unlock(&lun->lun_sep_lock);
spin_lock(&dev->se_port_lock);
atomic_dec(&dev->dev_export_obj.obj_access_count);
core_release_port(dev, port);
spin_unlock(&dev->se_port_lock);
se_dev_stop(dev);
lun->lun_se_dev = NULL;
}
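/*
* transport_core_report_lun_response():
*
* Build the REPORT LUNS payload for @se_cmd: one 8-byte entry for each
* MappedLUN visible to the I_T nexus (or a single LUN 0 when no
* struct se_session exists for an internal passthrough command), with
* the LUN LIST LENGTH encoded in the first four bytes.
*/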
int transport_core_report_lun_response(struct se_cmd *se_cmd)
{
struct se_dev_entry *deve;
struct se_lun *se_lun;
struct se_session *se_sess = SE_SESS(se_cmd);
struct se_task *se_task;
unsigned char *buf = (unsigned char *)T_TASK(se_cmd)->t_task_buf;
u32 cdb_offset = 0, lun_count = 0, offset = 8;
u64 i, lun;
list_for_each_entry(se_task, &T_TASK(se_cmd)->t_task_list, t_list)
break;
if (!(se_task)) {
printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
/*
* If no struct se_session pointer is present, this struct se_cmd is
* coming via a target_core_mod PASSTHROUGH op, and not through
* a $FABRIC_MOD. In that case, report LUN=0 only.
*/
if (!(se_sess)) {
lun = 0;
buf[offset++] = ((lun >> 56) & 0xff);
buf[offset++] = ((lun >> 48) & 0xff);
buf[offset++] = ((lun >> 40) & 0xff);
buf[offset++] = ((lun >> 32) & 0xff);
buf[offset++] = ((lun >> 24) & 0xff);
buf[offset++] = ((lun >> 16) & 0xff);
buf[offset++] = ((lun >> 8) & 0xff);
buf[offset++] = (lun & 0xff);
lun_count = 1;
goto done;
}
spin_lock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &SE_NODE_ACL(se_sess)->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
se_lun = deve->se_lun;
/*
* We keep counting the correct LUN LIST LENGTH even after the
* initial allocation length has been reached.
* See SPC2-R20 7.19.
*/
lun_count++;
if ((cdb_offset + 8) >= se_cmd->data_length)
continue;
lun = cpu_to_be64(CMD_TFO(se_cmd)->pack_lun(deve->mapped_lun));
buf[offset++] = ((lun >> 56) & 0xff);
buf[offset++] = ((lun >> 48) & 0xff);
buf[offset++] = ((lun >> 40) & 0xff);
buf[offset++] = ((lun >> 32) & 0xff);
buf[offset++] = ((lun >> 24) & 0xff);
buf[offset++] = ((lun >> 16) & 0xff);
buf[offset++] = ((lun >> 8) & 0xff);
buf[offset++] = (lun & 0xff);
cdb_offset += 8;
}
spin_unlock_irq(&SE_NODE_ACL(se_sess)->device_list_lock);
/*
* See SPC3 r07, page 159.
*/
done:
lun_count *= 8;
buf[0] = ((lun_count >> 24) & 0xff);
buf[1] = ((lun_count >> 16) & 0xff);
buf[2] = ((lun_count >> 8) & 0xff);
buf[3] = (lun_count & 0xff);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/* se_release_device_for_hba():
*
* Stop the struct se_device, release its subsystem plugin state and
* remove it from the owning struct se_hba before freeing the device.
*/
void se_release_device_for_hba(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
if ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
(dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) ||
(dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) ||
(dev->dev_status & TRANSPORT_DEVICE_OFFLINE_ACTIVATED) ||
(dev->dev_status & TRANSPORT_DEVICE_OFFLINE_DEACTIVATED))
se_dev_stop(dev);
if (dev->dev_ptr) {
kthread_stop(dev->process_thread);
if (dev->transport->free_device)
dev->transport->free_device(dev->dev_ptr);
}
spin_lock(&hba->device_lock);
list_del(&dev->dev_list);
hba->dev_count--;
spin_unlock(&hba->device_lock);
core_scsi3_free_all_registrations(dev);
se_release_vpd_for_dev(dev);
kfree(dev->dev_status_queue_obj);
kfree(dev->dev_queue_obj);
kfree(dev);
return;
}
void se_release_vpd_for_dev(struct se_device *dev)
{
struct t10_vpd *vpd, *vpd_tmp;
spin_lock(&DEV_T10_WWN(dev)->t10_vpd_lock);
list_for_each_entry_safe(vpd, vpd_tmp,
&DEV_T10_WWN(dev)->t10_vpd_list, vpd_list) {
list_del(&vpd->vpd_list);
kfree(vpd);
}
spin_unlock(&DEV_T10_WWN(dev)->t10_vpd_lock);
return;
}
/*
* Called with struct se_hba->device_lock held.
*/
void se_clear_dev_ports(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
struct se_lun *lun;
struct se_portal_group *tpg;
struct se_port *sep, *sep_tmp;
spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
spin_unlock(&dev->se_port_lock);
spin_unlock(&hba->device_lock);
lun = sep->sep_lun;
tpg = sep->sep_tpg;
spin_lock(&lun->lun_sep_lock);
if (lun->lun_se_dev == NULL) {
spin_unlock(&lun->lun_sep_lock);
continue;
}
spin_unlock(&lun->lun_sep_lock);
core_dev_del_lun(tpg, lun->unpacked_lun);
spin_lock(&hba->device_lock);
spin_lock(&dev->se_port_lock);
}
spin_unlock(&dev->se_port_lock);
return;
}
/* se_free_virtual_device():
*
* Used for IBLOCK, RAMDISK, and FILEIO Transport Drivers.
*/
int se_free_virtual_device(struct se_device *dev, struct se_hba *hba)
{
spin_lock(&hba->device_lock);
se_clear_dev_ports(dev);
spin_unlock(&hba->device_lock);
core_alua_free_lu_gp_mem(dev);
se_release_device_for_hba(dev);
return 0;
}
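/*
* se_dev_start():
*
* Take an object reference on @dev and, on the first reference, move a
* DEACTIVATED or OFFLINE_DEACTIVATED device back to its corresponding
* ACTIVATED state.  se_dev_stop() below performs the inverse transition
* when the last reference is dropped.
*/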
static void se_dev_start(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
spin_lock(&hba->device_lock);
atomic_inc(&dev->dev_obj.obj_access_count);
if (atomic_read(&dev->dev_obj.obj_access_count) == 1) {
if (dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED) {
dev->dev_status &= ~TRANSPORT_DEVICE_DEACTIVATED;
dev->dev_status |= TRANSPORT_DEVICE_ACTIVATED;
} else if (dev->dev_status &
TRANSPORT_DEVICE_OFFLINE_DEACTIVATED) {
dev->dev_status &=
~TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
}
}
spin_unlock(&hba->device_lock);
}
static void se_dev_stop(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
spin_lock(&hba->device_lock);
atomic_dec(&dev->dev_obj.obj_access_count);
if (atomic_read(&dev->dev_obj.obj_access_count) == 0) {
if (dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) {
dev->dev_status &= ~TRANSPORT_DEVICE_ACTIVATED;
dev->dev_status |= TRANSPORT_DEVICE_DEACTIVATED;
} else if (dev->dev_status &
TRANSPORT_DEVICE_OFFLINE_ACTIVATED) {
dev->dev_status &= ~TRANSPORT_DEVICE_OFFLINE_ACTIVATED;
dev->dev_status |= TRANSPORT_DEVICE_OFFLINE_DEACTIVATED;
}
}
spin_unlock(&hba->device_lock);
while (atomic_read(&hba->dev_mib_access_count))
cpu_relax();
}
int se_dev_check_online(struct se_device *dev)
{
int ret;
spin_lock_irq(&dev->dev_status_lock);
ret = ((dev->dev_status & TRANSPORT_DEVICE_ACTIVATED) ||
(dev->dev_status & TRANSPORT_DEVICE_DEACTIVATED)) ? 0 : 1;
spin_unlock_irq(&dev->dev_status_lock);
return ret;
}
int se_dev_check_shutdown(struct se_device *dev)
{
int ret;
spin_lock_irq(&dev->dev_status_lock);
ret = (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN);
spin_unlock_irq(&dev->dev_status_lock);
return ret;
}
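/*
* se_dev_set_default_attribs():
*
* Seed DEV_ATTRIB(dev) with the DA_* emulation defaults and with the
* block size, max_sectors and queue depth limits reported by the
* subsystem plugin via struct se_dev_limits.
*/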
void se_dev_set_default_attribs(
struct se_device *dev,
struct se_dev_limits *dev_limits)
{
struct queue_limits *limits = &dev_limits->limits;
DEV_ATTRIB(dev)->emulate_dpo = DA_EMULATE_DPO;
DEV_ATTRIB(dev)->emulate_fua_write = DA_EMULATE_FUA_WRITE;
DEV_ATTRIB(dev)->emulate_fua_read = DA_EMULATE_FUA_READ;
DEV_ATTRIB(dev)->emulate_write_cache = DA_EMULATE_WRITE_CACHE;
DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = DA_EMULATE_UA_INTLLCK_CTRL;
DEV_ATTRIB(dev)->emulate_tas = DA_EMULATE_TAS;
DEV_ATTRIB(dev)->emulate_tpu = DA_EMULATE_TPU;
DEV_ATTRIB(dev)->emulate_tpws = DA_EMULATE_TPWS;
DEV_ATTRIB(dev)->emulate_reservations = DA_EMULATE_RESERVATIONS;
DEV_ATTRIB(dev)->emulate_alua = DA_EMULATE_ALUA;
DEV_ATTRIB(dev)->enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
/*
* The TPU=1 and TPWS=1 settings will be set by TCM/IBLOCK in
* iblock_create_virtdevice() from struct queue_limits values
* when blk_queue_discard()==1
*/
DEV_ATTRIB(dev)->max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
DEV_ATTRIB(dev)->max_unmap_block_desc_count =
DA_MAX_UNMAP_BLOCK_DESC_COUNT;
DEV_ATTRIB(dev)->unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
DEV_ATTRIB(dev)->unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
/*
* block_size is based on subsystem plugin dependent requirements.
*/
DEV_ATTRIB(dev)->hw_block_size = limits->logical_block_size;
DEV_ATTRIB(dev)->block_size = limits->logical_block_size;
/*
* max_sectors is based on subsystem plugin dependent requirements.
*/
DEV_ATTRIB(dev)->hw_max_sectors = limits->max_hw_sectors;
DEV_ATTRIB(dev)->max_sectors = limits->max_sectors;
/*
* Set optimal_sectors from max_sectors, which can be lowered via
* configfs.
*/
DEV_ATTRIB(dev)->optimal_sectors = limits->max_sectors;
/*
* queue_depth is based on subsystem plugin dependent requirements.
*/
DEV_ATTRIB(dev)->hw_queue_depth = dev_limits->hw_queue_depth;
DEV_ATTRIB(dev)->queue_depth = dev_limits->queue_depth;
}
int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
{
if (task_timeout > DA_TASK_TIMEOUT_MAX) {
printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
return -1;
} else {
DEV_ATTRIB(dev)->task_timeout = task_timeout;
printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
dev, task_timeout);
}
return 0;
}
int se_dev_set_max_unmap_lba_count(
struct se_device *dev,
u32 max_unmap_lba_count)
{
DEV_ATTRIB(dev)->max_unmap_lba_count = max_unmap_lba_count;
printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
dev, DEV_ATTRIB(dev)->max_unmap_lba_count);
return 0;
}
int se_dev_set_max_unmap_block_desc_count(
struct se_device *dev,
u32 max_unmap_block_desc_count)
{
DEV_ATTRIB(dev)->max_unmap_block_desc_count = max_unmap_block_desc_count;
printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
dev, DEV_ATTRIB(dev)->max_unmap_block_desc_count);
return 0;
}
int se_dev_set_unmap_granularity(
struct se_device *dev,
u32 unmap_granularity)
{
DEV_ATTRIB(dev)->unmap_granularity = unmap_granularity;
printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
dev, DEV_ATTRIB(dev)->unmap_granularity);
return 0;
}
int se_dev_set_unmap_granularity_alignment(
struct se_device *dev,
u32 unmap_granularity_alignment)
{
DEV_ATTRIB(dev)->unmap_granularity_alignment = unmap_granularity_alignment;
printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
dev, DEV_ATTRIB(dev)->unmap_granularity_alignment);
return 0;
}
int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
if (TRANSPORT(dev)->dpo_emulated == NULL) {
printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated is NULL\n");
return -1;
}
if (TRANSPORT(dev)->dpo_emulated(dev) == 0) {
printk(KERN_ERR "TRANSPORT(dev)->dpo_emulated not supported\n");
return -1;
}
DEV_ATTRIB(dev)->emulate_dpo = flag;
printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
" bit: %d\n", dev, DEV_ATTRIB(dev)->emulate_dpo);
return 0;
}
int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
if (TRANSPORT(dev)->fua_write_emulated == NULL) {
printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated is NULL\n");
return -1;
}
if (TRANSPORT(dev)->fua_write_emulated(dev) == 0) {
printk(KERN_ERR "TRANSPORT(dev)->fua_write_emulated not supported\n");
return -1;
}
DEV_ATTRIB(dev)->emulate_fua_write = flag;
printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
dev, DEV_ATTRIB(dev)->emulate_fua_write);
return 0;
}
int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
if (TRANSPORT(dev)->fua_read_emulated == NULL) {
printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated is NULL\n");
return -1;
}
if (TRANSPORT(dev)->fua_read_emulated(dev) == 0) {
printk(KERN_ERR "TRANSPORT(dev)->fua_read_emulated not supported\n");
return -1;
}
DEV_ATTRIB(dev)->emulate_fua_read = flag;
printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
dev, DEV_ATTRIB(dev)->emulate_fua_read);
return 0;
}
int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
if (TRANSPORT(dev)->write_cache_emulated == NULL) {
printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated is NULL\n");
return -1;
}
if (TRANSPORT(dev)->write_cache_emulated(dev) == 0) {
printk(KERN_ERR "TRANSPORT(dev)->write_cache_emulated not supported\n");
return -1;
}
DEV_ATTRIB(dev)->emulate_write_cache = flag;
printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
dev, DEV_ATTRIB(dev)->emulate_write_cache);
return 0;
}
int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1) && (flag != 2)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device"
" UA_INTRLCK_CTRL while dev_export_obj: %d count"
" exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -1;
}
DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl = flag;
printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
dev, DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl);
return 0;
}
int se_dev_set_emulate_tas(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -1;
}
DEV_ATTRIB(dev)->emulate_tas = flag;
printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
dev, (DEV_ATTRIB(dev)->emulate_tas) ? "Enabled" : "Disabled");
return 0;
}
int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
printk(KERN_ERR "Generic Block Discard not supported\n");
return -ENOSYS;
}
DEV_ATTRIB(dev)->emulate_tpu = flag;
printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
dev, flag);
return 0;
}
int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
/*
* We expect this value to be non-zero when generic Block Layer
* Discard support is detected in iblock_create_virtdevice().
*/
if (!(DEV_ATTRIB(dev)->max_unmap_block_desc_count)) {
printk(KERN_ERR "Generic Block Discard not supported\n");
return -ENOSYS;
}
DEV_ATTRIB(dev)->emulate_tpws = flag;
printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
dev, flag);
return 0;
}
int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
{
if ((flag != 0) && (flag != 1)) {
printk(KERN_ERR "Illegal value %d\n", flag);
return -1;
}
DEV_ATTRIB(dev)->enforce_pr_isids = flag;
printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
(DEV_ATTRIB(dev)->enforce_pr_isids) ? "Enabled" : "Disabled");
return 0;
}
/*
* Note, this can only be called on unexported SE Device Object.
*/
int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
{
u32 orig_queue_depth = dev->queue_depth;
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
" dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -1;
}
if (!(queue_depth)) {
printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
"_depth\n", dev);
return -1;
}
if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
" exceeds TCM/SE_Device TCQ: %u\n",
dev, queue_depth,
DEV_ATTRIB(dev)->hw_queue_depth);
return -1;
}
} else {
if (queue_depth > DEV_ATTRIB(dev)->queue_depth) {
if (queue_depth > DEV_ATTRIB(dev)->hw_queue_depth) {
printk(KERN_ERR "dev[%p]: Passed queue_depth:"
" %u exceeds TCM/SE_Device MAX"
" TCQ: %u\n", dev, queue_depth,
DEV_ATTRIB(dev)->hw_queue_depth);
return -1;
}
}
}
DEV_ATTRIB(dev)->queue_depth = dev->queue_depth = queue_depth;
if (queue_depth > orig_queue_depth)
atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
else if (queue_depth < orig_queue_depth)
atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
dev, queue_depth);
return 0;
}
int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
{
int force = 0; /* Force setting for VDEVS */
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device"
" max_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -1;
}
if (!(max_sectors)) {
printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
" max_sectors\n", dev);
return -1;
}
if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MIN);
return -1;
}
if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
if (max_sectors > DEV_ATTRIB(dev)->hw_max_sectors) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors:"
" %u\n", dev, max_sectors,
DEV_ATTRIB(dev)->hw_max_sectors);
return -1;
}
} else {
if (!(force) && (max_sectors >
DEV_ATTRIB(dev)->hw_max_sectors)) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
" greater than TCM/SE_Device max_sectors"
": %u, use force=1 to override.\n", dev,
max_sectors, DEV_ATTRIB(dev)->hw_max_sectors);
return -1;
}
if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
" greater than DA_STATUS_MAX_SECTORS_MAX:"
" %u\n", dev, max_sectors,
DA_STATUS_MAX_SECTORS_MAX);
return -1;
}
}
DEV_ATTRIB(dev)->max_sectors = max_sectors;
printk("dev[%p]: SE Device max_sectors changed to %u\n",
dev, max_sectors);
return 0;
}
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device"
" optimal_sectors while dev_export_obj: %d count exists\n",
dev, atomic_read(&dev->dev_export_obj.obj_access_count));
return -EINVAL;
}
if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
" changed for TCM/pSCSI\n", dev);
return -EINVAL;
}
if (optimal_sectors > DEV_ATTRIB(dev)->max_sectors) {
printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
" greater than max_sectors: %u\n", dev,
optimal_sectors, DEV_ATTRIB(dev)->max_sectors);
return -EINVAL;
}
DEV_ATTRIB(dev)->optimal_sectors = optimal_sectors;
printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
dev, optimal_sectors);
return 0;
}
int se_dev_set_block_size(struct se_device *dev, u32 block_size)
{
if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
" while dev_export_obj: %d count exists\n", dev,
atomic_read(&dev->dev_export_obj.obj_access_count));
return -1;
}
if ((block_size != 512) &&
(block_size != 1024) &&
(block_size != 2048) &&
(block_size != 4096)) {
printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
" for SE device, must be 512, 1024, 2048 or 4096\n",
dev, block_size);
return -1;
}
if (TRANSPORT(dev)->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
" Physical Device, use for Linux/SCSI to change"
" block_size for underlying hardware\n", dev);
return -1;
}
DEV_ATTRIB(dev)->block_size = block_size;
printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
return 0;
}
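/*
* core_dev_add_lun():
*
* Export @dev from @hba as LUN @lun within @tpg, choosing READ_ONLY or
* READ_WRITE access from the device flags, and update the LUN maps of
* any dynamically generated node ACLs when demo mode is enabled.
* Returns the new struct se_lun, or NULL on failure.
*/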
struct se_lun *core_dev_add_lun(
struct se_portal_group *tpg,
struct se_hba *hba,
struct se_device *dev,
u32 lun)
{
struct se_lun *lun_p;
u32 lun_access = 0;
if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
atomic_read(&dev->dev_access_obj.obj_access_count));
return NULL;
}
lun_p = core_tpg_pre_addlun(tpg, lun);
if ((IS_ERR(lun_p)) || !(lun_p))
return NULL;
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
return NULL;
printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
" CORE HBA: %u\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), lun_p->unpacked_lun,
TPG_TFO(tpg)->get_fabric_name(), hba->hba_id);
/*
* Update LUN maps for dynamically added initiators when
* generate_node_acl is enabled.
*/
if (TPG_TFO(tpg)->tpg_check_demo_mode(tpg)) {
struct se_node_acl *acl;
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (acl->dynamic_node_acl) {
spin_unlock_bh(&tpg->acl_node_lock);
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_bh(&tpg->acl_node_lock);
}
}
spin_unlock_bh(&tpg->acl_node_lock);
}
return lun_p;
}
/* core_dev_del_lun():
*
* Deactivate and remove an active struct se_lun from a struct
* se_portal_group.
*/
int core_dev_del_lun(
struct se_portal_group *tpg,
u32 unpacked_lun)
{
struct se_lun *lun;
int ret = 0;
lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
if (!(lun))
return ret;
core_tpg_post_dellun(tpg, lun);
printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
" device object\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun,
TPG_TFO(tpg)->get_fabric_name());
return 0;
}
struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
{
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
"_PER_TPG-1: %u for Target Portal Group: %hu\n",
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
" Target Portal Group: %hu, ignoring request.\n",
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
spin_unlock(&tpg->tpg_lun_lock);
return lun;
}
/* core_dev_get_lun():
*
* Return the struct se_lun for @unpacked_lun if it is currently
* ACTIVE within @tpg, or NULL otherwise.
*/
static struct se_lun *core_dev_get_lun(struct se_portal_group *tpg, u32 unpacked_lun)
{
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
"_TPG-1: %u for Target Portal Group: %hu\n",
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return NULL;
}
spin_unlock(&tpg->tpg_lun_lock);
return lun;
}
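/*
* core_dev_init_initiator_node_lun_acl():
*
* Allocate a struct se_lun_acl for @initiatorname and @mapped_lun.
* Requires an existing struct se_node_acl for the initiator; on failure
* NULL is returned and *ret is set to the error code.
*/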
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
struct se_portal_group *tpg,
u32 mapped_lun,
char *initiatorname,
int *ret)
{
struct se_lun_acl *lacl;
struct se_node_acl *nacl;
if (strlen(initiatorname) > TRANSPORT_IQN_LEN) {
printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
TPG_TFO(tpg)->get_fabric_name());
*ret = -EOVERFLOW;
return NULL;
}
nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!(nacl)) {
*ret = -EINVAL;
return NULL;
}
lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
if (!(lacl)) {
printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
*ret = -ENOMEM;
return NULL;
}
INIT_LIST_HEAD(&lacl->lacl_list);
lacl->mapped_lun = mapped_lun;
lacl->se_lun_nacl = nacl;
snprintf(lacl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
return lacl;
}
int core_dev_add_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl,
u32 unpacked_lun,
u32 lun_access)
{
struct se_lun *lun;
struct se_node_acl *nacl;
lun = core_dev_get_lun(tpg, unpacked_lun);
if (!(lun)) {
printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
" Target Portal Group: %hu, ignoring request.\n",
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
TPG_TFO(tpg)->tpg_get_tag(tpg));
return -EINVAL;
}
nacl = lacl->se_lun_nacl;
if (!(nacl))
return -EINVAL;
if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE))
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
lacl->se_lun = lun;
if (core_update_device_list_for_node(lun, lacl, lacl->mapped_lun,
lun_access, nacl, tpg, 1) < 0)
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
atomic_inc(&lun->lun_acl_count);
smp_mb__after_atomic_inc();
spin_unlock(&lun->lun_acl_lock);
printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
" InitiatorNode: %s\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
lacl->initiatorname);
/*
* Check to see if there are any existing persistent reservation APTPL
* pre-registrations that need to be enabled for this LUN ACL..
*/
core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl);
return 0;
}
/* core_dev_del_initiator_node_lun_acl():
*
* Remove an existing MappedLUN ACL from @lun and clear the associated
* struct se_dev_entry mapping for the initiator node.
*/
int core_dev_del_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun *lun,
struct se_lun_acl *lacl)
{
struct se_node_acl *nacl;
nacl = lacl->se_lun_nacl;
if (!(nacl))
return -EINVAL;
spin_lock(&lun->lun_acl_lock);
list_del(&lacl->lacl_list);
atomic_dec(&lun->lun_acl_count);
smp_mb__after_atomic_dec();
spin_unlock(&lun->lun_acl_lock);
core_update_device_list_for_node(lun, NULL, lacl->mapped_lun,
TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
lacl->se_lun = NULL;
printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
" InitiatorNode: %s Mapped LUN: %u\n",
TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
lacl->initiatorname, lacl->mapped_lun);
return 0;
}
void core_dev_free_initiator_node_lun_acl(
struct se_portal_group *tpg,
struct se_lun_acl *lacl)
{
printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
" Mapped LUN: %u\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg),
TPG_TFO(tpg)->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun);
kfree(lacl);
}
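/*
* core_dev_setup_virtual_lun0():
*
* Create the internal ramdisk HBA ("rd_dr", rd_pages=8) and its
* struct se_device used to back virtual LUN 0, saving the resulting
* pointers in se_global for core_dev_release_virtual_lun0().
*/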
int core_dev_setup_virtual_lun0(void)
{
struct se_hba *hba;
struct se_device *dev;
struct se_subsystem_dev *se_dev = NULL;
struct se_subsystem_api *t;
char buf[16];
int ret;
hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
if (IS_ERR(hba))
return PTR_ERR(hba);
se_global->g_lun0_hba = hba;
t = hba->transport;
se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
if (!(se_dev)) {
printk(KERN_ERR "Unable to allocate memory for"
" struct se_subsystem_dev\n");
ret = -ENOMEM;
goto out;
}
INIT_LIST_HEAD(&se_dev->g_se_dev_list);
INIT_LIST_HEAD(&se_dev->t10_wwn.t10_vpd_list);
spin_lock_init(&se_dev->t10_wwn.t10_vpd_lock);
INIT_LIST_HEAD(&se_dev->t10_reservation.registration_list);
INIT_LIST_HEAD(&se_dev->t10_reservation.aptpl_reg_list);
spin_lock_init(&se_dev->t10_reservation.registration_lock);
spin_lock_init(&se_dev->t10_reservation.aptpl_reg_lock);
INIT_LIST_HEAD(&se_dev->t10_alua.tg_pt_gps_list);
spin_lock_init(&se_dev->t10_alua.tg_pt_gps_lock);
spin_lock_init(&se_dev->se_dev_lock);
se_dev->t10_reservation.pr_aptpl_buf_len = PR_APTPL_BUF_LEN;
se_dev->t10_wwn.t10_sub_dev = se_dev;
se_dev->t10_alua.t10_sub_dev = se_dev;
se_dev->se_dev_attrib.da_sub_dev = se_dev;
se_dev->se_dev_hba = hba;
se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
if (!(se_dev->se_dev_su_ptr)) {
printk(KERN_ERR "Unable to locate subsystem dependent pointer"
" from allocate_virtdevice()\n");
ret = -ENOMEM;
goto out;
}
se_global->g_lun0_su_dev = se_dev;
memset(buf, 0, 16);
sprintf(buf, "rd_pages=8");
t->set_configfs_dev_params(hba, se_dev, buf, sizeof(buf));
dev = t->create_virtdevice(hba, se_dev, se_dev->se_dev_su_ptr);
if (!(dev) || IS_ERR(dev)) {
ret = -ENOMEM;
goto out;
}
se_dev->se_dev_ptr = dev;
se_global->g_lun0_dev = dev;
return 0;
out:
se_global->g_lun0_su_dev = NULL;
kfree(se_dev);
if (se_global->g_lun0_hba) {
core_delete_hba(se_global->g_lun0_hba);
se_global->g_lun0_hba = NULL;
}
return ret;
}
void core_dev_release_virtual_lun0(void)
{
struct se_hba *hba = se_global->g_lun0_hba;
struct se_subsystem_dev *su_dev = se_global->g_lun0_su_dev;
if (!(hba))
return;
if (se_global->g_lun0_dev)
se_free_virtual_device(se_global->g_lun0_dev, hba);
kfree(su_dev);
core_delete_hba(hba);
}
/*******************************************************************************
* Filename: target_core_fabric_configfs.c
*
* This file contains generic fabric module configfs infrastructure for
* TCM v4.x code
*
* Copyright (c) 2010 Rising Tide Systems
* Copyright (c) 2010 Linux-iSCSI.org
*
* Copyright (c) 2010 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
****************************************************************************/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/unistd.h>
#include <linux/string.h>
#include <linux/syscalls.h>
#include <linux/configfs.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_fabric_configfs.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \
static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
{ \
struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \
struct config_item_type *cit = &tfc->tfc_##_name##_cit; \
\
cit->ct_item_ops = _item_ops; \
cit->ct_group_ops = _group_ops; \
cit->ct_attrs = _attrs; \
cit->ct_owner = tf->tf_module; \
printk("Setup generic %s\n", __stringify(_name)); \
}
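/*
* For example, the TF_CIT_SETUP(tpg_mappedlun, ...) invocation below
* defines target_fabric_setup_tpg_mappedlun_cit(), which fills in
* tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit with the supplied item ops,
* group ops and attribute array.
*/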
/* Start of tfc_tpg_mappedlun_cit */
static int target_fabric_mappedlun_link(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
struct se_dev_entry *deve;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg;
struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s;
int ret = 0, lun_access;
/*
* Ensure that the source port exists
*/
if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
"_tpg does not exist\n");
return -EINVAL;
}
se_tpg = lun->lun_sep->sep_tpg;
nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item;
tpg_ci = &nacl_ci->ci_group->cg_item;
wwn_ci = &tpg_ci->ci_group->cg_item;
tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item;
wwn_ci_s = &tpg_ci_s->ci_group->cg_item;
/*
* Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
*/
if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
config_item_name(wwn_ci));
return -EINVAL;
}
if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
" TPGT: %s\n", config_item_name(wwn_ci),
config_item_name(tpg_ci));
return -EINVAL;
}
/*
* If this struct se_node_acl was dynamically generated with
* tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags,
* which will be write protected (READ-ONLY) when
* tpg_1/attrib/demo_mode_write_protect=1
*/
spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
deve = &lacl->se_lun_nacl->device_list[lacl->mapped_lun];
if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
lun_access = deve->lun_flags;
else
lun_access =
(TPG_TFO(se_tpg)->tpg_check_prod_mode_write_protect(
se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE;
spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
/*
* Determine the actual mapped LUN value the user wants.
*
* This value is what the SCSI Initiator actually sees the
* iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports.
*/
ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl,
lun->unpacked_lun, lun_access);
return (ret < 0) ? -EINVAL : 0;
}
static int target_fabric_mappedlun_unlink(
struct config_item *lun_acl_ci,
struct config_item *lun_ci)
{
struct se_lun *lun;
struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
struct se_lun_acl, se_lun_group);
struct se_node_acl *nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve = &nacl->device_list[lacl->mapped_lun];
struct se_portal_group *se_tpg;
/*
* Determine if the underlying MappedLUN has already been released..
*/
if (!(deve->se_lun))
return 0;
lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
se_tpg = lun->lun_sep->sep_tpg;
core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
return 0;
}
CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
#define TCM_MAPPEDLUN_ATTR(_name, _mode) \
static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_fabric_mappedlun_show_##_name, \
target_fabric_mappedlun_store_##_name);
static ssize_t target_fabric_mappedlun_show_write_protect(
struct se_lun_acl *lacl,
char *page)
{
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_dev_entry *deve;
ssize_t len;
spin_lock_irq(&se_nacl->device_list_lock);
deve = &se_nacl->device_list[lacl->mapped_lun];
len = sprintf(page, "%d\n",
(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
1 : 0);
spin_unlock_irq(&se_nacl->device_list_lock);
return len;
}
static ssize_t target_fabric_mappedlun_store_write_protect(
struct se_lun_acl *lacl,
const char *page,
size_t count)
{
struct se_node_acl *se_nacl = lacl->se_lun_nacl;
struct se_portal_group *se_tpg = se_nacl->se_tpg;
unsigned long op;
if (strict_strtoul(page, 0, &op))
return -EINVAL;
if ((op != 1) && (op != 0))
return -EINVAL;
core_update_device_list_access(lacl->mapped_lun, (op) ?
TRANSPORT_LUNFLAGS_READ_ONLY :
TRANSPORT_LUNFLAGS_READ_WRITE,
lacl->se_lun_nacl);
printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
" Mapped LUN: %u Write Protect bit to %s\n",
TPG_TFO(se_tpg)->get_fabric_name(),
lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
return count;
}
TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR);
CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group);
static struct configfs_attribute *target_fabric_mappedlun_attrs[] = {
&target_fabric_mappedlun_write_protect.attr,
NULL,
};
static struct configfs_item_operations target_fabric_mappedlun_item_ops = {
.show_attribute = target_fabric_mappedlun_attr_show,
.store_attribute = target_fabric_mappedlun_attr_store,
.allow_link = target_fabric_mappedlun_link,
.drop_link = target_fabric_mappedlun_unlink,
};
TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL,
target_fabric_mappedlun_attrs);
/* End of tfc_tpg_mappedlun_cit */
/* Start of tfc_tpg_nacl_attrib_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group);
static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = {
.show_attribute = target_fabric_nacl_attrib_attr_show,
.store_attribute = target_fabric_nacl_attrib_attr_store,
};
TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL);
/* End of tfc_tpg_nacl_attrib_cit */
/* Start of tfc_tpg_nacl_auth_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group);
static struct configfs_item_operations target_fabric_nacl_auth_item_ops = {
.show_attribute = target_fabric_nacl_auth_attr_show,
.store_attribute = target_fabric_nacl_auth_attr_store,
};
TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL);
/* End of tfc_tpg_nacl_auth_cit */
/* Start of tfc_tpg_nacl_param_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group);
static struct configfs_item_operations target_fabric_nacl_param_item_ops = {
.show_attribute = target_fabric_nacl_param_attr_show,
.store_attribute = target_fabric_nacl_param_attr_store,
};
TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL);
/* End of tfc_tpg_nacl_param_cit */
/* Start of tfc_tpg_nacl_base_cit */
CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group);
static struct config_group *target_fabric_make_mappedlun(
struct config_group *group,
const char *name)
{
struct se_node_acl *se_nacl = container_of(group,
struct se_node_acl, acl_group);
struct se_portal_group *se_tpg = se_nacl->se_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_lun_acl *lacl;
struct config_item *acl_ci;
char *buf;
unsigned long mapped_lun;
int ret = 0;
acl_ci = &group->cg_item;
if (!(acl_ci)) {
printk(KERN_ERR "Unable to locatel acl_ci\n");
return NULL;
}
buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
if (!(buf)) {
printk(KERN_ERR "Unable to allocate memory for name buf\n");
return ERR_PTR(-ENOMEM);
}
snprintf(buf, strlen(name) + 1, "%s", name);
/*
* Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
*/
if (strstr(buf, "lun_") != buf) {
printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
" name: %s\n", buf, name);
ret = -EINVAL;
goto out;
}
/*
* Determine the Mapped LUN value. This is what the SCSI Initiator
* Port will actually see.
*/
if (strict_strtoul(buf + 4, 0, &mapped_lun) || mapped_lun > UINT_MAX) {
ret = -EINVAL;
goto out;
}
lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
config_item_name(acl_ci), &ret);
if (!(lacl))
goto out;
config_group_init_type_name(&lacl->se_lun_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_mappedlun_cit);
kfree(buf);
return &lacl->se_lun_group;
out:
kfree(buf);
return ERR_PTR(ret);
}
static void target_fabric_drop_mappedlun(
struct config_group *group,
struct config_item *item)
{
struct se_lun_acl *lacl = container_of(to_config_group(item),
struct se_lun_acl, se_lun_group);
struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg;
config_item_put(item);
core_dev_free_initiator_node_lun_acl(se_tpg, lacl);
}
static struct configfs_item_operations target_fabric_nacl_base_item_ops = {
.show_attribute = target_fabric_nacl_base_attr_show,
.store_attribute = target_fabric_nacl_base_attr_store,
};
static struct configfs_group_operations target_fabric_nacl_base_group_ops = {
.make_group = target_fabric_make_mappedlun,
.drop_item = target_fabric_drop_mappedlun,
};
TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops,
&target_fabric_nacl_base_group_ops, NULL);
/* End of tfc_tpg_nacl_base_cit */
/* Start of tfc_tpg_nacl_cit */
static struct config_group *target_fabric_make_nodeacl(
struct config_group *group,
const char *name)
{
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_acl_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_node_acl *se_nacl;
struct config_group *nacl_cg;
if (!(tf->tf_ops.fabric_make_nodeacl)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);
if (IS_ERR(se_nacl))
return ERR_PTR(PTR_ERR(se_nacl));
nacl_cg = &se_nacl->acl_group;
nacl_cg->default_groups = se_nacl->acl_default_groups;
nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group;
nacl_cg->default_groups[1] = &se_nacl->acl_auth_group;
nacl_cg->default_groups[2] = &se_nacl->acl_param_group;
nacl_cg->default_groups[3] = NULL;
config_group_init_type_name(&se_nacl->acl_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_base_cit);
config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_attrib_cit);
config_group_init_type_name(&se_nacl->acl_auth_group, "auth",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_auth_cit);
config_group_init_type_name(&se_nacl->acl_param_group, "param",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_param_cit);
return &se_nacl->acl_group;
}
static void target_fabric_drop_nodeacl(
struct config_group *group,
struct config_item *item)
{
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_acl_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_node_acl *se_nacl = container_of(to_config_group(item),
struct se_node_acl, acl_group);
struct config_item *df_item;
struct config_group *nacl_cg;
int i;
nacl_cg = &se_nacl->acl_group;
for (i = 0; nacl_cg->default_groups[i]; i++) {
df_item = &nacl_cg->default_groups[i]->cg_item;
nacl_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
config_item_put(item);
tf->tf_ops.fabric_drop_nodeacl(se_nacl);
}
static struct configfs_group_operations target_fabric_nacl_group_ops = {
.make_group = target_fabric_make_nodeacl,
.drop_item = target_fabric_drop_nodeacl,
};
TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL);
/* End of tfc_tpg_nacl_cit */
/* Start of tfc_tpg_np_base_cit */
CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group);
static struct configfs_item_operations target_fabric_np_base_item_ops = {
.show_attribute = target_fabric_np_base_attr_show,
.store_attribute = target_fabric_np_base_attr_store,
};
TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL);
/* End of tfc_tpg_np_base_cit */
/* Start of tfc_tpg_np_cit */
static struct config_group *target_fabric_make_np(
struct config_group *group,
const char *name)
{
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_np_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np;
if (!(tf->tf_ops.fabric_make_np)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
if (!(se_tpg_np) || IS_ERR(se_tpg_np))
return ERR_PTR(-EINVAL);
config_group_init_type_name(&se_tpg_np->tpg_np_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit);
return &se_tpg_np->tpg_np_group;
}
static void target_fabric_drop_np(
struct config_group *group,
struct config_item *item)
{
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_np_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
struct se_tpg_np *se_tpg_np = container_of(to_config_group(item),
struct se_tpg_np, tpg_np_group);
config_item_put(item);
tf->tf_ops.fabric_drop_np(se_tpg_np);
}
static struct configfs_group_operations target_fabric_np_group_ops = {
.make_group = &target_fabric_make_np,
.drop_item = &target_fabric_drop_np,
};
TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL);
/* End of tfc_tpg_np_cit */
/* Start of tfc_tpg_port_cit */
CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun);
#define TCM_PORT_ATTR(_name, _mode) \
static struct target_fabric_port_attribute target_fabric_port_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
target_fabric_port_show_attr_##_name, \
target_fabric_port_store_attr_##_name);
#define TCM_PORT_ATTR_RO(_name) \
__CONFIGFS_EATTR_RO(_name, \
target_fabric_port_show_attr_##_name);
/*
* alua_tg_pt_gp
*/
static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
}
static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp(
struct se_lun *lun,
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
}
TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR);
/*
* alua_tg_pt_offline
*/
static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_show_offline_bit(lun, page);
}
static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline(
struct se_lun *lun,
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_store_offline_bit(lun, page, count);
}
TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR);
/*
* alua_tg_pt_status
*/
static ssize_t target_fabric_port_show_attr_alua_tg_pt_status(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_show_secondary_status(lun, page);
}
static ssize_t target_fabric_port_store_attr_alua_tg_pt_status(
struct se_lun *lun,
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_store_secondary_status(lun, page, count);
}
TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR);
/*
* alua_tg_pt_write_md
*/
static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md(
struct se_lun *lun,
char *page)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_show_secondary_write_metadata(lun, page);
}
static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md(
struct se_lun *lun,
const char *page,
size_t count)
{
if (!(lun))
return -ENODEV;
if (!(lun->lun_sep))
return -ENODEV;
return core_alua_store_secondary_write_metadata(lun, page, count);
}
TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR);
static struct configfs_attribute *target_fabric_port_attrs[] = {
&target_fabric_port_alua_tg_pt_gp.attr,
&target_fabric_port_alua_tg_pt_offline.attr,
&target_fabric_port_alua_tg_pt_status.attr,
&target_fabric_port_alua_tg_pt_write_md.attr,
NULL,
};
CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group);
static int target_fabric_port_link(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
struct config_item *tpg_ci;
struct se_device *dev;
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_lun *lun_p;
struct se_portal_group *se_tpg;
struct se_subsystem_dev *se_dev = container_of(
to_config_group(se_dev_ci), struct se_subsystem_dev,
se_dev_group);
struct target_fabric_configfs *tf;
int ret;
tpg_ci = &lun_ci->ci_parent->ci_group->cg_item;
se_tpg = container_of(to_config_group(tpg_ci),
struct se_portal_group, tpg_group);
tf = se_tpg->se_tpg_wwn->wwn_tf;
if (lun->lun_se_dev != NULL) {
printk(KERN_ERR "Port Symlink already exists\n");
return -EEXIST;
}
dev = se_dev->se_dev_ptr;
if (!(dev)) {
printk(KERN_ERR "Unable to locate struct se_device pointer from"
" %s\n", config_item_name(se_dev_ci));
ret = -ENODEV;
goto out;
}
lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
lun->unpacked_lun);
if ((IS_ERR(lun_p)) || !(lun_p)) {
printk(KERN_ERR "core_dev_add_lun() failed\n");
ret = -EINVAL;
goto out;
}
if (tf->tf_ops.fabric_post_link) {
/*
* Call the optional fabric_post_link() to allow a
* fabric module to set up any additional state once
* core_dev_add_lun() has been called.
*/
tf->tf_ops.fabric_post_link(se_tpg, lun);
}
return 0;
out:
return ret;
}
static int target_fabric_port_unlink(
struct config_item *lun_ci,
struct config_item *se_dev_ci)
{
struct se_lun *lun = container_of(to_config_group(lun_ci),
struct se_lun, lun_group);
struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
if (tf->tf_ops.fabric_pre_unlink) {
/*
* Call the optional fabric_pre_unlink() to allow a
* fabric module to release any additional state before
* core_dev_del_lun() is called.
*/
tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
}
core_dev_del_lun(se_tpg, lun->unpacked_lun);
return 0;
}
static struct configfs_item_operations target_fabric_port_item_ops = {
.show_attribute = target_fabric_port_attr_show,
.store_attribute = target_fabric_port_attr_store,
.allow_link = target_fabric_port_link,
.drop_link = target_fabric_port_unlink,
};
TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs);
/* End of tfc_tpg_port_cit */
/* Start of tfc_tpg_lun_cit */
static struct config_group *target_fabric_make_lun(
struct config_group *group,
const char *name)
{
struct se_lun *lun;
struct se_portal_group *se_tpg = container_of(group,
struct se_portal_group, tpg_lun_group);
struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
unsigned long unpacked_lun;
if (strstr(name, "lun_") != name) {
printk(KERN_ERR "Unable to locate \'_\" in"
" \"lun_$LUN_NUMBER\"\n");
return ERR_PTR(-EINVAL);
}
if (strict_strtoul(name + 4, 0, &unpacked_lun) || unpacked_lun > UINT_MAX)
return ERR_PTR(-EINVAL);
lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
if (!(lun))
return ERR_PTR(-EINVAL);
config_group_init_type_name(&lun->lun_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_port_cit);
return &lun->lun_group;
}
static void target_fabric_drop_lun(
struct config_group *group,
struct config_item *item)
{
config_item_put(item);
}
static struct configfs_group_operations target_fabric_lun_group_ops = {
.make_group = &target_fabric_make_lun,
.drop_item = &target_fabric_drop_lun,
};
TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL);
/* End of tfc_tpg_lun_cit */
/* Start of tfc_tpg_attrib_cit */
CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group);
static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = {
.show_attribute = target_fabric_tpg_attrib_attr_show,
.store_attribute = target_fabric_tpg_attrib_attr_store,
};
TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL);
/* End of tfc_tpg_attrib_cit */
/* Start of tfc_tpg_param_cit */
CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group);
static struct configfs_item_operations target_fabric_tpg_param_item_ops = {
.show_attribute = target_fabric_tpg_param_attr_show,
.store_attribute = target_fabric_tpg_param_attr_store,
};
TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL);
/* End of tfc_tpg_param_cit */
/* Start of tfc_tpg_base_cit */
/*
* For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO()
*/
CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group);
static struct configfs_item_operations target_fabric_tpg_base_item_ops = {
.show_attribute = target_fabric_tpg_attr_show,
.store_attribute = target_fabric_tpg_attr_store,
};
TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL);
/* End of tfc_tpg_base_cit */
/* Start of tfc_tpg_cit */
static struct config_group *target_fabric_make_tpg(
struct config_group *group,
const char *name)
{
struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg;
if (!(tf->tf_ops.fabric_make_tpg)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
return ERR_PTR(-ENOSYS);
}
se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
if (!(se_tpg) || IS_ERR(se_tpg))
return ERR_PTR(-EINVAL);
/*
* Setup default groups from pre-allocated se_tpg->tpg_default_groups
*/
se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups;
se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group;
se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group;
se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group;
se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group;
se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_param_group;
se_tpg->tpg_group.default_groups[5] = NULL;
config_group_init_type_name(&se_tpg->tpg_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_base_cit);
config_group_init_type_name(&se_tpg->tpg_lun_group, "lun",
&TF_CIT_TMPL(tf)->tfc_tpg_lun_cit);
config_group_init_type_name(&se_tpg->tpg_np_group, "np",
&TF_CIT_TMPL(tf)->tfc_tpg_np_cit);
config_group_init_type_name(&se_tpg->tpg_acl_group, "acls",
&TF_CIT_TMPL(tf)->tfc_tpg_nacl_cit);
config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib",
&TF_CIT_TMPL(tf)->tfc_tpg_attrib_cit);
config_group_init_type_name(&se_tpg->tpg_param_group, "param",
&TF_CIT_TMPL(tf)->tfc_tpg_param_cit);
return &se_tpg->tpg_group;
}
static void target_fabric_drop_tpg(
struct config_group *group,
struct config_item *item)
{
struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group);
struct target_fabric_configfs *tf = wwn->wwn_tf;
struct se_portal_group *se_tpg = container_of(to_config_group(item),
struct se_portal_group, tpg_group);
struct config_group *tpg_cg = &se_tpg->tpg_group;
struct config_item *df_item;
int i;
/*
* Release default groups, but do not release tpg_cg->default_groups
* memory as it is statically allocated at se_tpg->tpg_default_groups.
*/
for (i = 0; tpg_cg->default_groups[i]; i++) {
df_item = &tpg_cg->default_groups[i]->cg_item;
tpg_cg->default_groups[i] = NULL;
config_item_put(df_item);
}
config_item_put(item);
tf->tf_ops.fabric_drop_tpg(se_tpg);
}
static struct configfs_group_operations target_fabric_tpg_group_ops = {
.make_group = target_fabric_make_tpg,
.drop_item = target_fabric_drop_tpg,
};
TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL);
/* End of tfc_tpg_cit */
/* Start of tfc_wwn_cit */
static struct config_group *target_fabric_make_wwn(
struct config_group *group,
const char *name)
{
struct target_fabric_configfs *tf = container_of(group,
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn;
if (!(tf->tf_ops.fabric_make_wwn)) {
printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
return ERR_PTR(-ENOSYS);
}
wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
if (!(wwn) || IS_ERR(wwn))
return ERR_PTR(-EINVAL);
wwn->wwn_tf = tf;
config_group_init_type_name(&wwn->wwn_group, name,
&TF_CIT_TMPL(tf)->tfc_tpg_cit);
return &wwn->wwn_group;
}
static void target_fabric_drop_wwn(
struct config_group *group,
struct config_item *item)
{
struct target_fabric_configfs *tf = container_of(group,
struct target_fabric_configfs, tf_group);
struct se_wwn *wwn = container_of(to_config_group(item),
struct se_wwn, wwn_group);
config_item_put(item);
tf->tf_ops.fabric_drop_wwn(wwn);
}
static struct configfs_group_operations target_fabric_wwn_group_ops = {
.make_group = target_fabric_make_wwn,
.drop_item = target_fabric_drop_wwn,
};
/*
* For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO()
*/
CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group);
static struct configfs_item_operations target_fabric_wwn_item_ops = {
.show_attribute = target_fabric_wwn_attr_show,
.store_attribute = target_fabric_wwn_attr_store,
};
TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL);
/* End of tfc_wwn_cit */
/* Start of tfc_discovery_cit */
CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs,
tf_disc_group);
static struct configfs_item_operations target_fabric_discovery_item_ops = {
.show_attribute = target_fabric_discovery_attr_show,
.store_attribute = target_fabric_discovery_attr_store,
};
TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL);
/* End of tfc_discovery_cit */
int target_fabric_setup_cits(struct target_fabric_configfs *tf)
{
target_fabric_setup_discovery_cit(tf);
target_fabric_setup_wwn_cit(tf);
target_fabric_setup_tpg_cit(tf);
target_fabric_setup_tpg_base_cit(tf);
target_fabric_setup_tpg_port_cit(tf);
target_fabric_setup_tpg_lun_cit(tf);
target_fabric_setup_tpg_np_cit(tf);
target_fabric_setup_tpg_np_base_cit(tf);
target_fabric_setup_tpg_attrib_cit(tf);
target_fabric_setup_tpg_param_cit(tf);
target_fabric_setup_tpg_nacl_cit(tf);
target_fabric_setup_tpg_nacl_base_cit(tf);
target_fabric_setup_tpg_nacl_attrib_cit(tf);
target_fabric_setup_tpg_nacl_auth_cit(tf);
target_fabric_setup_tpg_nacl_param_cit(tf);
target_fabric_setup_tpg_mappedlun_cit(tf);
return 0;
}
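/*
 * Illustrative note (not from the original source): the cits registered above
 * back the per-fabric configfs directory tree. Assuming the standard configfs
 * mount point, a hypothetical layout looks like:
 *
 *   /sys/kernel/config/target/$FABRIC/$WWN/tpgt_$TAG/
 *       lun/  np/  acls/  attrib/  param/
 *
 * where the lun, np, acls, attrib and param names come from the default
 * groups set up in target_fabric_make_tpg() above.
 */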
/*******************************************************************************
* Filename: target_core_fabric_lib.c
*
* This file contains generic high level protocol identifier and PR
* handlers for TCM fabric modules
*
* Copyright (c) 2010 Rising Tide Systems, Inc.
* Copyright (c) 2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/string.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_hba.h"
#include "target_core_pr.h"
/*
* Handlers for Serial Attached SCSI (SAS)
*/
u8 sas_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
/*
* Return a SAS Serial SCSI Protocol identifier for loopback operations
* This is defined in section 7.5.1 Table 362 in spc4r17
*/
return 0x6;
}
EXPORT_SYMBOL(sas_get_fabric_proto_ident);
u32 sas_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
unsigned char binary, *ptr;
int i;
u32 off = 4;
/*
* Set PROTOCOL IDENTIFIER to 6h for SAS
*/
buf[0] = 0x06;
/*
* From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
* over SAS Serial SCSI Protocol
*/
ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. prefix */
for (i = 0; i < 16; i += 2) {
binary = transport_asciihex_to_binaryhex(&ptr[i]);
buf[off++] = binary;
}
/*
* The SAS Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(sas_get_pr_transport_id);
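/*
 * Worked example (hypothetical values, not from the original source):
 * for an initiatorname of "naa.5001438000bd0f31", the "naa." prefix is
 * skipped and the 16 ASCII hex digits are packed into 8 binary bytes at
 * buf[4..11], with buf[0] = 0x06 (SAS protocol identifier) and the
 * remaining bytes of the 24-byte TransportID left as zero padding.
 */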
u32 sas_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
*format_code = 0;
/*
* From spc4r17, 7.5.4.7 TransportID for initiator ports using SCSI
* over SAS Serial SCSI Protocol
*
* The SAS Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(sas_get_pr_transport_id_len);
/*
* Used for handling SCSI fabric dependent TransportIDs in SPC-3 and above
* Persistent Reservation SPEC_I_PT=1 and PROUT REGISTER_AND_MOVE operations.
*/
char *sas_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
/*
* Assume the FORMAT CODE 00b from spc4r17, 7.5.4.7 TransportID
* for initiator ports using SCSI over SAS Serial SCSI Protocol
*
* The TransportID for a SAS Initiator Port has a fixed size of
* 24 bytes, and SAS does not contain an I_T nexus identifier,
* so *port_nexus_ptr is set to NULL.
*/
*port_nexus_ptr = NULL;
*out_tid_len = 24;
return (char *)&buf[4];
}
EXPORT_SYMBOL(sas_parse_pr_out_transport_id);
/*
* Handlers for Fibre Channel Protocol (FCP)
*/
u8 fc_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
return 0x0; /* 0 = fcp-2 per SPC4 section 7.5.1 */
}
EXPORT_SYMBOL(fc_get_fabric_proto_ident);
u32 fc_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
*format_code = 0;
/*
* The FC Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(fc_get_pr_transport_id_len);
u32 fc_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
unsigned char binary, *ptr;
int i;
u32 off = 8;
/*
* PROTOCOL IDENTIFIER is 0h for FCP-2
*
* From spc4r17, 7.5.4.2 TransportID for initiator ports using
* SCSI over Fibre Channel
*
* We convert the ASCII formatted N Port name into a binary
* encoded TransportID.
*/
ptr = &se_nacl->initiatorname[0];
for (i = 0; i < 24; ) {
if (!(strncmp(&ptr[i], ":", 1))) {
i++;
continue;
}
binary = transport_asciihex_to_binaryhex(&ptr[i]);
buf[off++] = binary;
i += 2;
}
/*
* The FC Transport ID is a hardcoded 24-byte length
*/
return 24;
}
EXPORT_SYMBOL(fc_get_pr_transport_id);
char *fc_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
/*
* The TransportID for an FC N Port has a fixed size of
* 24 bytes, and FC does not contain an I_T nexus identifier,
* so *port_nexus_ptr is set to NULL.
*/
*port_nexus_ptr = NULL;
*out_tid_len = 24;
return (char *)&buf[8];
}
EXPORT_SYMBOL(fc_parse_pr_out_transport_id);
/*
* Handlers for Internet Small Computer Systems Interface (iSCSI)
*/
u8 iscsi_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
/*
* This value is defined for "Internet SCSI (iSCSI)"
* in spc4r17 section 7.5.1 Table 362
*/
return 0x5;
}
EXPORT_SYMBOL(iscsi_get_fabric_proto_ident);
u32 iscsi_get_pr_transport_id(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code,
unsigned char *buf)
{
u32 off = 4, padding = 0;
u16 len = 0;
spin_lock_irq(&se_nacl->nacl_sess_lock);
/*
* Set PROTOCOL IDENTIFIER to 5h for iSCSI
*/
buf[0] = 0x05;
/*
* From spc4r17 Section 7.5.4.6: TransportID for initiator
* ports using SCSI over iSCSI.
*
* The null-terminated, null-padded (see 4.4.2) ISCSI NAME field
* shall contain the iSCSI name of an iSCSI initiator node (see
* RFC 3720). The first ISCSI NAME field byte containing an ASCII
* null character terminates the ISCSI NAME field without regard for
* the specified length of the iSCSI TransportID or the contents of
* the ADDITIONAL LENGTH field.
*/
len = sprintf(&buf[off], "%s", se_nacl->initiatorname);
/*
* Add Extra byte for NULL terminator
*/
len++;
/*
* If an ISID is present with the registration and *format_code == 1,
* use the iSCSI Initiator port TransportID format.
*
* Otherwise use the iSCSI Initiator device TransportID format, which
* does not contain the ASCII encoded iSCSI Initiator ISID value
* provided by the iSCSI Initiator during the iSCSI login process.
*/
if ((*format_code == 1) && (pr_reg->isid_present_at_reg)) {
/*
* Set FORMAT CODE 01b for iSCSI Initiator port TransportID
* format.
*/
buf[0] |= 0x40;
/*
* From spc4r17 Section 7.5.4.6: TransportID for initiator
* ports using SCSI over iSCSI. Table 390
*
* The SEPARATOR field shall contain the five ASCII
* characters ",i,0x".
*
* The null-terminated, null-padded ISCSI INITIATOR SESSION ID
* field shall contain the iSCSI initiator session identifier
* (see RFC 3720) in the form of ASCII characters that are the
* hexadecimal digits converted from the binary iSCSI initiator
* session identifier value. The first ISCSI INITIATOR SESSION
* ID field byte containing an ASCII null character terminates the field.
*/
buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
buf[off+len] = 0x69; off++; /* ASCII Character: "i" */
buf[off+len] = 0x2c; off++; /* ASCII Character: "," */
buf[off+len] = 0x30; off++; /* ASCII Character: "0" */
buf[off+len] = 0x78; off++; /* ASCII Character: "x" */
len += 5;
buf[off+len] = pr_reg->pr_reg_isid[0]; off++;
buf[off+len] = pr_reg->pr_reg_isid[1]; off++;
buf[off+len] = pr_reg->pr_reg_isid[2]; off++;
buf[off+len] = pr_reg->pr_reg_isid[3]; off++;
buf[off+len] = pr_reg->pr_reg_isid[4]; off++;
buf[off+len] = pr_reg->pr_reg_isid[5]; off++;
buf[off+len] = '\0'; off++;
len += 7;
}
spin_unlock_irq(&se_nacl->nacl_sess_lock);
/*
* The ADDITIONAL LENGTH field specifies the number of bytes that follow
* in the TransportID. The additional length shall be at least 20 and
* shall be a multiple of four.
*/
padding = ((-len) & 3);
if (padding != 0)
len += padding;
buf[2] = ((len >> 8) & 0xff);
buf[3] = (len & 0xff);
/*
* Increment value for total payload + header length for
* full status descriptor
*/
len += 4;
return len;
}
EXPORT_SYMBOL(iscsi_get_pr_transport_id);
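/*
 * Worked example (hypothetical values, not from the original source):
 * with *format_code == 1 and an ISID present at registration, an initiator
 * named "iqn.1993-08.org.debian:01:abcdef" yields a TransportID whose
 * ISCSI NAME field is followed by the ",i,0x" separator and the ASCII ISID
 * characters from pr_reg_isid[], e.g.
 * "iqn.1993-08.org.debian:01:abcdef,i,0x...". buf[0] ends up as 0x45
 * (protocol identifier 5h plus FORMAT CODE 01b), and the ADDITIONAL LENGTH
 * in buf[2..3] is padded up to a multiple of four.
 */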
u32 iscsi_get_pr_transport_id_len(
struct se_portal_group *se_tpg,
struct se_node_acl *se_nacl,
struct t10_pr_registration *pr_reg,
int *format_code)
{
u32 len = 0, padding = 0;
spin_lock_irq(&se_nacl->nacl_sess_lock);
len = strlen(se_nacl->initiatorname);
/*
* Add extra byte for NULL terminator
*/
len++;
/*
* If there is ISID present with the registration, use format code:
* 01b: iSCSI Initiator port TransportID format
*
* If there is not an active iSCSI session, use format code:
* 00b: iSCSI Initiator device TransportID format
*/
if (pr_reg->isid_present_at_reg) {
len += 5; /* For ",i,0x" ASCII separator */
len += 7; /* For iSCSI Initiator Session ID + Null terminator */
*format_code = 1;
} else
*format_code = 0;
spin_unlock_irq(&se_nacl->nacl_sess_lock);
/*
* The ADDITIONAL LENGTH field specifies the number of bytes that follow
* in the TransportID. The additional length shall be at least 20 and
* shall be a multiple of four.
*/
padding = ((-len) & 3);
if (padding != 0)
len += padding;
/*
* Increment value for total payload + header length for
* full status descriptor
*/
len += 4;
return len;
}
EXPORT_SYMBOL(iscsi_get_pr_transport_id_len);
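/*
 * Worked example (hypothetical lengths, not from the original source):
 * an initiatorname of 19 characters gives len = 20 after the NULL
 * terminator; padding = ((-20) & 3) = 0, so the returned length is
 * 20 + 4 = 24 bytes. An 18-character name gives len = 19, padding = 1,
 * and a returned length of 24 as well.
 */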
char *iscsi_parse_pr_out_transport_id(
struct se_portal_group *se_tpg,
const char *buf,
u32 *out_tid_len,
char **port_nexus_ptr)
{
char *p;
u32 tid_len, padding;
int i;
u16 add_len;
u8 format_code = (buf[0] & 0xc0);
/*
* Check for FORMAT CODE 00b or 01b from spc4r17, section 7.5.4.6:
*
* TransportID for initiator ports using SCSI over iSCSI,
* from Table 388 -- iSCSI TransportID formats.
*
* 00b Initiator port is identified using the world wide unique
* SCSI device name of the iSCSI initiator
* device containing the initiator port (see table 389).
* 01b Initiator port is identified using the world wide unique
* initiator port identifier (see table 390).10b to 11b
* Reserved
*/
if ((format_code != 0x00) && (format_code != 0x40)) {
printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
" Initiator Transport ID\n", format_code);
return NULL;
}
/*
* If the caller wants the TransportID Length, we set that value for the
* entire iSCSI Transport ID now.
*/
if (out_tid_len != NULL) {
/* The ADDITIONAL LENGTH field is a big-endian 16-bit value */
add_len = ((buf[2] & 0xff) << 8);
add_len |= (buf[3] & 0xff);
tid_len = strlen((char *)&buf[4]);
tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
tid_len += 1; /* Add one byte for NULL terminator */
padding = ((-tid_len) & 3);
if (padding != 0)
tid_len += padding;
if ((add_len + 4) != tid_len) {
printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
"does not match calculated tid_len: %u,"
" using tid_len instead\n", add_len+4, tid_len);
*out_tid_len = tid_len;
} else
*out_tid_len = (add_len + 4);
}
/*
* Check for ',i,0x' separator between iSCSI Name and iSCSI Initiator
* Session ID as defined in Table 390 - iSCSI initiator port TransportID
* format.
*/
if (format_code == 0x40) {
p = strstr((char *)&buf[4], ",i,0x");
if (!(p)) {
printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
" for Initiator port identifier: %s\n",
(char *)&buf[4]);
return NULL;
}
*p = '\0'; /* Terminate iSCSI Name */
p += 5; /* Skip over ",i,0x" separator */
*port_nexus_ptr = p;
/*
* Go ahead and do the lower case conversion of the received
* 12 ASCII characters representing the ISID in the TransportID
* for comparison against the running iSCSI session's ISID from
* iscsi_target.c:lio_sess_get_initiator_sid()
*/
for (i = 0; i < 12; i++) {
if (isdigit(*p)) {
p++;
continue;
}
*p = tolower(*p);
p++;
}
}
return (char *)&buf[4];
}
EXPORT_SYMBOL(iscsi_parse_pr_out_transport_id);
/*******************************************************************************
* Filename: target_core_file.c
*
* This file contains the Storage Engine <-> FILEIO transport specific functions
*
* Copyright (c) 2005 PyX Technologies, Inc.
* Copyright (c) 2005-2006 SBE, Inc. All Rights Reserved.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include "target_core_file.h"
#if 1
#define DEBUG_FD_CACHE(x...) printk(x)
#else
#define DEBUG_FD_CACHE(x...)
#endif
#if 1
#define DEBUG_FD_FUA(x...) printk(x)
#else
#define DEBUG_FD_FUA(x...)
#endif
static struct se_subsystem_api fileio_template;
/* fd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
*/
static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct fd_host *fd_host;
fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
if (!(fd_host)) {
printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
return -1;
}
fd_host->fd_host_id = host_id;
atomic_set(&hba->left_queue_depth, FD_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, FD_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) fd_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
" Target Core with TCQ Depth: %d MaxSectors: %u\n",
hba->hba_id, fd_host->fd_host_id,
atomic_read(&hba->max_queue_depth), FD_MAX_SECTORS);
return 0;
}
static void fd_detach_hba(struct se_hba *hba)
{
struct fd_host *fd_host = hba->hba_ptr;
printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
" Target Core\n", hba->hba_id, fd_host->fd_host_id);
kfree(fd_host);
hba->hba_ptr = NULL;
}
static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct fd_dev *fd_dev;
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
if (!(fd_dev)) {
printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
return NULL;
}
fd_dev->fd_host = fd_host;
printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
return fd_dev;
}
/* fd_create_virtdevice(): (Part of se_subsystem_api_t template)
*
*
*/
static struct se_device *fd_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
char *dev_p = NULL;
struct se_device *dev;
struct se_dev_limits dev_limits;
struct queue_limits *limits;
struct fd_dev *fd_dev = (struct fd_dev *) p;
struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
mm_segment_t old_fs;
struct file *file;
struct inode *inode = NULL;
int dev_flags = 0, flags;
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
old_fs = get_fs();
set_fs(get_ds());
dev_p = getname(fd_dev->fd_dev_name);
set_fs(old_fs);
if (IS_ERR(dev_p)) {
printk(KERN_ERR "getname(%s) failed: %lu\n",
fd_dev->fd_dev_name, IS_ERR(dev_p));
goto fail;
}
#if 0
if (di->no_create_file)
flags = O_RDWR | O_LARGEFILE;
else
flags = O_RDWR | O_CREAT | O_LARGEFILE;
#else
flags = O_RDWR | O_CREAT | O_LARGEFILE;
#endif
/* flags |= O_DIRECT; */
/*
* If fd_buffered_io=1 has not been set explicitly (the default),
* use O_SYNC to force FILEIO writes to disk.
*/
if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO))
flags |= O_SYNC;
file = filp_open(dev_p, flags, 0600);
if (IS_ERR(file) || !file || !file->f_dentry) {
printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
goto fail;
}
fd_dev->fd_file = file;
/*
* If using a block backend with this struct file, we extract
* fd_dev->fd_[block,dev]_size from struct block_device.
*
* Otherwise, we use the passed fd_size= from configfs
*/
inode = file->f_mapping->host;
if (S_ISBLK(inode->i_mode)) {
struct request_queue *q;
/*
* Set up the local-scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
*/
q = bdev_get_queue(inode->i_bdev);
limits = &dev_limits.limits;
limits->logical_block_size = bdev_logical_block_size(inode->i_bdev);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
/*
* Determine the device size in bytes as i_size_read() minus
* one (1) logical sector of the underlying struct block_device.
*/
fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
fd_dev->fd_block_size);
printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
" block_device blocks: %llu logical_block_size: %d\n",
fd_dev->fd_dev_size,
div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
} else {
if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
printk(KERN_ERR "FILEIO: Missing fd_dev_size="
" parameter, and no backing struct"
" block_device\n");
goto fail;
}
limits = &dev_limits.limits;
limits->logical_block_size = FD_BLOCKSIZE;
limits->max_hw_sectors = FD_MAX_SECTORS;
limits->max_sectors = FD_MAX_SECTORS;
fd_dev->fd_block_size = FD_BLOCKSIZE;
}
dev_limits.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = FD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba, &fileio_template,
se_dev, dev_flags, (void *)fd_dev,
&dev_limits, "FILEIO", FD_VERSION);
if (!(dev))
goto fail;
fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
fd_dev->fd_queue_depth = dev->queue_depth;
printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
fd_dev->fd_dev_name, fd_dev->fd_dev_size);
putname(dev_p);
return dev;
fail:
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
putname(dev_p);
return NULL;
}
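/*
 * Worked example (hypothetical values, not from the original source):
 * for a block-device backed struct file with 512-byte logical blocks and
 * i_size_read() == 1073741824 (1 GiB), fd_dev_size is set to
 * 1073741824 - 512 = 1073741312 bytes; for a regular file the size comes
 * from the fd_dev_size= configfs parameter instead.
 */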
/* fd_free_device(): (Part of se_subsystem_api_t template)
*
*
*/
static void fd_free_device(void *p)
{
struct fd_dev *fd_dev = (struct fd_dev *) p;
if (fd_dev->fd_file) {
filp_close(fd_dev->fd_file, NULL);
fd_dev->fd_file = NULL;
}
kfree(fd_dev);
}
static inline struct fd_request *FILE_REQ(struct se_task *task)
{
return container_of(task, struct fd_request, fd_task);
}
static struct se_task *
fd_alloc_task(struct se_cmd *cmd)
{
struct fd_request *fd_req;
fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
if (!(fd_req)) {
printk(KERN_ERR "Unable to allocate struct fd_request\n");
return NULL;
}
fd_req->fd_dev = SE_DEV(cmd)->dev_ptr;
return &fd_req->fd_task;
}
static int fd_do_readv(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
int ret = 0, i;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
return -1;
}
for (i = 0; i < task->task_sg_num; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
set_fs(old_fs);
kfree(iov);
/*
* Return zeros and GOOD status even if the READ did not return
* the expected virt_size for struct file w/o a backing struct
* block_device.
*/
if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_readv() returned %d,"
" expecting %d for S_ISBLK\n", ret,
(int)task->task_size);
return -1;
}
} else {
if (ret < 0) {
printk(KERN_ERR "vfs_readv() returned %d for non"
" S_ISBLK\n", ret);
return -1;
}
}
return 1;
}
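/*
 * Illustrative note (hypothetical values, not from the original source):
 * each scatterlist entry is mapped 1:1 onto an iovec above, so a task with
 * two 4096-byte SG entries produces iov[0] and iov[1] with iov_len = 4096
 * each, and vfs_readv() is expected to return task->task_size == 8192 for
 * a block-backed struct file.
 */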
static int fd_do_writev(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
struct file *fd = req->fd_dev->fd_file;
struct scatterlist *sg = task->task_sg;
struct iovec *iov;
mm_segment_t old_fs;
loff_t pos = (task->task_lba * DEV_ATTRIB(task->se_dev)->block_size);
int ret, i = 0;
iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
if (!(iov)) {
printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
return -1;
}
for (i = 0; i < task->task_sg_num; i++) {
iov[i].iov_len = sg[i].length;
iov[i].iov_base = sg_virt(&sg[i]);
}
old_fs = get_fs();
set_fs(get_ds());
ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
set_fs(old_fs);
kfree(iov);
if (ret < 0 || ret != task->task_size) {
printk(KERN_ERR "vfs_writev() returned %d\n", ret);
return -1;
}
return 1;
}
static void fd_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
int immed = (cmd->t_task->t_task_cdb[1] & 0x2);
loff_t start, end;
int ret;
/*
* If the Immediate bit is set, queue up the GOOD response
* for this SYNCHRONIZE_CACHE op
*/
if (immed)
transport_complete_sync_cache(cmd, 1);
/*
* Determine if we will be flushing the entire device.
*/
if (cmd->t_task->t_task_lba == 0 && cmd->data_length == 0) {
start = 0;
end = LLONG_MAX;
} else {
start = cmd->t_task->t_task_lba * DEV_ATTRIB(dev)->block_size;
if (cmd->data_length)
end = start + cmd->data_length;
else
end = LLONG_MAX;
}
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
}
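/*
 * Worked example (hypothetical values, not from the original source):
 * a SYNCHRONIZE_CACHE with t_task_lba == 8, data_length == 4096 and a
 * 512-byte block size flushes the byte range starting at 4096 and ending
 * at 8192; a CDB with LBA 0 and no data length flushes the whole file via
 * the range [0, LLONG_MAX].
 */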
/*
* Tell TCM Core that we are capable of WriteCache emulation for
* an underlying struct se_device.
*/
static int fd_emulated_write_cache(struct se_device *dev)
{
return 1;
}
static int fd_emulated_dpo(struct se_device *dev)
{
return 0;
}
/*
* Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
* for TYPE_DISK.
*/
static int fd_emulated_fua_write(struct se_device *dev)
{
return 1;
}
static int fd_emulated_fua_read(struct se_device *dev)
{
return 0;
}
/*
* WRITE Force Unit Access (FUA) emulation on a per struct se_task
* LBA range basis.
*/
static void fd_emulate_write_fua(struct se_cmd *cmd, struct se_task *task)
{
struct se_device *dev = cmd->se_dev;
struct fd_dev *fd_dev = dev->dev_ptr;
loff_t start = task->task_lba * DEV_ATTRIB(dev)->block_size;
loff_t end = start + task->task_size;
int ret;
DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
task->task_lba, task->task_size);
ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
if (ret != 0)
printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
}
static int fd_do_task(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = cmd->se_dev;
int ret = 0;
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
*/
if (task->task_data_direction == DMA_FROM_DEVICE) {
ret = fd_do_readv(task);
} else {
ret = fd_do_writev(task);
if (ret > 0 &&
DEV_ATTRIB(dev)->emulate_write_cache > 0 &&
DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
T_TASK(cmd)->t_tasks_fua) {
/*
* We might need to be a bit smarter here
* and return some sense data to let the initiator
* know the FUA WRITE cache sync failed?
*/
fd_emulate_write_fua(cmd, task);
}
}
if (ret < 0)
return ret;
if (ret) {
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
}
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/* fd_free_task(): (Part of se_subsystem_api_t template)
*
*
*/
static void fd_free_task(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
kfree(req);
}
enum {
Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io, Opt_err
};
static match_table_t tokens = {
{Opt_fd_dev_name, "fd_dev_name=%s"},
{Opt_fd_dev_size, "fd_dev_size=%s"},
{Opt_fd_buffered_io, "fd_buffered_io=%d"},
{Opt_err, NULL}
};
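/*
 * Illustrative note (hypothetical values, not from the original source):
 * fd_set_configfs_dev_params() below parses a comma separated control
 * string written through configfs, e.g.
 *
 *   fd_dev_name=/tmp/fileio0,fd_dev_size=4194304,fd_buffered_io=1
 *
 * where fd_dev_name= and fd_dev_size= set FBDF_HAS_PATH / FBDF_HAS_SIZE,
 * and fd_buffered_io=1 selects buffered instead of O_SYNC I/O.
 */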
static ssize_t fd_set_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page, ssize_t count)
{
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *arg_p, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_fd_dev_name:
snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
"%s", match_strdup(&args[0]));
printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
fd_dev->fd_dev_name);
fd_dev->fbd_flags |= FBDF_HAS_PATH;
break;
case Opt_fd_dev_size:
arg_p = match_strdup(&args[0]);
ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
if (ret < 0) {
printk(KERN_ERR "strict_strtoull() failed for"
" fd_dev_size=\n");
goto out;
}
printk(KERN_INFO "FILEIO: Referencing Size: %llu"
" bytes\n", fd_dev->fd_dev_size);
fd_dev->fbd_flags |= FBDF_HAS_SIZE;
break;
case Opt_fd_buffered_io:
match_int(args, &arg);
if (arg != 1) {
printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
ret = -EINVAL;
goto out;
}
printk(KERN_INFO "FILEIO: Using buffered I/O"
" operations for struct fd_dev\n");
fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
break;
default:
break;
}
}
out:
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
printk(KERN_ERR "Missing fd_dev_name=\n");
return -1;
}
return 0;
}
static ssize_t fd_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = 0;
bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n",
fd_dev->fd_dev_name, fd_dev->fd_dev_size,
(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
"Buffered" : "Synchronous");
return bl;
}
/* fd_get_cdb(): (Part of se_subsystem_api_t template)
*
*
*/
static unsigned char *fd_get_cdb(struct se_task *task)
{
struct fd_request *req = FILE_REQ(task);
return req->fd_scsi_cdb;
}
/* fd_get_device_rev(): (Part of se_subsystem_api_t template)
*
*
*/
static u32 fd_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
/* fd_get_device_type(): (Part of se_subsystem_api_t template)
*
*
*/
static u32 fd_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
}
static sector_t fd_get_blocks(struct se_device *dev)
{
struct fd_dev *fd_dev = dev->dev_ptr;
unsigned long long blocks_long = div_u64(fd_dev->fd_dev_size,
DEV_ATTRIB(dev)->block_size);
return blocks_long;
}
static struct se_subsystem_api fileio_template = {
.name = "fileio",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.attach_hba = fd_attach_hba,
.detach_hba = fd_detach_hba,
.allocate_virtdevice = fd_allocate_virtdevice,
.create_virtdevice = fd_create_virtdevice,
.free_device = fd_free_device,
.dpo_emulated = fd_emulated_dpo,
.fua_write_emulated = fd_emulated_fua_write,
.fua_read_emulated = fd_emulated_fua_read,
.write_cache_emulated = fd_emulated_write_cache,
.alloc_task = fd_alloc_task,
.do_task = fd_do_task,
.do_sync_cache = fd_emulate_sync_cache,
.free_task = fd_free_task,
.check_configfs_dev_params = fd_check_configfs_dev_params,
.set_configfs_dev_params = fd_set_configfs_dev_params,
.show_configfs_dev_params = fd_show_configfs_dev_params,
.get_cdb = fd_get_cdb,
.get_device_rev = fd_get_device_rev,
.get_device_type = fd_get_device_type,
.get_blocks = fd_get_blocks,
};
static int __init fileio_module_init(void)
{
return transport_subsystem_register(&fileio_template);
}
static void fileio_module_exit(void)
{
transport_subsystem_release(&fileio_template);
}
MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(fileio_module_init);
module_exit(fileio_module_exit);
#ifndef TARGET_CORE_FILE_H
#define TARGET_CORE_FILE_H
#define FD_VERSION "4.0"
#define FD_MAX_DEV_NAME 256
/* Maximum queuedepth for the FILEIO HBA */
#define FD_HBA_QUEUE_DEPTH 256
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
#define FD_MAX_SECTORS 1024
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
struct fd_request {
struct se_task fd_task;
/* SCSI CDB from iSCSI Command PDU */
unsigned char fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
/* FILEIO device */
struct fd_dev *fd_dev;
} ____cacheline_aligned;
#define FBDF_HAS_PATH 0x01
#define FBDF_HAS_SIZE 0x02
#define FDBD_USE_BUFFERED_IO 0x04
struct fd_dev {
u32 fbd_flags;
unsigned char fd_dev_name[FD_MAX_DEV_NAME];
/* Unique FILEIO Device ID in FILEIO HBA */
u32 fd_dev_id;
/* Number of SG tables in sg_table_array */
u32 fd_table_count;
u32 fd_queue_depth;
u32 fd_block_size;
unsigned long long fd_dev_size;
struct file *fd_file;
/* FILEIO HBA device is connected to */
struct fd_host *fd_host;
} ____cacheline_aligned;
struct fd_host {
u32 fd_host_dev_id_count;
/* Unique FILEIO Host ID */
u32 fd_host_id;
} ____cacheline_aligned;
#endif /* TARGET_CORE_FILE_H */
/*******************************************************************************
* Filename: target_core_hba.c
*
* This file contains the TCM HBA Transport related functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include "target_core_hba.h"
static LIST_HEAD(subsystem_list);
static DEFINE_MUTEX(subsystem_mutex);
int transport_subsystem_register(struct se_subsystem_api *sub_api)
{
struct se_subsystem_api *s;
INIT_LIST_HEAD(&sub_api->sub_api_list);
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!(strcmp(s->name, sub_api->name))) {
printk(KERN_ERR "%p is already registered with"
" duplicate name %s, unable to process"
" request\n", s, s->name);
mutex_unlock(&subsystem_mutex);
return -EEXIST;
}
}
list_add_tail(&sub_api->sub_api_list, &subsystem_list);
mutex_unlock(&subsystem_mutex);
printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
" %p\n", sub_api->name, sub_api->owner);
return 0;
}
EXPORT_SYMBOL(transport_subsystem_register);
void transport_subsystem_release(struct se_subsystem_api *sub_api)
{
mutex_lock(&subsystem_mutex);
list_del(&sub_api->sub_api_list);
mutex_unlock(&subsystem_mutex);
}
EXPORT_SYMBOL(transport_subsystem_release);
static struct se_subsystem_api *core_get_backend(const char *sub_name)
{
struct se_subsystem_api *s;
mutex_lock(&subsystem_mutex);
list_for_each_entry(s, &subsystem_list, sub_api_list) {
if (!strcmp(s->name, sub_name))
goto found;
}
mutex_unlock(&subsystem_mutex);
return NULL;
found:
if (s->owner && !try_module_get(s->owner))
s = NULL;
mutex_unlock(&subsystem_mutex);
return s;
}
struct se_hba *
core_alloc_hba(const char *plugin_name, u32 plugin_dep_id, u32 hba_flags)
{
struct se_hba *hba;
int ret = 0;
hba = kzalloc(sizeof(*hba), GFP_KERNEL);
if (!hba) {
printk(KERN_ERR "Unable to allocate struct se_hba\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&hba->hba_dev_list);
spin_lock_init(&hba->device_lock);
spin_lock_init(&hba->hba_queue_lock);
mutex_init(&hba->hba_access_mutex);
hba->hba_index = scsi_get_new_index(SCSI_INST_INDEX);
hba->hba_flags |= hba_flags;
atomic_set(&hba->max_queue_depth, 0);
atomic_set(&hba->left_queue_depth, 0);
hba->transport = core_get_backend(plugin_name);
if (!hba->transport) {
ret = -EINVAL;
goto out_free_hba;
}
ret = hba->transport->attach_hba(hba, plugin_dep_id);
if (ret < 0)
goto out_module_put;
spin_lock(&se_global->hba_lock);
hba->hba_id = se_global->g_hba_id_counter++;
list_add_tail(&hba->hba_list, &se_global->g_hba_list);
spin_unlock(&se_global->hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
" Core\n", hba->hba_id);
return hba;
out_module_put:
if (hba->transport->owner)
module_put(hba->transport->owner);
hba->transport = NULL;
out_free_hba:
kfree(hba);
return ERR_PTR(ret);
}
int
core_delete_hba(struct se_hba *hba)
{
struct se_device *dev, *dev_tmp;
spin_lock(&hba->device_lock);
list_for_each_entry_safe(dev, dev_tmp, &hba->hba_dev_list, dev_list) {
se_clear_dev_ports(dev);
spin_unlock(&hba->device_lock);
se_release_device_for_hba(dev);
spin_lock(&hba->device_lock);
}
spin_unlock(&hba->device_lock);
hba->transport->detach_hba(hba);
spin_lock(&se_global->hba_lock);
list_del(&hba->hba_list);
spin_unlock(&se_global->hba_lock);
printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
" Core\n", hba->hba_id);
if (hba->transport->owner)
module_put(hba->transport->owner);
hba->transport = NULL;
kfree(hba);
return 0;
}
#ifndef TARGET_CORE_HBA_H
#define TARGET_CORE_HBA_H
extern struct se_hba *core_alloc_hba(const char *, u32, u32);
extern int core_delete_hba(struct se_hba *);
#endif /* TARGET_CORE_HBA_H */
/*******************************************************************************
* Filename: target_core_iblock.c
*
* This file contains the Storage Engine <-> Linux BlockIO transport
* specific functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include "target_core_iblock.h"
#if 0
#define DEBUG_IBLOCK(x...) printk(x)
#else
#define DEBUG_IBLOCK(x...)
#endif
static struct se_subsystem_api iblock_template;
static void iblock_bio_done(struct bio *, int);
/* iblock_attach_hba(): (Part of se_subsystem_api_t template)
*
*
*/
static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
struct iblock_hba *ib_host;
ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
if (!(ib_host)) {
printk(KERN_ERR "Unable to allocate memory for"
" struct iblock_hba\n");
return -ENOMEM;
}
ib_host->iblock_host_id = host_id;
atomic_set(&hba->left_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, IBLOCK_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) ib_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic"
" Target Core TCQ Depth: %d\n", hba->hba_id,
ib_host->iblock_host_id, atomic_read(&hba->max_queue_depth));
return 0;
}
static void iblock_detach_hba(struct se_hba *hba)
{
struct iblock_hba *ib_host = hba->hba_ptr;
printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
kfree(ib_host);
hba->hba_ptr = NULL;
}
static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct iblock_dev *ib_dev = NULL;
struct iblock_hba *ib_host = hba->hba_ptr;
ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
if (!(ib_dev)) {
printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
return NULL;
}
ib_dev->ibd_host = ib_host;
printk(KERN_INFO "IBLOCK: Allocated ib_dev for %s\n", name);
return ib_dev;
}
static struct se_device *iblock_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
struct iblock_dev *ib_dev = p;
struct se_device *dev;
struct se_dev_limits dev_limits;
struct block_device *bd = NULL;
struct request_queue *q;
struct queue_limits *limits;
u32 dev_flags = 0;
if (!(ib_dev)) {
printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
return NULL;
}
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
/*
* These settings need to be made tunable.
*/
ib_dev->ibd_bio_set = bioset_create(32, 64);
if (!(ib_dev->ibd_bio_set)) {
printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
return NULL;
}
printk(KERN_INFO "IBLOCK: Created bio_set()\n");
/*
* iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
* has already been set before 'echo 1 > $HBA/$DEV/enable' can proceed.
*/
printk(KERN_INFO "IBLOCK: Claiming struct block_device: %s\n",
ib_dev->ibd_udev_path);
bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev);
if (!(bd))
goto failed;
/*
* Set up the local-scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
*/
q = bdev_get_queue(bd);
limits = &dev_limits.limits;
limits->logical_block_size = bdev_logical_block_size(bd);
limits->max_hw_sectors = queue_max_hw_sectors(q);
limits->max_sectors = queue_max_sectors(q);
dev_limits.hw_queue_depth = IBLOCK_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = IBLOCK_DEVICE_QUEUE_DEPTH;
ib_dev->ibd_major = MAJOR(bd->bd_dev);
ib_dev->ibd_minor = MINOR(bd->bd_dev);
ib_dev->ibd_bd = bd;
dev = transport_add_device_to_core_hba(hba,
&iblock_template, se_dev, dev_flags, (void *)ib_dev,
&dev_limits, "IBLOCK", IBLOCK_VERSION);
if (!(dev))
goto failed;
ib_dev->ibd_depth = dev->queue_depth;
/*
* Check if the underlying struct block_device request_queue supports
* the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI and TRIM
* in ATA, in which case we need to set TPE=1.
*/
if (blk_queue_discard(bdev_get_queue(bd))) {
struct request_queue *q = bdev_get_queue(bd);
DEV_ATTRIB(dev)->max_unmap_lba_count =
q->limits.max_discard_sectors;
/*
* Currently hardcoded to 1 in Linux/SCSI code.
*/
DEV_ATTRIB(dev)->max_unmap_block_desc_count = 1;
DEV_ATTRIB(dev)->unmap_granularity =
q->limits.discard_granularity;
DEV_ATTRIB(dev)->unmap_granularity_alignment =
q->limits.discard_alignment;
printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
" disabled by default\n");
}
return dev;
failed:
if (ib_dev->ibd_bio_set) {
bioset_free(ib_dev->ibd_bio_set);
ib_dev->ibd_bio_set = NULL;
}
ib_dev->ibd_bd = NULL;
ib_dev->ibd_major = 0;
ib_dev->ibd_minor = 0;
return NULL;
}
static void iblock_free_device(void *p)
{
struct iblock_dev *ib_dev = p;
blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
bioset_free(ib_dev->ibd_bio_set);
kfree(ib_dev);
}
static inline struct iblock_req *IBLOCK_REQ(struct se_task *task)
{
return container_of(task, struct iblock_req, ib_task);
}
static struct se_task *
iblock_alloc_task(struct se_cmd *cmd)
{
struct iblock_req *ib_req;
ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!(ib_req)) {
printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
return NULL;
}
ib_req->ib_dev = SE_DEV(cmd)->dev_ptr;
atomic_set(&ib_req->ib_bio_cnt, 0);
return &ib_req->ib_task;
}
static unsigned long long iblock_emulate_read_cap_with_block_size(
struct se_device *dev,
struct block_device *bd,
struct request_queue *q)
{
unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
bdev_logical_block_size(bd)) - 1);
u32 block_size = bdev_logical_block_size(bd);
if (block_size == DEV_ATTRIB(dev)->block_size)
return blocks_long;
switch (block_size) {
case 4096:
switch (DEV_ATTRIB(dev)->block_size) {
case 2048:
blocks_long <<= 1;
break;
case 1024:
blocks_long <<= 2;
break;
case 512:
blocks_long <<= 3;
default:
break;
}
break;
case 2048:
switch (DEV_ATTRIB(dev)->block_size) {
case 4096:
blocks_long >>= 1;
break;
case 1024:
blocks_long <<= 1;
break;
case 512:
blocks_long <<= 2;
break;
default:
break;
}
break;
case 1024:
switch (DEV_ATTRIB(dev)->block_size) {
case 4096:
blocks_long >>= 2;
break;
case 2048:
blocks_long >>= 1;
break;
case 512:
blocks_long <<= 1;
break;
default:
break;
}
break;
case 512:
switch (DEV_ATTRIB(dev)->block_size) {
case 4096:
blocks_long >>= 3;
break;
case 2048:
blocks_long >>= 2;
break;
case 1024:
blocks_long >>= 1;
break;
default:
break;
}
break;
default:
break;
}
return blocks_long;
}
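/*
 * Worked example (hypothetical values, not from the original source):
 * a struct block_device with 4096-byte logical blocks and
 * i_size_read() == 1073741824 (1 GiB) yields blocks_long = 262143; if
 * DEV_ATTRIB(dev)->block_size is 512, the nested switch above scales this
 * by a factor of 8 (blocks_long <<= 3) to 2097144.
 */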
/*
* Emulate SYNCHRONIZE_CACHE_*
*/
static void iblock_emulate_sync_cache(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
int immed = (T_TASK(cmd)->t_task_cdb[1] & 0x2);
sector_t error_sector;
int ret;
/*
* If the Immediate bit is set, queue up the GOOD response
* for this SYNCHRONIZE_CACHE op
*/
if (immed)
transport_complete_sync_cache(cmd, 1);
/*
* blkdev_issue_flush() does not support specifying a range, so
* we have to flush the entire cache.
*/
ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
if (ret != 0) {
printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
" error_sector: %llu\n", ret,
(unsigned long long)error_sector);
}
if (!immed)
transport_complete_sync_cache(cmd, ret == 0);
}
/*
* Tell TCM Core that we are capable of WriteCache emulation for
* an underlying struct se_device.
*/
static int iblock_emulated_write_cache(struct se_device *dev)
{
return 1;
}
static int iblock_emulated_dpo(struct se_device *dev)
{
return 0;
}
/*
* Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs
* for TYPE_DISK.
*/
static int iblock_emulated_fua_write(struct se_device *dev)
{
return 1;
}
static int iblock_emulated_fua_read(struct se_device *dev)
{
return 0;
}
static int iblock_do_task(struct se_task *task)
{
struct se_device *dev = task->task_se_cmd->se_dev;
struct iblock_req *req = IBLOCK_REQ(task);
struct iblock_dev *ibd = (struct iblock_dev *)req->ib_dev;
struct request_queue *q = bdev_get_queue(ibd->ibd_bd);
struct bio *bio = req->ib_bio, *nbio = NULL;
int rw;
if (task->task_data_direction == DMA_TO_DEVICE) {
/*
* Force data to disk if we pretend to not have a volatile
* write cache, or the initiator set the Force Unit Access bit.
*/
if (DEV_ATTRIB(dev)->emulate_write_cache == 0 ||
(DEV_ATTRIB(dev)->emulate_fua_write > 0 &&
T_TASK(task->task_se_cmd)->t_tasks_fua))
rw = WRITE_FUA;
else
rw = WRITE;
} else {
rw = READ;
}
while (bio) {
nbio = bio->bi_next;
bio->bi_next = NULL;
DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
submit_bio(rw, bio);
bio = nbio;
}
if (q->unplug_fn)
q->unplug_fn(q);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)
{
struct iblock_dev *ibd = dev->dev_ptr;
struct block_device *bd = ibd->ibd_bd;
int barrier = 0;
return blkdev_issue_discard(bd, lba, range, GFP_KERNEL, barrier);
}
static void iblock_free_task(struct se_task *task)
{
struct iblock_req *req = IBLOCK_REQ(task);
struct bio *bio, *hbio = req->ib_bio;
/*
* We only release the bio(s) here if iblock_bio_done() has not called
* bio_put() -> iblock_bio_destructor().
*/
while (hbio != NULL) {
bio = hbio;
hbio = hbio->bi_next;
bio->bi_next = NULL;
bio_put(bio);
}
kfree(req);
}
enum {
Opt_udev_path, Opt_force, Opt_err
};
static match_table_t tokens = {
{Opt_udev_path, "udev_path=%s"},
{Opt_force, "force=%d"},
{Opt_err, NULL}
};
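/*
 * Illustrative note (hypothetical values, not from the original source):
 * iblock_set_configfs_dev_params() below accepts a control string such as
 *
 *   udev_path=/dev/sdb
 *
 * which sets IBDF_HAS_UDEV_PATH so that iblock_create_virtdevice() can
 * later claim the struct block_device via blkdev_get_by_path().
 */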
static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page, ssize_t count)
{
struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_udev_path:
if (ib_dev->ibd_bd) {
printk(KERN_ERR "Unable to set udev_path= while"
" ib_dev->ibd_bd exists\n");
ret = -EEXIST;
goto out;
}
ret = snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
"%s", match_strdup(&args[0]));
printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
ib_dev->ibd_udev_path);
ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
break;
case Opt_force:
match_int(args, &arg);
ib_dev->ibd_force = arg;
printk(KERN_INFO "IBLOCK: Set force=%d\n",
ib_dev->ibd_force);
break;
default:
break;
}
}
out:
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t iblock_check_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev)
{
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
return -1;
}
return 0;
}
static ssize_t iblock_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
struct block_device *bd = ibd->ibd_bd;
char buf[BDEVNAME_SIZE];
ssize_t bl = 0;
if (bd)
bl += sprintf(b + bl, "iBlock device: %s",
bdevname(bd, buf));
if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH) {
bl += sprintf(b + bl, " UDEV PATH: %s\n",
ibd->ibd_udev_path);
} else
bl += sprintf(b + bl, "\n");
bl += sprintf(b + bl, " ");
if (bd) {
bl += sprintf(b + bl, "Major: %d Minor: %d %s\n",
ibd->ibd_major, ibd->ibd_minor, (!bd->bd_contains) ?
"" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
"CLAIMED: IBLOCK" : "CLAIMED: OS");
} else {
bl += sprintf(b + bl, "Major: %d Minor: %d\n",
ibd->ibd_major, ibd->ibd_minor);
}
return bl;
}
static void iblock_bio_destructor(struct bio *bio)
{
struct se_task *task = bio->bi_private;
struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
bio_free(bio, ib_dev->ibd_bio_set);
}
static struct bio *iblock_get_bio(
struct se_task *task,
struct iblock_req *ib_req,
struct iblock_dev *ib_dev,
int *ret,
sector_t lba,
u32 sg_num)
{
struct bio *bio;
bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
if (!(bio)) {
printk(KERN_ERR "Unable to allocate memory for bio\n");
*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
return NULL;
}
DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
bio->bi_bdev = ib_dev->ibd_bd;
bio->bi_private = (void *) task;
bio->bi_destructor = iblock_bio_destructor;
bio->bi_end_io = &iblock_bio_done;
bio->bi_sector = lba;
atomic_inc(&ib_req->ib_bio_cnt);
DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
atomic_read(&ib_req->ib_bio_cnt));
return bio;
}
static int iblock_map_task_SG(struct se_task *task)
{
struct se_cmd *cmd = task->task_se_cmd;
struct se_device *dev = SE_DEV(cmd);
struct iblock_dev *ib_dev = task->se_dev->dev_ptr;
struct iblock_req *ib_req = IBLOCK_REQ(task);
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
struct scatterlist *sg;
int ret = 0;
u32 i, sg_num = task->task_sg_num;
sector_t block_lba;
/*
* Convert the starting LBA from the device's SCSI block size into the
* 512-byte units used by the Linux block layer for BIOs.
*/
if (DEV_ATTRIB(dev)->block_size == 4096)
block_lba = (task->task_lba << 3);
else if (DEV_ATTRIB(dev)->block_size == 2048)
block_lba = (task->task_lba << 2);
else if (DEV_ATTRIB(dev)->block_size == 1024)
block_lba = (task->task_lba << 1);
else if (DEV_ATTRIB(dev)->block_size == 512)
block_lba = task->task_lba;
else {
printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
" %u\n", DEV_ATTRIB(dev)->block_size);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
if (!(bio))
return ret;
ib_req->ib_bio = bio;
hbio = tbio = bio;
/*
* Use fs/bio.c:bio_add_page() to set up the bio_vec maplist
* from TCM struct se_mem -> task->task_sg -> struct scatterlist memory.
*/
for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
" %p len: %u offset: %u\n", task, bio, sg_page(sg),
sg->length, sg->offset);
again:
ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
if (ret != sg->length) {
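/*
 * The current bio is full; allocate a new bio, chain it to the
 * list and retry adding this scatterlist entry.
 */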
DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
bio->bi_sector);
DEBUG_IBLOCK("** task->task_size: %u\n",
task->task_size);
DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
bio->bi_max_vecs);
DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
bio->bi_vcnt);
bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
block_lba, sg_num);
if (!(bio))
goto fail;
tbio = tbio->bi_next = bio;
DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
" list, Going to again\n", bio);
goto again;
}
/* Always in 512 byte units for Linux/Block */
block_lba += sg->length >> IBLOCK_LBA_SHIFT;
sg_num--;
DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
" sg_num to %u\n", task, sg_num);
DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
" to %llu\n", task, block_lba);
DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
" %u\n", task, bio->bi_vcnt);
}
return 0;
fail:
while (hbio) {
bio = hbio;
hbio = hbio->bi_next;
bio->bi_next = NULL;
bio_put(bio);
}
return ret;
}
static unsigned char *iblock_get_cdb(struct se_task *task)
{
return IBLOCK_REQ(task)->ib_scsi_cdb;
}
static u32 iblock_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
static u32 iblock_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
}
static sector_t iblock_get_blocks(struct se_device *dev)
{
struct iblock_dev *ibd = dev->dev_ptr;
struct block_device *bd = ibd->ibd_bd;
struct request_queue *q = bdev_get_queue(bd);
return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}
static void iblock_bio_done(struct bio *bio, int err)
{
struct se_task *task = bio->bi_private;
struct iblock_req *ibr = IBLOCK_REQ(task);
/*
* Set -EIO if !BIO_UPTODATE and the passed err is still 0
*/
if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
err = -EIO;
if (err != 0) {
printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
" err: %d\n", bio, err);
/*
* Bump the ib_bio_err_cnt and release bio.
*/
atomic_inc(&ibr->ib_bio_err_cnt);
smp_mb__after_atomic_inc();
bio_put(bio);
/*
* Wait to complete the task until the last bio has completed.
*/
if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
return;
ibr->ib_bio = NULL;
transport_complete_task(task, 0);
return;
}
DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
task, bio, task->task_lba, bio->bi_sector, err);
/*
* bio_put() will call iblock_bio_destructor() to release the bio back
* to ibr->ib_bio_set.
*/
bio_put(bio);
/*
* Wait to complete the task until the last bio has completed.
*/
if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
return;
/*
* Return GOOD status for the task if ib_bio_err_cnt is zero.
*/
ibr->ib_bio = NULL;
transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt)));
}
static struct se_subsystem_api iblock_template = {
.name = "iblock",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
.map_task_SG = iblock_map_task_SG,
.attach_hba = iblock_attach_hba,
.detach_hba = iblock_detach_hba,
.allocate_virtdevice = iblock_allocate_virtdevice,
.create_virtdevice = iblock_create_virtdevice,
.free_device = iblock_free_device,
.dpo_emulated = iblock_emulated_dpo,
.fua_write_emulated = iblock_emulated_fua_write,
.fua_read_emulated = iblock_emulated_fua_read,
.write_cache_emulated = iblock_emulated_write_cache,
.alloc_task = iblock_alloc_task,
.do_task = iblock_do_task,
.do_discard = iblock_do_discard,
.do_sync_cache = iblock_emulate_sync_cache,
.free_task = iblock_free_task,
.check_configfs_dev_params = iblock_check_configfs_dev_params,
.set_configfs_dev_params = iblock_set_configfs_dev_params,
.show_configfs_dev_params = iblock_show_configfs_dev_params,
.get_cdb = iblock_get_cdb,
.get_device_rev = iblock_get_device_rev,
.get_device_type = iblock_get_device_type,
.get_blocks = iblock_get_blocks,
};
static int __init iblock_module_init(void)
{
return transport_subsystem_register(&iblock_template);
}
static void iblock_module_exit(void)
{
transport_subsystem_release(&iblock_template);
}
MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(iblock_module_init);
module_exit(iblock_module_exit);
#ifndef TARGET_CORE_IBLOCK_H
#define TARGET_CORE_IBLOCK_H
#define IBLOCK_VERSION "4.0"
#define IBLOCK_HBA_QUEUE_DEPTH 512
#define IBLOCK_DEVICE_QUEUE_DEPTH 32
#define IBLOCK_MAX_DEVICE_QUEUE_DEPTH 128
#define IBLOCK_MAX_CDBS 16
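/* The Linux block layer addresses bios in 512-byte sectors */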
#define IBLOCK_LBA_SHIFT 9
struct iblock_req {
struct se_task ib_task;
unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];
atomic_t ib_bio_cnt;
atomic_t ib_bio_err_cnt;
struct bio *ib_bio;
struct iblock_dev *ib_dev;
} ____cacheline_aligned;
#define IBDF_HAS_UDEV_PATH 0x01
#define IBDF_HAS_FORCE 0x02
struct iblock_dev {
unsigned char ibd_udev_path[SE_UDEV_PATH_LEN];
int ibd_force;
int ibd_major;
int ibd_minor;
u32 ibd_depth;
u32 ibd_flags;
struct bio_set *ibd_bio_set;
struct block_device *ibd_bd;
struct iblock_hba *ibd_host;
} ____cacheline_aligned;
struct iblock_hba {
int iblock_host_id;
} ____cacheline_aligned;
#endif /* TARGET_CORE_IBLOCK_H */
/*******************************************************************************
* Filename: target_core_mib.c
*
* Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_hba.h"
#include "target_core_mib.h"
/* SCSI mib table index */
static struct scsi_index_table scsi_index_table;
#ifndef INITIAL_JIFFIES
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
#endif
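/*
 * creation_time values below are reported in hundredths of a second
 * since boot; jiffies counters start at INITIAL_JIFFIES.
 */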
/* SCSI Instance Table */
#define SCSI_INST_SW_INDEX 1
#define SCSI_TRANSPORT_INDEX 1
#define NONE "None"
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
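/*
 * list_is_first() lets each seq_show() callback emit its column header
 * only when printing the first entry of a table.
 */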
static inline int list_is_first(const struct list_head *list,
const struct list_head *head)
{
return list->prev == head;
}
static void *locate_hba_start(
struct seq_file *seq,
loff_t *pos)
{
spin_lock(&se_global->g_device_lock);
return seq_list_start(&se_global->g_se_dev_list, *pos);
}
static void *locate_hba_next(
struct seq_file *seq,
void *v,
loff_t *pos)
{
return seq_list_next(v, &se_global->g_se_dev_list, pos);
}
static void locate_hba_stop(struct seq_file *seq, void *v)
{
spin_unlock(&se_global->g_device_lock);
}
/****************************************************************************
* SCSI MIB Tables
****************************************************************************/
/*
* SCSI Instance Table
*/
static void *scsi_inst_seq_start(
struct seq_file *seq,
loff_t *pos)
{
spin_lock(&se_global->hba_lock);
return seq_list_start(&se_global->g_hba_list, *pos);
}
static void *scsi_inst_seq_next(
struct seq_file *seq,
void *v,
loff_t *pos)
{
return seq_list_next(v, &se_global->g_hba_list, pos);
}
static void scsi_inst_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock(&se_global->hba_lock);
}
static int scsi_inst_seq_show(struct seq_file *seq, void *v)
{
struct se_hba *hba = list_entry(v, struct se_hba, hba_list);
if (list_is_first(&hba->hba_list, &se_global->g_hba_list))
seq_puts(seq, "inst sw_indx\n");
seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX);
seq_printf(seq, "plugin: %s version: %s\n",
hba->transport->name, TARGET_CORE_VERSION);
return 0;
}
static const struct seq_operations scsi_inst_seq_ops = {
.start = scsi_inst_seq_start,
.next = scsi_inst_seq_next,
.stop = scsi_inst_seq_stop,
.show = scsi_inst_seq_show
};
static int scsi_inst_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_inst_seq_ops);
}
static const struct file_operations scsi_inst_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_inst_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Device Table
*/
static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
return locate_hba_start(seq, pos);
}
static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return locate_hba_next(seq, v, pos);
}
static void scsi_dev_seq_stop(struct seq_file *seq, void *v)
{
locate_hba_stop(seq, v);
}
static int scsi_dev_seq_show(struct seq_file *seq, void *v)
{
struct se_hba *hba;
struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
g_se_dev_list);
struct se_device *dev = se_dev->se_dev_ptr;
char str[28];
int k;
if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
seq_puts(seq, "inst indx role ports\n");
if (!(dev))
return 0;
hba = dev->se_hba;
if (!(hba)) {
/* Log error ? */
return 0;
}
seq_printf(seq, "%u %u %s %u\n", hba->hba_index,
dev->dev_index, "Target", dev->dev_port_count);
memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
/* vendor */
for (k = 0; k < 8; k++)
str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ?
DEV_T10_WWN(dev)->vendor[k] : 0x20;
str[k] = 0x20;
/* model */
for (k = 0; k < 16; k++)
str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ?
DEV_T10_WWN(dev)->model[k] : 0x20;
str[k + 9] = 0;
seq_printf(seq, "dev_alias: %s\n", str);
return 0;
}
static const struct seq_operations scsi_dev_seq_ops = {
.start = scsi_dev_seq_start,
.next = scsi_dev_seq_next,
.stop = scsi_dev_seq_stop,
.show = scsi_dev_seq_show
};
static int scsi_dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_dev_seq_ops);
}
static const struct file_operations scsi_dev_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Port Table
*/
static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos)
{
return locate_hba_start(seq, pos);
}
static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return locate_hba_next(seq, v, pos);
}
static void scsi_port_seq_stop(struct seq_file *seq, void *v)
{
locate_hba_stop(seq, v);
}
static int scsi_port_seq_show(struct seq_file *seq, void *v)
{
struct se_hba *hba;
struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
g_se_dev_list);
struct se_device *dev = se_dev->se_dev_ptr;
struct se_port *sep, *sep_tmp;
if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
seq_puts(seq, "inst device indx role busy_count\n");
if (!(dev))
return 0;
hba = dev->se_hba;
if (!(hba)) {
/* Log error ? */
return 0;
}
/* FIXME: scsiPortBusyStatuses count */
spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index,
dev->dev_index, sep->sep_index, "Device",
dev->dev_index, 0);
}
spin_unlock(&dev->se_port_lock);
return 0;
}
static const struct seq_operations scsi_port_seq_ops = {
.start = scsi_port_seq_start,
.next = scsi_port_seq_next,
.stop = scsi_port_seq_stop,
.show = scsi_port_seq_show
};
static int scsi_port_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_port_seq_ops);
}
static const struct file_operations scsi_port_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_port_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Transport Table
*/
static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos)
{
return locate_hba_start(seq, pos);
}
static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return locate_hba_next(seq, v, pos);
}
static void scsi_transport_seq_stop(struct seq_file *seq, void *v)
{
locate_hba_stop(seq, v);
}
static int scsi_transport_seq_show(struct seq_file *seq, void *v)
{
struct se_hba *hba;
struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
g_se_dev_list);
struct se_device *dev = se_dev->se_dev_ptr;
struct se_port *se, *se_tmp;
struct se_portal_group *tpg;
struct t10_wwn *wwn;
char buf[64];
if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
seq_puts(seq, "inst device indx dev_name\n");
if (!(dev))
return 0;
hba = dev->se_hba;
if (!(hba)) {
/* Log error ? */
return 0;
}
wwn = DEV_T10_WWN(dev);
spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) {
tpg = se->sep_tpg;
sprintf(buf, "scsiTransport%s",
TPG_TFO(tpg)->get_fabric_name());
seq_printf(seq, "%u %s %u %s+%s\n",
hba->hba_index, /* scsiTransportIndex */
buf, /* scsiTransportType */
(TPG_TFO(tpg)->tpg_get_inst_index != NULL) ?
TPG_TFO(tpg)->tpg_get_inst_index(tpg) :
0,
TPG_TFO(tpg)->tpg_get_wwn(tpg),
(strlen(wwn->unit_serial)) ?
/* scsiTransportDevName */
wwn->unit_serial : wwn->vendor);
}
spin_unlock(&dev->se_port_lock);
return 0;
}
static const struct seq_operations scsi_transport_seq_ops = {
.start = scsi_transport_seq_start,
.next = scsi_transport_seq_next,
.stop = scsi_transport_seq_stop,
.show = scsi_transport_seq_show
};
static int scsi_transport_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_transport_seq_ops);
}
static const struct file_operations scsi_transport_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_transport_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Target Device Table
*/
static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
return locate_hba_start(seq, pos);
}
static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return locate_hba_next(seq, v, pos);
}
static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v)
{
locate_hba_stop(seq, v);
}
#define LU_COUNT 1 /* for now */
static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v)
{
struct se_hba *hba;
struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
g_se_dev_list);
struct se_device *dev = se_dev->se_dev_ptr;
int non_accessible_lus = 0;
char status[16];
if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
seq_puts(seq, "inst indx num_LUs status non_access_LUs"
" resets\n");
if (!(dev))
return 0;
hba = dev->se_hba;
if (!(hba)) {
/* Log error ? */
return 0;
}
switch (dev->dev_status) {
case TRANSPORT_DEVICE_ACTIVATED:
strcpy(status, "activated");
break;
case TRANSPORT_DEVICE_DEACTIVATED:
strcpy(status, "deactivated");
non_accessible_lus = 1;
break;
case TRANSPORT_DEVICE_SHUTDOWN:
strcpy(status, "shutdown");
non_accessible_lus = 1;
break;
case TRANSPORT_DEVICE_OFFLINE_ACTIVATED:
case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED:
strcpy(status, "offline");
non_accessible_lus = 1;
break;
default:
sprintf(status, "unknown(%d)", dev->dev_status);
non_accessible_lus = 1;
}
seq_printf(seq, "%u %u %u %s %u %u\n",
hba->hba_index, dev->dev_index, LU_COUNT,
status, non_accessible_lus, dev->num_resets);
return 0;
}
static const struct seq_operations scsi_tgt_dev_seq_ops = {
.start = scsi_tgt_dev_seq_start,
.next = scsi_tgt_dev_seq_next,
.stop = scsi_tgt_dev_seq_stop,
.show = scsi_tgt_dev_seq_show
};
static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_tgt_dev_seq_ops);
}
static const struct file_operations scsi_tgt_dev_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_tgt_dev_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Target Port Table
*/
static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos)
{
return locate_hba_start(seq, pos);
}
static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return locate_hba_next(seq, v, pos);
}
static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v)
{
locate_hba_stop(seq, v);
}
static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v)
{
struct se_hba *hba;
struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
g_se_dev_list);
struct se_device *dev = se_dev->se_dev_ptr;
struct se_port *sep, *sep_tmp;
struct se_portal_group *tpg;
u32 rx_mbytes, tx_mbytes;
unsigned long long num_cmds;
char buf[64];
if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
seq_puts(seq, "inst device indx name port_index in_cmds"
" write_mbytes read_mbytes hs_in_cmds\n");
if (!(dev))
return 0;
hba = dev->se_hba;
if (!(hba)) {
/* Log error ? */
return 0;
}
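/* Emit one row per struct se_port exported by this device. */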
spin_lock(&dev->se_port_lock);
list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) {
tpg = sep->sep_tpg;
sprintf(buf, "%sPort#",
TPG_TFO(tpg)->get_fabric_name());
seq_printf(seq, "%u %u %u %s%d %s%s%d ",
hba->hba_index,
dev->dev_index,
sep->sep_index,
buf, sep->sep_index,
TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+",
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_lock(&sep->sep_lun->lun_sep_lock);
num_cmds = sep->sep_stats.cmd_pdus;
rx_mbytes = (sep->sep_stats.rx_data_octets >> 20);
tx_mbytes = (sep->sep_stats.tx_data_octets >> 20);
spin_unlock(&sep->sep_lun->lun_sep_lock);
seq_printf(seq, "%llu %u %u %u\n", num_cmds,
rx_mbytes, tx_mbytes, 0);
}
spin_unlock(&dev->se_port_lock);
return 0;
}
static const struct seq_operations scsi_tgt_port_seq_ops = {
.start = scsi_tgt_port_seq_start,
.next = scsi_tgt_port_seq_next,
.stop = scsi_tgt_port_seq_stop,
.show = scsi_tgt_port_seq_show
};
static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_tgt_port_seq_ops);
}
static const struct file_operations scsi_tgt_port_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_tgt_port_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Authorized Initiator Table:
* It contains the SCSI Initiators authorized to be attached to one of the
* local Target ports.
* Iterates through all active TPGs and extracts the info from the ACLs
*/
static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_bh(&se_global->se_tpg_lock);
return seq_list_start(&se_global->g_se_tpg_list, *pos);
}
static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
return seq_list_next(v, &se_global->g_se_tpg_list, pos);
}
static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&se_global->se_tpg_lock);
}
static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v)
{
struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
se_tpg_list);
struct se_dev_entry *deve;
struct se_lun *lun;
struct se_node_acl *se_nacl;
int j;
if (list_is_first(&se_tpg->se_tpg_list,
&se_global->g_se_tpg_list))
seq_puts(seq, "inst dev port indx dev_or_port intr_name "
"map_indx att_count num_cmds read_mbytes "
"write_mbytes hs_num_cmds creation_time row_status\n");
if (!(se_tpg))
return 0;
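/*
 * Hold a mib_ref_count reference on each node ACL so it cannot be
 * freed while acl_node_lock is dropped to walk its device list.
 */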
spin_lock(&se_tpg->acl_node_lock);
list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) {
atomic_inc(&se_nacl->mib_ref_count);
smp_mb__after_atomic_inc();
spin_unlock(&se_tpg->acl_node_lock);
spin_lock_irq(&se_nacl->device_list_lock);
for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
deve = &se_nacl->device_list[j];
if (!(deve->lun_flags &
TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
(!deve->se_lun))
continue;
lun = deve->se_lun;
if (!lun->lun_se_dev)
continue;
seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u"
" %u %s\n",
/* scsiInstIndex */
(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
0,
/* scsiDeviceIndex */
lun->lun_se_dev->dev_index,
/* scsiAuthIntrTgtPortIndex */
TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
/* scsiAuthIntrIndex */
se_nacl->acl_index,
/* scsiAuthIntrDevOrPort */
1,
/* scsiAuthIntrName */
se_nacl->initiatorname[0] ?
se_nacl->initiatorname : NONE,
/* FIXME: scsiAuthIntrLunMapIndex */
0,
/* scsiAuthIntrAttachedTimes */
deve->attach_count,
/* scsiAuthIntrOutCommands */
deve->total_cmds,
/* scsiAuthIntrReadMegaBytes */
(u32)(deve->read_bytes >> 20),
/* scsiAuthIntrWrittenMegaBytes */
(u32)(deve->write_bytes >> 20),
/* FIXME: scsiAuthIntrHSOutCommands */
0,
/* scsiAuthIntrLastCreation */
(u32)(((u32)deve->creation_time -
INITIAL_JIFFIES) * 100 / HZ),
/* FIXME: scsiAuthIntrRowStatus */
"Ready");
}
spin_unlock_irq(&se_nacl->device_list_lock);
spin_lock(&se_tpg->acl_node_lock);
atomic_dec(&se_nacl->mib_ref_count);
smp_mb__after_atomic_dec();
}
spin_unlock(&se_tpg->acl_node_lock);
return 0;
}
static const struct seq_operations scsi_auth_intr_seq_ops = {
.start = scsi_auth_intr_seq_start,
.next = scsi_auth_intr_seq_next,
.stop = scsi_auth_intr_seq_stop,
.show = scsi_auth_intr_seq_show
};
static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_auth_intr_seq_ops);
}
static const struct file_operations scsi_auth_intr_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_auth_intr_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Attached Initiator Port Table:
* It lists the SCSI Initiators attached to one of the local Target ports.
* Iterates through all active TPGs and uses active sessions from each TPG
* to list the info for this table.
*/
static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos)
{
spin_lock_bh(&se_global->se_tpg_lock);
return seq_list_start(&se_global->g_se_tpg_list, *pos);
}
static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v,
loff_t *pos)
{
return seq_list_next(v, &se_global->g_se_tpg_list, pos);
}
static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v)
{
spin_unlock_bh(&se_global->se_tpg_lock);
}
static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v)
{
struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group,
se_tpg_list);
struct se_dev_entry *deve;
struct se_lun *lun;
struct se_node_acl *se_nacl;
struct se_session *se_sess;
unsigned char buf[64];
int j;
if (list_is_first(&se_tpg->se_tpg_list,
&se_global->g_se_tpg_list))
seq_puts(seq, "inst dev port indx port_auth_indx port_name"
" port_ident\n");
if (!(se_tpg))
return 0;
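/*
 * Take mib_ref_count references on the session and its node ACL so
 * both stay valid while session_lock is dropped below.
 */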
spin_lock(&se_tpg->session_lock);
list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) ||
(!se_sess->se_node_acl) ||
(!se_sess->se_node_acl->device_list))
continue;
atomic_inc(&se_sess->mib_ref_count);
smp_mb__after_atomic_inc();
se_nacl = se_sess->se_node_acl;
atomic_inc(&se_nacl->mib_ref_count);
smp_mb__after_atomic_inc();
spin_unlock(&se_tpg->session_lock);
spin_lock_irq(&se_nacl->device_list_lock);
for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) {
deve = &se_nacl->device_list[j];
if (!(deve->lun_flags &
TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) ||
(!deve->se_lun))
continue;
lun = deve->se_lun;
if (!lun->lun_se_dev)
continue;
memset(buf, 0, 64);
if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL)
TPG_TFO(se_tpg)->sess_get_initiator_sid(
se_sess, (unsigned char *)&buf[0], 64);
seq_printf(seq, "%u %u %u %u %u %s+i+%s\n",
/* scsiInstIndex */
(TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ?
TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) :
0,
/* scsiDeviceIndex */
lun->lun_se_dev->dev_index,
/* scsiPortIndex */
TPG_TFO(se_tpg)->tpg_get_tag(se_tpg),
/* scsiAttIntrPortIndex */
(TPG_TFO(se_tpg)->sess_get_index != NULL) ?
TPG_TFO(se_tpg)->sess_get_index(se_sess) :
0,
/* scsiAttIntrPortAuthIntrIdx */
se_nacl->acl_index,
/* scsiAttIntrPortName */
se_nacl->initiatorname[0] ?
se_nacl->initiatorname : NONE,
/* scsiAttIntrPortIdentifier */
buf);
}
spin_unlock_irq(&se_nacl->device_list_lock);
spin_lock(&se_tpg->session_lock);
atomic_dec(&se_nacl->mib_ref_count);
smp_mb__after_atomic_dec();
atomic_dec(&se_sess->mib_ref_count);
smp_mb__after_atomic_dec();
}
spin_unlock(&se_tpg->session_lock);
return 0;
}
static const struct seq_operations scsi_att_intr_port_seq_ops = {
.start = scsi_att_intr_port_seq_start,
.next = scsi_att_intr_port_seq_next,
.stop = scsi_att_intr_port_seq_stop,
.show = scsi_att_intr_port_seq_show
};
static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_att_intr_port_seq_ops);
}
static const struct file_operations scsi_att_intr_port_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_att_intr_port_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
* SCSI Logical Unit Table
*/
static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos)
{
return locate_hba_start(seq, pos);
}
static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
return locate_hba_next(seq, v, pos);
}
static void scsi_lu_seq_stop(struct seq_file *seq, void *v)
{
locate_hba_stop(seq, v);
}
#define SCSI_LU_INDEX 1
static int scsi_lu_seq_show(struct seq_file *seq, void *v)
{
struct se_hba *hba;
struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev,
g_se_dev_list);
struct se_device *dev = se_dev->se_dev_ptr;
int j;
char str[28];
if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list))
seq_puts(seq, "inst dev indx LUN lu_name vend prod rev"
" dev_type status state-bit num_cmds read_mbytes"
" write_mbytes resets full_stat hs_num_cmds creation_time\n");
if (!(dev))
return 0;
hba = dev->se_hba;
if (!(hba)) {
/* Log error ? */
return 0;
}
/* Fix LU state, if we can read it from the device */
seq_printf(seq, "%u %u %u %llu %s", hba->hba_index,
dev->dev_index, SCSI_LU_INDEX,
(unsigned long long)0, /* FIXME: scsiLuDefaultLun */
(strlen(DEV_T10_WWN(dev)->unit_serial)) ?
/* scsiLuWwnName */
(char *)&DEV_T10_WWN(dev)->unit_serial[0] :
"None");
memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28);
/* scsiLuVendorId */
for (j = 0; j < 8; j++)
str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ?
DEV_T10_WWN(dev)->vendor[j] : 0x20;
str[8] = 0;
seq_printf(seq, " %s", str);
/* scsiLuProductId */
for (j = 0; j < 16; j++)
str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ?
DEV_T10_WWN(dev)->model[j] : 0x20;
str[16] = 0;
seq_printf(seq, " %s", str);
/* scsiLuRevisionId */
for (j = 0; j < 4; j++)
str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ?
DEV_T10_WWN(dev)->revision[j] : 0x20;
str[4] = 0;
seq_printf(seq, " %s", str);
seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n",
/* scsiLuPeripheralType */
TRANSPORT(dev)->get_device_type(dev),
(dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ?
"available" : "notavailable", /* scsiLuStatus */
"exposed", /* scsiLuState */
(unsigned long long)dev->num_cmds,
/* scsiLuReadMegaBytes */
(u32)(dev->read_bytes >> 20),
/* scsiLuWrittenMegaBytes */
(u32)(dev->write_bytes >> 20),
dev->num_resets, /* scsiLuInResets */
0, /* scsiLuOutTaskSetFullStatus */
0, /* scsiLuHSInCommands */
(u32)(((u32)dev->creation_time - INITIAL_JIFFIES) *
100 / HZ));
return 0;
}
static const struct seq_operations scsi_lu_seq_ops = {
.start = scsi_lu_seq_start,
.next = scsi_lu_seq_next,
.stop = scsi_lu_seq_stop,
.show = scsi_lu_seq_show
};
static int scsi_lu_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &scsi_lu_seq_ops);
}
static const struct file_operations scsi_lu_seq_fops = {
.owner = THIS_MODULE,
.open = scsi_lu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/****************************************************************************/
/*
* Remove proc fs entries
*/
void remove_scsi_target_mib(void)
{
remove_proc_entry("scsi_target/mib/scsi_inst", NULL);
remove_proc_entry("scsi_target/mib/scsi_dev", NULL);
remove_proc_entry("scsi_target/mib/scsi_port", NULL);
remove_proc_entry("scsi_target/mib/scsi_transport", NULL);
remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL);
remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL);
remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL);
remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL);
remove_proc_entry("scsi_target/mib/scsi_lu", NULL);
remove_proc_entry("scsi_target/mib", NULL);
}
/*
* Create proc fs entries for the mib tables
*/
int init_scsi_target_mib(void)
{
struct proc_dir_entry *dir_entry;
struct proc_dir_entry *scsi_inst_entry;
struct proc_dir_entry *scsi_dev_entry;
struct proc_dir_entry *scsi_port_entry;
struct proc_dir_entry *scsi_transport_entry;
struct proc_dir_entry *scsi_tgt_dev_entry;
struct proc_dir_entry *scsi_tgt_port_entry;
struct proc_dir_entry *scsi_auth_intr_entry;
struct proc_dir_entry *scsi_att_intr_port_entry;
struct proc_dir_entry *scsi_lu_entry;
dir_entry = proc_mkdir("scsi_target/mib", NULL);
if (!(dir_entry)) {
printk(KERN_ERR "proc_mkdir() failed.\n");
return -1;
}
scsi_inst_entry =
create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL);
if (scsi_inst_entry)
scsi_inst_entry->proc_fops = &scsi_inst_seq_fops;
else
goto error;
scsi_dev_entry =
create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL);
if (scsi_dev_entry)
scsi_dev_entry->proc_fops = &scsi_dev_seq_fops;
else
goto error;
scsi_port_entry =
create_proc_entry("scsi_target/mib/scsi_port", 0, NULL);
if (scsi_port_entry)
scsi_port_entry->proc_fops = &scsi_port_seq_fops;
else
goto error;
scsi_transport_entry =
create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL);
if (scsi_transport_entry)
scsi_transport_entry->proc_fops = &scsi_transport_seq_fops;
else
goto error;
scsi_tgt_dev_entry =
create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL);
if (scsi_tgt_dev_entry)
scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops;
else
goto error;
scsi_tgt_port_entry =
create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL);
if (scsi_tgt_port_entry)
scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops;
else
goto error;
scsi_auth_intr_entry =
create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL);
if (scsi_auth_intr_entry)
scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops;
else
goto error;
scsi_att_intr_port_entry =
create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL);
if (scsi_att_intr_port_entry)
scsi_att_intr_port_entry->proc_fops =
&scsi_att_intr_port_seq_fops;
else
goto error;
scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL);
if (scsi_lu_entry)
scsi_lu_entry->proc_fops = &scsi_lu_seq_fops;
else
goto error;
return 0;
error:
printk(KERN_ERR "create_proc_entry() failed.\n");
remove_scsi_target_mib();
return -1;
}
/*
* Initialize the index table for allocating unique row indexes to various mib
* tables
*/
void init_scsi_index_table(void)
{
memset(&scsi_index_table, 0, sizeof(struct scsi_index_table));
spin_lock_init(&scsi_index_table.lock);
}
/*
* Allocate a new row index for the entry type specified
*/
u32 scsi_get_new_index(scsi_index_t type)
{
u32 new_index;
if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) {
printk(KERN_ERR "Invalid index type %d\n", type);
return -1;
}
spin_lock(&scsi_index_table.lock);
new_index = ++scsi_index_table.scsi_mib_index[type];
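/* Index 0 is never handed out; skip it if the counter wraps. */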
if (new_index == 0)
new_index = ++scsi_index_table.scsi_mib_index[type];
spin_unlock(&scsi_index_table.lock);
return new_index;
}
EXPORT_SYMBOL(scsi_get_new_index);
#ifndef TARGET_CORE_MIB_H
#define TARGET_CORE_MIB_H
typedef enum {
SCSI_INST_INDEX,
SCSI_DEVICE_INDEX,
SCSI_AUTH_INTR_INDEX,
SCSI_INDEX_TYPE_MAX
} scsi_index_t;
struct scsi_index_table {
spinlock_t lock;
u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
} ____cacheline_aligned;
/* SCSI Port stats */
struct scsi_port_stats {
u64 cmd_pdus;
u64 tx_data_octets;
u64 rx_data_octets;
} ____cacheline_aligned;
extern int init_scsi_target_mib(void);
extern void remove_scsi_target_mib(void);
extern void init_scsi_index_table(void);
extern u32 scsi_get_new_index(scsi_index_t);
#endif /*** TARGET_CORE_MIB_H ***/
#ifndef TARGET_CORE_PR_H
#define TARGET_CORE_PR_H
/*
* PERSISTENT_RESERVE_OUT service action codes
*
* spc4r17 section 6.14.2 Table 171
*/
#define PRO_REGISTER 0x00
#define PRO_RESERVE 0x01
#define PRO_RELEASE 0x02
#define PRO_CLEAR 0x03
#define PRO_PREEMPT 0x04
#define PRO_PREEMPT_AND_ABORT 0x05
#define PRO_REGISTER_AND_IGNORE_EXISTING_KEY 0x06
#define PRO_REGISTER_AND_MOVE 0x07
/*
* PERSISTENT_RESERVE_IN service action codes
*
* spc4r17 section 6.13.1 Table 159
*/
#define PRI_READ_KEYS 0x00
#define PRI_READ_RESERVATION 0x01
#define PRI_REPORT_CAPABILITIES 0x02
#define PRI_READ_FULL_STATUS 0x03
/*
* PERSISTENT_RESERVE_ SCOPE field
*
* spc4r17 section 6.13.3.3 Table 163
*/
#define PR_SCOPE_LU_SCOPE 0x00
/*
* PERSISTENT_RESERVE_* TYPE field
*
* spc4r17 section 6.13.3.4 Table 164
*/
#define PR_TYPE_WRITE_EXCLUSIVE 0x01
#define PR_TYPE_EXCLUSIVE_ACCESS 0x03
#define PR_TYPE_WRITE_EXCLUSIVE_REGONLY 0x05
#define PR_TYPE_EXCLUSIVE_ACCESS_REGONLY 0x06
#define PR_TYPE_WRITE_EXCLUSIVE_ALLREG 0x07
#define PR_TYPE_EXCLUSIVE_ACCESS_ALLREG 0x08
#define PR_APTPL_MAX_IPORT_LEN 256
#define PR_APTPL_MAX_TPORT_LEN 256
extern struct kmem_cache *t10_pr_reg_cache;
extern int core_pr_dump_initiator_port(struct t10_pr_registration *,
char *, u32);
extern int core_scsi2_emulate_crh(struct se_cmd *);
extern int core_scsi3_alloc_aptpl_registration(
struct t10_reservation_template *, u64,
unsigned char *, unsigned char *, u32,
unsigned char *, u16, u32, int, int, u8);
extern int core_scsi3_check_aptpl_registration(struct se_device *,
struct se_portal_group *, struct se_lun *,
struct se_lun_acl *);
extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
struct se_node_acl *);
extern void core_scsi3_free_all_registrations(struct se_device *);
extern unsigned char *core_scsi3_pr_dump_type(int);
extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
struct se_cmd *);
extern int core_scsi3_emulate_pr(struct se_cmd *);
extern int core_setup_reservations(struct se_device *, int);
#endif /* TARGET_CORE_PR_H */
/*******************************************************************************
* Filename: target_core_pscsi.c
*
* This file contains the generic target mode <-> Linux SCSI subsystem plugin.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/blk_types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/genhd.h>
#include <linux/cdrom.h>
#include <linux/file.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/libsas.h> /* For TASK_ATTR_* */
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include "target_core_pscsi.h"
#define ISPRINT(a) ((a >= ' ') && (a <= '~'))
static struct se_subsystem_api pscsi_template;
static void pscsi_req_done(struct request *, int);
/* pscsi_get_sh():
*
*
*/
static struct Scsi_Host *pscsi_get_sh(u32 host_no)
{
struct Scsi_Host *sh = NULL;
sh = scsi_host_lookup(host_no);
if (IS_ERR(sh)) {
printk(KERN_ERR "Unable to locate SCSI HBA with Host ID:"
" %u\n", host_no);
return NULL;
}
return sh;
}
/* pscsi_attach_hba():
*
* pscsi_get_sh() uses scsi_host_lookup() to locate the struct Scsi_Host
* from the passed SCSI Host ID.
*/
static int pscsi_attach_hba(struct se_hba *hba, u32 host_id)
{
int hba_depth;
struct pscsi_hba_virt *phv;
phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
if (!(phv)) {
printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
return -1;
}
phv->phv_host_id = host_id;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);
hba->hba_ptr = (void *)phv;
printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic"
" Target Core with TCQ Depth: %d\n", hba->hba_id,
atomic_read(&hba->max_queue_depth));
return 0;
}
static void pscsi_detach_hba(struct se_hba *hba)
{
struct pscsi_hba_virt *phv = hba->hba_ptr;
struct Scsi_Host *scsi_host = phv->phv_lld_host;
if (scsi_host) {
scsi_host_put(scsi_host);
printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
" Generic Target Core\n", hba->hba_id,
(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
"Unknown");
} else
printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
" from Generic Target Core\n", hba->hba_id);
kfree(phv);
hba->hba_ptr = NULL;
}
static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
{
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int hba_depth = PSCSI_VIRTUAL_HBA_DEPTH;
/*
* Release the struct Scsi_Host
*/
if (!(mode_flag)) {
if (!(sh))
return 0;
phv->phv_lld_host = NULL;
phv->phv_mode = PHV_VIRUTAL_HOST_ID;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);
printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
" %s\n", hba->hba_id, (sh->hostt->name) ?
(sh->hostt->name) : "Unknown");
scsi_host_put(sh);
return 0;
}
/*
* Otherwise, locate struct Scsi_Host from the original passed
* pSCSI Host ID and enable for phba mode
*/
sh = pscsi_get_sh(phv->phv_host_id);
if (!(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
" phv_host_id: %d\n", phv->phv_host_id);
return -1;
}
/*
* Usually the SCSI LLD will use the hostt->can_queue value to define
* its HBA TCQ depth. Some other drivers (like 2.6 megaraid) don't set
* this at all and set sh->can_queue at runtime.
*/
hba_depth = (sh->hostt->can_queue > sh->can_queue) ?
sh->hostt->can_queue : sh->can_queue;
atomic_set(&hba->left_queue_depth, hba_depth);
atomic_set(&hba->max_queue_depth, hba_depth);
phv->phv_lld_host = sh;
phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
return 1;
}
static void pscsi_tape_read_blocksize(struct se_device *dev,
struct scsi_device *sdev)
{
unsigned char cdb[MAX_COMMAND_SIZE], *buf;
int ret;
buf = kzalloc(12, GFP_KERNEL);
if (!buf)
return;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = MODE_SENSE;
cdb[4] = 0x0c; /* 12 bytes */
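/*
 * A MODE_SENSE(6) with a 12-byte allocation length returns the 4-byte
 * mode parameter header plus the 8-byte block descriptor; the block
 * length is carried in bytes 9..11 of the buffer.
 */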
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, 12, NULL,
HZ, 1, NULL);
if (ret)
goto out_free;
/*
* If MODE_SENSE returns a block length of zero, fall back to a default of 1024.
*/
sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]);
if (!sdev->sector_size)
sdev->sector_size = 1024;
out_free:
kfree(buf);
}
static void
pscsi_set_inquiry_info(struct scsi_device *sdev, struct t10_wwn *wwn)
{
unsigned char *buf;
if (sdev->inquiry_len < INQUIRY_LEN)
return;
buf = sdev->inquiry;
if (!buf)
return;
/*
* Use sdev->inquiry from drivers/scsi/scsi_scan.c:scsi_alloc_sdev()
*/
memcpy(&wwn->vendor[0], &buf[8], sizeof(wwn->vendor));
memcpy(&wwn->model[0], &buf[16], sizeof(wwn->model));
memcpy(&wwn->revision[0], &buf[32], sizeof(wwn->revision));
}
static int
pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn)
{
unsigned char cdb[MAX_COMMAND_SIZE], *buf;
int ret;
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
return -1;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x80; /* Unit Serial Number */
cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff;
cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff);
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL);
if (ret)
goto out_free;
snprintf(&wwn->unit_serial[0], INQUIRY_VPD_SERIAL_LEN, "%s", &buf[4]);
wwn->t10_sub_dev->su_dev_flags |= SDF_FIRMWARE_VPD_UNIT_SERIAL;
kfree(buf);
return 0;
out_free:
kfree(buf);
return -1;
}
static void
pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev,
struct t10_wwn *wwn)
{
unsigned char cdb[MAX_COMMAND_SIZE], *buf, *page_83;
int ident_len, page_len, off = 4, ret;
struct t10_vpd *vpd;
buf = kzalloc(INQUIRY_VPD_SERIAL_LEN, GFP_KERNEL);
if (!buf)
return;
memset(cdb, 0, MAX_COMMAND_SIZE);
cdb[0] = INQUIRY;
cdb[1] = 0x01; /* Query VPD */
cdb[2] = 0x83; /* Device Identifier */
cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff;
cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff);
ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf,
INQUIRY_VPD_DEVICE_IDENTIFIER_LEN,
NULL, HZ, 1, NULL);
if (ret)
goto out;
page_len = (buf[2] << 8) | buf[3];
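/*
 * Walk each designation descriptor in the Device Identification VPD
 * page; descriptors start at offset 4.
 */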
while (page_len > 0) {
/* Grab a pointer to the Identification descriptor */
page_83 = &buf[off];
ident_len = page_83[3];
if (!ident_len) {
printk(KERN_ERR "page_83[3]: identifier"
" length zero!\n");
break;
}
printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
if (!vpd) {
printk(KERN_ERR "Unable to allocate memory for"
" struct t10_vpd\n");
goto out;
}
INIT_LIST_HEAD(&vpd->vpd_list);
transport_set_vpd_proto_id(vpd, page_83);
transport_set_vpd_assoc(vpd, page_83);
if (transport_set_vpd_ident_type(vpd, page_83) < 0) {
off += (ident_len + 4);
page_len -= (ident_len + 4);
kfree(vpd);
continue;
}
if (transport_set_vpd_ident(vpd, page_83) < 0) {
off += (ident_len + 4);
page_len -= (ident_len + 4);
kfree(vpd);
continue;
}
list_add_tail(&vpd->vpd_list, &wwn->t10_vpd_list);
off += (ident_len + 4);
page_len -= (ident_len + 4);
}
out:
kfree(buf);
}
/* pscsi_add_device_to_list():
*
*
*/
static struct se_device *pscsi_add_device_to_list(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
struct pscsi_dev_virt *pdv,
struct scsi_device *sd,
int dev_flags)
{
struct se_device *dev;
struct se_dev_limits dev_limits;
struct request_queue *q;
struct queue_limits *limits;
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
if (!sd->queue_depth) {
sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
" queue_depth to %d\n", sd->channel, sd->id,
sd->lun, sd->queue_depth);
}
/*
* Setup the local scope queue_limits from struct request_queue->limits
* to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
*/
q = sd->request_queue;
limits = &dev_limits.limits;
limits->logical_block_size = sd->sector_size;
limits->max_hw_sectors = (sd->host->max_sectors > queue_max_hw_sectors(q)) ?
queue_max_hw_sectors(q) : sd->host->max_sectors;
limits->max_sectors = (sd->host->max_sectors > queue_max_sectors(q)) ?
queue_max_sectors(q) : sd->host->max_sectors;
dev_limits.hw_queue_depth = sd->queue_depth;
dev_limits.queue_depth = sd->queue_depth;
/*
* Setup our standard INQUIRY info into se_dev->t10_wwn
*/
pscsi_set_inquiry_info(sd, &se_dev->t10_wwn);
/*
* Set the pointer pdv->pdv_sd from the passed struct scsi_device,
* which has already been referenced by the Linux SCSI code with
* scsi_device_get() in this file's pscsi_create_virtdevice().
*
* The passthrough operations called by the transport_add_device_*
* function below will require this pointer to be set for passthrough
* ops.
*
* For the shutdown case in pscsi_free_device(), this struct
* scsi_device reference is released with Linux SCSI code
* scsi_device_put() and the pdv->pdv_sd cleared.
*/
pdv->pdv_sd = sd;
dev = transport_add_device_to_core_hba(hba, &pscsi_template,
se_dev, dev_flags, (void *)pdv,
&dev_limits, NULL, NULL);
if (!(dev)) {
pdv->pdv_sd = NULL;
return NULL;
}
/*
* Locate VPD WWN Information used for various purposes within
* the Storage Engine.
*/
if (!pscsi_get_inquiry_vpd_serial(sd, &se_dev->t10_wwn)) {
/*
* If VPD Unit Serial returned GOOD status, try
* VPD Device Identification page (0x83).
*/
pscsi_get_inquiry_vpd_device_ident(sd, &se_dev->t10_wwn);
}
/*
* For TYPE_TAPE, attempt to determine blocksize with MODE_SENSE.
*/
if (sd->type == TYPE_TAPE)
pscsi_tape_read_blocksize(dev, sd);
return dev;
}
static void *pscsi_allocate_virtdevice(struct se_hba *hba, const char *name)
{
struct pscsi_dev_virt *pdv;
pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
if (!(pdv)) {
printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
return NULL;
}
pdv->pdv_se_hba = hba;
printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
return (void *)pdv;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
static struct se_device *pscsi_create_type_disk(
struct scsi_device *sd,
struct pscsi_dev_virt *pdv,
struct se_subsystem_dev *se_dev,
struct se_hba *hba)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
struct block_device *bd;
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
}
spin_unlock_irq(sh->host_lock);
/*
* Claim exclusive struct block_device access to struct scsi_device
* for TYPE_DISK using supplied udev_path
*/
bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
if (!(bd)) {
printk("pSCSI: blkdev_get_by_path() failed\n");
scsi_device_put(sd);
return NULL;
}
pdv->pdv_bd = bd;
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev)) {
blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
scsi_device_put(sd);
return NULL;
}
printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
return dev;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
static struct se_device *pscsi_create_type_rom(
struct scsi_device *sd,
struct pscsi_dev_virt *pdv,
struct se_subsystem_dev *se_dev,
struct se_hba *hba)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
if (scsi_device_get(sd)) {
printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
sh->host_no, sd->channel, sd->id, sd->lun);
spin_unlock_irq(sh->host_lock);
return NULL;
}
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev)) {
scsi_device_put(sd);
return NULL;
}
printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
return dev;
}
/*
* Called with struct Scsi_Host->host_lock held.
*/
static struct se_device *pscsi_create_type_other(
struct scsi_device *sd,
struct pscsi_dev_virt *pdv,
struct se_subsystem_dev *se_dev,
struct se_hba *hba)
{
struct se_device *dev;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
struct Scsi_Host *sh = sd->host;
u32 dev_flags = 0;
spin_unlock_irq(sh->host_lock);
dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
if (!(dev))
return NULL;
printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
sd->channel, sd->id, sd->lun);
return dev;
}
static struct se_device *pscsi_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
struct se_device *dev;
struct scsi_device *sd;
struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
struct Scsi_Host *sh = phv->phv_lld_host;
int legacy_mode_enable = 0;
if (!(pdv)) {
printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
" parameter\n");
return NULL;
}
/*
* If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
* struct Scsi_Host we will need to bring the TCM/pSCSI object online
*/
if (!(sh)) {
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
printk(KERN_ERR "pSCSI: Unable to locate struct"
" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
return NULL;
}
/*
* For the newer PHV_VIRUTAL_HOST_ID struct scsi_device
* reference, we enforce that udev_path has been set
*/
if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
printk(KERN_ERR "pSCSI: udev_path attribute has not"
" been set before ENABLE=1\n");
return NULL;
}
/*
* If no scsi_host_id= was passed for PHV_VIRUTAL_HOST_ID,
* use the original TCM hba ID to reference Linux/SCSI Host No
* and enable for PHV_LLD_SCSI_HOST_NO mode.
*/
if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
spin_lock(&hba->device_lock);
if (!(list_empty(&hba->hba_dev_list))) {
printk(KERN_ERR "pSCSI: Unable to set hba_mode"
" with active devices\n");
spin_unlock(&hba->device_lock);
return NULL;
}
spin_unlock(&hba->device_lock);
if (pscsi_pmode_enable_hba(hba, 1) != 1)
return NULL;
legacy_mode_enable = 1;
hba->hba_flags |= HBA_FLAGS_PSCSI_MODE;
sh = phv->phv_lld_host;
} else {
sh = pscsi_get_sh(pdv->pdv_host_id);
if (!(sh)) {
printk(KERN_ERR "pSCSI: Unable to locate"
" pdv_host_id: %d\n", pdv->pdv_host_id);
return NULL;
}
}
} else {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
" struct Scsi_Host exists\n");
return NULL;
}
}
spin_lock_irq(sh->host_lock);
list_for_each_entry(sd, &sh->__devices, siblings) {
if ((pdv->pdv_channel_id != sd->channel) ||
(pdv->pdv_target_id != sd->id) ||
(pdv->pdv_lun_id != sd->lun))
continue;
/*
* Functions will release the held struct scsi_host->host_lock
* before calling pscsi_add_device_to_list() to register
* struct scsi_device with target_core_mod.
*/
switch (sd->type) {
case TYPE_DISK:
dev = pscsi_create_type_disk(sd, pdv, se_dev, hba);
break;
case TYPE_ROM:
dev = pscsi_create_type_rom(sd, pdv, se_dev, hba);
break;
default:
dev = pscsi_create_type_other(sd, pdv, se_dev, hba);
break;
}
if (!(dev)) {
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
pscsi_pmode_enable_hba(hba, 0);
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
pdv->pdv_sd = NULL;
return NULL;
}
return dev;
}
spin_unlock_irq(sh->host_lock);
printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id);
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
scsi_host_put(sh);
else if (legacy_mode_enable) {
pscsi_pmode_enable_hba(hba, 0);
hba->hba_flags &= ~HBA_FLAGS_PSCSI_MODE;
}
return NULL;
}
/* pscsi_free_device(): (Part of se_subsystem_api_t template)
*
*
*/
static void pscsi_free_device(void *p)
{
struct pscsi_dev_virt *pdv = p;
struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
struct scsi_device *sd = pdv->pdv_sd;
if (sd) {
/*
* Release exclusive pSCSI internal struct block_device claim for
* struct scsi_device with TYPE_DISK from pscsi_create_type_disk()
*/
if ((sd->type == TYPE_DISK) && pdv->pdv_bd) {
blkdev_put(pdv->pdv_bd,
FMODE_WRITE|FMODE_READ|FMODE_EXCL);
pdv->pdv_bd = NULL;
}
/*
* For HBA mode PHV_LLD_SCSI_HOST_NO, release the reference
* to struct Scsi_Host now.
*/
if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
(phv->phv_lld_host != NULL))
scsi_host_put(phv->phv_lld_host);
if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
scsi_device_put(sd);
pdv->pdv_sd = NULL;
}
kfree(pdv);
}
static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)
{
return container_of(task, struct pscsi_plugin_task, pscsi_task);
}
/* pscsi_transport_complete():
*
*
*/
static int pscsi_transport_complete(struct se_task *task)
{
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
struct scsi_device *sd = pdv->pdv_sd;
int result;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
unsigned char *cdb = &pt->pscsi_cdb[0];
result = pt->pscsi_result;
/*
* Hack to make sure that Write-Protect modepage is set if R/O mode is
* forced.
*/
if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
if (!TASK_CMD(task)->se_deve)
goto after_mode_sense;
if (TASK_CMD(task)->se_deve->lun_flags &
TRANSPORT_LUNFLAGS_READ_ONLY) {
unsigned char *buf = (unsigned char *)
T_TASK(task->task_se_cmd)->t_task_buf;
if (cdb[0] == MODE_SENSE_10) {
if (!(buf[3] & 0x80))
buf[3] |= 0x80;
} else {
if (!(buf[2] & 0x80))
buf[2] |= 0x80;
}
}
}
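/*
 * Illustrative example (not part of the code path above): for MODE
 * SENSE(10), byte 3 of the returned parameter header is the
 * DEVICE-SPECIFIC PARAMETER field, so a returned 0x00 becomes 0x80
 * (WP=1) when the LUN is exported read-only; for MODE SENSE(6) the
 * same bit lives in byte 2, matching the buf[3]/buf[2] updates above.
 */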
after_mode_sense:
if (sd->type != TYPE_TAPE)
goto after_mode_select;
/*
* Hack to correctly obtain the initiator requested blocksize for
 * TYPE_TAPE. Since this value depends on the loaded tape media,
 * struct scsi_device->sector_size will not contain the correct value
 * by default, so we set it here so that
* TRANSPORT(dev)->get_blockdev() returns the correct value to the
* storage engine.
*/
if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) &&
(status_byte(result) << 1) == SAM_STAT_GOOD) {
unsigned char *buf;
struct scatterlist *sg = task->task_sg;
u16 bdl;
u32 blocksize;
buf = sg_virt(&sg[0]);
if (!(buf)) {
printk(KERN_ERR "Unable to get buf for scatterlist\n");
goto after_mode_select;
}
if (cdb[0] == MODE_SELECT)
bdl = (buf[3]);
else
bdl = (buf[6] << 8) | (buf[7]);
if (!bdl)
goto after_mode_select;
if (cdb[0] == MODE_SELECT)
blocksize = (buf[9] << 16) | (buf[10] << 8) |
(buf[11]);
else
blocksize = (buf[13] << 16) | (buf[14] << 8) |
(buf[15]);
sd->sector_size = blocksize;
}
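/*
 * Example (for illustration only): a MODE SELECT(6) parameter list with
 * an 8-byte block descriptor carries the block length in buffer bytes
 * 9-11, so 0x00 0x02 0x00 there yields a 512 byte blocksize; for
 * MODE SELECT(10) the same field sits at bytes 13-15, matching the
 * shifts above.
 */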
after_mode_select:
if (status_byte(result) & CHECK_CONDITION)
return 1;
return 0;
}
static struct se_task *
pscsi_alloc_task(struct se_cmd *cmd)
{
struct pscsi_plugin_task *pt;
unsigned char *cdb = T_TASK(cmd)->t_task_cdb;
pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
if (!pt) {
printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
return NULL;
}
/*
* If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
* allocate the extended CDB buffer for per struct se_task context
* pt->pscsi_cdb now.
*/
if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb) {
pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
if (!(pt->pscsi_cdb)) {
printk(KERN_ERR "pSCSI: Unable to allocate extended"
" pt->pscsi_cdb\n");
/* Free the base task allocation so it is not leaked on failure */
kfree(pt);
return NULL;
}
} else
pt->pscsi_cdb = &pt->__pscsi_cdb[0];
return &pt->pscsi_task;
}
static inline void pscsi_blk_init_request(
struct se_task *task,
struct pscsi_plugin_task *pt,
struct request *req,
int bidi_read)
{
/*
* Defined as "scsi command" in include/linux/blkdev.h.
*/
req->cmd_type = REQ_TYPE_BLOCK_PC;
/*
* For the extra BIDI-COMMAND READ struct request we do not
* need to setup the remaining structure members
*/
if (bidi_read)
return;
/*
* Setup the done function pointer for struct request,
 * also set the end_io_data pointer to struct se_task.
*/
req->end_io = pscsi_req_done;
req->end_io_data = (void *)task;
/*
* Load the referenced struct se_task's SCSI CDB into
* include/linux/blkdev.h:struct request->cmd
*/
req->cmd_len = scsi_command_size(pt->pscsi_cdb);
req->cmd = &pt->pscsi_cdb[0];
/*
* Setup pointer for outgoing sense data.
*/
req->sense = (void *)&pt->pscsi_sense[0];
req->sense_len = 0;
}
/*
* Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB
*/
static int pscsi_blk_get_request(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
(task->task_data_direction == DMA_TO_DEVICE),
GFP_KERNEL);
if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
IS_ERR(pt->pscsi_req));
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
/*
* Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
* and setup rq callback, CDB and sense.
*/
pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
return 0;
}
/* pscsi_do_task(): (Part of se_subsystem_api_t template)
*
*
*/
static int pscsi_do_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
/*
* Set the struct request->timeout value based on peripheral
* device type from SCSI.
*/
if (pdv->pdv_sd->type == TYPE_DISK)
pt->pscsi_req->timeout = PS_TIMEOUT_DISK;
else
pt->pscsi_req->timeout = PS_TIMEOUT_OTHER;
pt->pscsi_req->retries = PS_RETRY;
/*
* Queue the struct request into the struct scsi_device->request_queue.
* Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd
* descriptor
*/
blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req,
(task->task_se_cmd->sam_task_attr == TASK_ATTR_HOQ),
pscsi_req_done);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
static void pscsi_free_task(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct se_cmd *cmd = task->task_se_cmd;
/*
* Release the extended CDB allocation from pscsi_alloc_task()
* if one exists.
*/
if (T_TASK(cmd)->t_task_cdb != T_TASK(cmd)->__t_task_cdb)
kfree(pt->pscsi_cdb);
/*
* We do not release the bio(s) here associated with this task, as
* this is handled by bio_put() and pscsi_bi_endio().
*/
kfree(pt);
}
enum {
Opt_scsi_host_id, Opt_scsi_channel_id, Opt_scsi_target_id,
Opt_scsi_lun_id, Opt_err
};
static match_table_t tokens = {
{Opt_scsi_host_id, "scsi_host_id=%d"},
{Opt_scsi_channel_id, "scsi_channel_id=%d"},
{Opt_scsi_target_id, "scsi_target_id=%d"},
{Opt_scsi_lun_id, "scsi_lun_id=%d"},
{Opt_err, NULL}
};
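/*
 * Usage sketch (assuming the usual target_core configfs layout under
 * /sys/kernel/config/target/core/, which is not defined in this file):
 *
 *   echo "scsi_channel_id=0,scsi_target_id=1,scsi_lun_id=0" > $HBA/$DEV/control
 *
 * Each comma-separated token is parsed by match_token() against the
 * table above and stored into struct pscsi_dev_virt.
 */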
static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page,
ssize_t count)
{
struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
struct pscsi_hba_virt *phv = hba->hba_ptr;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_scsi_host_id:
if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
printk(KERN_ERR "PSCSI[%d]: Unable to accept"
" scsi_host_id while phv_mode =="
" PHV_LLD_SCSI_HOST_NO\n",
phv->phv_host_id);
ret = -EINVAL;
goto out;
}
match_int(args, &arg);
pdv->pdv_host_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
" %d\n", phv->phv_host_id, pdv->pdv_host_id);
pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
break;
case Opt_scsi_channel_id:
match_int(args, &arg);
pdv->pdv_channel_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_channel_id);
pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
break;
case Opt_scsi_target_id:
match_int(args, &arg);
pdv->pdv_target_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
" ID: %d\n", phv->phv_host_id,
pdv->pdv_target_id);
pdv->pdv_flags |= PDF_HAS_TARGET_ID;
break;
case Opt_scsi_lun_id:
match_int(args, &arg);
pdv->pdv_lun_id = arg;
printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
pdv->pdv_flags |= PDF_HAS_LUN_ID;
break;
default:
break;
}
}
out:
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t pscsi_check_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev)
{
struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
!(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
!(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
" scsi_lun_id= parameters\n");
return -1;
}
return 0;
}
static ssize_t pscsi_show_configfs_dev_params(struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct pscsi_hba_virt *phv = hba->hba_ptr;
struct pscsi_dev_virt *pdv = se_dev->se_dev_su_ptr;
struct scsi_device *sd = pdv->pdv_sd;
unsigned char host_id[16];
ssize_t bl;
int i;
if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
snprintf(host_id, 16, "%d", pdv->pdv_host_id);
else
snprintf(host_id, 16, "PHBA Mode");
bl = sprintf(b, "SCSI Device Bus Location:"
" Channel ID: %d Target ID: %d LUN: %d Host ID: %s\n",
pdv->pdv_channel_id, pdv->pdv_target_id, pdv->pdv_lun_id,
host_id);
if (sd) {
bl += sprintf(b + bl, " ");
bl += sprintf(b + bl, "Vendor: ");
for (i = 0; i < 8; i++) {
if (ISPRINT(sd->vendor[i])) /* printable character? */
bl += sprintf(b + bl, "%c", sd->vendor[i]);
else
bl += sprintf(b + bl, " ");
}
bl += sprintf(b + bl, " Model: ");
for (i = 0; i < 16; i++) {
if (ISPRINT(sd->model[i])) /* printable character ? */
bl += sprintf(b + bl, "%c", sd->model[i]);
else
bl += sprintf(b + bl, " ");
}
bl += sprintf(b + bl, " Rev: ");
for (i = 0; i < 4; i++) {
if (ISPRINT(sd->rev[i])) /* printable character ? */
bl += sprintf(b + bl, "%c", sd->rev[i]);
else
bl += sprintf(b + bl, " ");
}
bl += sprintf(b + bl, "\n");
}
return bl;
}
static void pscsi_bi_endio(struct bio *bio, int error)
{
bio_put(bio);
}
static inline struct bio *pscsi_get_bio(struct pscsi_dev_virt *pdv, int sg_num)
{
struct bio *bio;
/*
 * Use bio_kmalloc() following the comment for bio -> struct request
 * in block/blk-core.c:blk_make_request()
*/
bio = bio_kmalloc(GFP_KERNEL, sg_num);
if (!(bio)) {
printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
return NULL;
}
bio->bi_end_io = pscsi_bi_endio;
return bio;
}
#if 0
#define DEBUG_PSCSI(x...) printk(x)
#else
#define DEBUG_PSCSI(x...)
#endif
static int __pscsi_map_task_SG(
struct se_task *task,
struct scatterlist *task_sg,
u32 task_sg_num,
int bidi_read)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
struct page *page;
struct scatterlist *sg;
u32 data_len = task->task_size, i, len, bytes, off;
int nr_pages = (task->task_size + task_sg[0].offset +
PAGE_SIZE - 1) >> PAGE_SHIFT;
int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
int rw = (task->task_data_direction == DMA_TO_DEVICE);
if (!task->task_size)
return 0;
/*
 * For SCF_SCSI_DATA_SG_IO_CDB, use fs/bio.c:bio_add_page() to setup
 * the bio_vec maplist from TCM struct se_mem -> task->task_sg ->
 * struct scatterlist memory. The struct se_task->task_sg[] currently needs
 * to be attached to struct bios for submission to Linux/SCSI using
 * struct request to struct scsi_device->request_queue.
 *
 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI
 * is ported to upstream SCSI passthrough functionality that accepts
 * struct scatterlist->page_link or struct page as a parameter.
*/
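/*
 * Worked example (illustrative, assuming 4k pages): with
 * task->task_size = 12288 (three pages) and task_sg[0].offset = 512,
 * nr_pages above evaluates to (12288 + 512 + 4095) >> 12 = 4, so up to
 * four bio_vec entries are reserved even though the payload is only
 * three pages long.
 */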
DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
for_each_sg(task_sg, sg, task_sg_num, i) {
page = sg_page(sg);
off = sg->offset;
len = sg->length;
DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
page, len, off);
while (len > 0 && data_len > 0) {
bytes = min_t(unsigned int, len, PAGE_SIZE - off);
bytes = min(bytes, data_len);
if (!(bio)) {
nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
nr_pages -= nr_vecs;
/*
* Calls bio_kmalloc() and sets bio->bi_end_io()
*/
bio = pscsi_get_bio(pdv, nr_vecs);
if (!(bio))
goto fail;
if (rw)
bio->bi_rw |= REQ_WRITE;
DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
" dir: %s nr_vecs: %d\n", bio,
(rw) ? "rw" : "r", nr_vecs);
/*
* Set *hbio pointer to handle the case:
* nr_pages > BIO_MAX_PAGES, where additional
* bios need to be added to complete a given
* struct se_task
*/
if (!hbio)
hbio = tbio = bio;
else
tbio = tbio->bi_next = bio;
}
DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
" bio: %p page: %p len: %d off: %d\n", i, bio,
page, len, off);
rc = bio_add_pc_page(pdv->pdv_sd->request_queue,
bio, page, bytes, off);
if (rc != bytes)
goto fail;
DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
bio->bi_vcnt, nr_vecs);
if (bio->bi_vcnt > nr_vecs) {
DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
" %d i: %d bio: %p, allocating another"
" bio\n", bio->bi_vcnt, i, bio);
/*
* Clear the pointer so that another bio will
* be allocated with pscsi_get_bio() above, the
* current bio has already been set *tbio and
* bio->bi_next.
*/
bio = NULL;
}
page++;
len -= bytes;
data_len -= bytes;
off = 0;
}
}
/*
* Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
 * primary SCSI WRITE payload mapped for struct se_task->task_sg[]
*/
if (!(bidi_read)) {
/*
* Starting with v2.6.31, call blk_make_request() passing in *hbio to
* allocate the pSCSI task a struct request.
*/
pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
if (!(pt->pscsi_req)) {
printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
goto fail;
}
/*
* Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC,
* and setup rq callback, CDB and sense.
*/
pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
return task->task_sg_num;
}
/*
* Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
 * SCSI READ payload mapped for struct se_task->task_sg_bidi[]
*/
pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
hbio, GFP_KERNEL);
if (!(pt->pscsi_req->next_rq)) {
printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
goto fail;
}
pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
return task->task_sg_num;
fail:
while (hbio) {
bio = hbio;
hbio = hbio->bi_next;
bio->bi_next = NULL;
bio_endio(bio, 0);
}
return ret;
}
static int pscsi_map_task_SG(struct se_task *task)
{
int ret;
/*
* Setup the main struct request for the task->task_sg[] payload
*/
ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
if (ret >= 0 && task->task_sg_bidi) {
/*
* If present, set up the extra BIDI-COMMAND SCSI READ
* struct request and payload.
*/
ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
task->task_sg_num, 1);
}
if (ret < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
return 0;
}
/* pscsi_map_task_non_SG():
*
*
*/
static int pscsi_map_task_non_SG(struct se_task *task)
{
struct se_cmd *cmd = TASK_CMD(task);
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr;
int ret = 0;
if (pscsi_blk_get_request(task) < 0)
return PYX_TRANSPORT_LU_COMM_FAILURE;
if (!task->task_size)
return 0;
ret = blk_rq_map_kern(pdv->pdv_sd->request_queue,
pt->pscsi_req, T_TASK(cmd)->t_task_buf,
task->task_size, GFP_KERNEL);
if (ret < 0) {
printk(KERN_ERR "PSCSI: blk_rq_map_kern() failed: %d\n", ret);
return PYX_TRANSPORT_LU_COMM_FAILURE;
}
return 0;
}
static int pscsi_CDB_none(struct se_task *task)
{
return pscsi_blk_get_request(task);
}
/* pscsi_get_cdb():
*
*
*/
static unsigned char *pscsi_get_cdb(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
return pt->pscsi_cdb;
}
/* pscsi_get_sense_buffer():
*
*
*/
static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
{
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
return (unsigned char *)&pt->pscsi_sense[0];
}
/* pscsi_get_device_rev():
*
*
*/
static u32 pscsi_get_device_rev(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = dev->dev_ptr;
struct scsi_device *sd = pdv->pdv_sd;
return (sd->scsi_level - 1) ? sd->scsi_level - 1 : 1;
}
/* pscsi_get_device_type():
*
*
*/
static u32 pscsi_get_device_type(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = dev->dev_ptr;
struct scsi_device *sd = pdv->pdv_sd;
return sd->type;
}
static sector_t pscsi_get_blocks(struct se_device *dev)
{
struct pscsi_dev_virt *pdv = dev->dev_ptr;
if (pdv->pdv_bd && pdv->pdv_bd->bd_part)
return pdv->pdv_bd->bd_part->nr_sects;
dump_stack();
return 0;
}
/* pscsi_handle_SAM_STATUS_failures():
*
*
*/
static inline void pscsi_process_SAM_status(
struct se_task *task,
struct pscsi_plugin_task *pt)
{
task->task_scsi_status = status_byte(pt->pscsi_result);
if ((task->task_scsi_status)) {
task->task_scsi_status <<= 1;
printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
}
switch (host_byte(pt->pscsi_result)) {
case DID_OK:
transport_complete_task(task, (!task->task_scsi_status));
break;
default:
printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
pt->pscsi_result);
task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
task->task_error_status = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
TASK_CMD(task)->transport_error_status =
PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
transport_complete_task(task, 0);
break;
}
return;
}
static void pscsi_req_done(struct request *req, int uptodate)
{
struct se_task *task = req->end_io_data;
struct pscsi_plugin_task *pt = PSCSI_TASK(task);
pt->pscsi_result = req->errors;
pt->pscsi_resid = req->resid_len;
pscsi_process_SAM_status(task, pt);
/*
* Release BIDI-READ if present
*/
if (req->next_rq != NULL)
__blk_put_request(req->q, req->next_rq);
__blk_put_request(req->q, req);
pt->pscsi_req = NULL;
}
static struct se_subsystem_api pscsi_template = {
.name = "pscsi",
.owner = THIS_MODULE,
.transport_type = TRANSPORT_PLUGIN_PHBA_PDEV,
.cdb_none = pscsi_CDB_none,
.map_task_non_SG = pscsi_map_task_non_SG,
.map_task_SG = pscsi_map_task_SG,
.attach_hba = pscsi_attach_hba,
.detach_hba = pscsi_detach_hba,
.pmode_enable_hba = pscsi_pmode_enable_hba,
.allocate_virtdevice = pscsi_allocate_virtdevice,
.create_virtdevice = pscsi_create_virtdevice,
.free_device = pscsi_free_device,
.transport_complete = pscsi_transport_complete,
.alloc_task = pscsi_alloc_task,
.do_task = pscsi_do_task,
.free_task = pscsi_free_task,
.check_configfs_dev_params = pscsi_check_configfs_dev_params,
.set_configfs_dev_params = pscsi_set_configfs_dev_params,
.show_configfs_dev_params = pscsi_show_configfs_dev_params,
.get_cdb = pscsi_get_cdb,
.get_sense_buffer = pscsi_get_sense_buffer,
.get_device_rev = pscsi_get_device_rev,
.get_device_type = pscsi_get_device_type,
.get_blocks = pscsi_get_blocks,
};
static int __init pscsi_module_init(void)
{
return transport_subsystem_register(&pscsi_template);
}
static void pscsi_module_exit(void)
{
transport_subsystem_release(&pscsi_template);
}
MODULE_DESCRIPTION("TCM PSCSI subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");
module_init(pscsi_module_init);
module_exit(pscsi_module_exit);
#ifndef TARGET_CORE_PSCSI_H
#define TARGET_CORE_PSCSI_H
#define PSCSI_VERSION "v4.0"
#define PSCSI_VIRTUAL_HBA_DEPTH 2048
/* used in pscsi_find_alloc_len() */
#ifndef INQUIRY_DATA_SIZE
#define INQUIRY_DATA_SIZE 0x24
#endif
/* used in pscsi_add_device_to_list() */
#define PSCSI_DEFAULT_QUEUEDEPTH 1
#define PS_RETRY 5
#define PS_TIMEOUT_DISK (15*HZ)
#define PS_TIMEOUT_OTHER (500*HZ)
#include <linux/device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_device.h>
#include <linux/kref.h>
#include <linux/kobject.h>
struct pscsi_plugin_task {
struct se_task pscsi_task;
unsigned char *pscsi_cdb;
unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
int pscsi_direction;
int pscsi_result;
u32 pscsi_resid;
struct request *pscsi_req;
} ____cacheline_aligned;
#define PDF_HAS_CHANNEL_ID 0x01
#define PDF_HAS_TARGET_ID 0x02
#define PDF_HAS_LUN_ID 0x04
#define PDF_HAS_VPD_UNIT_SERIAL 0x08
#define PDF_HAS_VPD_DEV_IDENT 0x10
#define PDF_HAS_VIRT_HOST_ID 0x20
struct pscsi_dev_virt {
int pdv_flags;
int pdv_host_id;
int pdv_channel_id;
int pdv_target_id;
int pdv_lun_id;
struct block_device *pdv_bd;
struct scsi_device *pdv_sd;
struct se_hba *pdv_se_hba;
} ____cacheline_aligned;
typedef enum phv_modes {
PHV_VIRUTAL_HOST_ID,
PHV_LLD_SCSI_HOST_NO
} phv_modes_t;
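/*
 * Summary inferred from the pSCSI code above (not an original comment):
 * PHV_VIRUTAL_HOST_ID means the pSCSI device references an existing
 * struct scsi_device via the scsi_host_id/channel/target/lun configfs
 * parameters, while PHV_LLD_SCSI_HOST_NO is the legacy HBA passthrough
 * mode where the whole struct Scsi_Host is claimed via
 * pscsi_pmode_enable_hba().
 */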
struct pscsi_hba_virt {
int phv_host_id;
phv_modes_t phv_mode;
struct Scsi_Host *phv_lld_host;
} ____cacheline_aligned;
#endif /*** TARGET_CORE_PSCSI_H ***/
/*******************************************************************************
* Filename: target_core_rd.c
*
* This file contains the Storage Engine <-> Ramdisk transport
* specific functions.
*
* Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_rd.h"
static struct se_subsystem_api rd_dr_template;
static struct se_subsystem_api rd_mcp_template;
/* #define DEBUG_RAMDISK_MCP */
/* #define DEBUG_RAMDISK_DR */
/* rd_attach_hba(): (Part of se_subsystem_api_t template)
*
*
*/
static int rd_attach_hba(struct se_hba *hba, u32 host_id)
{
struct rd_host *rd_host;
rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
if (!(rd_host)) {
printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
return -ENOMEM;
}
rd_host->rd_host_id = host_id;
atomic_set(&hba->left_queue_depth, RD_HBA_QUEUE_DEPTH);
atomic_set(&hba->max_queue_depth, RD_HBA_QUEUE_DEPTH);
hba->hba_ptr = (void *) rd_host;
printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
" Generic Target Core Stack %s\n", hba->hba_id,
RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
" Target Core TCQ Depth: %d MaxSectors: %u\n", hba->hba_id,
rd_host->rd_host_id, atomic_read(&hba->max_queue_depth),
RD_MAX_SECTORS);
return 0;
}
static void rd_detach_hba(struct se_hba *hba)
{
struct rd_host *rd_host = hba->hba_ptr;
printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
kfree(rd_host);
hba->hba_ptr = NULL;
}
/* rd_release_device_space():
*
*
*/
static void rd_release_device_space(struct rd_dev *rd_dev)
{
u32 i, j, page_count = 0, sg_per_table;
struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
if (!rd_dev->sg_table_array || !rd_dev->sg_table_count)
return;
sg_table = rd_dev->sg_table_array;
for (i = 0; i < rd_dev->sg_table_count; i++) {
sg = sg_table[i].sg_table;
sg_per_table = sg_table[i].rd_sg_count;
for (j = 0; j < sg_per_table; j++) {
pg = sg_page(&sg[j]);
if ((pg)) {
__free_page(pg);
page_count++;
}
}
kfree(sg);
}
printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
" Device ID: %u, pages %u in %u tables total bytes %lu\n",
rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
kfree(sg_table);
rd_dev->sg_table_array = NULL;
rd_dev->sg_table_count = 0;
}
/* rd_build_device_space():
*
*
*/
static int rd_build_device_space(struct rd_dev *rd_dev)
{
u32 i = 0, j, page_offset = 0, sg_per_table, sg_tables, total_sg_needed;
u32 max_sg_per_table = (RD_MAX_ALLOCATION_SIZE /
sizeof(struct scatterlist));
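/*
 * Sizing note (illustrative): with RD_MAX_ALLOCATION_SIZE = 65536 and a
 * typical 64-bit sizeof(struct scatterlist) of 32 bytes (an assumption,
 * it varies by config), max_sg_per_table comes out to 2048 entries, so
 * a 65536 page ramdisk would need 33 sg tables below.
 */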
struct rd_dev_sg_table *sg_table;
struct page *pg;
struct scatterlist *sg;
if (rd_dev->rd_page_count <= 0) {
printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
rd_dev->rd_page_count);
return -1;
}
total_sg_needed = rd_dev->rd_page_count;
sg_tables = (total_sg_needed / max_sg_per_table) + 1;
sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
if (!(sg_table)) {
printk(KERN_ERR "Unable to allocate memory for Ramdisk"
" scatterlist tables\n");
return -1;
}
rd_dev->sg_table_array = sg_table;
rd_dev->sg_table_count = sg_tables;
while (total_sg_needed) {
sg_per_table = (total_sg_needed > max_sg_per_table) ?
max_sg_per_table : total_sg_needed;
sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
GFP_KERNEL);
if (!(sg)) {
printk(KERN_ERR "Unable to allocate scatterlist array"
" for struct rd_dev\n");
return -1;
}
sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
sg_table[i].sg_table = sg;
sg_table[i].rd_sg_count = sg_per_table;
sg_table[i].page_start_offset = page_offset;
sg_table[i++].page_end_offset = (page_offset + sg_per_table)
- 1;
for (j = 0; j < sg_per_table; j++) {
pg = alloc_pages(GFP_KERNEL, 0);
if (!(pg)) {
printk(KERN_ERR "Unable to allocate scatterlist"
" pages for struct rd_dev_sg_table\n");
return -1;
}
sg_assign_page(&sg[j], pg);
sg[j].length = PAGE_SIZE;
}
page_offset += sg_per_table;
total_sg_needed -= sg_per_table;
}
printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count);
return 0;
}
static void *rd_allocate_virtdevice(
struct se_hba *hba,
const char *name,
int rd_direct)
{
struct rd_dev *rd_dev;
struct rd_host *rd_host = hba->hba_ptr;
rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
if (!(rd_dev)) {
printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
return NULL;
}
rd_dev->rd_host = rd_host;
rd_dev->rd_direct = rd_direct;
return rd_dev;
}
static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 1);
}
static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
{
return rd_allocate_virtdevice(hba, name, 0);
}
/* rd_create_virtdevice():
*
*
*/
static struct se_device *rd_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p,
int rd_direct)
{
struct se_device *dev;
struct se_dev_limits dev_limits;
struct rd_dev *rd_dev = p;
struct rd_host *rd_host = hba->hba_ptr;
int dev_flags = 0;
char prod[16], rev[4];
memset(&dev_limits, 0, sizeof(struct se_dev_limits));
if (rd_build_device_space(rd_dev) < 0)
goto fail;
snprintf(prod, 16, "RAMDISK-%s", (rd_dev->rd_direct) ? "DR" : "MCP");
snprintf(rev, 4, "%s", (rd_dev->rd_direct) ? RD_DR_VERSION :
RD_MCP_VERSION);
dev_limits.limits.logical_block_size = RD_BLOCKSIZE;
dev_limits.limits.max_hw_sectors = RD_MAX_SECTORS;
dev_limits.limits.max_sectors = RD_MAX_SECTORS;
dev_limits.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
dev = transport_add_device_to_core_hba(hba,
(rd_dev->rd_direct) ? &rd_dr_template :
&rd_mcp_template, se_dev, dev_flags, (void *)rd_dev,
&dev_limits, prod, rev);
if (!(dev))
goto fail;
rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
rd_dev->rd_queue_depth = dev->queue_depth;
printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
" %u pages in %u tables, %lu total bytes\n",
rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
rd_dev->sg_table_count,
(unsigned long)(rd_dev->rd_page_count * PAGE_SIZE));
return dev;
fail:
rd_release_device_space(rd_dev);
return NULL;
}
static struct se_device *rd_DIRECT_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
return rd_create_virtdevice(hba, se_dev, p, 1);
}
static struct se_device *rd_MEMCPY_create_virtdevice(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
void *p)
{
return rd_create_virtdevice(hba, se_dev, p, 0);
}
/* rd_free_device(): (Part of se_subsystem_api_t template)
*
*
*/
static void rd_free_device(void *p)
{
struct rd_dev *rd_dev = p;
rd_release_device_space(rd_dev);
kfree(rd_dev);
}
static inline struct rd_request *RD_REQ(struct se_task *task)
{
return container_of(task, struct rd_request, rd_task);
}
static struct se_task *
rd_alloc_task(struct se_cmd *cmd)
{
struct rd_request *rd_req;
rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
if (!rd_req) {
printk(KERN_ERR "Unable to allocate struct rd_request\n");
return NULL;
}
rd_req->rd_dev = SE_DEV(cmd)->dev_ptr;
return &rd_req->rd_task;
}
/* rd_get_sg_table():
*
*
*/
static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page)
{
u32 i;
struct rd_dev_sg_table *sg_table;
for (i = 0; i < rd_dev->sg_table_count; i++) {
sg_table = &rd_dev->sg_table_array[i];
if ((sg_table->page_start_offset <= page) &&
(sg_table->page_end_offset >= page))
return sg_table;
}
printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
page);
return NULL;
}
/* rd_MEMCPY_read():
*
*
*/
static int rd_MEMCPY_read(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_dev;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
u32 length, page_end = 0, table_sg_end;
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = task->task_sg;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
#endif
src_offset = rd_offset;
while (req->rd_size) {
if ((sg_d[i].length - dst_offset) <
(sg_s[j].length - src_offset)) {
length = (sg_d[i].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
sg_s[j].length);
printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
dst = sg_virt(&sg_d[i++]) + dst_offset;
if (!dst)
BUG();
src = sg_virt(&sg_s[j]) + src_offset;
if (!src)
BUG();
dst_offset = 0;
src_offset = length;
page_end = 0;
} else {
length = (sg_s[j].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
" offset: %u sg_s[%d].length: %u\n", i,
&sg_d[i], sg_d[i].length, sg_d[i].offset,
j, sg_s[j].length);
printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
" src_offset: %u\n", length, dst_offset,
src_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
dst = sg_virt(&sg_d[i]) + dst_offset;
if (!dst)
BUG();
if (sg_d[i].length == length) {
i++;
dst_offset = 0;
} else
dst_offset = length;
src = sg_virt(&sg_s[j++]) + src_offset;
if (!src)
BUG();
src_offset = 0;
page_end = 1;
}
memcpy(dst, src, length);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
#endif
req->rd_size -= length;
if (!(req->rd_size))
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
sg_s = &table->sg_table[j = 0];
}
return 0;
}
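/*
 * Illustrative note on the copy loop above: sg_d walks the task's
 * destination scatterlist while sg_s walks the ramdisk's backing
 * sg_table; each iteration copies the smaller remaining span of the two
 * current entries, and page_end flags when a source page table entry was
 * consumed so the next struct rd_dev_sg_table can be fetched.
 */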
/* rd_MEMCPY_write():
*
*
*/
static int rd_MEMCPY_write(struct rd_request *req)
{
struct se_task *task = &req->rd_task;
struct rd_dev *dev = req->rd_dev;
struct rd_dev_sg_table *table;
struct scatterlist *sg_d, *sg_s;
void *dst, *src;
u32 i = 0, j = 0, dst_offset = 0, src_offset = 0;
u32 length, page_end = 0, table_sg_end;
u32 rd_offset = req->rd_offset;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
sg_s = task->task_sg;
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
req->rd_page, req->rd_offset);
#endif
dst_offset = rd_offset;
while (req->rd_size) {
if ((sg_s[i].length - src_offset) <
(sg_d[j].length - dst_offset)) {
length = (sg_s[i].length - src_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
src = sg_virt(&sg_s[i++]) + src_offset;
if (!src)
BUG();
dst = sg_virt(&sg_d[j]) + dst_offset;
if (!dst)
BUG();
src_offset = 0;
dst_offset = length;
page_end = 0;
} else {
length = (sg_d[j].length - dst_offset);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
" offset: %d sg_d[%d].length: %u\n", i,
&sg_s[i], sg_s[i].length, sg_s[i].offset,
j, sg_d[j].length);
printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
" dst_offset: %u\n", length, src_offset,
dst_offset);
#endif
if (length > req->rd_size)
length = req->rd_size;
src = sg_virt(&sg_s[i]) + src_offset;
if (!src)
BUG();
if (sg_s[i].length == length) {
i++;
src_offset = 0;
} else
src_offset = length;
dst = sg_virt(&sg_d[j++]) + dst_offset;
if (!dst)
BUG();
dst_offset = 0;
page_end = 1;
}
memcpy(dst, src, length);
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
" i: %u, j: %u\n", req->rd_page,
(req->rd_size - length), length, i, j);
#endif
req->rd_size -= length;
if (!(req->rd_size))
return 0;
if (!page_end)
continue;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_MCP
printk(KERN_INFO "getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
sg_d = &table->sg_table[j = 0];
}
return 0;
}
/* rd_MEMCPY_do_task(): (Part of se_subsystem_api_t template)
*
*
*/
static int rd_MEMCPY_do_task(struct se_task *task)
{
struct se_device *dev = task->se_dev;
struct rd_request *req = RD_REQ(task);
unsigned long long lba;
int ret;
req->rd_page = (task->task_lba * DEV_ATTRIB(dev)->block_size) / PAGE_SIZE;
lba = task->task_lba;
req->rd_offset = (do_div(lba,
(PAGE_SIZE / DEV_ATTRIB(dev)->block_size))) *
DEV_ATTRIB(dev)->block_size;
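/*
 * Example (illustrative, assuming 512 byte blocks and 4k pages): for
 * task_lba = 9, rd_page = (9 * 512) / 4096 = 1 and
 * rd_offset = (9 % 8) * 512 = 512, i.e. the I/O starts 512 bytes into
 * the second backing page.
 */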
req->rd_size = task->task_size;
if (task->task_data_direction == DMA_FROM_DEVICE)
ret = rd_MEMCPY_read(req);
else
ret = rd_MEMCPY_write(req);
if (ret != 0)
return ret;
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/* rd_DIRECT_with_offset():
*
*
*/
static int rd_DIRECT_with_offset(
struct se_task *task,
struct list_head *se_mem_list,
u32 *se_mem_cnt,
u32 *task_offset)
{
struct rd_request *req = RD_REQ(task);
struct rd_dev *dev = req->rd_dev;
struct rd_dev_sg_table *table;
struct se_mem *se_mem;
struct scatterlist *sg_s;
u32 j = 0, set_offset = 1;
u32 get_next_table = 0, offset_length, table_sg_end;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
table_sg_end = (table->page_end_offset - req->rd_page);
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
(task->task_data_direction == DMA_TO_DEVICE) ?
"Write" : "Read",
task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
#endif
while (req->rd_size) {
se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
if (!(se_mem)) {
printk(KERN_ERR "Unable to allocate struct se_mem\n");
return -1;
}
INIT_LIST_HEAD(&se_mem->se_list);
if (set_offset) {
offset_length = sg_s[j].length - req->rd_offset;
if (offset_length > req->rd_size)
offset_length = req->rd_size;
se_mem->se_page = sg_page(&sg_s[j++]);
se_mem->se_off = req->rd_offset;
se_mem->se_len = offset_length;
set_offset = 0;
get_next_table = (j > table_sg_end);
goto check_eot;
}
offset_length = (req->rd_size < req->rd_offset) ?
req->rd_size : req->rd_offset;
se_mem->se_page = sg_page(&sg_s[j]);
se_mem->se_len = offset_length;
set_offset = 1;
check_eot:
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
req->rd_page, req->rd_size, offset_length, j, se_mem,
se_mem->se_page, se_mem->se_off, se_mem->se_len);
#endif
list_add_tail(&se_mem->se_list, se_mem_list);
(*se_mem_cnt)++;
req->rd_size -= offset_length;
if (!(req->rd_size))
goto out;
if (!set_offset && !get_next_table)
continue;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
sg_s = &table->sg_table[j = 0];
}
out:
T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
*se_mem_cnt);
#endif
return 0;
}
/* rd_DIRECT_without_offset():
*
*
*/
static int rd_DIRECT_without_offset(
struct se_task *task,
struct list_head *se_mem_list,
u32 *se_mem_cnt,
u32 *task_offset)
{
struct rd_request *req = RD_REQ(task);
struct rd_dev *dev = req->rd_dev;
struct rd_dev_sg_table *table;
struct se_mem *se_mem;
struct scatterlist *sg_s;
u32 length, j = 0;
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
(task->task_data_direction == DMA_TO_DEVICE) ?
"Write" : "Read",
task->task_lba, req->rd_size, req->rd_page);
#endif
while (req->rd_size) {
se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
if (!(se_mem)) {
printk(KERN_ERR "Unable to allocate struct se_mem\n");
return -1;
}
INIT_LIST_HEAD(&se_mem->se_list);
length = (req->rd_size < sg_s[j].length) ?
req->rd_size : sg_s[j].length;
se_mem->se_page = sg_page(&sg_s[j++]);
se_mem->se_len = length;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
req->rd_size, j, se_mem, se_mem->se_page,
se_mem->se_off, se_mem->se_len);
#endif
list_add_tail(&se_mem->se_list, se_mem_list);
(*se_mem_cnt)++;
req->rd_size -= length;
if (!(req->rd_size))
goto out;
if (++req->rd_page <= table->page_end_offset) {
#ifdef DEBUG_RAMDISK_DR
printk("page: %u in same page table\n",
req->rd_page);
#endif
continue;
}
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "getting new page table for page: %u\n",
req->rd_page);
#endif
table = rd_get_sg_table(dev, req->rd_page);
if (!(table))
return -1;
sg_s = &table->sg_table[j = 0];
}
out:
T_TASK(task->task_se_cmd)->t_tasks_se_num += *se_mem_cnt;
#ifdef DEBUG_RAMDISK_DR
printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
*se_mem_cnt);
#endif
return 0;
}
/* rd_DIRECT_do_se_mem_map():
*
*
*/
static int rd_DIRECT_do_se_mem_map(
struct se_task *task,
struct list_head *se_mem_list,
void *in_mem,
struct se_mem *in_se_mem,
struct se_mem **out_se_mem,
u32 *se_mem_cnt,
u32 *task_offset_in)
{
struct se_cmd *cmd = task->task_se_cmd;
struct rd_request *req = RD_REQ(task);
u32 task_offset = *task_offset_in;
unsigned long long lba;
int ret;
req->rd_page = ((task->task_lba * DEV_ATTRIB(task->se_dev)->block_size) /
PAGE_SIZE);
lba = task->task_lba;
req->rd_offset = (do_div(lba,
(PAGE_SIZE / DEV_ATTRIB(task->se_dev)->block_size))) *
DEV_ATTRIB(task->se_dev)->block_size;
req->rd_size = task->task_size;
if (req->rd_offset)
ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
task_offset_in);
else
ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
task_offset_in);
if (ret < 0)
return ret;
if (CMD_TFO(cmd)->task_sg_chaining == 0)
return 0;
/*
* Currently prevent writers from multiple HW fabrics doing
* pci_map_sg() to RD_DR's internal scatterlist memory.
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
printk(KERN_ERR "DMA_TO_DEVICE not supported for"
" RAMDISK_DR with task_sg_chaining=1\n");
return -1;
}
/*
 * Special case: if task_sg_chaining is enabled, then
 * we setup struct se_task->task_sg[], as it will be used by
 * transport_do_task_sg_chain() for creating chained SGLs
* across multiple struct se_task->task_sg[].
*/
if (!(transport_calc_sg_num(task,
list_entry(T_TASK(cmd)->t_mem_list->next,
struct se_mem, se_list),
task_offset)))
return -1;
return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
list_entry(T_TASK(cmd)->t_mem_list->next,
struct se_mem, se_list),
out_se_mem, se_mem_cnt, task_offset_in);
}
/* rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
*
*
*/
static int rd_DIRECT_do_task(struct se_task *task)
{
/*
* At this point the locally allocated RD tables have been mapped
* to struct se_mem elements in rd_DIRECT_do_se_mem_map().
*/
task->task_scsi_status = GOOD;
transport_complete_task(task, 1);
return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}
/* rd_free_task(): (Part of se_subsystem_api_t template)
*
*
*/
static void rd_free_task(struct se_task *task)
{
kfree(RD_REQ(task));
}
enum {
Opt_rd_pages, Opt_err
};
static match_table_t tokens = {
{Opt_rd_pages, "rd_pages=%d"},
{Opt_err, NULL}
};
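/*
 * Usage sketch (assuming the usual target_core configfs layout, which is
 * not defined in this file): writing "rd_pages=32768" to the ramdisk
 * device's control attribute sets rd_page_count, giving
 * 32768 * PAGE_SIZE bytes of backing store (128 MiB with 4k pages).
 */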
static ssize_t rd_set_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
const char *page,
ssize_t count)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
char *orig, *ptr, *opts;
substring_t args[MAX_OPT_ARGS];
int ret = 0, arg, token;
opts = kstrdup(page, GFP_KERNEL);
if (!opts)
return -ENOMEM;
orig = opts;
while ((ptr = strsep(&opts, ",")) != NULL) {
if (!*ptr)
continue;
token = match_token(ptr, tokens, args);
switch (token) {
case Opt_rd_pages:
match_int(args, &arg);
rd_dev->rd_page_count = arg;
printk(KERN_INFO "RAMDISK: Referencing Page"
" Count: %u\n", rd_dev->rd_page_count);
rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
break;
default:
break;
}
}
kfree(orig);
return (!ret) ? count : ret;
}
static ssize_t rd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
printk(KERN_INFO "Missing rd_pages= parameter\n");
return -1;
}
return 0;
}
static ssize_t rd_show_configfs_dev_params(
struct se_hba *hba,
struct se_subsystem_dev *se_dev,
char *b)
{
struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
ssize_t bl = sprintf(b, "TCM RamDisk ID: %u RamDisk Makeup: %s\n",
rd_dev->rd_dev_id, (rd_dev->rd_direct) ?
"rd_direct" : "rd_mcp");
bl += sprintf(b + bl, " PAGES/PAGE_SIZE: %u*%lu"
" SG_table_count: %u\n", rd_dev->rd_page_count,
PAGE_SIZE, rd_dev->sg_table_count);
return bl;
}
/* rd_get_cdb(): (Part of se_subsystem_api_t template)
*
*
*/
static unsigned char *rd_get_cdb(struct se_task *task)
{
struct rd_request *req = RD_REQ(task);
return req->rd_scsi_cdb;
}
static u32 rd_get_device_rev(struct se_device *dev)
{
return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
}
static u32 rd_get_device_type(struct se_device *dev)
{
return TYPE_DISK;
}
static sector_t rd_get_blocks(struct se_device *dev)
{
struct rd_dev *rd_dev = dev->dev_ptr;
unsigned long long blocks_long = ((rd_dev->rd_page_count * PAGE_SIZE) /
DEV_ATTRIB(dev)->block_size) - 1;
return blocks_long;
}
static struct se_subsystem_api rd_dr_template = {
.name = "rd_dr",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.allocate_virtdevice = rd_DIRECT_allocate_virtdevice,
.create_virtdevice = rd_DIRECT_create_virtdevice,
.free_device = rd_free_device,
.alloc_task = rd_alloc_task,
.do_task = rd_DIRECT_do_task,
.free_task = rd_free_task,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_cdb = rd_get_cdb,
.get_device_rev = rd_get_device_rev,
.get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
.do_se_mem_map = rd_DIRECT_do_se_mem_map,
};
static struct se_subsystem_api rd_mcp_template = {
.name = "rd_mcp",
.transport_type = TRANSPORT_PLUGIN_VHBA_VDEV,
.attach_hba = rd_attach_hba,
.detach_hba = rd_detach_hba,
.allocate_virtdevice = rd_MEMCPY_allocate_virtdevice,
.create_virtdevice = rd_MEMCPY_create_virtdevice,
.free_device = rd_free_device,
.alloc_task = rd_alloc_task,
.do_task = rd_MEMCPY_do_task,
.free_task = rd_free_task,
.check_configfs_dev_params = rd_check_configfs_dev_params,
.set_configfs_dev_params = rd_set_configfs_dev_params,
.show_configfs_dev_params = rd_show_configfs_dev_params,
.get_cdb = rd_get_cdb,
.get_device_rev = rd_get_device_rev,
.get_device_type = rd_get_device_type,
.get_blocks = rd_get_blocks,
};
int __init rd_module_init(void)
{
int ret;
ret = transport_subsystem_register(&rd_dr_template);
if (ret < 0)
return ret;
ret = transport_subsystem_register(&rd_mcp_template);
if (ret < 0) {
transport_subsystem_release(&rd_dr_template);
return ret;
}
return 0;
}
void rd_module_exit(void)
{
transport_subsystem_release(&rd_dr_template);
transport_subsystem_release(&rd_mcp_template);
}
#ifndef TARGET_CORE_RD_H
#define TARGET_CORE_RD_H
#define RD_HBA_VERSION "v4.0"
#define RD_DR_VERSION "4.0"
#define RD_MCP_VERSION "4.0"
/* Largest piece of memory kmalloc can allocate */
#define RD_MAX_ALLOCATION_SIZE 65536
/* Maximum queuedepth for the Ramdisk HBA */
#define RD_HBA_QUEUE_DEPTH 256
#define RD_DEVICE_QUEUE_DEPTH 32
#define RD_MAX_DEVICE_QUEUE_DEPTH 128
#define RD_BLOCKSIZE 512
#define RD_MAX_SECTORS 1024
extern struct kmem_cache *se_mem_cache;
/* Used in target_core_init_configfs() for virtual LUN 0 access */
int __init rd_module_init(void);
void rd_module_exit(void);
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
struct rd_request {
struct se_task rd_task;
/* SCSI CDB from iSCSI Command PDU */
unsigned char rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
/* Offset from start of page */
u32 rd_offset;
/* Starting page in Ramdisk for request */
u32 rd_page;
/* Total number of pages needed for request */
u32 rd_page_count;
/* Remaining size of the request in bytes */
u32 rd_size;
/* Ramdisk device */
struct rd_dev *rd_dev;
} ____cacheline_aligned;
struct rd_dev_sg_table {
u32 page_start_offset;
u32 page_end_offset;
u32 rd_sg_count;
struct scatterlist *sg_table;
} ____cacheline_aligned;
#define RDF_HAS_PAGE_COUNT 0x01
struct rd_dev {
int rd_direct;
u32 rd_flags;
/* Unique Ramdisk Device ID in Ramdisk HBA */
u32 rd_dev_id;
/* Total page count for ramdisk device */
u32 rd_page_count;
/* Number of SG tables in sg_table_array */
u32 sg_table_count;
u32 rd_queue_depth;
/* Array of struct rd_dev_sg_table containing scatterlists */
struct rd_dev_sg_table *sg_table_array;
/* Ramdisk HBA device is connected to */
struct rd_host *rd_host;
} ____cacheline_aligned;
struct rd_host {
u32 rd_host_dev_id_count;
u32 rd_host_id; /* Unique Ramdisk Host ID */
} ____cacheline_aligned;
#endif /* TARGET_CORE_RD_H */
/*******************************************************************************
* Filename: target_core_scdb.c
*
* This file contains the generic target engine Split CDB related functions.
*
* Copyright (c) 2004-2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <scsi/scsi.h>
#include <asm/unaligned.h>
#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include "target_core_scdb.h"
/* split_cdb_XX_6():
*
* 21-bit LBA w/ 8-bit SECTORS
*/
void split_cdb_XX_6(
unsigned long long lba,
u32 *sectors,
unsigned char *cdb)
{
cdb[1] = (lba >> 16) & 0x1f;
cdb[2] = (lba >> 8) & 0xff;
cdb[3] = lba & 0xff;
cdb[4] = *sectors & 0xff;
}
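/*
 * Example (illustrative): for lba = 0x12345 and *sectors = 8 this yields
 * cdb[1] = 0x01, cdb[2] = 0x23, cdb[3] = 0x45 and cdb[4] = 0x08, i.e. the
 * 21-bit LBA is split across the low 5 bits of byte 1 plus bytes 2-3.
 */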
/* split_cdb_XX_10():
*
* 32-bit LBA w/ 16-bit SECTORS
*/
void split_cdb_XX_10(
unsigned long long lba,
u32 *sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
put_unaligned_be16(*sectors, &cdb[7]);
}
/* split_cdb_XX_12():
*
* 32-bit LBA w/ 32-bit SECTORS
*/
void split_cdb_XX_12(
unsigned long long lba,
u32 *sectors,
unsigned char *cdb)
{
put_unaligned_be32(lba, &cdb[2]);
put_unaligned_be32(*sectors, &cdb[6]);
}
/* split_cdb_XX_16():
*
* 64-bit LBA w/ 32-bit SECTORS
*/
void split_cdb_XX_16(
unsigned long long lba,
u32 *sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[2]);
put_unaligned_be32(*sectors, &cdb[10]);
}
/*
* split_cdb_XX_32():
*
* 64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32
*/
void split_cdb_XX_32(
unsigned long long lba,
u32 *sectors,
unsigned char *cdb)
{
put_unaligned_be64(lba, &cdb[12]);
put_unaligned_be32(*sectors, &cdb[28]);
}
#ifndef TARGET_CORE_SCDB_H
#define TARGET_CORE_SCDB_H
extern void split_cdb_XX_6(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_10(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_12(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_16(unsigned long long, u32 *, unsigned char *);
extern void split_cdb_XX_32(unsigned long long, u32 *, unsigned char *);
#endif /* TARGET_CORE_SCDB_H */
/*******************************************************************************
* Filename: target_core_tmr.c
*
* This file contains SPC-3 task management infrastructure
*
* Copyright (c) 2009,2010 Rising Tide Systems
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tmr.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
#include "target_core_pr.h"
#define DEBUG_LUN_RESET
#ifdef DEBUG_LUN_RESET
#define DEBUG_LR(x...) printk(KERN_INFO x)
#else
#define DEBUG_LR(x...)
#endif
struct se_tmr_req *core_tmr_alloc_req(
struct se_cmd *se_cmd,
void *fabric_tmr_ptr,
u8 function)
{
struct se_tmr_req *tmr;
tmr = kmem_cache_zalloc(se_tmr_req_cache, GFP_KERNEL);
if (!(tmr)) {
printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
return ERR_PTR(-ENOMEM);
}
tmr->task_cmd = se_cmd;
tmr->fabric_tmr_ptr = fabric_tmr_ptr;
tmr->function = function;
INIT_LIST_HEAD(&tmr->tmr_list);
return tmr;
}
EXPORT_SYMBOL(core_tmr_alloc_req);
void core_tmr_release_req(
struct se_tmr_req *tmr)
{
struct se_device *dev = tmr->tmr_dev;
spin_lock(&dev->se_tmr_lock);
list_del(&tmr->tmr_list);
kmem_cache_free(se_tmr_req_cache, tmr);
spin_unlock(&dev->se_tmr_lock);
}
static void core_tmr_handle_tas_abort(
struct se_node_acl *tmr_nacl,
struct se_cmd *cmd,
int tas,
int fe_count)
{
if (!(fe_count)) {
transport_cmd_finish_abort(cmd, 1);
return;
}
/*
* TASK ABORTED status (TAS) bit support
*/
if (((tmr_nacl != NULL) &&
(tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
transport_send_task_abort(cmd);
transport_cmd_finish_abort(cmd, 0);
}
int core_tmr_lun_reset(
struct se_device *dev,
struct se_tmr_req *tmr,
struct list_head *preempt_and_abort_list,
struct se_cmd *prout_cmd)
{
struct se_cmd *cmd;
struct se_queue_req *qr, *qr_tmp;
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
struct se_queue_obj *qobj = dev->dev_queue_obj;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_task *task, *task_tmp;
unsigned long flags;
int fe_count, state, tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
* struct se_device attributes. spc4r17 section 7.4.6 Control mode page
*
* A task aborted status (TAS) bit set to zero specifies that aborted
* tasks shall be terminated by the device server without any response
* to the application client. A TAS bit set to one specifies that tasks
* aborted by the actions of an I_T nexus other than the I_T nexus on
* which the command was received shall be completed with TASK ABORTED
* status (see SAM-4).
*/
tas = DEV_ATTRIB(dev)->emulate_tas;
/*
* Determine if this se_tmr is coming from a $FABRIC_MOD
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
TPG_TFO(tmr_tpg)->get_fabric_name(),
tmr_nacl->initiatorname);
}
}
DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
TRANSPORT(dev)->name, tas);
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
*/
spin_lock(&dev->se_tmr_lock);
list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {
/*
* Allow the received TMR to return with FUNCTION_COMPLETE.
*/
if (tmr && (tmr_p == tmr))
continue;
cmd = tmr_p->task_cmd;
if (!(cmd)) {
printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
continue;
}
/*
* If this function was called with a valid pr_res_key
 * parameter (e.g., for the PROUT PREEMPT_AND_ABORT service action),
 * skip TMRs whose commands do not match a registration key.
*/
if ((preempt_and_abort_list != NULL) &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
spin_unlock(&dev->se_tmr_lock);
spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
if (!(atomic_read(&T_TASK(cmd)->t_transport_active))) {
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
spin_lock(&dev->se_tmr_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
spin_lock(&dev->se_tmr_lock);
continue;
}
DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
" Response: 0x%02x, t_state: %d\n",
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
transport_cmd_finish_abort_tmr(cmd);
spin_lock(&dev->se_tmr_lock);
}
spin_unlock(&dev->se_tmr_lock);
/*
* Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.
* This is following sam4r17, section 5.6 Aborting commands, Table 38
* for TMR LUN_RESET:
*
* a) "Yes" indicates that each command that is aborted on an I_T nexus
* other than the one that caused the SCSI device condition is
* completed with TASK ABORTED status, if the TAS bit is set to one in
* the Control mode page (see SPC-4). "No" indicates that no status is
* returned for aborted commands.
*
* d) If the logical unit reset is caused by a particular I_T nexus
* (e.g., by a LOGICAL UNIT RESET task management function), then "yes"
* (TASK_ABORTED status) applies.
*
* Otherwise (e.g., if triggered by a hard reset), "no"
* (no TASK_ABORTED SAM status) applies.
*
* Note that this seems to be independent of TAS (Task Aborted Status)
* in the Control Mode Page.
*/
spin_lock_irqsave(&dev->execute_task_lock, flags);
list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
t_state_list) {
if (!(TASK_CMD(task))) {
printk(KERN_ERR "TASK_CMD(task) is NULL!\n");
continue;
}
cmd = TASK_CMD(task);
if (!T_TASK(cmd)) {
printk(KERN_ERR "T_TASK(cmd) is NULL for task: %p cmd:"
" %p ITT: 0x%08x\n", task, cmd,
CMD_TFO(cmd)->get_task_tag(cmd));
continue;
}
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if ((preempt_and_abort_list != NULL) &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
list_del(&task->t_state_list);
atomic_set(&task->task_state_active, 0);
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
"def_t_state: %d/%d cdb: 0x%02x\n",
(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
CMD_TFO(cmd)->get_task_tag(cmd), 0,
CMD_TFO(cmd)->get_cmd_state(cmd), cmd->t_state,
cmd->deferred_t_state, T_TASK(cmd)->t_task_cdb[0]);
DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
" t_task_cdbs: %d t_task_cdbs_left: %d"
" t_task_cdbs_sent: %d -- t_transport_active: %d"
" t_transport_stop: %d t_transport_sent: %d\n",
CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
T_TASK(cmd)->t_task_cdbs,
atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
atomic_read(&T_TASK(cmd)->t_transport_active),
atomic_read(&T_TASK(cmd)->t_transport_stop),
atomic_read(&T_TASK(cmd)->t_transport_sent));
if (atomic_read(&task->task_active)) {
atomic_set(&task->task_stop, 1);
spin_unlock_irqrestore(
&T_TASK(cmd)->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
" for dev: %p\n", task, dev);
wait_for_completion(&task->task_stop_comp);
DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
" dev: %p\n", task, dev);
spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
atomic_dec(&T_TASK(cmd)->t_task_cdbs_left);
atomic_set(&task->task_active, 0);
atomic_set(&task->task_stop, 0);
}
__transport_stop_task_timer(task, &flags);
if (!(atomic_dec_and_test(&T_TASK(cmd)->t_task_cdbs_ex_left))) {
spin_unlock_irqrestore(
&T_TASK(cmd)->t_state_lock, flags);
DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
" t_task_cdbs_ex_left: %d\n", task, dev,
atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left));
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
fe_count = atomic_read(&T_TASK(cmd)->t_fe_count);
if (atomic_read(&T_TASK(cmd)->t_transport_active)) {
DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
" task: %p, t_fe_count: %d dev: %p\n", task,
fe_count, dev);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
continue;
}
DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
" t_fe_count: %d dev: %p\n", task, fe_count, dev);
spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
spin_lock_irqsave(&dev->execute_task_lock, flags);
}
spin_unlock_irqrestore(&dev->execute_task_lock, flags);
/*
* Release all commands remaining in the struct se_device cmd queue.
*
* This follows the same logic as above for the struct se_device's
* struct se_task state list: commands are returned with TASK_ABORTED
* status if there is an outstanding $FABRIC_MOD reference, otherwise
* the struct se_cmd is released.
*/
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
list_for_each_entry_safe(qr, qr_tmp, &qobj->qobj_list, qr_list) {
cmd = (struct se_cmd *)qr->cmd;
if (!(cmd)) {
/*
* Only release queue entries without an associated se_cmd here
* for regular LUN_RESET (non PREEMPT_AND_ABORT) usage; otherwise
* skip them.
*/
if (preempt_and_abort_list != NULL)
continue;
atomic_dec(&qobj->queue_cnt);
list_del(&qr->qr_list);
kfree(qr);
continue;
}
/*
* For PREEMPT_AND_ABORT usage, only process commands
* with a matching reservation key.
*/
if ((preempt_and_abort_list != NULL) &&
(core_scsi3_check_cdb_abort_and_preempt(
preempt_and_abort_list, cmd) != 0))
continue;
/*
* Not aborting PROUT PREEMPT_AND_ABORT CDB..
*/
if (prout_cmd == cmd)
continue;
atomic_dec(&T_TASK(cmd)->t_transport_queue_active);
atomic_dec(&qobj->queue_cnt);
list_del(&qr->qr_list);
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
state = qr->state;
kfree(qr);
DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
"Preempt" : "", cmd, state,
atomic_read(&T_TASK(cmd)->t_fe_count));
/*
* Signal that the command has failed via cmd->se_cmd_flags,
* and call TFO->new_cmd_failure() to wakeup any fabric
* dependent code used to wait for unsolicited data out
* allocation to complete. The fabric module is expected
* to dump any remaining unsolicited data out for the aborted
* command at this point.
*/
transport_new_cmd_failure(cmd);
core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,
atomic_read(&T_TASK(cmd)->t_fe_count));
spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
}
spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
/*
* Clear any legacy SPC-2 reservation when called during
* LOGICAL UNIT RESET
*/
if (!(preempt_and_abort_list) &&
(dev->dev_flags & DF_SPC2_RESERVATIONS)) {
spin_lock(&dev->dev_reservation_lock);
dev->dev_reserved_node_acl = NULL;
dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
spin_unlock(&dev->dev_reservation_lock);
printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
}
spin_lock(&dev->stats_lock);
dev->num_resets++;
spin_unlock(&dev->stats_lock);
DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
(preempt_and_abort_list) ? "Preempt" : "TMR",
TRANSPORT(dev)->name);
return 0;
}
/*******************************************************************************
* Filename: target_core_tpg.c
*
* This file contains generic Target Portal Group related functions.
*
* Copyright (c) 2002, 2003, 2004, 2005 PyX Technologies, Inc.
* Copyright (c) 2005, 2006, 2007 SBE, Inc.
* Copyright (c) 2007-2010 Rising Tide Systems
* Copyright (c) 2008-2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/net.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/smp_lock.h>
#include <linux/in.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include "target_core_hba.h"
/* core_clear_initiator_node_from_tpg():
*
*
*/
static void core_clear_initiator_node_from_tpg(
struct se_node_acl *nacl,
struct se_portal_group *tpg)
{
int i;
struct se_dev_entry *deve;
struct se_lun *lun;
struct se_lun_acl *acl, *acl_tmp;
spin_lock_irq(&nacl->device_list_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
continue;
if (!deve->se_lun) {
printk(KERN_ERR "%s device entries device pointer is"
" NULL, but Initiator has access.\n",
TPG_TFO(tpg)->get_fabric_name());
continue;
}
lun = deve->se_lun;
spin_unlock_irq(&nacl->device_list_lock);
core_update_device_list_for_node(lun, NULL, deve->mapped_lun,
TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg, 0);
spin_lock(&lun->lun_acl_lock);
list_for_each_entry_safe(acl, acl_tmp,
&lun->lun_acl_list, lacl_list) {
if (!(strcmp(acl->initiatorname,
nacl->initiatorname)) &&
(acl->mapped_lun == deve->mapped_lun))
break;
}
if (!acl) {
printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
" mapped_lun: %u\n", nacl->initiatorname,
deve->mapped_lun);
spin_unlock(&lun->lun_acl_lock);
spin_lock_irq(&nacl->device_list_lock);
continue;
}
list_del(&acl->lacl_list);
spin_unlock(&lun->lun_acl_lock);
spin_lock_irq(&nacl->device_list_lock);
kfree(acl);
}
spin_unlock_irq(&nacl->device_list_lock);
}
/* __core_tpg_get_initiator_node_acl():
*
* spin_lock_bh(&tpg->acl_node_lock) must be held when calling
*/
struct se_node_acl *__core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
const char *initiatorname)
{
struct se_node_acl *acl;
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!(strcmp(acl->initiatorname, initiatorname)))
return acl;
}
return NULL;
}
/* core_tpg_get_initiator_node_acl():
*
*
*/
struct se_node_acl *core_tpg_get_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
{
struct se_node_acl *acl;
spin_lock_bh(&tpg->acl_node_lock);
list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
if (!(strcmp(acl->initiatorname, initiatorname)) &&
(!(acl->dynamic_node_acl))) {
spin_unlock_bh(&tpg->acl_node_lock);
return acl;
}
}
spin_unlock_bh(&tpg->acl_node_lock);
return NULL;
}
/* core_tpg_add_node_to_devs():
*
*
*/
void core_tpg_add_node_to_devs(
struct se_node_acl *acl,
struct se_portal_group *tpg)
{
int i = 0;
u32 lun_access = 0;
struct se_lun *lun;
struct se_device *dev;
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = &tpg->tpg_lun_list[i];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE)
continue;
spin_unlock(&tpg->tpg_lun_lock);
dev = lun->lun_se_dev;
/*
* By default in LIO-Target $FABRIC_MOD,
* demo_mode_write_protect is ON, i.e. LUNs are exported READ_ONLY.
*/
if (!(TPG_TFO(tpg)->tpg_check_demo_mode_write_protect(tpg))) {
if (dev->dev_flags & DF_READ_ONLY)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
} else {
/*
* Allow only optical drives to issue R/W in default RO
* demo mode.
*/
if (TRANSPORT(dev)->get_device_type(dev) == TYPE_DISK)
lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
else
lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
}
printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
" access for LUN in Demo Mode\n",
TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), lun->unpacked_lun,
(lun_access == TRANSPORT_LUNFLAGS_READ_WRITE) ?
"READ-WRITE" : "READ-ONLY");
core_update_device_list_for_node(lun, NULL, lun->unpacked_lun,
lun_access, acl, tpg, 1);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
}
/* core_set_queue_depth_for_node():
*
*
*/
static int core_set_queue_depth_for_node(
struct se_portal_group *tpg,
struct se_node_acl *acl)
{
if (!acl->queue_depth) {
printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
"defaulting to 1.\n", TPG_TFO(tpg)->get_fabric_name(),
acl->initiatorname);
acl->queue_depth = 1;
}
return 0;
}
/* core_create_device_list_for_node():
*
*
*/
static int core_create_device_list_for_node(struct se_node_acl *nacl)
{
struct se_dev_entry *deve;
int i;
nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
if (!(nacl->device_list)) {
printk(KERN_ERR "Unable to allocate memory for"
" struct se_node_acl->device_list\n");
return -1;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
deve = &nacl->device_list[i];
atomic_set(&deve->ua_count, 0);
atomic_set(&deve->pr_ref_count, 0);
spin_lock_init(&deve->ua_lock);
INIT_LIST_HEAD(&deve->alua_port_list);
INIT_LIST_HEAD(&deve->ua_list);
}
return 0;
}
/* core_tpg_check_initiator_node_acl()
*
*
*/
struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *tpg,
unsigned char *initiatorname)
{
struct se_node_acl *acl;
acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
if ((acl))
return acl;
if (!(TPG_TFO(tpg)->tpg_check_demo_mode(tpg)))
return NULL;
acl = TPG_TFO(tpg)->tpg_alloc_fabric_acl(tpg);
if (!(acl))
return NULL;
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
atomic_set(&acl->mib_ref_count, 0);
acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg);
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
TPG_TFO(tpg)->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
return NULL;
}
core_tpg_add_node_to_devs(acl, tpg);
spin_lock_bh(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock);
printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
TPG_TFO(tpg)->get_fabric_name(), initiatorname);
return acl;
}
EXPORT_SYMBOL(core_tpg_check_initiator_node_acl);
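/*
 * Illustrative call pattern only (fabric-side names are hypothetical): a
 * fabric module typically calls this from its login/session setup path:
 *
 *	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiator_name);
 *	if (!se_nacl)
 *		return -EPERM;	/* no explicit ACL and demo mode is disabled */
 *
 * and then associates the returned struct se_node_acl with its new
 * struct se_session via transport_register_session().
 */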
void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl)
{
while (atomic_read(&nacl->acl_pr_ref_count) != 0)
cpu_relax();
}
void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl)
{
while (atomic_read(&nacl->mib_ref_count) != 0)
cpu_relax();
}
void core_tpg_clear_object_luns(struct se_portal_group *tpg)
{
int i, ret;
struct se_lun *lun;
spin_lock(&tpg->tpg_lun_lock);
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = &tpg->tpg_lun_list[i];
if ((lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) ||
(lun->lun_se_dev == NULL))
continue;
spin_unlock(&tpg->tpg_lun_lock);
ret = core_dev_del_lun(tpg, lun->unpacked_lun);
spin_lock(&tpg->tpg_lun_lock);
}
spin_unlock(&tpg->tpg_lun_lock);
}
EXPORT_SYMBOL(core_tpg_clear_object_luns);
/* core_tpg_add_initiator_node_acl():
*
*
*/
struct se_node_acl *core_tpg_add_initiator_node_acl(
struct se_portal_group *tpg,
struct se_node_acl *se_nacl,
const char *initiatorname,
u32 queue_depth)
{
struct se_node_acl *acl = NULL;
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if ((acl)) {
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
" for %s\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), initiatorname);
spin_unlock_bh(&tpg->acl_node_lock);
/*
* Release the locally allocated struct se_node_acl
* because core_tpg_add_initiator_node_acl() returned
* a pointer to an existing demo mode node ACL.
*/
if (se_nacl)
TPG_TFO(tpg)->tpg_release_fabric_acl(tpg,
se_nacl);
goto done;
}
printk(KERN_ERR "ACL entry for %s Initiator"
" Node %s already exists for TPG %u, ignoring"
" request.\n", TPG_TFO(tpg)->get_fabric_name(),
initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return ERR_PTR(-EEXIST);
}
spin_unlock_bh(&tpg->acl_node_lock);
if (!(se_nacl)) {
printk("struct se_node_acl pointer is NULL\n");
return ERR_PTR(-EINVAL);
}
/*
* For v4.x logic, struct se_node_acl is hanging off a fabric
* dependent structure allocated via
* struct target_core_fabric_ops->fabric_make_nodeacl()
*/
acl = se_nacl;
INIT_LIST_HEAD(&acl->acl_list);
INIT_LIST_HEAD(&acl->acl_sess_list);
spin_lock_init(&acl->device_list_lock);
spin_lock_init(&acl->nacl_sess_lock);
atomic_set(&acl->acl_pr_ref_count, 0);
acl->queue_depth = queue_depth;
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
spin_lock_init(&acl->stats_lock);
TPG_TFO(tpg)->set_default_node_attributes(acl);
if (core_create_device_list_for_node(acl) < 0) {
TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-ENOMEM);
}
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
core_free_device_list_for_node(acl, tpg);
TPG_TFO(tpg)->tpg_release_fabric_acl(tpg, acl);
return ERR_PTR(-EINVAL);
}
spin_lock_bh(&tpg->acl_node_lock);
list_add_tail(&acl->acl_list, &tpg->acl_node_list);
tpg->num_node_acls++;
spin_unlock_bh(&tpg->acl_node_lock);
done:
printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
TPG_TFO(tpg)->get_fabric_name(), initiatorname);
return acl;
}
EXPORT_SYMBOL(core_tpg_add_initiator_node_acl);
/* core_tpg_del_initiator_node_acl():
*
*
*/
int core_tpg_del_initiator_node_acl(
struct se_portal_group *tpg,
struct se_node_acl *acl,
int force)
{
struct se_session *sess, *sess_tmp;
int dynamic_acl = 0;
spin_lock_bh(&tpg->acl_node_lock);
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
dynamic_acl = 1;
}
list_del(&acl->acl_list);
tpg->num_node_acls--;
spin_unlock_bh(&tpg->acl_node_lock);
spin_lock_bh(&tpg->session_lock);
list_for_each_entry_safe(sess, sess_tmp,
&tpg->tpg_sess_list, sess_list) {
if (sess->se_node_acl != acl)
continue;
/*
* Determine if the session needs to be closed by our context.
*/
if (!(TPG_TFO(tpg)->shutdown_session(sess)))
continue;
spin_unlock_bh(&tpg->session_lock);
/*
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
TPG_TFO(tpg)->close_session(sess);
spin_lock_bh(&tpg->session_lock);
}
spin_unlock_bh(&tpg->session_lock);
core_tpg_wait_for_nacl_pr_ref(acl);
core_tpg_wait_for_mib_ref(acl);
core_clear_initiator_node_from_tpg(acl, tpg);
core_free_device_list_for_node(acl, tpg);
printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
" Initiator Node: %s\n", TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg), acl->queue_depth,
TPG_TFO(tpg)->get_fabric_name(), acl->initiatorname);
return 0;
}
EXPORT_SYMBOL(core_tpg_del_initiator_node_acl);
/* core_tpg_set_initiator_node_queue_depth():
*
*
*/
int core_tpg_set_initiator_node_queue_depth(
struct se_portal_group *tpg,
unsigned char *initiatorname,
u32 queue_depth,
int force)
{
struct se_session *sess, *init_sess = NULL;
struct se_node_acl *acl;
int dynamic_acl = 0;
spin_lock_bh(&tpg->acl_node_lock);
acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
if (!(acl)) {
printk(KERN_ERR "Access Control List entry for %s Initiator"
" Node %s does not exists for TPG %hu, ignoring"
" request.\n", TPG_TFO(tpg)->get_fabric_name(),
initiatorname, TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock_bh(&tpg->acl_node_lock);
return -ENODEV;
}
if (acl->dynamic_node_acl) {
acl->dynamic_node_acl = 0;
dynamic_acl = 1;
}
spin_unlock_bh(&tpg->acl_node_lock);
spin_lock_bh(&tpg->session_lock);
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) {
if (sess->se_node_acl != acl)
continue;
if (!force) {
printk(KERN_ERR "Unable to change queue depth for %s"
" Initiator Node: %s while session is"
" operational. To forcefully change the queue"
" depth and force session reinstatement"
" use the \"force=1\" parameter.\n",
TPG_TFO(tpg)->get_fabric_name(), initiatorname);
spin_unlock_bh(&tpg->session_lock);
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
spin_unlock_bh(&tpg->acl_node_lock);
return -EEXIST;
}
/*
* Determine if the session needs to be closed by our context.
*/
if (!(TPG_TFO(tpg)->shutdown_session(sess)))
continue;
init_sess = sess;
break;
}
/*
* User has requested to change the queue depth for an Initiator Node.
* Change the value in the Node's struct se_node_acl, and call
* core_set_queue_depth_for_node() to add the requested queue depth.
*
* Finally call TPG_TFO(tpg)->close_session() to force session
* reinstatement to occur if there is an active session for the
* $FABRIC_MOD Initiator Node in question.
*/
acl->queue_depth = queue_depth;
if (core_set_queue_depth_for_node(tpg, acl) < 0) {
spin_unlock_bh(&tpg->session_lock);
/*
* Force session reinstatement if
* core_set_queue_depth_for_node() failed, because we assume
* the $FABRIC_MOD has already set the session reinstatement
* bit via TPG_TFO(tpg)->shutdown_session() called above.
*/
if (init_sess)
TPG_TFO(tpg)->close_session(init_sess);
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
spin_unlock_bh(&tpg->acl_node_lock);
return -EINVAL;
}
spin_unlock_bh(&tpg->session_lock);
/*
* If the $FABRIC_MOD session for the Initiator Node ACL exists,
* forcefully shutdown the $FABRIC_MOD session/nexus.
*/
if (init_sess)
TPG_TFO(tpg)->close_session(init_sess);
printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
" Node: %s on %s Target Portal Group: %u\n", queue_depth,
initiatorname, TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_lock_bh(&tpg->acl_node_lock);
if (dynamic_acl)
acl->dynamic_node_acl = 1;
spin_unlock_bh(&tpg->acl_node_lock);
return 0;
}
EXPORT_SYMBOL(core_tpg_set_initiator_node_queue_depth);
static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
{
/* Set in core_dev_setup_virtual_lun0() */
struct se_device *dev = se_global->g_lun0_dev;
struct se_lun *lun = &se_tpg->tpg_virt_lun0;
u32 lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
int ret;
lun->unpacked_lun = 0;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
if (ret < 0)
return -1;
return 0;
}
static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
{
struct se_lun *lun = &se_tpg->tpg_virt_lun0;
core_tpg_post_dellun(se_tpg, lun);
}
int core_tpg_register(
struct target_core_fabric_ops *tfo,
struct se_wwn *se_wwn,
struct se_portal_group *se_tpg,
void *tpg_fabric_ptr,
int se_tpg_type)
{
struct se_lun *lun;
u32 i;
se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
if (!(se_tpg->tpg_lun_list)) {
printk(KERN_ERR "Unable to allocate struct se_portal_group->"
"tpg_lun_list\n");
return -ENOMEM;
}
for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
lun = &se_tpg->tpg_lun_list[i];
lun->unpacked_lun = i;
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
atomic_set(&lun->lun_acl_count, 0);
init_completion(&lun->lun_shutdown_comp);
INIT_LIST_HEAD(&lun->lun_acl_list);
INIT_LIST_HEAD(&lun->lun_cmd_list);
spin_lock_init(&lun->lun_acl_lock);
spin_lock_init(&lun->lun_cmd_lock);
spin_lock_init(&lun->lun_sep_lock);
}
se_tpg->se_tpg_type = se_tpg_type;
se_tpg->se_tpg_fabric_ptr = tpg_fabric_ptr;
se_tpg->se_tpg_tfo = tfo;
se_tpg->se_tpg_wwn = se_wwn;
atomic_set(&se_tpg->tpg_pr_ref_count, 0);
INIT_LIST_HEAD(&se_tpg->acl_node_list);
INIT_LIST_HEAD(&se_tpg->se_tpg_list);
INIT_LIST_HEAD(&se_tpg->tpg_sess_list);
spin_lock_init(&se_tpg->acl_node_lock);
spin_lock_init(&se_tpg->session_lock);
spin_lock_init(&se_tpg->tpg_lun_lock);
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) {
if (core_tpg_setup_virtual_lun0(se_tpg) < 0) {
kfree(se_tpg);
return -ENOMEM;
}
}
spin_lock_bh(&se_global->se_tpg_lock);
list_add_tail(&se_tpg->se_tpg_list, &se_global->g_se_tpg_list);
spin_unlock_bh(&se_global->se_tpg_lock);
printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
"None" : tfo->tpg_get_wwn(se_tpg), tfo->tpg_get_tag(se_tpg));
return 0;
}
EXPORT_SYMBOL(core_tpg_register);
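/*
 * Minimal usage sketch (fabric names are hypothetical): a fabric module's
 * make_tpg() callback typically embeds a struct se_portal_group and
 * registers it as a normal TPG:
 *
 *	ret = core_tpg_register(&my_fabric_ops, wwn, &tpg->se_tpg,
 *				(void *)tpg, TRANSPORT_TPG_TYPE_NORMAL);
 *	if (ret < 0)
 *		return ERR_PTR(ret);
 */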
int core_tpg_deregister(struct se_portal_group *se_tpg)
{
printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
" for endpoint: %s Portal Tag %u\n",
(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
"Normal" : "Discovery", TPG_TFO(se_tpg)->get_fabric_name(),
TPG_TFO(se_tpg)->tpg_get_wwn(se_tpg),
TPG_TFO(se_tpg)->tpg_get_tag(se_tpg));
spin_lock_bh(&se_global->se_tpg_lock);
list_del(&se_tpg->se_tpg_list);
spin_unlock_bh(&se_global->se_tpg_lock);
while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0)
cpu_relax();
if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
core_tpg_release_virtual_lun0(se_tpg);
se_tpg->se_tpg_fabric_ptr = NULL;
kfree(se_tpg->tpg_lun_list);
return 0;
}
EXPORT_SYMBOL(core_tpg_deregister);
struct se_lun *core_tpg_pre_addlun(
struct se_portal_group *tpg,
u32 unpacked_lun)
{
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
TPG_TFO(tpg)->get_fabric_name(),
unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
TPG_TFO(tpg)->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
" on %s Target Portal Group: %u, ignoring request.\n",
unpacked_lun, TPG_TFO(tpg)->get_fabric_name(),
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-EINVAL);
}
spin_unlock(&tpg->tpg_lun_lock);
return lun;
}
int core_tpg_post_addlun(
struct se_portal_group *tpg,
struct se_lun *lun,
u32 lun_access,
void *lun_ptr)
{
if (core_dev_export(lun_ptr, tpg, lun) < 0)
return -1;
spin_lock(&tpg->tpg_lun_lock);
lun->lun_access = lun_access;
lun->lun_status = TRANSPORT_LUN_STATUS_ACTIVE;
spin_unlock(&tpg->tpg_lun_lock);
return 0;
}
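/*
 * Illustrative two-phase usage only (as wrapped by core_dev_add_lun() in
 * target_core_device.c in this patch; treat the exact call site as an
 * assumption):
 *
 *	lun = core_tpg_pre_addlun(tpg, unpacked_lun);
 *	if (IS_ERR(lun))
 *		return lun;
 *	if (core_tpg_post_addlun(tpg, lun, lun_access, dev) < 0)
 *		return ERR_PTR(-EINVAL);
 */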
static void core_tpg_shutdown_lun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
core_clear_lun_from_tpg(lun, tpg);
transport_clear_lun_from_sessions(lun);
}
struct se_lun *core_tpg_pre_dellun(
struct se_portal_group *tpg,
u32 unpacked_lun,
int *ret)
{
struct se_lun *lun;
if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
"-1: %u for Target Portal Group: %u\n",
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
TRANSPORT_MAX_LUNS_PER_TPG-1,
TPG_TFO(tpg)->tpg_get_tag(tpg));
return ERR_PTR(-EOVERFLOW);
}
spin_lock(&tpg->tpg_lun_lock);
lun = &tpg->tpg_lun_list[unpacked_lun];
if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
" Target Portal Group: %u, ignoring request.\n",
TPG_TFO(tpg)->get_fabric_name(), unpacked_lun,
TPG_TFO(tpg)->tpg_get_tag(tpg));
spin_unlock(&tpg->tpg_lun_lock);
return ERR_PTR(-ENODEV);
}
spin_unlock(&tpg->tpg_lun_lock);
return lun;
}
int core_tpg_post_dellun(
struct se_portal_group *tpg,
struct se_lun *lun)
{
core_tpg_shutdown_lun(tpg, lun);
core_dev_unexport(lun->lun_se_dev, tpg, lun);
spin_lock(&tpg->tpg_lun_lock);
lun->lun_status = TRANSPORT_LUN_STATUS_FREE;
spin_unlock(&tpg->tpg_lun_lock);
return 0;
}
/*******************************************************************************
* Filename: target_core_ua.c
*
* This file contains logic for SPC-3 Unit Attention emulation
*
* Copyright (c) 2009,2010 Rising Tide Systems
* Copyright (c) 2009,2010 Linux-iSCSI.org
*
* Nicholas A. Bellinger <nab@kernel.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
******************************************************************************/
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <target/target_core_base.h>
#include <target/target_core_device.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_configfs.h>
#include "target_core_alua.h"
#include "target_core_hba.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
int core_scsi3_ua_check(
struct se_cmd *cmd,
unsigned char *cdb)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
if (!(sess))
return 0;
nacl = sess->se_node_acl;
if (!(nacl))
return 0;
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count)))
return 0;
/*
* From sam4r14, section 5.14 Unit attention condition:
*
* a) if an INQUIRY command enters the enabled command state, the
* device server shall process the INQUIRY command and shall neither
* report nor clear any unit attention condition;
* b) if a REPORT LUNS command enters the enabled command state, the
* device server shall process the REPORT LUNS command and shall not
* report any unit attention condition;
* e) if a REQUEST SENSE command enters the enabled command state while
* a unit attention condition exists for the SCSI initiator port
* associated with the I_T nexus on which the REQUEST SENSE command
* was received, then the device server shall process the command
* and either:
*/
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
case REQUEST_SENSE:
return 0;
default:
return -1;
}
return -1;
}
int core_scsi3_ua_allocate(
struct se_node_acl *nacl,
u32 unpacked_lun,
u8 asc,
u8 ascq)
{
struct se_dev_entry *deve;
struct se_ua *ua, *ua_p, *ua_tmp;
/*
* PASSTHROUGH OPS
*/
if (!(nacl))
return -1;
ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
if (!(ua)) {
printk(KERN_ERR "Unable to allocate struct se_ua\n");
return -1;
}
INIT_LIST_HEAD(&ua->ua_dev_list);
INIT_LIST_HEAD(&ua->ua_nacl_list);
ua->ua_nacl = nacl;
ua->ua_asc = asc;
ua->ua_ascq = ascq;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[unpacked_lun];
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
/*
* Do not report the same UNIT ATTENTION twice..
*/
if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
kmem_cache_free(se_ua_cache, ua);
return 0;
}
/*
* Attach the highest priority Unit Attention to
* the head of the list following sam4r14,
* Section 5.14 Unit Attention Condition:
*
* POWER ON, RESET, OR BUS DEVICE RESET OCCURRED highest
* POWER ON OCCURRED or
* DEVICE INTERNAL RESET
* SCSI BUS RESET OCCURRED or
* MICROCODE HAS BEEN CHANGED or
* protocol specific
* BUS DEVICE RESET FUNCTION OCCURRED
* I_T NEXUS LOSS OCCURRED
* COMMANDS CLEARED BY POWER LOSS NOTIFICATION
* all others Lowest
*
* Each of the ASCQ codes listed above are defined in
* the 29h ASC family, see spc4r17 Table D.1
*/
if (ua_p->ua_asc == 0x29) {
if ((asc == 0x29) && (ascq > ua_p->ua_ascq))
list_add(&ua->ua_nacl_list,
&deve->ua_list);
else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
} else if (ua_p->ua_asc == 0x2a) {
/*
* Incoming Family 29h ASCQ codes will override
* Family 2Ah ASCQ codes for the Unit Attention condition.
*/
if ((asc == 0x29) || (ascq > ua_p->ua_asc))
list_add(&ua->ua_nacl_list,
&deve->ua_list);
else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
} else
list_add_tail(&ua->ua_nacl_list,
&deve->ua_list);
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
atomic_inc(&deve->ua_count);
smp_mb__after_atomic_inc();
return 0;
}
list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
" 0x%02x, ASCQ: 0x%02x\n",
TPG_TFO(nacl->se_tpg)->get_fabric_name(), unpacked_lun,
asc, ascq);
atomic_inc(&deve->ua_count);
smp_mb__after_atomic_inc();
return 0;
}
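/*
 * Typical caller pattern, shown for illustration only (the actual call sites
 * live in the PR and ALUA code of this patch): queue a RESERVATIONS RELEASED
 * Unit Attention on a registered initiator's mapped LUN:
 *
 *	core_scsi3_ua_allocate(pr_reg->pr_reg_nacl, pr_reg->pr_res_mapped_lun,
 *			0x2A, ASCQ_2AH_RESERVATIONS_RELEASED);
 */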
void core_scsi3_ua_release_all(
struct se_dev_entry *deve)
{
struct se_ua *ua, *ua_p;
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
smp_mb__after_atomic_dec();
}
spin_unlock(&deve->ua_lock);
}
void core_scsi3_ua_for_check_condition(
struct se_cmd *cmd,
u8 *asc,
u8 *ascq)
{
struct se_device *dev = SE_DEV(cmd);
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
if (!(sess))
return;
nacl = sess->se_node_acl;
if (!(nacl))
return;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) {
spin_unlock_irq(&nacl->device_list_lock);
return;
}
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list, and will be returned in CHECK_CONDITION +
* sense data for the received CDB.
*/
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
/*
* For ua_intlck_ctrl code not equal to 00b, only report the
* highest priority UNIT_ATTENTION and ASC/ASCQ without
* clearing it.
*/
if (DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
break;
}
/*
* Otherwise for the default 00b, release the UNIT ATTENTION
* condition. Return the ASC/ASCQ of the highest priority UA
* (head of the list) in the outgoing CHECK_CONDITION + sense.
*/
if (head) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
head = 0;
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
smp_mb__after_atomic_dec();
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
TPG_TFO(nacl->se_tpg)->get_fabric_name(),
(DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl != 0) ? "Reporting" :
"Releasing", DEV_ATTRIB(dev)->emulate_ua_intlck_ctrl,
cmd->orig_fe_lun, T_TASK(cmd)->t_task_cdb[0], *asc, *ascq);
}
int core_scsi3_ua_clear_for_request_sense(
struct se_cmd *cmd,
u8 *asc,
u8 *ascq)
{
struct se_dev_entry *deve;
struct se_session *sess = cmd->se_sess;
struct se_node_acl *nacl;
struct se_ua *ua = NULL, *ua_p;
int head = 1;
if (!(sess))
return -1;
nacl = sess->se_node_acl;
if (!(nacl))
return -1;
spin_lock_irq(&nacl->device_list_lock);
deve = &nacl->device_list[cmd->orig_fe_lun];
if (!(atomic_read(&deve->ua_count))) {
spin_unlock_irq(&nacl->device_list_lock);
return -1;
}
/*
* The highest priority Unit Attentions are placed at the head of the
* struct se_dev_entry->ua_list. The First (and hence highest priority)
* ASC/ASCQ will be returned in REQUEST_SENSE payload data for the
* matching struct se_lun.
*
* Once the returning ASC/ASCQ values are set, we go ahead and
* release all of the Unit Attention conditions for the associated
* struct se_lun.
*/
spin_lock(&deve->ua_lock);
list_for_each_entry_safe(ua, ua_p, &deve->ua_list, ua_nacl_list) {
if (head) {
*asc = ua->ua_asc;
*ascq = ua->ua_ascq;
head = 0;
}
list_del(&ua->ua_nacl_list);
kmem_cache_free(se_ua_cache, ua);
atomic_dec(&deve->ua_count);
smp_mb__after_atomic_dec();
}
spin_unlock(&deve->ua_lock);
spin_unlock_irq(&nacl->device_list_lock);
printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
" ASCQ: 0x%02x\n", TPG_TFO(nacl->se_tpg)->get_fabric_name(),
cmd->orig_fe_lun, *asc, *ascq);
return (head) ? -1 : 0;
}
#ifndef TARGET_CORE_UA_H
/*
* From spc4r17, Table D.1: ASC and ASCQ Assignment
*/
#define ASCQ_29H_POWER_ON_RESET_OR_BUS_DEVICE_RESET_OCCURED 0x00
#define ASCQ_29H_POWER_ON_OCCURRED 0x01
#define ASCQ_29H_SCSI_BUS_RESET_OCCURED 0x02
#define ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED 0x03
#define ASCQ_29H_DEVICE_INTERNAL_RESET 0x04
#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_SINGLE_ENDED 0x05
#define ASCQ_29H_TRANSCEIVER_MODE_CHANGED_TO_LVD 0x06
#define ASCQ_29H_NEXUS_LOSS_OCCURRED 0x07
#define ASCQ_2AH_PARAMETERS_CHANGED 0x00
#define ASCQ_2AH_MODE_PARAMETERS_CHANGED 0x01
#define ASCQ_2AH_LOG_PARAMETERS_CHANGED 0x02
#define ASCQ_2AH_RESERVATIONS_PREEMPTED 0x03
#define ASCQ_2AH_RESERVATIONS_RELEASED 0x04
#define ASCQ_2AH_REGISTRATIONS_PREEMPTED 0x05
#define ASCQ_2AH_ASYMMETRIC_ACCESS_STATE_CHANGED 0x06
#define ASCQ_2AH_IMPLICT_ASYMMETRIC_ACCESS_STATE_TRANSITION_FAILED 0x07
#define ASCQ_2AH_PRIORITY_CHANGED 0x08
#define ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS 0x09
extern struct kmem_cache *se_ua_cache;
extern int core_scsi3_ua_check(struct se_cmd *, unsigned char *);
extern int core_scsi3_ua_allocate(struct se_node_acl *, u32, u8, u8);
extern void core_scsi3_ua_release_all(struct se_dev_entry *);
extern void core_scsi3_ua_for_check_condition(struct se_cmd *, u8 *, u8 *);
extern int core_scsi3_ua_clear_for_request_sense(struct se_cmd *,
u8 *, u8 *);
#endif /* TARGET_CORE_UA_H */
/* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* configfs_macros.h - extends macros for configfs
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* Based on kobject.h:
* Copyright (c) 2002-2003 Patrick Mochel
* Copyright (c) 2002-2003 Open Source Development Labs
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
* Added CONFIGFS_EATTR() macros from original configfs.h macros
* Copyright (C) 2008-2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* Please read Documentation/filesystems/configfs.txt before using the
* configfs interface, ESPECIALLY the parts about reference counts and
* item destructors.
*/
#ifndef _CONFIGFS_MACROS_H_
#define _CONFIGFS_MACROS_H_
#include <linux/configfs.h>
/*
* Users often need to create attribute structures for their configurable
* attributes, containing a configfs_attribute member and function pointers
* for the show() and store() operations on that attribute. If they don't
* need anything else on the extended attribute structure, they can use
* this macro to define it. The argument _name ends up as
* 'struct _name_attribute', and is also used in the names generated by
* CONFIGFS_EATTR_OPS() below.
* The argument _item is the name of the structure containing the
* struct config_item or struct config_group structure members
*/
#define CONFIGFS_EATTR_STRUCT(_name, _item) \
struct _name##_attribute { \
struct configfs_attribute attr; \
ssize_t (*show)(struct _item *, char *); \
ssize_t (*store)(struct _item *, const char *, size_t); \
}
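/*
 * As an illustration only (hypothetical names), a caller whose items are
 * wrapped in "struct my_item" could write:
 *
 *	CONFIGFS_EATTR_STRUCT(my_item, my_item);
 *
 * which expands to:
 *
 *	struct my_item_attribute {
 *		struct configfs_attribute attr;
 *		ssize_t (*show)(struct my_item *, char *);
 *		ssize_t (*store)(struct my_item *, const char *, size_t);
 *	};
 */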
/*
* With the extended attribute structure, users can use this macro
* (similar to sysfs' __ATTR) to make defining attributes easier.
* An example:
* #define MYITEM_EATTR(_name, _mode, _show, _store) \
* struct myitem_attribute childless_attr_##_name = \
* __CONFIGFS_EATTR(_name, _mode, _show, _store)
*/
#define __CONFIGFS_EATTR(_name, _mode, _show, _store) \
{ \
.attr = { \
.ca_name = __stringify(_name), \
.ca_mode = _mode, \
.ca_owner = THIS_MODULE, \
}, \
.show = _show, \
.store = _store, \
}
/* Here is a readonly version, only requiring a show() operation */
#define __CONFIGFS_EATTR_RO(_name, _show) \
{ \
.attr = { \
.ca_name = __stringify(_name), \
.ca_mode = 0444, \
.ca_owner = THIS_MODULE, \
}, \
.show = _show, \
}
/*
* With these extended attributes, the simple show_attribute() and
* store_attribute() operations need to call the show() and store() of the
* attributes. This is a common pattern, so we provide a macro to define
* them. The argument _name is the name of the attribute defined by
* CONFIGFS_EATTR_STRUCT(). The argument _item is the name of the structure
* containing the struct config_item or struct config_group structure member.
* The argument _item_member is the actual name of the struct config_* struct
* in your _item structure. Meaning my_structure->some_config_group.
* ^^_item^^^^^ ^^_item_member^^^
* This macro expects the attributes to be named "struct <name>_attribute".
*/
#define CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member) \
static struct _item *to_##_name(struct config_item *ci) \
{ \
return (ci) ? container_of(to_config_group(ci), struct _item, \
_item_member) : NULL; \
}
#define CONFIGFS_EATTR_OPS_SHOW(_name, _item) \
static ssize_t _name##_attr_show(struct config_item *item, \
struct configfs_attribute *attr, \
char *page) \
{ \
struct _item *_item = to_##_name(item); \
struct _name##_attribute * _name##_attr = \
container_of(attr, struct _name##_attribute, attr); \
ssize_t ret = 0; \
\
if (_name##_attr->show) \
ret = _name##_attr->show(_item, page); \
return ret; \
}
#define CONFIGFS_EATTR_OPS_STORE(_name, _item) \
static ssize_t _name##_attr_store(struct config_item *item, \
struct configfs_attribute *attr, \
const char *page, size_t count) \
{ \
struct _item *_item = to_##_name(item); \
struct _name##_attribute * _name##_attr = \
container_of(attr, struct _name##_attribute, attr); \
ssize_t ret = -EINVAL; \
\
if (_name##_attr->store) \
ret = _name##_attr->store(_item, page, count); \
return ret; \
}
#define CONFIGFS_EATTR_OPS(_name, _item, _item_member) \
CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member); \
CONFIGFS_EATTR_OPS_SHOW(_name, _item); \
CONFIGFS_EATTR_OPS_STORE(_name, _item);
#define CONFIGFS_EATTR_OPS_RO(_name, _item, _item_member) \
CONFIGFS_EATTR_OPS_TO_FUNC(_name, _item, _item_member); \
CONFIGFS_EATTR_OPS_SHOW(_name, _item);
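/*
 * Illustrative wiring only (hypothetical names): after
 *
 *	CONFIGFS_EATTR_STRUCT(my_item, my_item);
 *	CONFIGFS_EATTR_OPS(my_item, my_item, my_item_group);
 *
 * the generated my_item_attr_show() and my_item_attr_store() are intended to
 * be plugged into a struct configfs_item_operations as the .show_attribute
 * and .store_attribute callbacks for config_items that embed a struct
 * config_group named my_item_group inside struct my_item.
 */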
#endif /* _CONFIGFS_MACROS_H_ */
#ifndef TARGET_CORE_BASE_H
#define TARGET_CORE_BASE_H
#include <linux/in.h>
#include <linux/configfs.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <net/sock.h>
#include <net/tcp.h>
#include "target_core_mib.h"
#define TARGET_CORE_MOD_VERSION "v4.0.0-rc6"
#define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT))
/* Used by transport_generic_allocate_iovecs() */
#define TRANSPORT_IOV_DATA_BUFFER 5
/* Maximum Number of LUNs per Target Portal Group */
#define TRANSPORT_MAX_LUNS_PER_TPG 256
/*
* By default we use 32-byte CDBs in TCM Core and subsystem plugin code.
*
* Note that both include/scsi/scsi_cmnd.h:MAX_COMMAND_SIZE and
* include/linux/blkdev.h:BLOCK_MAX_CDB as of v2.6.36-rc4 still use
* 16-byte CDBs by default and require an extra allocation for
* 32-byte CDBs because of legacy issues.
*
* Within TCM Core there are no such legacy limitations, so we go ahead and
* use 32-byte CDBs by default, and use include/scsi/scsi.h:scsi_command_size()
* throughout TCM Core and subsystem plugin code.
*/
#define TCM_MAX_COMMAND_SIZE 32
/*
* From include/scsi/scsi_cmnd.h:SCSI_SENSE_BUFFERSIZE, currently
* defined as 96, but the real limit is 252 (or 260 including the header)
*/
#define TRANSPORT_SENSE_BUFFER SCSI_SENSE_BUFFERSIZE
/* Used by transport_send_check_condition_and_sense() */
#define SPC_SENSE_KEY_OFFSET 2
#define SPC_ASC_KEY_OFFSET 12
#define SPC_ASCQ_KEY_OFFSET 13
#define TRANSPORT_IQN_LEN 224
/* Used by target_core_store_alua_lu_gp() and target_core_alua_lu_gp_show_attr_members() */
#define LU_GROUP_NAME_BUF 256
/* Used by core_alua_store_tg_pt_gp_info() and target_core_alua_tg_pt_gp_show_attr_members() */
#define TG_PT_GROUP_NAME_BUF 256
/* Used to parse VPD into struct t10_vpd */
#define VPD_TMP_BUF_SIZE 128
/* Used by transport_generic_cmd_sequencer() */
#define READ_BLOCK_LEN 6
#define READ_CAP_LEN 8
#define READ_POSITION_LEN 20
#define INQUIRY_LEN 36
/* Used by transport_get_inquiry_vpd_serial() */
#define INQUIRY_VPD_SERIAL_LEN 254
/* Used by transport_get_inquiry_vpd_device_ident() */
#define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN 254
/* struct se_hba->hba_flags */
enum hba_flags_table {
HBA_FLAGS_INTERNAL_USE = 0x01,
HBA_FLAGS_PSCSI_MODE = 0x02,
};
/* struct se_lun->lun_status */
enum transport_lun_status_table {
TRANSPORT_LUN_STATUS_FREE = 0,
TRANSPORT_LUN_STATUS_ACTIVE = 1,
};
/* struct se_portal_group->se_tpg_type */
enum transport_tpg_type_table {
TRANSPORT_TPG_TYPE_NORMAL = 0,
TRANSPORT_TPG_TYPE_DISCOVERY = 1,
};
/* Used for generate timer flags */
enum timer_flags_table {
TF_RUNNING = 0x01,
TF_STOP = 0x02,
};
/* Special transport agnostic struct se_cmd->t_states */
enum transport_state_table {
TRANSPORT_NO_STATE = 0,
TRANSPORT_NEW_CMD = 1,
TRANSPORT_DEFERRED_CMD = 2,
TRANSPORT_WRITE_PENDING = 3,
TRANSPORT_PROCESS_WRITE = 4,
TRANSPORT_PROCESSING = 5,
TRANSPORT_COMPLETE_OK = 6,
TRANSPORT_COMPLETE_FAILURE = 7,
TRANSPORT_COMPLETE_TIMEOUT = 8,
TRANSPORT_PROCESS_TMR = 9,
TRANSPORT_TMR_COMPLETE = 10,
TRANSPORT_ISTATE_PROCESSING = 11,
TRANSPORT_ISTATE_PROCESSED = 12,
TRANSPORT_KILL = 13,
TRANSPORT_REMOVE = 14,
TRANSPORT_FREE = 15,
TRANSPORT_NEW_CMD_MAP = 16,
};
/* Used for struct se_cmd->se_cmd_flags */
enum se_cmd_flags_table {
SCF_SUPPORTED_SAM_OPCODE = 0x00000001,
SCF_TRANSPORT_TASK_SENSE = 0x00000002,
SCF_EMULATED_TASK_SENSE = 0x00000004,
SCF_SCSI_DATA_SG_IO_CDB = 0x00000008,
SCF_SCSI_CONTROL_SG_IO_CDB = 0x00000010,
SCF_SCSI_CONTROL_NONSG_IO_CDB = 0x00000020,
SCF_SCSI_NON_DATA_CDB = 0x00000040,
SCF_SCSI_CDB_EXCEPTION = 0x00000080,
SCF_SCSI_RESERVATION_CONFLICT = 0x00000100,
SCF_CMD_PASSTHROUGH_NOALLOC = 0x00000200,
SCF_SE_CMD_FAILED = 0x00000400,
SCF_SE_LUN_CMD = 0x00000800,
SCF_SE_ALLOW_EOO = 0x00001000,
SCF_SE_DISABLE_ONLINE_CHECK = 0x00002000,
SCF_SENT_CHECK_CONDITION = 0x00004000,
SCF_OVERFLOW_BIT = 0x00008000,
SCF_UNDERFLOW_BIT = 0x00010000,
SCF_SENT_DELAYED_TAS = 0x00020000,
SCF_ALUA_NON_OPTIMIZED = 0x00040000,
SCF_DELAYED_CMD_FROM_SAM_ATTR = 0x00080000,
SCF_PASSTHROUGH_SG_TO_MEM = 0x00100000,
SCF_PASSTHROUGH_CONTIG_TO_SG = 0x00200000,
SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC = 0x00400000,
SCF_EMULATE_SYNC_CACHE = 0x00800000,
SCF_EMULATE_CDB_ASYNC = 0x01000000,
SCF_EMULATE_SYNC_UNMAP = 0x02000000
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
enum transport_lunflags_table {
TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00,
TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01,
TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
};
/* struct se_device->dev_status */
enum transport_device_status_table {
TRANSPORT_DEVICE_ACTIVATED = 0x01,
TRANSPORT_DEVICE_DEACTIVATED = 0x02,
TRANSPORT_DEVICE_QUEUE_FULL = 0x04,
TRANSPORT_DEVICE_SHUTDOWN = 0x08,
TRANSPORT_DEVICE_OFFLINE_ACTIVATED = 0x10,
TRANSPORT_DEVICE_OFFLINE_DEACTIVATED = 0x20,
};
/*
* Used by transport_send_check_condition_and_sense() and se_cmd->scsi_sense_reason
* to signal which ASC/ASCQ sense payload should be built.
*/
enum tcm_sense_reason_table {
TCM_NON_EXISTENT_LUN = 0x01,
TCM_UNSUPPORTED_SCSI_OPCODE = 0x02,
TCM_INCORRECT_AMOUNT_OF_DATA = 0x03,
TCM_UNEXPECTED_UNSOLICITED_DATA = 0x04,
TCM_SERVICE_CRC_ERROR = 0x05,
TCM_SNACK_REJECTED = 0x06,
TCM_SECTOR_COUNT_TOO_MANY = 0x07,
TCM_INVALID_CDB_FIELD = 0x08,
TCM_INVALID_PARAMETER_LIST = 0x09,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE = 0x0a,
TCM_UNKNOWN_MODE_PAGE = 0x0b,
TCM_WRITE_PROTECTED = 0x0c,
TCM_CHECK_CONDITION_ABORT_CMD = 0x0d,
TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
TCM_CHECK_CONDITION_NOT_READY = 0x0f,
};
struct se_obj {
atomic_t obj_access_count;
} ____cacheline_aligned;
/*
* Used by TCM Core internally to signal if ALUA emulation is enabled or
* disabled, or running in TCM/pSCSI passthrough mode
*/
typedef enum {
SPC_ALUA_PASSTHROUGH,
SPC2_ALUA_DISABLED,
SPC3_ALUA_EMULATED
} t10_alua_index_t;
/*
* Used by TCM Core internally to signal if SAM Task Attribute emulation
* is enabled or disabled, or running in TCM/pSCSI passthrough mode
*/
typedef enum {
SAM_TASK_ATTR_PASSTHROUGH,
SAM_TASK_ATTR_UNTAGGED,
SAM_TASK_ATTR_EMULATED
} t10_task_attr_index_t;
struct se_cmd;
struct t10_alua {
t10_alua_index_t alua_type;
/* ALUA Target Port Group ID */
u16 alua_tg_pt_gps_counter;
u32 alua_tg_pt_gps_count;
spinlock_t tg_pt_gps_lock;
struct se_subsystem_dev *t10_sub_dev;
/* Used for default ALUA Target Port Group */
struct t10_alua_tg_pt_gp *default_tg_pt_gp;
/* Used for default ALUA Target Port Group ConfigFS group */
struct config_group alua_tg_pt_gps_group;
int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
struct list_head tg_pt_gps_list;
} ____cacheline_aligned;
struct t10_alua_lu_gp {
u16 lu_gp_id;
int lu_gp_valid_id;
u32 lu_gp_members;
atomic_t lu_gp_shutdown;
atomic_t lu_gp_ref_cnt;
spinlock_t lu_gp_lock;
struct config_group lu_gp_group;
struct list_head lu_gp_list;
struct list_head lu_gp_mem_list;
} ____cacheline_aligned;
struct t10_alua_lu_gp_member {
int lu_gp_assoc:1;
atomic_t lu_gp_mem_ref_cnt;
spinlock_t lu_gp_mem_lock;
struct t10_alua_lu_gp *lu_gp;
struct se_device *lu_gp_mem_dev;
struct list_head lu_gp_mem_list;
} ____cacheline_aligned;
struct t10_alua_tg_pt_gp {
u16 tg_pt_gp_id;
int tg_pt_gp_valid_id;
int tg_pt_gp_alua_access_status;
int tg_pt_gp_alua_access_type;
int tg_pt_gp_nonop_delay_msecs;
int tg_pt_gp_trans_delay_msecs;
int tg_pt_gp_pref;
int tg_pt_gp_write_metadata;
/* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
#define ALUA_MD_BUF_LEN 1024
u32 tg_pt_gp_md_buf_len;
u32 tg_pt_gp_members;
atomic_t tg_pt_gp_alua_access_state;
atomic_t tg_pt_gp_ref_cnt;
spinlock_t tg_pt_gp_lock;
struct mutex tg_pt_gp_md_mutex;
struct se_subsystem_dev *tg_pt_gp_su_dev;
struct config_group tg_pt_gp_group;
struct list_head tg_pt_gp_list;
struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;
struct t10_alua_tg_pt_gp_member {
int tg_pt_gp_assoc:1;
atomic_t tg_pt_gp_mem_ref_cnt;
spinlock_t tg_pt_gp_mem_lock;
struct t10_alua_tg_pt_gp *tg_pt_gp;
struct se_port *tg_pt;
struct list_head tg_pt_gp_mem_list;
} ____cacheline_aligned;
struct t10_vpd {
unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
int protocol_identifier_set;
u32 protocol_identifier;
u32 device_identifier_code_set;
u32 association;
u32 device_identifier_type;
struct list_head vpd_list;
} ____cacheline_aligned;
struct t10_wwn {
unsigned char vendor[8];
unsigned char model[16];
unsigned char revision[4];
unsigned char unit_serial[INQUIRY_VPD_SERIAL_LEN];
spinlock_t t10_vpd_lock;
struct se_subsystem_dev *t10_sub_dev;
struct config_group t10_wwn_group;
struct list_head t10_vpd_list;
} ____cacheline_aligned;
/*
* Used by TCM Core internally to signal if >= SPC-3 persistent reservations
* emulation is enabled or disabled, or running in TCM/pSCSI passthrough
* mode
*/
typedef enum {
SPC_PASSTHROUGH,
SPC2_RESERVATIONS,
SPC3_PERSISTENT_RESERVATIONS
} t10_reservations_index_t;
struct t10_pr_registration {
/* Used for fabrics that contain WWN+ISID */
#define PR_REG_ISID_LEN 16
/* PR_REG_ISID_LEN + ',i,0x' */
#define PR_REG_ISID_ID_LEN (PR_REG_ISID_LEN + 5)
char pr_reg_isid[PR_REG_ISID_LEN];
/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_IPORT_LEN 256
unsigned char pr_iport[PR_APTPL_MAX_IPORT_LEN];
/* Used during APTPL metadata reading */
#define PR_APTPL_MAX_TPORT_LEN 256
unsigned char pr_tport[PR_APTPL_MAX_TPORT_LEN];
/* For writing out live meta data */
unsigned char *pr_aptpl_buf;
u16 pr_aptpl_rpti;
u16 pr_reg_tpgt;
/* Reservation affects all target ports */
int pr_reg_all_tg_pt;
/* Activate Persistence across Target Power Loss */
int pr_reg_aptpl;
int pr_res_holder;
int pr_res_type;
int pr_res_scope;
/* Used for fabric initiator WWPNs using an ISID */
int isid_present_at_reg:1;
u32 pr_res_mapped_lun;
u32 pr_aptpl_target_lun;
u32 pr_res_generation;
u64 pr_reg_bin_isid;
u64 pr_res_key;
atomic_t pr_res_holders;
struct se_node_acl *pr_reg_nacl;
struct se_dev_entry *pr_reg_deve;
struct se_lun *pr_reg_tg_pt_lun;
struct list_head pr_reg_list;
struct list_head pr_reg_abort_list;
struct list_head pr_reg_aptpl_list;
struct list_head pr_reg_atp_list;
struct list_head pr_reg_atp_mem_list;
} ____cacheline_aligned;
/*
* This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
* SPC2_RESERVATIONS or SPC_PASSTHROUGH in drivers/target/target_core_pr.c:
* core_setup_reservations()
*/
struct t10_reservation_ops {
int (*t10_reservation_check)(struct se_cmd *, u32 *);
int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
int (*t10_pr_register)(struct se_cmd *);
int (*t10_pr_clear)(struct se_cmd *);
};
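/*
 * Rough sketch only (handler names are placeholders, not the exact symbols in
 * target_core_pr.c): core_setup_reservations() picks one of the three modes
 * and fills in these ops on the device's struct t10_reservation_template, e.g.:
 *
 *	rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
 *	rest->pr_ops.t10_reservation_check = &spc3_reservation_check_handler;
 *	rest->pr_ops.t10_seq_non_holder = &spc3_seq_non_holder_handler;
 *
 * with SPC2_RESERVATIONS and SPC_PASSTHROUGH wiring up the SPC-2 emulation or
 * passthrough variants instead.
 */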
struct t10_reservation_template {
/* Reservation affects all target ports */
int pr_all_tg_pt;
/* Activate Persistence across Target Power Loss enabled
* for SCSI device */
int pr_aptpl_active;
/* Used by struct t10_reservation_template->pr_aptpl_buf_len */
#define PR_APTPL_BUF_LEN 8192
u32 pr_aptpl_buf_len;
u32 pr_generation;
t10_reservations_index_t res_type;
spinlock_t registration_lock;
spinlock_t aptpl_reg_lock;
/*
* This will always be set by one individual I_T Nexus.
* However with all_tg_pt=1, other I_T Nexus from the
* same initiator can access PR reg/res info on a different
* target port.
*
* There is also the 'All Registrants' case, where there is
* a single *pr_res_holder of the reservation, but all
* registrations are considered reservation holders.
*/
struct se_node_acl *pr_res_holder;
struct list_head registration_list;
struct list_head aptpl_reg_list;
struct t10_reservation_ops pr_ops;
} ____cacheline_aligned;
struct se_queue_req {
int state;
void *cmd;
struct list_head qr_list;
} ____cacheline_aligned;
struct se_queue_obj {
atomic_t queue_cnt;
spinlock_t cmd_queue_lock;
struct list_head qobj_list;
wait_queue_head_t thread_wq;
} ____cacheline_aligned;
/*
* Used one per struct se_cmd to hold all extra struct se_task
* metadata. This structure is setup and allocated in
* drivers/target/target_core_transport.c:__transport_alloc_se_cmd()
*/
struct se_transport_task {
unsigned char *t_task_cdb;
unsigned char __t_task_cdb[TCM_MAX_COMMAND_SIZE];
unsigned long long t_task_lba;
int t_tasks_failed;
int t_tasks_fua;
int t_tasks_bidi:1;
u32 t_task_cdbs;
u32 t_tasks_check;
u32 t_tasks_no;
u32 t_tasks_sectors;
u32 t_tasks_se_num;
u32 t_tasks_se_bidi_num;
u32 t_tasks_sg_chained_no;
atomic_t t_fe_count;
atomic_t t_se_count;
atomic_t t_task_cdbs_left;
atomic_t t_task_cdbs_ex_left;
atomic_t t_task_cdbs_timeout_left;
atomic_t t_task_cdbs_sent;
atomic_t t_transport_aborted;
atomic_t t_transport_active;
atomic_t t_transport_complete;
atomic_t t_transport_queue_active;
atomic_t t_transport_sent;
atomic_t t_transport_stop;
atomic_t t_transport_timeout;
atomic_t transport_dev_active;
atomic_t transport_lun_active;
atomic_t transport_lun_fe_stop;
atomic_t transport_lun_stop;
spinlock_t t_state_lock;
struct completion t_transport_stop_comp;
struct completion transport_lun_fe_stop_comp;
struct completion transport_lun_stop_comp;
struct scatterlist *t_tasks_sg_chained;
struct scatterlist t_tasks_sg_bounce;
void *t_task_buf;
/*
* Used for pre-registered fabric SGL passthrough WRITE and READ
* with the special SCF_PASSTHROUGH_CONTIG_TO_SG case for TCM_Loop
* and other HW target mode fabric modules.
*/
struct scatterlist *t_task_pt_sgl;
struct list_head *t_mem_list;
/* Used for BIDI READ */
struct list_head *t_mem_bidi_list;
struct list_head t_task_list;
} ____cacheline_aligned;
struct se_task {
unsigned char task_sense;
struct scatterlist *task_sg;
struct scatterlist *task_sg_bidi;
u8 task_scsi_status;
u8 task_flags;
int task_error_status;
int task_state_flags;
int task_padded_sg:1;
unsigned long long task_lba;
u32 task_no;
u32 task_sectors;
u32 task_size;
u32 task_sg_num;
u32 task_sg_offset;
enum dma_data_direction task_data_direction;
struct se_cmd *task_se_cmd;
struct se_device *se_dev;
struct completion task_stop_comp;
atomic_t task_active;
atomic_t task_execute_queue;
atomic_t task_timeout;
atomic_t task_sent;
atomic_t task_stop;
atomic_t task_state_active;
struct timer_list task_timer;
struct se_device *se_obj_ptr;
struct list_head t_list;
struct list_head t_execute_list;
struct list_head t_state_list;
} ____cacheline_aligned;
#define TASK_CMD(task) ((struct se_cmd *)task->task_se_cmd)
#define TASK_DEV(task) ((struct se_device *)task->se_dev)
struct se_cmd {
/* SAM response code being sent to initiator */
u8 scsi_status;
u8 scsi_asc;
u8 scsi_ascq;
u8 scsi_sense_reason;
u16 scsi_sense_length;
/* Delay for ALUA Active/NonOptimized state access in milliseconds */
int alua_nonop_delay;
/* See include/linux/dma-mapping.h */
enum dma_data_direction data_direction;
/* For SAM Task Attribute */
int sam_task_attr;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
/* Transport protocol dependent state for out of order CmdSNs */
int deferred_t_state;
/* Transport specific error status */
int transport_error_status;
/* See se_cmd_flags_table */
u32 se_cmd_flags;
u32 se_ordered_id;
/* Total size in bytes associated with command */
u32 data_length;
/* SCSI Presented Data Transfer Length */
u32 cmd_spdtl;
u32 residual_count;
u32 orig_fe_lun;
/* Persistent Reservation key */
u64 pr_res_key;
atomic_t transport_sent;
/* Used for sense data */
void *sense_buffer;
struct list_head se_delayed_list;
struct list_head se_ordered_list;
struct list_head se_lun_list;
struct se_device *se_dev;
struct se_dev_entry *se_deve;
struct se_device *se_obj_ptr;
struct se_device *se_orig_obj_ptr;
struct se_lun *se_lun;
/* Only used for internal passthrough and legacy TCM fabric modules */
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
/* t_task is setup to t_task_backstore in transport_init_se_cmd() */
struct se_transport_task *t_task;
struct se_transport_task t_task_backstore;
struct target_core_fabric_ops *se_tfo;
int (*transport_emulate_cdb)(struct se_cmd *);
void (*transport_split_cdb)(unsigned long long, u32 *, unsigned char *);
void (*transport_wait_for_tasks)(struct se_cmd *, int, int);
void (*transport_complete_callback)(struct se_cmd *);
} ____cacheline_aligned;
#define T_TASK(cmd) ((struct se_transport_task *)(cmd->t_task))
#define CMD_TFO(cmd) ((struct target_core_fabric_ops *)cmd->se_tfo)
struct se_tmr_req {
/* Task Management function to be performed */
u8 function;
/* Task Management response to send */
u8 response;
int call_transport;
/* Reference to the ITT that the Task Mgmt function is to be performed upon */
u32 ref_task_tag;
/* 64-bit encoded SAM LUN from $FABRIC_MOD TMR header */
u64 ref_task_lun;
void *fabric_tmr_ptr;
struct se_cmd *task_cmd;
struct se_cmd *ref_cmd;
struct se_device *tmr_dev;
struct se_lun *tmr_lun;
struct list_head tmr_list;
} ____cacheline_aligned;
struct se_ua {
u8 ua_asc;
u8 ua_ascq;
struct se_node_acl *ua_nacl;
struct list_head ua_dev_list;
struct list_head ua_nacl_list;
} ____cacheline_aligned;
struct se_node_acl {
char initiatorname[TRANSPORT_IQN_LEN];
/* Used to signal demo mode created ACL, disabled by default */
int dynamic_node_acl:1;
u32 queue_depth;
u32 acl_index;
u64 num_cmds;
u64 read_bytes;
u64 write_bytes;
spinlock_t stats_lock;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t acl_pr_ref_count;
/* Used for MIB access */
atomic_t mib_ref_count;
struct se_dev_entry *device_list;
struct se_session *nacl_sess;
struct se_portal_group *se_tpg;
spinlock_t device_list_lock;
spinlock_t nacl_sess_lock;
struct config_group acl_group;
struct config_group acl_attrib_group;
struct config_group acl_auth_group;
struct config_group acl_param_group;
struct config_group *acl_default_groups[4];
struct list_head acl_list;
struct list_head acl_sess_list;
} ____cacheline_aligned;
struct se_session {
/* Used for MIB access */
atomic_t mib_ref_count;
u64 sess_bin_isid;
struct se_node_acl *se_node_acl;
struct se_portal_group *se_tpg;
void *fabric_sess_ptr;
struct list_head sess_list;
struct list_head sess_acl_list;
} ____cacheline_aligned;
#define SE_SESS(cmd) ((struct se_session *)(cmd)->se_sess)
#define SE_NODE_ACL(sess) ((struct se_node_acl *)(sess)->se_node_acl)
struct se_device;
struct se_transform_info;
struct scatterlist;
struct se_lun_acl {
char initiatorname[TRANSPORT_IQN_LEN];
u32 mapped_lun;
struct se_node_acl *se_lun_nacl;
struct se_lun *se_lun;
struct list_head lacl_list;
struct config_group se_lun_group;
} ____cacheline_aligned;
struct se_dev_entry {
int def_pr_registered:1;
/* See transport_lunflags_table */
u32 lun_flags;
u32 deve_cmds;
u32 mapped_lun;
u32 average_bytes;
u32 last_byte_count;
u32 total_cmds;
u32 total_bytes;
u64 pr_res_key;
u64 creation_time;
u32 attach_count;
u64 read_bytes;
u64 write_bytes;
atomic_t ua_count;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t pr_ref_count;
struct se_lun_acl *se_lun_acl;
spinlock_t ua_lock;
struct se_lun *se_lun;
struct list_head alua_port_list;
struct list_head ua_list;
} ____cacheline_aligned;
struct se_dev_limits {
/* Max supported HW queue depth */
u32 hw_queue_depth;
/* Max supported virtual queue depth */
u32 queue_depth;
/* From include/linux/blkdev.h for the other HW/SW limits. */
struct queue_limits limits;
} ____cacheline_aligned;
struct se_dev_attrib {
int emulate_dpo;
int emulate_fua_write;
int emulate_fua_read;
int emulate_write_cache;
int emulate_ua_intlck_ctrl;
int emulate_tas;
int emulate_tpu;
int emulate_tpws;
int emulate_reservations;
int emulate_alua;
int enforce_pr_isids;
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
u32 max_sectors;
u32 optimal_sectors;
u32 hw_queue_depth;
u32 queue_depth;
u32 task_timeout;
u32 max_unmap_lba_count;
u32 max_unmap_block_desc_count;
u32 unmap_granularity;
u32 unmap_granularity_alignment;
struct se_subsystem_dev *da_sub_dev;
struct config_group da_group;
} ____cacheline_aligned;
struct se_subsystem_dev {
/* Used for struct se_subsystem_dev->se_dev_alias, must be less than PAGE_SIZE */
#define SE_DEV_ALIAS_LEN 512
unsigned char se_dev_alias[SE_DEV_ALIAS_LEN];
/* Used for struct se_subsystem_dev->se_dev_udev_path[], must be less than PAGE_SIZE */
#define SE_UDEV_PATH_LEN 512
unsigned char se_dev_udev_path[SE_UDEV_PATH_LEN];
u32 su_dev_flags;
struct se_hba *se_dev_hba;
struct se_device *se_dev_ptr;
struct se_dev_attrib se_dev_attrib;
/* T10 Asymmetric Logical Unit Assignment for Target Ports */
struct t10_alua t10_alua;
/* T10 Inquiry and VPD WWN Information */
struct t10_wwn t10_wwn;
/* T10 SPC-2 + SPC-3 Reservations */
struct t10_reservation_template t10_reservation;
spinlock_t se_dev_lock;
void *se_dev_su_ptr;
struct list_head g_se_dev_list;
struct config_group se_dev_group;
/* For T10 Reservations */
struct config_group se_dev_pr_group;
} ____cacheline_aligned;
#define T10_ALUA(su_dev) (&(su_dev)->t10_alua)
#define T10_RES(su_dev) (&(su_dev)->t10_reservation)
#define T10_PR_OPS(su_dev) (&(su_dev)->t10_reservation.pr_ops)
struct se_device {
/* Set to 1 if thread is NOT sleeping on thread_sem */
u8 thread_active;
u8 dev_status_timer_flags;
/* RELATIVE TARGET PORT IDENTIFIER Counter */
u16 dev_rpti_counter;
/* Used for SAM Task Attribute ordering */
u32 dev_cur_ordered_id;
u32 dev_flags;
u32 dev_port_count;
/* See transport_device_status_table */
u32 dev_status;
u32 dev_tcq_window_closed;
/* Physical device queue depth */
u32 queue_depth;
/* Used for SPC-2 reservations enforce of ISIDs */
u64 dev_res_bin_isid;
t10_task_attr_index_t dev_task_attr_type;
/* Pointer to transport specific device structure */
void *dev_ptr;
u32 dev_index;
u64 creation_time;
u32 num_resets;
u64 num_cmds;
u64 read_bytes;
u64 write_bytes;
spinlock_t stats_lock;
/* Active commands on this virtual SE device */
atomic_t active_cmds;
atomic_t simple_cmds;
atomic_t depth_left;
atomic_t dev_ordered_id;
atomic_t dev_tur_active;
atomic_t execute_tasks;
atomic_t dev_status_thr_count;
atomic_t dev_hoq_count;
atomic_t dev_ordered_sync;
struct se_obj dev_obj;
struct se_obj dev_access_obj;
struct se_obj dev_export_obj;
struct se_queue_obj *dev_queue_obj;
struct se_queue_obj *dev_status_queue_obj;
spinlock_t delayed_cmd_lock;
spinlock_t ordered_cmd_lock;
spinlock_t execute_task_lock;
spinlock_t state_task_lock;
spinlock_t dev_alua_lock;
spinlock_t dev_reservation_lock;
spinlock_t dev_state_lock;
spinlock_t dev_status_lock;
spinlock_t dev_status_thr_lock;
spinlock_t se_port_lock;
spinlock_t se_tmr_lock;
/* Used for legacy SPC-2 reservations */
struct se_node_acl *dev_reserved_node_acl;
/* Used for ALUA Logical Unit Group membership */
struct t10_alua_lu_gp_member *dev_alua_lu_gp_mem;
/* Used for SPC-3 Persistent Reservations */
struct t10_pr_registration *dev_pr_res_holder;
struct list_head dev_sep_list;
struct list_head dev_tmr_list;
struct timer_list dev_status_timer;
/* Pointer to descriptor for processing thread */
struct task_struct *process_thread;
pid_t process_thread_pid;
struct task_struct *dev_mgmt_thread;
struct list_head delayed_cmd_list;
struct list_head ordered_cmd_list;
struct list_head execute_task_list;
struct list_head state_task_list;
/* Pointer to associated SE HBA */
struct se_hba *se_hba;
struct se_subsystem_dev *se_sub_dev;
/* Pointer to template of function pointers for transport */
struct se_subsystem_api *transport;
/* Linked list for struct se_hba struct se_device list */
struct list_head dev_list;
/* Linked list for struct se_global->g_se_dev_list */
struct list_head g_se_dev_list;
} ____cacheline_aligned;
#define SE_DEV(cmd) ((struct se_device *)(cmd)->se_lun->lun_se_dev)
#define SU_DEV(dev) ((struct se_subsystem_dev *)(dev)->se_sub_dev)
#define DEV_ATTRIB(dev) (&(dev)->se_sub_dev->se_dev_attrib)
#define DEV_T10_WWN(dev) (&(dev)->se_sub_dev->t10_wwn)
struct se_hba {
u16 hba_tpgt;
u32 hba_id;
/* See hba_flags_table */
u32 hba_flags;
/* Virtual iSCSI devices attached. */
u32 dev_count;
u32 hba_index;
atomic_t dev_mib_access_count;
atomic_t load_balance_queue;
atomic_t left_queue_depth;
/* Maximum queue depth the HBA can handle. */
atomic_t max_queue_depth;
/* Pointer to transport specific host structure. */
void *hba_ptr;
/* Linked list for struct se_device */
struct list_head hba_dev_list;
struct list_head hba_list;
spinlock_t device_lock;
spinlock_t hba_queue_lock;
struct config_group hba_group;
struct mutex hba_access_mutex;
struct se_subsystem_api *transport;
} ____cacheline_aligned;
#define SE_HBA(d) ((struct se_hba *)(d)->se_hba)
struct se_lun {
/* See transport_lun_status_table */
enum transport_lun_status_table lun_status;
u32 lun_access;
u32 lun_flags;
u32 unpacked_lun;
atomic_t lun_acl_count;
spinlock_t lun_acl_lock;
spinlock_t lun_cmd_lock;
spinlock_t lun_sep_lock;
struct completion lun_shutdown_comp;
struct list_head lun_cmd_list;
struct list_head lun_acl_list;
struct se_device *lun_se_dev;
struct config_group lun_group;
struct se_port *lun_sep;
} ____cacheline_aligned;
#define SE_LUN(c) ((struct se_lun *)(c)->se_lun)
struct se_port {
/* RELATIVE TARGET PORT IDENTIFIER */
u16 sep_rtpi;
int sep_tg_pt_secondary_stat;
int sep_tg_pt_secondary_write_md;
u32 sep_index;
struct scsi_port_stats sep_stats;
/* Used for ALUA Target Port Groups membership */
atomic_t sep_tg_pt_gp_active;
atomic_t sep_tg_pt_secondary_offline;
/* Used for PR ALL_TG_PT=1 */
atomic_t sep_tg_pt_ref_cnt;
spinlock_t sep_alua_lock;
struct mutex sep_tg_pt_md_mutex;
struct t10_alua_tg_pt_gp_member *sep_alua_tg_pt_gp_mem;
struct se_lun *sep_lun;
struct se_portal_group *sep_tpg;
struct list_head sep_alua_list;
struct list_head sep_list;
} ____cacheline_aligned;
struct se_tpg_np {
struct config_group tpg_np_group;
} ____cacheline_aligned;
struct se_portal_group {
/* Type of target portal group, see transport_tpg_type_table */
enum transport_tpg_type_table se_tpg_type;
/* Number of ACLed Initiator Nodes for this TPG */
u32 num_node_acls;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t tpg_pr_ref_count;
/* Spinlock for adding/removing ACLed Nodes */
spinlock_t acl_node_lock;
/* Spinlock for adding/removing sessions */
spinlock_t session_lock;
spinlock_t tpg_lun_lock;
/* Pointer to $FABRIC_MOD portal group */
void *se_tpg_fabric_ptr;
struct list_head se_tpg_list;
/* Linked list of initiator node ACLs for this TPG */
struct list_head acl_node_list;
struct se_lun *tpg_lun_list;
struct se_lun tpg_virt_lun0;
/* List of TCM sessions associated with this TPG */
struct list_head tpg_sess_list;
/* Pointer to $FABRIC_MOD dependent code */
struct target_core_fabric_ops *se_tpg_tfo;
struct se_wwn *se_tpg_wwn;
struct config_group tpg_group;
struct config_group *tpg_default_groups[6];
struct config_group tpg_lun_group;
struct config_group tpg_np_group;
struct config_group tpg_acl_group;
struct config_group tpg_attrib_group;
struct config_group tpg_param_group;
} ____cacheline_aligned;
#define TPG_TFO(se_tpg) ((struct target_core_fabric_ops *)(se_tpg)->se_tpg_tfo)
struct se_wwn {
struct target_fabric_configfs *wwn_tf;
struct config_group wwn_group;
} ____cacheline_aligned;
struct se_global {
u16 alua_lu_gps_counter;
int g_sub_api_initialized;
u32 in_shutdown;
u32 alua_lu_gps_count;
u32 g_hba_id_counter;
struct config_group target_core_hbagroup;
struct config_group alua_group;
struct config_group alua_lu_gps_group;
struct list_head g_lu_gps_list;
struct list_head g_se_tpg_list;
struct list_head g_hba_list;
struct list_head g_se_dev_list;
struct se_hba *g_lun0_hba;
struct se_subsystem_dev *g_lun0_su_dev;
struct se_device *g_lun0_dev;
struct t10_alua_lu_gp *default_lu_gp;
spinlock_t g_device_lock;
spinlock_t hba_lock;
spinlock_t se_tpg_lock;
spinlock_t lu_gps_lock;
spinlock_t plugin_class_lock;
} ____cacheline_aligned;
#endif /* TARGET_CORE_BASE_H */
#define TARGET_CORE_CONFIGFS_VERSION TARGET_CORE_MOD_VERSION
#define TARGET_CORE_CONFIG_ROOT "/sys/kernel/config"
#define TARGET_CORE_NAME_MAX_LEN 64
#define TARGET_FABRIC_NAME_SIZE 32
extern struct target_fabric_configfs *target_fabric_configfs_init(
struct module *, const char *);
extern void target_fabric_configfs_free(struct target_fabric_configfs *);
extern int target_fabric_configfs_register(struct target_fabric_configfs *);
extern void target_fabric_configfs_deregister(struct target_fabric_configfs *);
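/*
 * A minimal sketch of how a fabric module could allocate, populate and
 * register its struct target_fabric_configfs at module load time using the
 * four helpers declared above. The function name, the "example" fabric name
 * and the failure convention assumed for the init helper are illustrative
 * assumptions; THIS_MODULE and IS_ERR_OR_NULL() come from the usual kernel
 * headers (linux/module.h, linux/err.h).
 */
static int example_register_configfs(void)
{
	struct target_fabric_configfs *fabric;
	int ret;

	fabric = target_fabric_configfs_init(THIS_MODULE, "example");
	if (IS_ERR_OR_NULL(fabric))
		return fabric ? PTR_ERR(fabric) : -ENOMEM;

	/* Fill in fabric->tf_ops with this module's target_core_fabric_ops here */

	ret = target_fabric_configfs_register(fabric);
	if (ret < 0) {
		target_fabric_configfs_free(fabric);
		return ret;
	}
	return 0;
}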
struct target_fabric_configfs_template {
struct config_item_type tfc_discovery_cit;
struct config_item_type tfc_wwn_cit;
struct config_item_type tfc_tpg_cit;
struct config_item_type tfc_tpg_base_cit;
struct config_item_type tfc_tpg_lun_cit;
struct config_item_type tfc_tpg_port_cit;
struct config_item_type tfc_tpg_np_cit;
struct config_item_type tfc_tpg_np_base_cit;
struct config_item_type tfc_tpg_attrib_cit;
struct config_item_type tfc_tpg_param_cit;
struct config_item_type tfc_tpg_nacl_cit;
struct config_item_type tfc_tpg_nacl_base_cit;
struct config_item_type tfc_tpg_nacl_attrib_cit;
struct config_item_type tfc_tpg_nacl_auth_cit;
struct config_item_type tfc_tpg_nacl_param_cit;
struct config_item_type tfc_tpg_mappedlun_cit;
};
struct target_fabric_configfs {
char tf_name[TARGET_FABRIC_NAME_SIZE];
atomic_t tf_access_cnt;
struct list_head tf_list;
struct config_group tf_group;
struct config_group tf_disc_group;
struct config_group *tf_default_groups[2];
/* Pointer to fabric's config_item */
struct config_item *tf_fabric;
/* Passed from fabric modules */
struct config_item_type *tf_fabric_cit;
/* Pointer to target core subsystem */
struct configfs_subsystem *tf_subsys;
/* Pointer to fabric's struct module */
struct module *tf_module;
struct target_core_fabric_ops tf_ops;
struct target_fabric_configfs_template tf_cit_tmpl;
};
#define TF_CIT_TMPL(tf) (&(tf)->tf_cit_tmpl)
#ifndef TARGET_CORE_DEVICE_H
#define TARGET_CORE_DEVICE_H
extern int transport_get_lun_for_cmd(struct se_cmd *, unsigned char *, u32);
extern int transport_get_lun_for_tmr(struct se_cmd *, u32);
extern struct se_dev_entry *core_get_se_deve_from_rtpi(
struct se_node_acl *, u16);
extern int core_free_device_list_for_node(struct se_node_acl *,
struct se_portal_group *);
extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
u32, struct se_node_acl *,
struct se_portal_group *, int);
extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
extern int core_dev_export(struct se_device *, struct se_portal_group *,
struct se_lun *);
extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
struct se_lun *);
extern int transport_core_report_lun_response(struct se_cmd *);
extern void se_release_device_for_hba(struct se_device *);
extern void se_release_vpd_for_dev(struct se_device *);
extern void se_clear_dev_ports(struct se_device *);
extern int se_free_virtual_device(struct se_device *, struct se_hba *);
extern int se_dev_check_online(struct se_device *);
extern int se_dev_check_shutdown(struct se_device *);
extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
extern int se_dev_set_task_timeout(struct se_device *, u32);
extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
extern int se_dev_set_unmap_granularity(struct se_device *, u32);
extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
extern int se_dev_set_emulate_dpo(struct se_device *, int);
extern int se_dev_set_emulate_fua_write(struct se_device *, int);
extern int se_dev_set_emulate_fua_read(struct se_device *, int);
extern int se_dev_set_emulate_write_cache(struct se_device *, int);
extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
extern int se_dev_set_emulate_tas(struct se_device *, int);
extern int se_dev_set_emulate_tpu(struct se_device *, int);
extern int se_dev_set_emulate_tpws(struct se_device *, int);
extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
extern int se_dev_set_queue_depth(struct se_device *, u32);
extern int se_dev_set_max_sectors(struct se_device *, u32);
extern int se_dev_set_optimal_sectors(struct se_device *, u32);
extern int se_dev_set_block_size(struct se_device *, u32);
extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
struct se_device *, u32);
extern int core_dev_del_lun(struct se_portal_group *, u32);
extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
u32, char *, int *);
extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *, u32, u32);
extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun *, struct se_lun_acl *);
extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
struct se_lun_acl *lacl);
extern int core_dev_setup_virtual_lun0(void);
extern void core_dev_release_virtual_lun0(void);
#endif /* TARGET_CORE_DEVICE_H */
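/*
 * A minimal sketch of exporting a backstore struct se_device as a LUN on a
 * target portal group with core_dev_add_lun(), and removing it again with
 * core_dev_del_lun(). The calling context, the variable names and the
 * NULL-on-failure convention assumed for core_dev_add_lun() are illustrative
 * assumptions only.
 */
static int example_export_lun(struct se_portal_group *se_tpg,
			      struct se_hba *hba,
			      struct se_device *dev,
			      u32 unpacked_lun)
{
	struct se_lun *lun;

	lun = core_dev_add_lun(se_tpg, hba, dev, unpacked_lun);
	if (!lun)
		return -ENOMEM;

	/* ... the LUN is now reachable by initiators mapped to this TPG ... */

	return core_dev_del_lun(se_tpg, unpacked_lun);
}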
/*
* Used for tfc_wwn_cit attributes
*/
#include <target/configfs_macros.h>
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_attrib, se_node_acl);
#define TF_NACL_ATTRIB_ATTR(_fabric, _name, _mode) \
static struct target_fabric_nacl_attrib_attribute _fabric##_nacl_attrib_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_nacl_attrib_show_##_name, \
_fabric##_nacl_attrib_store_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_auth, se_node_acl);
#define TF_NACL_AUTH_ATTR(_fabric, _name, _mode) \
static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_nacl_auth_show_##_name, \
_fabric##_nacl_auth_store_##_name);
#define TF_NACL_AUTH_ATTR_RO(_fabric, _name) \
static struct target_fabric_nacl_auth_attribute _fabric##_nacl_auth_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_fabric##_nacl_auth_show_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_param, se_node_acl);
#define TF_NACL_PARAM_ATTR(_fabric, _name, _mode) \
static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_nacl_param_show_##_name, \
_fabric##_nacl_param_store_##_name);
#define TF_NACL_PARAM_ATTR_RO(_fabric, _name) \
static struct target_fabric_nacl_param_attribute _fabric##_nacl_param_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_fabric##_nacl_param_show_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_nacl_base, se_node_acl);
#define TF_NACL_BASE_ATTR(_fabric, _name, _mode) \
static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_nacl_show_##_name, \
_fabric##_nacl_store_##_name);
#define TF_NACL_BASE_ATTR_RO(_fabric, _name) \
static struct target_fabric_nacl_base_attribute _fabric##_nacl_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_fabric##_nacl_show_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_np_base, se_tpg_np);
#define TF_NP_BASE_ATTR(_fabric, _name, _mode) \
static struct target_fabric_np_base_attribute _fabric##_np_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_np_show_##_name, \
_fabric##_np_store_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group);
#define TF_TPG_ATTRIB_ATTR(_fabric, _name, _mode) \
static struct target_fabric_tpg_attrib_attribute _fabric##_tpg_attrib_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_tpg_attrib_show_##_name, \
_fabric##_tpg_attrib_store_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_tpg_param, se_portal_group);
#define TF_TPG_PARAM_ATTR(_fabric, _name, _mode) \
static struct target_fabric_tpg_param_attribute _fabric##_tpg_param_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_tpg_param_show_##_name, \
_fabric##_tpg_param_store_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_tpg, se_portal_group);
#define TF_TPG_BASE_ATTR(_fabric, _name, _mode) \
static struct target_fabric_tpg_attribute _fabric##_tpg_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_tpg_show_##_name, \
_fabric##_tpg_store_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_wwn, target_fabric_configfs);
#define TF_WWN_ATTR(_fabric, _name, _mode) \
static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_wwn_show_attr_##_name, \
_fabric##_wwn_store_attr_##_name);
#define TF_WWN_ATTR_RO(_fabric, _name) \
static struct target_fabric_wwn_attribute _fabric##_wwn_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_fabric##_wwn_show_attr_##_name);
CONFIGFS_EATTR_STRUCT(target_fabric_discovery, target_fabric_configfs);
#define TF_DISC_ATTR(_fabric, _name, _mode) \
static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
__CONFIGFS_EATTR(_name, _mode, \
_fabric##_disc_show_##_name, \
_fabric##_disc_store_##_name);
#define TF_DISC_ATTR_RO(_fabric, _name) \
static struct target_fabric_discovery_attribute _fabric##_disc_##_name = \
__CONFIGFS_EATTR_RO(_name, \
_fabric##_disc_show_##_name);
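/*
 * A minimal sketch of how a fabric module might use TF_TPG_ATTRIB_ATTR()
 * above to expose one read/write TPG attribute through configfs. The fabric
 * prefix "example", the attribute name "demo_flag" and the show/store bodies
 * are assumptions; the function signatures follow the
 * CONFIGFS_EATTR_STRUCT(target_fabric_tpg_attrib, se_portal_group) wrapping.
 */
static ssize_t example_tpg_attrib_show_demo_flag(
	struct se_portal_group *se_tpg,
	char *page)
{
	/* Report the current value of the (assumed) fabric-private flag */
	return sprintf(page, "%d\n", 1);
}

static ssize_t example_tpg_attrib_store_demo_flag(
	struct se_portal_group *se_tpg,
	const char *page,
	size_t count)
{
	/* Parse and apply the new value here */
	return count;
}

/* Expands to a struct target_fabric_tpg_attrib_attribute named example_tpg_attrib_demo_flag */
TF_TPG_ATTRIB_ATTR(example, demo_flag, S_IRUGO | S_IWUSR);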
extern int target_fabric_setup_cits(struct target_fabric_configfs *);
#ifndef TARGET_CORE_FABRIC_LIB_H
#define TARGET_CORE_FABRIC_LIB_H
extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
const char *, u32 *, char **);
extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
const char *, u32 *, char **);
extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *, unsigned char *);
extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
struct t10_pr_registration *, int *);
extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
const char *, u32 *, char **);
#endif /* TARGET_CORE_FABRIC_LIB_H */
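/*
 * A minimal sketch (fabric prefix assumed) of wrapping the iSCSI TransportID
 * helpers above so they can later be plugged into a fabric module's
 * struct target_core_fabric_ops, whose callback signatures match these
 * prototypes:
 */
static u8 example_get_fabric_proto_ident(struct se_portal_group *se_tpg)
{
	return iscsi_get_fabric_proto_ident(se_tpg);
}

static u32 example_get_pr_transport_id(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct t10_pr_registration *pr_reg,
	int *format_code,
	unsigned char *buf)
{
	return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
					 format_code, buf);
}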
/* Defined in target_core_configfs.h */
struct target_fabric_configfs;
struct target_core_fabric_ops {
struct configfs_subsystem *tf_subsys;
/*
* Optional to signal struct se_task->task_sg[] padding entries
* for scatterlist chaining using transport_do_task_sg_chain(),
* disabled by default
*/
int task_sg_chaining:1;
char *(*get_fabric_name)(void);
u8 (*get_fabric_proto_ident)(struct se_portal_group *);
char *(*tpg_get_wwn)(struct se_portal_group *);
u16 (*tpg_get_tag)(struct se_portal_group *);
u32 (*tpg_get_default_depth)(struct se_portal_group *);
u32 (*tpg_get_pr_transport_id)(struct se_portal_group *,
struct se_node_acl *,
struct t10_pr_registration *, int *,
unsigned char *);
u32 (*tpg_get_pr_transport_id_len)(struct se_portal_group *,
struct se_node_acl *,
struct t10_pr_registration *, int *);
char *(*tpg_parse_pr_out_transport_id)(struct se_portal_group *,
const char *, u32 *, char **);
int (*tpg_check_demo_mode)(struct se_portal_group *);
int (*tpg_check_demo_mode_cache)(struct se_portal_group *);
int (*tpg_check_demo_mode_write_protect)(struct se_portal_group *);
int (*tpg_check_prod_mode_write_protect)(struct se_portal_group *);
struct se_node_acl *(*tpg_alloc_fabric_acl)(
struct se_portal_group *);
void (*tpg_release_fabric_acl)(struct se_portal_group *,
struct se_node_acl *);
u32 (*tpg_get_inst_index)(struct se_portal_group *);
/*
* Optional function pointer for TCM to perform command map
* from TCM processing thread context, for those struct se_cmd
* initially allocated in interrupt context.
*/
int (*new_cmd_map)(struct se_cmd *);
/*
* Optional function pointer for TCM fabric modules that use
* Linux/NET sockets to allocate a struct iovec array for a struct se_cmd
*/
int (*alloc_cmd_iovecs)(struct se_cmd *);
/*
* Optional to release struct se_cmd and the fabric-allocated I/O
* descriptor in transport_cmd_check_stop()
*/
void (*check_stop_free)(struct se_cmd *);
void (*release_cmd_to_pool)(struct se_cmd *);
void (*release_cmd_direct)(struct se_cmd *);
/*
* Called with spin_lock_bh(struct se_portal_group->session_lock) held.
*/
int (*shutdown_session)(struct se_session *);
void (*close_session)(struct se_session *);
void (*stop_session)(struct se_session *, int, int);
void (*fall_back_to_erl0)(struct se_session *);
int (*sess_logged_in)(struct se_session *);
u32 (*sess_get_index)(struct se_session *);
/*
* Used only for SCSI fabrics that contain multi-value TransportIDs
* (like iSCSI). All other SCSI fabrics should set this to NULL.
*/
u32 (*sess_get_initiator_sid)(struct se_session *,
unsigned char *, u32);
int (*write_pending)(struct se_cmd *);
int (*write_pending_status)(struct se_cmd *);
void (*set_default_node_attributes)(struct se_node_acl *);
u32 (*get_task_tag)(struct se_cmd *);
int (*get_cmd_state)(struct se_cmd *);
void (*new_cmd_failure)(struct se_cmd *);
int (*queue_data_in)(struct se_cmd *);
int (*queue_status)(struct se_cmd *);
int (*queue_tm_rsp)(struct se_cmd *);
u16 (*set_fabric_sense_len)(struct se_cmd *, u32);
u16 (*get_fabric_sense_len)(void);
int (*is_state_remove)(struct se_cmd *);
u64 (*pack_lun)(unsigned int);
/*
* fabric module calls for target_core_fabric_configfs.c
*/
struct se_wwn *(*fabric_make_wwn)(struct target_fabric_configfs *,
struct config_group *, const char *);
void (*fabric_drop_wwn)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
struct config_group *, const char *);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
void (*fabric_pre_unlink)(struct se_portal_group *,
struct se_lun *);
struct se_tpg_np *(*fabric_make_np)(struct se_portal_group *,
struct config_group *, const char *);
void (*fabric_drop_np)(struct se_tpg_np *);
struct se_node_acl *(*fabric_make_nodeacl)(struct se_portal_group *,
struct config_group *, const char *);
void (*fabric_drop_nodeacl)(struct se_node_acl *);
};
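/*
 * A minimal sketch (fabric name and callback bodies assumed) of how a fabric
 * module could wire a couple of the callbacks above into a statically
 * defined struct target_core_fabric_ops; a real fabric module fills in the
 * full set of mandatory operations.
 */
static char *example_get_fabric_name(void)
{
	return "example";
}

static u32 example_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static struct target_core_fabric_ops example_fabric_ops = {
	.get_fabric_name	= example_get_fabric_name,
	.tpg_get_inst_index	= example_tpg_get_inst_index,
	/* tpg_get_wwn, queue_data_in, queue_status, ... omitted for brevity */
};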
#ifndef TARGET_CORE_TMR_H
#define TARGET_CORE_TMR_H
/* task management function values */
#ifdef ABORT_TASK
#undef ABORT_TASK
#endif /* ABORT_TASK */
#define ABORT_TASK 1
#ifdef ABORT_TASK_SET
#undef ABORT_TASK_SET
#endif /* ABORT_TASK_SET */
#define ABORT_TASK_SET 2
#ifdef CLEAR_ACA
#undef CLEAR_ACA
#endif /* CLEAR_ACA */
#define CLEAR_ACA 3
#ifdef CLEAR_TASK_SET
#undef CLEAR_TASK_SET
#endif /* CLEAR_TASK_SET */
#define CLEAR_TASK_SET 4
#define LUN_RESET 5
#define TARGET_WARM_RESET 6
#define TARGET_COLD_RESET 7
#define TASK_REASSIGN 8
/* task management response values */
#define TMR_FUNCTION_COMPLETE 0
#define TMR_TASK_DOES_NOT_EXIST 1
#define TMR_LUN_DOES_NOT_EXIST 2
#define TMR_TASK_STILL_ALLEGIANT 3
#define TMR_TASK_FAILOVER_NOT_SUPPORTED 4
#define TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED 5
#define TMR_FUNCTION_AUTHORIZATION_FAILED 6
#define TMR_FUNCTION_REJECTED 255
extern struct kmem_cache *se_tmr_req_cache;
extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8);
extern void core_tmr_release_req(struct se_tmr_req *);
extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
struct list_head *, struct se_cmd *);
#endif /* TARGET_CORE_TMR_H */
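/*
 * A minimal sketch of how a fabric module receiving an ABORT TASK request
 * might allocate the TMR descriptor with core_tmr_alloc_req() and queue it
 * through transport_generic_handle_tmr() (declared in
 * target_core_transport.h). The calling context and the ERR_PTR-on-failure
 * convention are assumptions for illustration.
 */
static int example_handle_abort_task(struct se_cmd *se_cmd, void *fabric_tmr)
{
	struct se_tmr_req *tmr;

	tmr = core_tmr_alloc_req(se_cmd, fabric_tmr, ABORT_TASK);
	if (IS_ERR(tmr))
		return PTR_ERR(tmr);

	se_cmd->se_tmr_req = tmr;
	/* Hand the TMR command off to the generic processing path */
	return transport_generic_handle_tmr(se_cmd);
}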
#ifndef TARGET_CORE_TPG_H
#define TARGET_CORE_TPG_H
extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
const char *);
extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
unsigned char *);
extern void core_tpg_add_node_to_devs(struct se_node_acl *,
struct se_portal_group *);
extern struct se_node_acl *core_tpg_check_initiator_node_acl(
struct se_portal_group *,
unsigned char *);
extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
extern void core_tpg_clear_object_luns(struct se_portal_group *);
extern struct se_node_acl *core_tpg_add_initiator_node_acl(
struct se_portal_group *,
struct se_node_acl *,
const char *, u32);
extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
struct se_node_acl *, int);
extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
unsigned char *, u32, int);
extern int core_tpg_register(struct target_core_fabric_ops *,
struct se_wwn *,
struct se_portal_group *, void *,
int);
extern int core_tpg_deregister(struct se_portal_group *);
extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
void *);
extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
#endif /* TARGET_CORE_TPG_H */
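/*
 * A minimal sketch (function name, parameters and calling context assumed)
 * of how a fabric module's fabric_make_tpg() path could register a freshly
 * allocated portal group with the core via core_tpg_register();
 * TRANSPORT_TPG_TYPE_NORMAL is the non-discovery TPG type from
 * target_core_base.h.
 */
static int example_setup_tpg(struct target_core_fabric_ops *tfo,
			     struct se_wwn *wwn,
			     struct se_portal_group *se_tpg,
			     void *fabric_tpg_ptr)
{
	/* The final argument selects the TPG type */
	return core_tpg_register(tfo, wwn, se_tpg, fabric_tpg_ptr,
				 TRANSPORT_TPG_TYPE_NORMAL);
}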
#ifndef TARGET_CORE_TRANSPORT_H
#define TARGET_CORE_TRANSPORT_H
#define TARGET_CORE_VERSION TARGET_CORE_MOD_VERSION
/* Attempts before moving from SHORT to LONG */
#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD 3
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3 /* In milliseconds */
#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG 10 /* In milliseconds */
#define PYX_TRANSPORT_STATUS_INTERVAL 5 /* In seconds */
#define PYX_TRANSPORT_SENT_TO_TRANSPORT 0
#define PYX_TRANSPORT_WRITE_PENDING 1
#define PYX_TRANSPORT_UNKNOWN_SAM_OPCODE -1
#define PYX_TRANSPORT_HBA_QUEUE_FULL -2
#define PYX_TRANSPORT_REQ_TOO_MANY_SECTORS -3
#define PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES -4
#define PYX_TRANSPORT_INVALID_CDB_FIELD -5
#define PYX_TRANSPORT_INVALID_PARAMETER_LIST -6
#define PYX_TRANSPORT_LU_COMM_FAILURE -7
#define PYX_TRANSPORT_UNKNOWN_MODE_PAGE -8
#define PYX_TRANSPORT_WRITE_PROTECTED -9
#define PYX_TRANSPORT_TASK_TIMEOUT -10
#define PYX_TRANSPORT_RESERVATION_CONFLICT -11
#define PYX_TRANSPORT_ILLEGAL_REQUEST -12
#define PYX_TRANSPORT_USE_SENSE_REASON -13
#ifndef SAM_STAT_RESERVATION_CONFLICT
#define SAM_STAT_RESERVATION_CONFLICT 0x18
#endif
#define TRANSPORT_PLUGIN_FREE 0
#define TRANSPORT_PLUGIN_REGISTERED 1
#define TRANSPORT_PLUGIN_PHBA_PDEV 1
#define TRANSPORT_PLUGIN_VHBA_PDEV 2
#define TRANSPORT_PLUGIN_VHBA_VDEV 3
/* For SE OBJ Plugins, in seconds */
#define TRANSPORT_TIMEOUT_TUR 10
#define TRANSPORT_TIMEOUT_TYPE_DISK 60
#define TRANSPORT_TIMEOUT_TYPE_ROM 120
#define TRANSPORT_TIMEOUT_TYPE_TAPE 600
#define TRANSPORT_TIMEOUT_TYPE_OTHER 300
/* For se_task->task_state_flags */
#define TSF_EXCEPTION_CLEARED 0x01
/*
* struct se_subsystem_dev->su_dev_flags
*/
#define SDF_FIRMWARE_VPD_UNIT_SERIAL 0x00000001
#define SDF_EMULATED_VPD_UNIT_SERIAL 0x00000002
#define SDF_USING_UDEV_PATH 0x00000004
#define SDF_USING_ALIAS 0x00000008
/*
* struct se_device->dev_flags
*/
#define DF_READ_ONLY 0x00000001
#define DF_SPC2_RESERVATIONS 0x00000002
#define DF_SPC2_RESERVATIONS_WITH_ISID 0x00000004
/* struct se_dev_attrib sanity values */
/* 10 Minutes */
#define DA_TASK_TIMEOUT_MAX 600
/* Default max_unmap_lba_count */
#define DA_MAX_UNMAP_LBA_COUNT 0
/* Default max_unmap_block_desc_count */
#define DA_MAX_UNMAP_BLOCK_DESC_COUNT 0
/* Default unmap_granularity */
#define DA_UNMAP_GRANULARITY_DEFAULT 0
/* Default unmap_granularity_alignment */
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Emulation for Disable Page Out (DPO) */
#define DA_EMULATE_DPO 0
/* Emulation for Forced Unit Access WRITEs */
#define DA_EMULATE_FUA_WRITE 1
/* Emulation for Forced Unit Access READs */
#define DA_EMULATE_FUA_READ 0
/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
#define DA_EMULATE_WRITE_CACHE 0
/* Emulation for UNIT ATTENTION Interlock Control */
#define DA_EMULATE_UA_INTLLCK_CTRL 0
/* Emulation for TASK_ABORTED status (TAS) by default */
#define DA_EMULATE_TAS 1
/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
#define DA_EMULATE_TPU 0
/*
* Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
* block/blk-lib.c:blkdev_issue_discard()
*/
#define DA_EMULATE_TPWS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_RESERVATIONS 0
/* No Emulation for PSCSI by default */
#define DA_EMULATE_ALUA 0
/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
#define DA_ENFORCE_PR_ISIDS 1
#define DA_STATUS_MAX_SECTORS_MIN 16
#define DA_STATUS_MAX_SECTORS_MAX 8192
#define SE_MODE_PAGE_BUF 512
#define MOD_MAX_SECTORS(ms, bs) (ms % (PAGE_SIZE / bs))
struct se_mem;
struct se_subsystem_api;
extern int init_se_global(void);
extern void release_se_global(void);
extern void transport_init_queue_obj(struct se_queue_obj *);
extern int transport_subsystem_check_init(void);
extern int transport_subsystem_register(struct se_subsystem_api *);
extern void transport_subsystem_release(struct se_subsystem_api *);
extern void transport_load_plugins(void);
extern struct se_session *transport_init_session(void);
extern void __transport_register_session(struct se_portal_group *,
struct se_node_acl *,
struct se_session *, void *);
extern void transport_register_session(struct se_portal_group *,
struct se_node_acl *,
struct se_session *, void *);
extern void transport_free_session(struct se_session *);
extern void transport_deregister_session_configfs(struct se_session *);
extern void transport_deregister_session(struct se_session *);
extern void transport_cmd_finish_abort(struct se_cmd *, int);
extern void transport_cmd_finish_abort_tmr(struct se_cmd *);
extern void transport_complete_sync_cache(struct se_cmd *, int);
extern void transport_complete_task(struct se_task *, int);
extern void transport_add_task_to_execute_queue(struct se_task *,
struct se_task *,
struct se_device *);
unsigned char *transport_dump_cmd_direction(struct se_cmd *);
extern void transport_dump_dev_state(struct se_device *, char *, int *);
extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
unsigned long long, char *, int *);
extern void transport_dump_vpd_proto_id(struct t10_vpd *,
unsigned char *, int);
extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_assoc(struct t10_vpd *,
unsigned char *, int);
extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_ident_type(struct t10_vpd *,
unsigned char *, int);
extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
extern int transport_dump_vpd_ident(struct t10_vpd *,
unsigned char *, int);
extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
struct se_subsystem_api *,
struct se_subsystem_dev *, u32,
void *, struct se_dev_limits *,
const char *, const char *);
extern void transport_device_setup_cmd(struct se_cmd *);
extern void transport_init_se_cmd(struct se_cmd *,
struct target_core_fabric_ops *,
struct se_session *, u32, int, int,
unsigned char *);
extern void transport_free_se_cmd(struct se_cmd *);
extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
extern int transport_generic_handle_cdb(struct se_cmd *);
extern int transport_generic_handle_cdb_map(struct se_cmd *);
extern int transport_generic_handle_data(struct se_cmd *);
extern void transport_new_cmd_failure(struct se_cmd *);
extern int transport_generic_handle_tmr(struct se_cmd *);
extern void __transport_stop_task_timer(struct se_task *, unsigned long *);
extern unsigned char transport_asciihex_to_binaryhex(unsigned char val[2]);
extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
struct scatterlist *, u32);
extern int transport_clear_lun_from_sessions(struct se_lun *);
extern int transport_check_aborted_status(struct se_cmd *, int);
extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
extern void transport_send_task_abort(struct se_cmd *);
extern void transport_release_cmd_to_pool(struct se_cmd *);
extern void transport_generic_free_cmd(struct se_cmd *, int, int, int);
extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
extern u32 transport_calc_sg_num(struct se_task *, struct se_mem *, u32);
extern int transport_map_mem_to_sg(struct se_task *, struct list_head *,
void *, struct se_mem *,
struct se_mem **, u32 *, u32 *);
extern void transport_do_task_sg_chain(struct se_cmd *);
extern void transport_generic_process_write(struct se_cmd *);
extern int transport_generic_do_tmr(struct se_cmd *);
/* From target_core_alua.c */
extern int core_alua_check_nonop_delay(struct se_cmd *);
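/*
 * A minimal sketch (error conventions and calling context assumed) of the
 * session lifecycle a fabric module drives with the prototypes above:
 * allocate a struct se_session, resolve the initiator's ACL on the TPG,
 * register the session, and deregister it again at logout.
 */
static struct se_session *example_login(struct se_portal_group *se_tpg,
					unsigned char *initiatorname,
					void *fabric_sess_ptr)
{
	struct se_session *se_sess;
	struct se_node_acl *se_nacl;

	se_sess = transport_init_session();
	if (IS_ERR(se_sess))
		return se_sess;

	se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiatorname);
	if (!se_nacl) {
		transport_free_session(se_sess);
		return ERR_PTR(-EPERM);
	}

	transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	return se_sess;
}

static void example_logout(struct se_session *se_sess)
{
	transport_deregister_session(se_sess);
}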
/*
* Each struct se_transport_task can reference any number of struct se_task
* entries for the storage transport(s) to execute.
* Used primarily for splitting up CDBs that exceed the physical storage
* HBA's maximum sector count per task.
*/
struct se_mem {
struct page *se_page;
u32 se_len;
u32 se_off;
struct list_head se_list;
} ____cacheline_aligned;
/*
* Each type of disk transport supported MUST have a template defined
* within its .h file.
*/
struct se_subsystem_api {
/*
* The Name. :-)
*/
char name[16];
/*
* Transport Type.
*/
u8 transport_type;
/*
* struct module for struct se_hba references
*/
struct module *owner;
/*
* Used for global se_subsystem_api list_head
*/
struct list_head sub_api_list;
/*
* For SCF_SCSI_NON_DATA_CDB
*/
int (*cdb_none)(struct se_task *);
/*
* For SCF_SCSI_CONTROL_NONSG_IO_CDB
*/
int (*map_task_non_SG)(struct se_task *);
/*
* For SCF_SCSI_DATA_SG_IO_CDB and SCF_SCSI_CONTROL_SG_IO_CDB
*/
int (*map_task_SG)(struct se_task *);
/*
* attach_hba():
*/
int (*attach_hba)(struct se_hba *, u32);
/*
* detach_hba():
*/
void (*detach_hba)(struct se_hba *);
/*
* pmode_enable_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
* Linux/SCSI struct Scsi_Host passthrough
*/
int (*pmode_enable_hba)(struct se_hba *, unsigned long);
/*
* allocate_virtdevice():
*/
void *(*allocate_virtdevice)(struct se_hba *, const char *);
/*
* create_virtdevice(): Only for Virtual HBAs
*/
struct se_device *(*create_virtdevice)(struct se_hba *,
struct se_subsystem_dev *, void *);
/*
* free_device():
*/
void (*free_device)(void *);
/*
* dpo_emulated():
*/
int (*dpo_emulated)(struct se_device *);
/*
* fua_write_emulated():
*/
int (*fua_write_emulated)(struct se_device *);
/*
* fua_read_emulated():
*/
int (*fua_read_emulated)(struct se_device *);
/*
* write_cache_emulated():
*/
int (*write_cache_emulated)(struct se_device *);
/*
* transport_complete():
*
* Use transport_generic_complete() for the majority of DAS transport
* drivers. Provided as a convenience.
*/
int (*transport_complete)(struct se_task *task);
struct se_task *(*alloc_task)(struct se_cmd *);
/*
* do_task():
*/
int (*do_task)(struct se_task *);
/*
* Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
* UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
*/
int (*do_discard)(struct se_device *, sector_t, u32);
/*
* Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
* SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
*/
void (*do_sync_cache)(struct se_task *);
/*
* free_task():
*/
void (*free_task)(struct se_task *);
/*
* check_configfs_dev_params():
*/
ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
/*
* set_configfs_dev_params():
*/
ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
const char *, ssize_t);
/*
* show_configfs_dev_params():
*/
ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
char *);
/*
* get_cdb():
*/
unsigned char *(*get_cdb)(struct se_task *);
/*
* get_device_rev():
*/
u32 (*get_device_rev)(struct se_device *);
/*
* get_device_type():
*/
u32 (*get_device_type)(struct se_device *);
/*
* Get the sector_t from a subsystem backstore.
*/
sector_t (*get_blocks)(struct se_device *);
/*
* do_se_mem_map():
*/
int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
struct se_mem *, struct se_mem **, u32 *, u32 *);
/*
* get_sense_buffer():
*/
unsigned char *(*get_sense_buffer)(struct se_task *);
} ____cacheline_aligned;
#define TRANSPORT(dev) ((dev)->transport)
#define HBA_TRANSPORT(hba) ((hba)->transport)
extern struct se_global *se_global;
#endif /* TARGET_CORE_TRANSPORT_H */
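/*
 * A minimal sketch (plugin name, callback bodies and module glue assumed) of
 * how a virtual backstore plugin could declare its struct se_subsystem_api
 * template and register it with the core via transport_subsystem_register();
 * THIS_MODULE assumes the usual linux/module.h kernel module context.
 */
static struct se_task *example_alloc_task(struct se_cmd *cmd)
{
	/* Allocate and return a plugin-private struct se_task wrapper here */
	return NULL;
}

static int example_do_task(struct se_task *task)
{
	/* Submit the I/O described by this task to the backing store here */
	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
}

static struct se_subsystem_api example_template = {
	.name		= "example",
	.transport_type	= TRANSPORT_PLUGIN_VHBA_VDEV,
	.owner		= THIS_MODULE,
	.alloc_task	= example_alloc_task,
	.do_task	= example_do_task,
	/* attach_hba, detach_hba, *_virtdevice, free_device, ... omitted */
};

static int __init example_plugin_init(void)
{
	return transport_subsystem_register(&example_template);
}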