Commit 29a05dee authored by Nicholas Bellinger

target: Convert se_node_acl->device_list[] to RCU hlist

This patch converts the se_node_acl->device_list[] table for MappedLUNs
to modern RCU hlist_head usage, in order to support an arbitrary number
of node_acl LUN mappings.
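
Concretely, the conversion replaces the fixed-size table and its
spinlock with an hlist_head + mutex pair in se_node_acl, and gives each
se_dev_entry an hlist_node and rcu_head (see the target_core_base.h
hunks below); in sketch form:

	struct se_node_acl {
		...
		struct hlist_head	lun_entry_hlist; /* was: struct se_dev_entry **device_list */
		struct mutex		lun_entry_mutex; /* was: spinlock_t device_list_lock */
		...
	};

	struct se_dev_entry {
		...
		struct hlist_node	link;		/* entry in se_node_acl->lun_entry_hlist */
		struct rcu_head		rcu_head;	/* deferred release via kfree_rcu() */
		...
	};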

It converts the transport_lookup_*_lun() fast-path code to use RCU
read-path primitives when looking up se_dev_entry, and adds a new
hlist_head at se_node_acl->lun_entry_hlist for this purpose.
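
The lookup helper itself is a plain RCU hlist walk; roughly (callers
must hold rcu_read_lock(), per the target_nacl_find_deve() declaration
added to target_core_internal.h below):

	struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl, u32 mapped_lun)
	{
		struct se_dev_entry *deve;

		hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
			if (deve->mapped_lun == mapped_lun)
				return deve;

		return NULL;
	}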

For the transport_lookup_cmd_lun() code, it works with the existing
per-cpu se_lun->lun_ref when associating se_cmd with se_lun + se_device.
Also, go ahead and drop the now-unnecessary
core_create_device_list_for_node(), and update
core_free_device_list_for_node() to walk ->lun_entry_hlist.
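
The resulting fast path is, schematically (a sketch only; the real
transport_lookup_cmd_lun() lives in one of the collapsed diffs
(target_core_device.c) and also handles the LUN 0 fallback):

	rcu_read_lock();
	deve = target_nacl_find_deve(nacl, unpacked_lun);
	if (deve) {
		se_cmd->se_lun = rcu_dereference(deve->se_lun);
		se_cmd->pr_res_key = deve->pr_res_key;
		se_cmd->orig_fe_lun = unpacked_lun;
		/* the per-cpu counter pins se_lun across command execution */
		percpu_ref_get(&se_cmd->se_lun->lun_ref);
		se_cmd->lun_ref_active = true;
	}
	rcu_read_unlock();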

It also converts se_dev_entry->pr_ref_count access to use modern
struct kref counting, and updates core_disable_device_list_for_node()
to kref_put() and block on se_deve->pr_comp until any outstanding
special-case PR references drop, then invoke kfree_rcu() to wait for
an RCU grace period to complete before releasing the memory.
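
That is, the release side of core_disable_device_list_for_node()
follows the classic unpublish / wait / free sequence; schematically
(assuming target_pr_kref_release() completes ->pr_comp on the final
put):

	kref_put(&deve->pr_kref, target_pr_kref_release);
	wait_for_completion(&deve->pr_comp);	/* drain special-case PR references */
	kfree_rcu(deve, rcu_head);		/* free only after an RCU grace period */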

So now that se_node_acl->lun_entry_hlist fast-path access uses
RCU-protected pointers, go ahead and convert the remaining non-fast-path
RCU updater code from ->device_list_lock to a struct mutex
(->lun_entry_mutex), to allow callers to block while walking
se_node_acl->lun_entry_hlist.
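
With the mutex in place, an updater walk takes the shape below (a
sketch of core_free_device_list_for_node(); its full body is in the
collapsed target_core_device.c diff):

	mutex_lock(&nacl->lun_entry_mutex);
	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
		struct se_lun *lun = rcu_dereference_check(deve->se_lun,
					lockdep_is_held(&nacl->lun_entry_mutex));

		core_disable_device_list_for_node(lun, deve, nacl, tpg);
	}
	mutex_unlock(&nacl->lun_entry_mutex);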

Finally, drop the left-over core_clear_initiator_node_from_tpg() that
originally cleared lun_access during se_node_acl shutdown, as it
becomes duplicated logic after the RCU conversion.
Reviewed-by: Hannes Reinecke <hare@suse.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Sagi Grimberg <sagig@mellanox.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
parent d2c27f0d
@@ -1001,7 +1001,8 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
 	spin_lock_bh(&port->sep_alua_lock);
 	list_for_each_entry(se_deve, &port->sep_alua_list,
 				alua_port_list) {
-		lacl = se_deve->se_lun_acl;
+		lacl = rcu_dereference_check(se_deve->se_lun_acl,
+				lockdep_is_held(&port->sep_alua_lock));
 		/*
 		 * se_deve->se_lun_acl pointer may be NULL for a
 		 * entry created without explicit Node+MappedLUN ACLs
......
This diff is collapsed.
@@ -123,16 +123,16 @@ static int target_fabric_mappedlun_link(
 	 * which be will write protected (READ-ONLY) when
 	 * tpg_1/attrib/demo_mode_write_protect=1
	 */
-	spin_lock_irq(&lacl->se_lun_nacl->device_list_lock);
-	deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun];
-	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS)
+	rcu_read_lock();
+	deve = target_nacl_find_deve(lacl->se_lun_nacl, lacl->mapped_lun);
+	if (deve)
 		lun_access = deve->lun_flags;
 	else
 		lun_access =
 			(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect(
 			se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY :
 				   TRANSPORT_LUNFLAGS_READ_WRITE;
-	spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock);
+	rcu_read_unlock();
 	/*
 	 * Determine the actual mapped LUN value user wants..
 	 *
@@ -149,23 +149,13 @@ static int target_fabric_mappedlun_unlink(
 	struct config_item *lun_acl_ci,
 	struct config_item *lun_ci)
 {
-	struct se_lun *lun;
 	struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci),
 			struct se_lun_acl, se_lun_group);
-	struct se_node_acl *nacl = lacl->se_lun_nacl;
-	struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun];
-	struct se_portal_group *se_tpg;
-	/*
-	 * Determine if the underlying MappedLUN has already been released..
-	 */
-	if (!deve->se_lun)
-		return 0;
-
-	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
-	se_tpg = lun->lun_sep->sep_tpg;
+	struct se_lun *lun = container_of(to_config_group(lun_ci),
+			struct se_lun, lun_group);
+	struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg;
 
-	core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
-	return 0;
+	return core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl);
 }
 
 CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl);
@@ -181,14 +171,15 @@ static ssize_t target_fabric_mappedlun_show_write_protect(
 {
 	struct se_node_acl *se_nacl = lacl->se_lun_nacl;
 	struct se_dev_entry *deve;
-	ssize_t len;
+	ssize_t len = 0;
 
-	spin_lock_irq(&se_nacl->device_list_lock);
-	deve = se_nacl->device_list[lacl->mapped_lun];
+	rcu_read_lock();
+	deve = target_nacl_find_deve(se_nacl, lacl->mapped_lun);
+	if (deve) {
 		len = sprintf(page, "%d\n",
-			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ?
-			1 : 0);
-	spin_unlock_irq(&se_nacl->device_list_lock);
+			(deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 1 : 0);
+	}
+	rcu_read_unlock();
 
 	return len;
 }
......
@@ -9,13 +9,15 @@ extern struct mutex g_device_mutex;
 extern struct list_head g_device_list;
 
 struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
-int	core_free_device_list_for_node(struct se_node_acl *,
+void	target_pr_kref_release(struct kref *);
+void	core_free_device_list_for_node(struct se_node_acl *,
 		struct se_portal_group *);
 void	core_update_device_list_access(u32, u32, struct se_node_acl *);
+struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *, u32);
 int	core_enable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
 		u32, u32, struct se_node_acl *, struct se_portal_group *);
-int	core_disable_device_list_for_node(struct se_lun *, struct se_lun_acl *,
-		u32, u32, struct se_node_acl *, struct se_portal_group *);
+void	core_disable_device_list_for_node(struct se_lun *, struct se_dev_entry *,
+		struct se_node_acl *, struct se_portal_group *);
 void	core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
 int	core_dev_export(struct se_device *, struct se_portal_group *,
 		struct se_lun *);
......
This diff is collapsed.
@@ -47,6 +47,7 @@
 #include <target/target_core_backend_configfs.h>
 
 #include "target_core_alua.h"
+#include "target_core_internal.h"
 #include "target_core_pscsi.h"
 
 #define ISPRINT(a)  ((a >= ' ') && (a <= '~'))
@@ -637,12 +638,14 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg,
 	 * Hack to make sure that Write-Protect modepage is set if R/O mode is
 	 * forced.
	 */
-	if (!cmd->se_deve || !cmd->data_length)
+	if (!cmd->data_length)
 		goto after_mode_sense;
 
 	if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) &&
 	    (status_byte(result) << 1) == SAM_STAT_GOOD) {
-		if (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) {
+		bool read_only = target_lun_is_rdonly(cmd);
+
+		if (read_only) {
 			unsigned char *buf;
 
 			buf = transport_kmap_data_sg(cmd);
......
@@ -981,6 +981,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	int length = 0;
 	int ret;
 	int i;
+	bool read_only = target_lun_is_rdonly(cmd);
 
 	memset(buf, 0, SE_MODE_PAGE_BUF);
@@ -991,9 +992,7 @@ static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
 	length = ten ? 3 : 2;
 
 	/* DEVICE-SPECIFIC PARAMETER */
-	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
-	    (cmd->se_deve &&
-	    (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
+	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) || read_only)
 		spc_modesense_write_protect(&buf[length], type);
 
 	/*
@@ -1211,8 +1210,9 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 {
 	struct se_dev_entry *deve;
 	struct se_session *sess = cmd->se_sess;
+	struct se_node_acl *nacl;
 	unsigned char *buf;
-	u32 lun_count = 0, offset = 8, i;
+	u32 lun_count = 0, offset = 8;
 
 	if (cmd->data_length < 16) {
 		pr_warn("REPORT LUNS allocation length %u too small\n",
@@ -1234,12 +1234,10 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 		lun_count = 1;
 		goto done;
 	}
+	nacl = sess->se_node_acl;
 
-	spin_lock_irq(&sess->se_node_acl->device_list_lock);
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = sess->se_node_acl->device_list[i];
-		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-			continue;
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
 		/*
 		 * We determine the correct LUN LIST LENGTH even once we
 		 * have reached the initial allocation length.
@@ -1252,7 +1250,7 @@ sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
 		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
 		offset += 8;
 	}
-	spin_unlock_irq(&sess->se_node_acl->device_list_lock);
+	rcu_read_unlock();
 
 	/*
 	 * See SPC3 r07, page 159.
......
This diff is collapsed.
@@ -47,42 +47,6 @@ extern struct se_device *g_lun0_dev;
 static DEFINE_SPINLOCK(tpg_lock);
 static LIST_HEAD(tpg_list);
 
-/* core_clear_initiator_node_from_tpg():
- *
- *
- */
-static void core_clear_initiator_node_from_tpg(
-	struct se_node_acl *nacl,
-	struct se_portal_group *tpg)
-{
-	int i;
-	struct se_dev_entry *deve;
-	struct se_lun *lun;
-
-	spin_lock_irq(&nacl->device_list_lock);
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = nacl->device_list[i];
-
-		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
-			continue;
-
-		if (!deve->se_lun) {
-			pr_err("%s device entries device pointer is"
-				" NULL, but Initiator has access.\n",
-				tpg->se_tpg_tfo->get_fabric_name());
-			continue;
-		}
-
-		lun = deve->se_lun;
-		spin_unlock_irq(&nacl->device_list_lock);
-		core_disable_device_list_for_node(lun, NULL, deve->mapped_lun,
-			TRANSPORT_LUNFLAGS_NO_ACCESS, nacl, tpg);
-
-		spin_lock_irq(&nacl->device_list_lock);
-	}
-	spin_unlock_irq(&nacl->device_list_lock);
-}
-
 /* __core_tpg_get_initiator_node_acl():
  *
  *	spin_lock_bh(&tpg->acl_node_lock); must be held when calling
@@ -225,35 +189,6 @@ static void *array_zalloc(int n, size_t size, gfp_t flags)
 	return a;
 }
 
-/* core_create_device_list_for_node():
- *
- *
- */
-static int core_create_device_list_for_node(struct se_node_acl *nacl)
-{
-	struct se_dev_entry *deve;
-	int i;
-
-	nacl->device_list = array_zalloc(TRANSPORT_MAX_LUNS_PER_TPG,
-			sizeof(struct se_dev_entry), GFP_KERNEL);
-	if (!nacl->device_list) {
-		pr_err("Unable to allocate memory for"
-			" struct se_node_acl->device_list\n");
-		return -ENOMEM;
-	}
-	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = nacl->device_list[i];
-
-		atomic_set(&deve->ua_count, 0);
-		atomic_set(&deve->pr_ref_count, 0);
-		spin_lock_init(&deve->ua_lock);
-		INIT_LIST_HEAD(&deve->alua_port_list);
-		INIT_LIST_HEAD(&deve->ua_list);
-	}
-
-	return 0;
-}
-
 static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
 		const unsigned char *initiatorname)
 {
@@ -266,10 +201,11 @@ static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
 	INIT_LIST_HEAD(&acl->acl_list);
 	INIT_LIST_HEAD(&acl->acl_sess_list);
+	INIT_HLIST_HEAD(&acl->lun_entry_hlist);
 	kref_init(&acl->acl_kref);
 	init_completion(&acl->acl_free_comp);
-	spin_lock_init(&acl->device_list_lock);
 	spin_lock_init(&acl->nacl_sess_lock);
+	mutex_init(&acl->lun_entry_mutex);
 	atomic_set(&acl->acl_pr_ref_count, 0);
 
 	if (tpg->se_tpg_tfo->tpg_get_default_depth)
 		acl->queue_depth = tpg->se_tpg_tfo->tpg_get_default_depth(tpg);
@@ -281,15 +217,11 @@ static struct se_node_acl *target_alloc_node_acl(struct se_portal_group *tpg,
 	tpg->se_tpg_tfo->set_default_node_attributes(acl);
 
-	if (core_create_device_list_for_node(acl) < 0)
-		goto out_free_acl;
 	if (core_set_queue_depth_for_node(tpg, acl) < 0)
-		goto out_free_device_list;
+		goto out_free_acl;
 
 	return acl;
 
-out_free_device_list:
-	core_free_device_list_for_node(acl, tpg);
 out_free_acl:
 	kfree(acl);
 	return NULL;
@@ -454,7 +386,6 @@ void core_tpg_del_initiator_node_acl(struct se_node_acl *acl)
 	wait_for_completion(&acl->acl_free_comp);
 
 	core_tpg_wait_for_nacl_pr_ref(acl);
-	core_clear_initiator_node_from_tpg(acl, tpg);
 	core_free_device_list_for_node(acl, tpg);
 
 	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
......
@@ -50,9 +50,17 @@ target_scsi3_ua_check(struct se_cmd *cmd)
 	if (!nacl)
 		return 0;
 
-	deve = nacl->device_list[cmd->orig_fe_lun];
-	if (!atomic_read(&deve->ua_count))
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return 0;
+	}
+	if (!atomic_read(&deve->ua_count)) {
+		rcu_read_unlock();
 		return 0;
+	}
+	rcu_read_unlock();
 	/*
 	 * From sam4r14, section 5.14 Unit attention condition:
 	 *
@@ -103,9 +111,12 @@ int core_scsi3_ua_allocate(
 	ua->ua_asc = asc;
 	ua->ua_ascq = ascq;
 
-	spin_lock_irq(&nacl->device_list_lock);
-	deve = nacl->device_list[unpacked_lun];
-
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, unpacked_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
 	spin_lock(&deve->ua_lock);
 	list_for_each_entry_safe(ua_p, ua_tmp, &deve->ua_list, ua_nacl_list) {
 		/*
@@ -113,7 +124,7 @@ int core_scsi3_ua_allocate(
		 */
 		if ((ua_p->ua_asc == asc) && (ua_p->ua_ascq == ascq)) {
 			spin_unlock(&deve->ua_lock);
-			spin_unlock_irq(&nacl->device_list_lock);
+			rcu_read_unlock();
 			kmem_cache_free(se_ua_cache, ua);
 			return 0;
 		}
@@ -158,14 +169,13 @@ int core_scsi3_ua_allocate(
 		list_add_tail(&ua->ua_nacl_list,
 				&deve->ua_list);
 		spin_unlock(&deve->ua_lock);
-		spin_unlock_irq(&nacl->device_list_lock);
 
 		atomic_inc_mb(&deve->ua_count);
+		rcu_read_unlock();
 		return 0;
 	}
 	list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
 	spin_unlock(&deve->ua_lock);
-	spin_unlock_irq(&nacl->device_list_lock);
 
 	pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
 		" 0x%02x, ASCQ: 0x%02x\n",
@@ -173,6 +183,7 @@ int core_scsi3_ua_allocate(
 		asc, ascq);
 
 	atomic_inc_mb(&deve->ua_count);
+	rcu_read_unlock();
 	return 0;
 }
@@ -210,10 +221,14 @@ void core_scsi3_ua_for_check_condition(
 	if (!nacl)
 		return;
 
-	spin_lock_irq(&nacl->device_list_lock);
-	deve = nacl->device_list[cmd->orig_fe_lun];
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return;
+	}
 	if (!atomic_read(&deve->ua_count)) {
-		spin_unlock_irq(&nacl->device_list_lock);
+		rcu_read_unlock();
 		return;
 	}
 	/*
@@ -249,7 +264,7 @@ void core_scsi3_ua_for_check_condition(
 		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
-	spin_unlock_irq(&nacl->device_list_lock);
+	rcu_read_unlock();
 
 	pr_debug("[%s]: %s UNIT ATTENTION condition with"
 		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
@@ -278,10 +293,14 @@ int core_scsi3_ua_clear_for_request_sense(
 	if (!nacl)
 		return -EINVAL;
 
-	spin_lock_irq(&nacl->device_list_lock);
-	deve = nacl->device_list[cmd->orig_fe_lun];
+	rcu_read_lock();
+	deve = target_nacl_find_deve(nacl, cmd->orig_fe_lun);
+	if (!deve) {
+		rcu_read_unlock();
+		return -EINVAL;
+	}
 	if (!atomic_read(&deve->ua_count)) {
-		spin_unlock_irq(&nacl->device_list_lock);
+		rcu_read_unlock();
 		return -EPERM;
 	}
 	/*
@@ -307,7 +326,7 @@ int core_scsi3_ua_clear_for_request_sense(
 		atomic_dec_mb(&deve->ua_count);
 	}
 	spin_unlock(&deve->ua_lock);
-	spin_unlock_irq(&nacl->device_list_lock);
+	rcu_read_unlock();
 
 	pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
 		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
......
@@ -101,7 +101,7 @@ int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
 sense_reason_t	transport_generic_map_mem_to_cmd(struct se_cmd *,
 		struct scatterlist *, u32, struct scatterlist *, u32);
 void	array_free(void *array, int n);
+bool	target_lun_is_rdonly(struct se_cmd *);
 
 /* From target_core_configfs.c to setup default backend config_item_types */
 void	target_core_setup_sub_cits(struct se_subsystem_api *);
......
@@ -160,10 +160,8 @@ enum se_cmd_flags_table {
 
 /* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
 enum transport_lunflags_table {
 	TRANSPORT_LUNFLAGS_NO_ACCESS = 0x00,
-	TRANSPORT_LUNFLAGS_INITIATOR_ACCESS = 0x01,
-	TRANSPORT_LUNFLAGS_READ_ONLY = 0x02,
-	TRANSPORT_LUNFLAGS_READ_WRITE = 0x04,
+	TRANSPORT_LUNFLAGS_READ_ONLY = 0x01,
+	TRANSPORT_LUNFLAGS_READ_WRITE = 0x02,
 };
 
 /*
@@ -584,10 +582,10 @@ struct se_node_acl {
 	char			acl_tag[MAX_ACL_TAG_SIZE];
 	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
 	atomic_t		acl_pr_ref_count;
-	struct se_dev_entry	**device_list;
+	struct hlist_head	lun_entry_hlist;
 	struct se_session	*nacl_sess;
 	struct se_portal_group	*se_tpg;
-	spinlock_t		device_list_lock;
+	struct mutex		lun_entry_mutex;
 	spinlock_t		nacl_sess_lock;
 	struct config_group	acl_group;
 	struct config_group	acl_attrib_group;
@@ -644,20 +642,23 @@ struct se_dev_entry {
 	/* See transport_lunflags_table */
 	u32			lun_flags;
 	u32			mapped_lun;
-	u32			total_cmds;
 	u64			pr_res_key;
 	u64			creation_time;
 	u32			attach_count;
-	u64			read_bytes;
-	u64			write_bytes;
+	atomic_long_t		total_cmds;
+	atomic_long_t		read_bytes;
+	atomic_long_t		write_bytes;
 	atomic_t		ua_count;
 	/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
-	atomic_t		pr_ref_count;
-	struct se_lun_acl	*se_lun_acl;
+	struct kref		pr_kref;
+	struct completion	pr_comp;
+	struct se_lun_acl __rcu	*se_lun_acl;
 	spinlock_t		ua_lock;
-	struct se_lun		*se_lun;
+	struct se_lun __rcu	*se_lun;
 	struct list_head	alua_port_list;
 	struct list_head	ua_list;
+	struct hlist_node	link;
+	struct rcu_head		rcu_head;
 };
 
 struct se_dev_attrib {
@@ -703,6 +704,7 @@ struct se_port_stat_grps {
 };
 
 struct se_lun {
+	u16			lun_rtpi;
 #define SE_LUN_LINK_MAGIC			0xffff7771
 	u32			lun_link_magic;
 	/* See transport_lun_status_table */
@@ -710,6 +712,7 @@ struct se_lun {
 	u32			lun_access;
 	u32			lun_flags;
 	u32			unpacked_lun;
+	u32			lun_index;
 	atomic_t		lun_acl_count;
 	spinlock_t		lun_acl_lock;
 	spinlock_t		lun_sep_lock;
......