Commit 1526d9f1 authored by Mike Christie, committed by Martin K. Petersen

scsi: target: Make state_list per CPU

Make the state_list and its execute_task_lock per CPU, so that submissions
from different CPUs do not contend with each other.

Note: tcm_fc was passing TARGET_SCF_USE_CPUID but never set cpuid. The
assumption is that it wanted cpuid set to the CPU it was submitting from,
and with this patch it now gets that behavior by default.

[mkp: s/printk/pr_err/ + resolve COMPARE AND WRITE patch conflict]

Link: https://lore.kernel.org/r/1604257174-4524-8-git-send-email-michael.christie@oracle.com
Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 6f55b06f
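
The change follows a common sharding pattern: replace one device-wide
state_list/execute_task_lock pair with an nr_cpu_ids-sized array of
list/lock pairs indexed by the submitting CPU, and let the rare
abort/reset paths scan every per-CPU list. The sketch below is a minimal
standalone userspace illustration of that pattern, not kernel code: the
demo_* names are hypothetical, pthread mutexes stand in for spinlocks,
and a singly linked list stands in for list_head. The kernel's real
types are se_device_queue and se_device, shown in the diff below.

/*
 * Minimal userspace sketch of the per-CPU sharding pattern (hypothetical
 * demo_* names; pthread mutexes stand in for kernel spinlocks).
 */
#include <pthread.h>
#include <stdlib.h>

struct demo_node {
	struct demo_node	*next;
	int			tag;
};

struct demo_queue {			/* analogous to se_device_queue */
	struct demo_node	*head;	/* per-CPU state_list */
	pthread_mutex_t		lock;	/* per-CPU lock */
};

struct demo_dev {			/* analogous to se_device */
	int			queue_cnt;
	struct demo_queue	*queues;
};

static int demo_dev_init(struct demo_dev *dev, int ncpus)
{
	dev->queues = calloc(ncpus, sizeof(*dev->queues));
	if (!dev->queues)
		return -1;
	dev->queue_cnt = ncpus;
	for (int i = 0; i < ncpus; i++)
		pthread_mutex_init(&dev->queues[i].lock, NULL);
	return 0;
}

/* Fast path: only the submitting CPU's lock is taken, so submissions on
 * different CPUs no longer contend on one device-wide lock. */
static void demo_add(struct demo_dev *dev, struct demo_node *n, int cpu)
{
	struct demo_queue *q = &dev->queues[cpu % dev->queue_cnt];

	pthread_mutex_lock(&q->lock);
	n->next = q->head;
	q->head = n;
	pthread_mutex_unlock(&q->lock);
}

/* Slow path: aborts/resets are rare, so walking every queue is fine,
 * mirroring the loops added to core_tmr_abort_task(). */
static struct demo_node *demo_find(struct demo_dev *dev, int tag)
{
	for (int i = 0; i < dev->queue_cnt; i++) {
		pthread_mutex_lock(&dev->queues[i].lock);
		for (struct demo_node *n = dev->queues[i].head; n; n = n->next) {
			if (n->tag == tag) {
				pthread_mutex_unlock(&dev->queues[i].lock);
				return n;
			}
		}
		pthread_mutex_unlock(&dev->queues[i].lock);
	}
	return NULL;
}

int main(void)
{
	struct demo_dev dev;
	struct demo_node a = { .next = NULL, .tag = 42 };

	if (demo_dev_init(&dev, 4))
		return 1;
	demo_add(&dev, &a, 1);
	return demo_find(&dev, 42) ? 0 : 1;
}

The trade-off is the same one the patch makes: the hot submission path
touches only its own queue's lock, while TMR-style scans pay the cost of
walking all queues.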
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -721,11 +721,24 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 {
 	struct se_device *dev;
 	struct se_lun *xcopy_lun;
+	int i;
 
 	dev = hba->backend->ops->alloc_device(hba, name);
 	if (!dev)
 		return NULL;
 
+	dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
+	if (!dev->queues) {
+		dev->transport->free_device(dev);
+		return NULL;
+	}
+
+	dev->queue_cnt = nr_cpu_ids;
+	for (i = 0; i < dev->queue_cnt; i++) {
+		INIT_LIST_HEAD(&dev->queues[i].state_list);
+		spin_lock_init(&dev->queues[i].lock);
+	}
+
 	dev->se_hba = hba;
 	dev->transport = hba->backend->ops;
 	dev->transport_flags = dev->transport->transport_flags_default;
@@ -735,9 +748,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
 	INIT_LIST_HEAD(&dev->dev_sep_list);
 	INIT_LIST_HEAD(&dev->dev_tmr_list);
 	INIT_LIST_HEAD(&dev->delayed_cmd_list);
-	INIT_LIST_HEAD(&dev->state_list);
 	INIT_LIST_HEAD(&dev->qf_cmd_list);
-	spin_lock_init(&dev->execute_task_lock);
 	spin_lock_init(&dev->delayed_cmd_lock);
 	spin_lock_init(&dev->dev_reservation_lock);
 	spin_lock_init(&dev->se_port_lock);
@@ -1010,6 +1021,7 @@ void target_free_device(struct se_device *dev)
 	if (dev->transport->free_prot)
 		dev->transport->free_prot(dev);
 
+	kfree(dev->queues);
 	dev->transport->free_device(dev);
 }
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -121,14 +121,19 @@ void core_tmr_abort_task(
 	unsigned long flags;
 	bool rc;
 	u64 ref_tag;
+	int i;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry_safe(se_cmd, next, &dev->state_list, state_list) {
+	for (i = 0; i < dev->queue_cnt; i++) {
+		spin_lock_irqsave(&dev->queues[i].lock, flags);
+		list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list,
+					 state_list) {
+			if (se_sess != se_cmd->se_sess)
+				continue;
 
-		if (se_sess != se_cmd->se_sess)
-			continue;
-
-		/* skip task management functions, including tmr->task_cmd */
-		if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
-			continue;
+			/*
+			 * skip task management functions, including
+			 * tmr->task_cmd
+			 */
+			if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+				continue;
@@ -136,7 +141,7 @@ void core_tmr_abort_task(
 			if (tmr->ref_task_tag != ref_tag)
 				continue;
 
-		printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
-			se_cmd->se_tfo->fabric_name, ref_tag);
+			pr_err("ABORT_TASK: Found referenced %s task_tag: %llu\n",
+			       se_cmd->se_tfo->fabric_name, ref_tag);
 
 			spin_lock(&se_sess->sess_cmd_lock);
@@ -147,16 +152,14 @@ void core_tmr_abort_task(
 			list_move_tail(&se_cmd->state_list, &aborted_list);
 			se_cmd->state_active = false;
+			spin_unlock_irqrestore(&dev->queues[i].lock, flags);
 
-		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
-
 			/*
-		 * Ensure that this ABORT request is visible to the LU RESET
-		 * code.
+			 * Ensure that this ABORT request is visible to the LU
+			 * RESET code.
 			 */
 			if (!tmr->tmr_dev)
-			WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) <
-				     0);
+				WARN_ON_ONCE(transport_lookup_tmr_lun(tmr->task_cmd) < 0);
 
 			if (dev->transport->tmr_notify)
 				dev->transport->tmr_notify(dev, TMR_ABORT_TASK,
@@ -165,13 +168,14 @@ void core_tmr_abort_task(
 			list_del_init(&se_cmd->state_list);
 			target_put_cmd_and_wait(se_cmd);
 
-		printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
-			" ref_tag: %llu\n", ref_tag);
+			pr_err("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for ref_tag: %llu\n",
+			       ref_tag);
 			tmr->response = TMR_FUNCTION_COMPLETE;
 			atomic_long_inc(&dev->aborts_complete);
 			return;
 		}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
+	}
 
 	if (dev->transport->tmr_notify)
 		dev->transport->tmr_notify(dev, TMR_ABORT_TASK, &aborted_list);
@@ -273,7 +277,7 @@ static void core_tmr_drain_state_list(
 	struct se_session *sess;
 	struct se_cmd *cmd, *next;
 	unsigned long flags;
-	int rc;
+	int rc, i;
 
 	/*
 	 * Complete outstanding commands with TASK_ABORTED SAM status.
@@ -297,13 +301,16 @@ static void core_tmr_drain_state_list(
 	 * Note that this seems to be independent of TAS (Task Aborted Status)
 	 * in the Control Mode Page.
 	 */
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
-	list_for_each_entry_safe(cmd, next, &dev->state_list, state_list) {
+	for (i = 0; i < dev->queue_cnt; i++) {
+		spin_lock_irqsave(&dev->queues[i].lock, flags);
+		list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list,
+					 state_list) {
 			/*
 			 * For PREEMPT_AND_ABORT usage, only process commands
 			 * with a matching reservation key.
 			 */
-		if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
-			continue;
+			if (target_check_cdb_and_preempt(preempt_and_abort_list,
+							 cmd))
+				continue;
 
 			/*
@@ -325,7 +332,8 @@ static void core_tmr_drain_state_list(
 			list_move_tail(&cmd->state_list, &drain_task_list);
 			cmd->state_active = false;
 		}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+		spin_unlock_irqrestore(&dev->queues[i].lock, flags);
+	}
 
 	if (dev->transport->tmr_notify)
 		dev->transport->tmr_notify(dev, preempt_and_abort_list ?
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -659,12 +659,12 @@ static void target_remove_from_state_list(struct se_cmd *cmd)
 	if (!dev)
 		return;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
 	if (cmd->state_active) {
 		list_del(&cmd->state_list);
 		cmd->state_active = false;
 	}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
 }
 
 /*
@@ -875,10 +875,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
 		  target_complete_failure_work);
 
-	if (cmd->se_cmd_flags & SCF_USE_CPUID)
-		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
-	else
-		queue_work(target_completion_wq, &cmd->work);
+	queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
@@ -906,12 +903,13 @@ static void target_add_to_state_list(struct se_cmd *cmd)
 	struct se_device *dev = cmd->se_dev;
 	unsigned long flags;
 
-	spin_lock_irqsave(&dev->execute_task_lock, flags);
+	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
 	if (!cmd->state_active) {
-		list_add_tail(&cmd->state_list, &dev->state_list);
+		list_add_tail(&cmd->state_list,
+			      &dev->queues[cmd->cpuid].state_list);
 		cmd->state_active = true;
 	}
-	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
 }
 
 /*
@@ -1399,6 +1397,9 @@ void transport_init_se_cmd(
 	cmd->sense_buffer = sense_buffer;
 	cmd->orig_fe_lun = unpacked_lun;
 
+	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
+		cmd->cpuid = smp_processor_id();
+
 	cmd->state_active = false;
 }
 EXPORT_SYMBOL(transport_init_se_cmd);
@@ -1616,6 +1617,9 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 	BUG_ON(!se_tpg);
 	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
 	BUG_ON(in_interrupt());
+
+	if (flags & TARGET_SCF_USE_CPUID)
+		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
 	/*
 	 * Initialize se_cmd for target operation. From this point
 	 * exceptions are handled by sending exception status via
@@ -1625,11 +1629,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 			  data_length, data_dir, task_attr, sense,
 			  unpacked_lun);
 
-	if (flags & TARGET_SCF_USE_CPUID)
-		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
-	else
-		se_cmd->cpuid = WORK_CPU_UNBOUND;
-
 	if (flags & TARGET_SCF_UNKNOWN_SIZE)
 		se_cmd->unknown_data_length = 1;
 	/*
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -550,7 +550,7 @@ static void ft_send_work(struct work_struct *work)
 	if (target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, fcp->fc_cdb,
 			      &cmd->ft_sense_buffer[0], scsilun_to_int(&fcp->fc_lun),
 			      ntohl(fcp->fc_dl), task_attr, data_dir,
-			      TARGET_SCF_ACK_KREF | TARGET_SCF_USE_CPUID))
+			      TARGET_SCF_ACK_KREF))
 		goto err;
 
 	pr_debug("r_ctl %x target_submit_cmd %p\n", fh->fh_r_ctl, cmd);
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -540,6 +540,10 @@ struct se_cmd {
 	unsigned int		t_prot_nents;
 	sense_reason_t		pi_err;
 	u64			sense_info;
+	/*
+	 * CPU LIO will execute the cmd on. Defaults to the CPU the cmd is
+	 * initialized on. Drivers can override.
+	 */
 	int			cpuid;
 };
@@ -760,6 +764,11 @@ struct se_dev_stat_grps {
 	struct config_group scsi_lu_group;
 };
 
+struct se_device_queue {
+	struct list_head	state_list;
+	spinlock_t		lock;
+};
+
 struct se_device {
 	/* RELATIVE TARGET PORT IDENTIFER Counter */
 	u16			dev_rpti_counter;
@@ -792,7 +801,6 @@ struct se_device {
 	atomic_t		dev_qf_count;
 	u32			export_count;
 	spinlock_t		delayed_cmd_lock;
-	spinlock_t		execute_task_lock;
 	spinlock_t		dev_reservation_lock;
 	unsigned int		dev_reservation_flags;
 #define DRF_SPC2_RESERVATIONS			0x00000001
@@ -811,7 +819,6 @@ struct se_device {
 	struct list_head	dev_tmr_list;
 	struct work_struct	qf_work_queue;
 	struct list_head	delayed_cmd_list;
-	struct list_head	state_list;
 	struct list_head	qf_cmd_list;
 	/* Pointer to associated SE HBA */
 	struct se_hba		*se_hba;
@@ -838,6 +845,8 @@ struct se_device {
 	/* For se_lun->lun_se_dev RCU read-side critical access */
 	u32			hba_index;
 	struct rcu_head		rcu_head;
+	int			queue_cnt;
+	struct se_device_queue	*queues;
 };
 
 struct se_hba {
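
One subtlety in the target_core_transport.c hunks above: cmd->cpuid is now
assigned inside transport_init_se_cmd(), defaulting to the CPU the command
is initialized on, so target_submit_cmd_map_sgls() has to translate
TARGET_SCF_USE_CPUID into SCF_USE_CPUID before the init call rather than
after it, and the WORK_CPU_UNBOUND fallback disappears because
target_complete_cmd() can now always use queue_work_on(). Below is a small
standalone sketch of that default-unless-flagged rule, again with
hypothetical demo_* names, not kernel code:

#define _GNU_SOURCE		/* for sched_getcpu() */
#include <sched.h>
#include <stdio.h>

#define DEMO_SCF_USE_CPUID	(1 << 0)	/* stands in for SCF_USE_CPUID */

struct demo_cmd {
	unsigned int	flags;
	int		cpuid;
};

/* Mirrors the new transport_init_se_cmd() logic: record the submitting
 * CPU unless the caller already picked one and set the flag first. */
static void demo_init_cmd(struct demo_cmd *cmd)
{
	if (!(cmd->flags & DEMO_SCF_USE_CPUID))
		cmd->cpuid = sched_getcpu();
}

int main(void)
{
	struct demo_cmd by_default = { .flags = 0, .cpuid = -1 };
	struct demo_cmd pinned = { .flags = DEMO_SCF_USE_CPUID, .cpuid = 2 };

	demo_init_cmd(&by_default);	/* takes the CPU we are running on */
	demo_init_cmd(&pinned);		/* keeps the caller's choice: 2 */
	printf("default: cpu %d, pinned: cpu %d\n",
	       by_default.cpuid, pinned.cpuid);
	return 0;
}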