Commit 2be18149 authored by Nicholas Bellinger

tcm_fc: Convert to wake_up_process and schedule_timeout_interruptible

This patch converts ft_queue_cmd() to use wake_up_process() and
ft_thread() to use schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT)
instead of wait_event_interruptible().  This fixes a potential race in the
wait_event_interruptible() condition on qobj->queue_cnt in ft_thread().

This patch also drops the unnecessary set_user_nice(current, -20) in
ft_thread(), and drops extra () around two if (!(acl)) conditionals in
tfc_conf.c.
Reported-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Nicholas A. Bellinger <nab@linux-iscsi.org>
parent a1fa3759
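
For readers unfamiliar with the idiom, the conversion below follows the standard
producer/consumer kthread pattern: the producer queues work under a spinlock and
calls wake_up_process() on the consumer thread, while the consumer parks itself in
schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT) and re-checks its queue each
time it is woken or kthread_stop() is called.  A minimal, self-contained sketch of
that pattern follows; the example_* names are illustrative only and are not part
of tcm_fc.

/*
 * Minimal sketch of the wake_up_process()/schedule_timeout_interruptible()
 * kthread idiom used by this patch.  The example_* names are illustrative
 * only and are not part of tcm_fc.
 */
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_item {
	struct list_head list;
};

static LIST_HEAD(example_queue);
static DEFINE_SPINLOCK(example_lock);
static struct task_struct *example_task;

/* Producer: queue an item under the lock, then wake the consumer thread. */
static void example_queue_item(struct example_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	list_add_tail(&item->list, &example_queue);
	spin_unlock_irqrestore(&example_lock, flags);

	wake_up_process(example_task);
}

/* Consumer: sleep until woken, then drain whatever has been queued. */
static int example_thread(void *arg)
{
	struct example_item *item;
	unsigned long flags;

	while (!kthread_should_stop()) {
		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
		if (kthread_should_stop())
			break;

		for (;;) {
			spin_lock_irqsave(&example_lock, flags);
			if (list_empty(&example_queue)) {
				spin_unlock_irqrestore(&example_lock, flags);
				break;
			}
			item = list_first_entry(&example_queue,
						struct example_item, list);
			list_del(&item->list);
			spin_unlock_irqrestore(&example_lock, flags);

			kfree(item);		/* stand-in for real work */
		}
	}
	return 0;
}

static int __init example_init(void)
{
	/* Producers must not run before the thread exists. */
	example_task = kthread_run(example_thread, NULL, "example_thread");
	if (IS_ERR(example_task))
		return PTR_ERR(example_task);
	return 0;
}

static void __exit example_exit(void)
{
	/* kthread_stop() wakes the thread, so the loop above terminates. */
	kthread_stop(example_task);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Because the wakeup targets the task directly rather than a waitqueue condition,
there is no condition expression to race against; the consumer simply re-scans
its queue after every wakeup, which is exactly what ft_thread() does below.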
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -94,15 +94,17 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)
 
 static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd)
 {
-	struct se_queue_obj *qobj;
+	struct ft_tpg *tpg = sess->tport->tpg;
+	struct se_queue_obj *qobj = &tpg->qobj;
 	unsigned long flags;
 
 	qobj = &sess->tport->tpg->qobj;
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
 	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list);
-	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 	atomic_inc(&qobj->queue_cnt);
-	wake_up_interruptible(&qobj->thread_wq);
+	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
+
+	wake_up_process(tpg->thread);
 }
 
 static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj)
@@ -688,15 +690,12 @@ int ft_thread(void *arg)
 	struct ft_tpg *tpg = arg;
 	struct se_queue_obj *qobj = &tpg->qobj;
 	struct ft_cmd *cmd;
-	int ret;
-
-	set_user_nice(current, -20);
 
 	while (!kthread_should_stop()) {
-		ret = wait_event_interruptible(qobj->thread_wq,
-			atomic_read(&qobj->queue_cnt) || kthread_should_stop());
-		if (ret < 0 || kthread_should_stop())
+		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
+		if (kthread_should_stop())
 			goto out;
+
 		cmd = ft_dequeue_cmd(qobj);
 		if (cmd)
 			ft_exec_req(cmd);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -223,7 +223,7 @@ static struct se_node_acl *ft_add_acl(
 		return ERR_PTR(-EINVAL);
 
 	acl = kzalloc(sizeof(struct ft_node_acl), GFP_KERNEL);
-	if (!(acl))
+	if (!acl)
 		return ERR_PTR(-ENOMEM);
 
 	acl->node_auth.port_name = wwpn;
@@ -280,7 +280,7 @@ struct se_node_acl *ft_tpg_alloc_fabric_acl(struct se_portal_group *se_tpg)
 	struct ft_node_acl *acl;
 
 	acl = kzalloc(sizeof(*acl), GFP_KERNEL);
-	if (!(acl)) {
+	if (!acl) {
 		printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
 		return NULL;
 	}