Commit ece0278c authored by James Smart, committed by Christoph Hellwig

nvmet-fc: remove redundant del_work_active flag

The transport has a del_work_active flag to avoid duplicate scheduling
of the del_work item. This is redundant: schedule_work() already
returns false when the work item is pending, so the workqueue core
performs the same check itself.

Remove the del_work_active flag.
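
For context, schedule_work() returns false when the work item is already
queued, which is exactly the deduplication the flag was re-implementing.
Below is a minimal sketch of the resulting pattern; the demo_assoc type
and demo_* helpers are hypothetical stand-ins for illustration, not the
driver's actual code:

	#include <linux/workqueue.h>
	#include <linux/kref.h>
	#include <linux/slab.h>

	/* Hypothetical stand-in for struct nvmet_fc_tgt_assoc. */
	struct demo_assoc {
		struct kref		ref;
		struct work_struct	del_work;
	};

	static void demo_assoc_free(struct kref *ref)
	{
		kfree(container_of(ref, struct demo_assoc, ref));
	}

	static void demo_delete_work(struct work_struct *work)
	{
		struct demo_assoc *assoc =
			container_of(work, struct demo_assoc, del_work);

		/* ... tear down the association ... */
		kref_put(&assoc->ref, demo_assoc_free);	/* drop the work's ref */
	}

	static void demo_schedule_delete(struct demo_assoc *assoc)
	{
		/* take a reference for the work item to hold */
		if (!kref_get_unless_zero(&assoc->ref))
			return;
		/*
		 * schedule_work() returns false if del_work is already
		 * pending, so duplicate scheduling is rejected by the
		 * workqueue core; no separate "active" flag is needed.
		 */
		if (!schedule_work(&assoc->del_work))
			/* already queued - release the reference we took */
			kref_put(&assoc->ref, demo_assoc_free);
	}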
Signed-off-by: James Smart <jsmart2021@gmail.com>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 34efa232
@@ -167,7 +167,6 @@ struct nvmet_fc_tgt_assoc {
 	struct nvmet_fc_tgt_queue	*queues[NVMET_NR_QUEUES + 1];
 	struct kref			ref;
 	struct work_struct		del_work;
-	atomic_t			del_work_active;
 };
@@ -1090,7 +1089,6 @@ nvmet_fc_delete_assoc(struct work_struct *work)
 		container_of(work, struct nvmet_fc_tgt_assoc, del_work);
 
 	nvmet_fc_delete_target_assoc(assoc);
-	atomic_set(&assoc->del_work_active, 0);
 	nvmet_fc_tgt_a_put(assoc);
 }
@@ -1123,7 +1121,6 @@ nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport, void *hosthandle)
 	INIT_LIST_HEAD(&assoc->a_list);
 	kref_init(&assoc->ref);
 	INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
-	atomic_set(&assoc->del_work_active, 0);
 	atomic_set(&assoc->terminating, 0);
 
 	while (needrandom) {
@@ -1478,22 +1475,16 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
 {
 	struct nvmet_fc_tgt_assoc *assoc, *next;
 	unsigned long flags;
-	int ret;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
 	list_for_each_entry_safe(assoc, next,
 				&tgtport->assoc_list, a_list) {
 		if (!nvmet_fc_tgt_a_get(assoc))
 			continue;
-		ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
-		if (ret == 0) {
-			if (!schedule_work(&assoc->del_work))
-				nvmet_fc_tgt_a_put(assoc);
-		} else {
-			/* already deleting - release local reference */
+		if (!schedule_work(&assoc->del_work))
+			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
-		}
 	}
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 }
@@ -1534,7 +1525,6 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 	struct nvmet_fc_tgt_assoc *assoc, *next;
 	unsigned long flags;
 	bool noassoc = true;
-	int ret;
 
 	spin_lock_irqsave(&tgtport->lock, flags);
 	list_for_each_entry_safe(assoc, next,
@@ -1546,15 +1536,10 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
 			continue;
 		assoc->hostport->invalid = 1;
 		noassoc = false;
-		ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
-		if (ret == 0) {
-			if (!schedule_work(&assoc->del_work))
-				nvmet_fc_tgt_a_put(assoc);
-		} else {
-			/* already deleting - release local reference */
+		if (!schedule_work(&assoc->del_work))
+			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
-		}
 	}
 	spin_unlock_irqrestore(&tgtport->lock, flags);
 
 	/* if there's nothing to wait for - call the callback */
@@ -1574,7 +1559,6 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 	struct nvmet_fc_tgt_queue *queue;
 	unsigned long flags;
 	bool found_ctrl = false;
-	int ret;
 
 	/* this is a bit ugly, but don't want to make locks layered */
 	spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
@@ -1598,14 +1582,9 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
 	nvmet_fc_tgtport_put(tgtport);
 
 	if (found_ctrl) {
-		ret = atomic_cmpxchg(&assoc->del_work_active, 0, 1);
-		if (ret == 0) {
-			if (!schedule_work(&assoc->del_work))
-				nvmet_fc_tgt_a_put(assoc);
-		} else {
-			/* already deleting - release local reference */
+		if (!schedule_work(&assoc->del_work))
+			/* already deleting - release local reference */
 			nvmet_fc_tgt_a_put(assoc);
-		}
 		return;
 	}
...