[PATCH 2/7] nvmet-fc: use per-target workqueue when removing associations
Hannes Reinecke
hare at suse.de
Tue Sep 22 08:14:56 EDT 2020
When removing target ports, all outstanding associations need to be
terminated / cleaned up. As this involves several exchanges on the
wire, a synchronization point is required to establish when these
exchanges have run their course and it's safe to delete the association.
So add a per-target workqueue and flush this workqueue to ensure
the association can really be deleted.
Signed-off-by: Hannes Reinecke <hare at suse.de>
---
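Note (not part of the patch): below is a minimal, self-contained sketch of
the flush-as-barrier pattern this patch relies on, i.e. queue the
per-association delete work on a dedicated workqueue and then flush that
workqueue so the caller proceeds only once every teardown has finished.
All names here (demo_assoc, demo_assoc_delete, demo_wq, demo_init) are made
up for illustration; only the workqueue API calls are real kernel
interfaces, and the locking and reference counting the real driver does
around assoc_list are omitted.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct demo_assoc {
	struct work_struct	del_work;
	int			id;
};

/* stands in for the per-target tgtport->work_q added by this patch */
static struct workqueue_struct *demo_wq;

static void demo_assoc_delete(struct work_struct *work)
{
	struct demo_assoc *assoc =
		container_of(work, struct demo_assoc, del_work);

	/* in the driver, the association teardown exchanges run here */
	pr_info("demo: association %d torn down\n", assoc->id);
	kfree(assoc);
}

static int __init demo_init(void)
{
	int i;

	/* one workqueue per "target", like alloc_workqueue("ntfc%d", ...) */
	demo_wq = alloc_workqueue("demo_wq", 0, 0);
	if (!demo_wq)
		return -ENOMEM;

	for (i = 0; i < 3; i++) {
		struct demo_assoc *assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);

		if (!assoc)
			break;
		assoc->id = i;
		INIT_WORK(&assoc->del_work, demo_assoc_delete);
		queue_work(demo_wq, &assoc->del_work);
	}

	/*
	 * Synchronization point: flush_workqueue() returns only after every
	 * queued del_work has completed, so it is now safe to tear down the
	 * surrounding object (here, just destroy the queue).
	 */
	flush_workqueue(demo_wq);
	destroy_workqueue(demo_wq);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Using a per-target queue instead of the system workqueue means the
flush_workqueue() calls added below only wait for this port's own del_work
items, so one port's teardown cannot stall behind unrelated work.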
drivers/nvme/target/fc.c | 29 +++++++++++++++++++++++------
1 file changed, 23 insertions(+), 6 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 04ec0076ae59..63f5deb3b68a 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -99,6 +99,7 @@ struct nvmet_fc_tgtport {
struct list_head tgt_list; /* nvmet_fc_target_list */
struct device *dev; /* dev for dma mapping */
struct nvmet_fc_target_template *ops;
+ struct workqueue_struct *work_q;
struct nvmet_fc_ls_iod *iod;
spinlock_t lock;
@@ -1403,10 +1404,17 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
ida_init(&newrec->assoc_cnt);
newrec->max_sg_cnt = template->max_sgl_segments;
+ newrec->work_q = alloc_workqueue("ntfc%d", 0, 0,
+ newrec->fc_target_port.port_num);
+ if (!newrec->work_q) {
+ ret = -ENOMEM;
+ goto out_free_newrec;
+ }
+
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
ret = -ENOMEM;
- goto out_free_newrec;
+ goto out_free_workq;
}
nvmet_fc_portentry_rebind_tgt(newrec);
@@ -1418,6 +1426,8 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
*portptr = &newrec->fc_target_port;
return 0;
+out_free_workq:
+ destroy_workqueue(newrec->work_q);
out_free_newrec:
put_device(dev);
out_ida_put:
@@ -1443,6 +1453,8 @@ nvmet_fc_free_tgtport(struct kref *ref)
list_del(&tgtport->tgt_list);
spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
+ destroy_workqueue(tgtport->work_q);
+
nvmet_fc_free_ls_iodlist(tgtport);
/* let the LLDD know we've finished tearing it down */
@@ -1481,11 +1493,13 @@ __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
&tgtport->assoc_list, a_list) {
if (!nvmet_fc_tgt_a_get(assoc))
continue;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(tgtport->work_q, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
+
+ flush_workqueue(tgtport->work_q);
}
/**
@@ -1536,12 +1550,14 @@ nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
continue;
assoc->hostport->invalid = 1;
noassoc = false;
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(tgtport->work_q, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
}
spin_unlock_irqrestore(&tgtport->lock, flags);
+ flush_workqueue(tgtport->work_q);
+
/* if there's nothing to wait for - call the callback */
if (noassoc && tgtport->ops->host_release)
tgtport->ops->host_release(hosthandle);
@@ -1579,14 +1595,15 @@ nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
}
spin_unlock_irqrestore(&tgtport->lock, flags);
- nvmet_fc_tgtport_put(tgtport);
-
if (found_ctrl) {
- if (!schedule_work(&assoc->del_work))
+ if (!queue_work(tgtport->work_q, &assoc->del_work))
/* already deleting - release local reference */
nvmet_fc_tgt_a_put(assoc);
+ flush_workqueue(tgtport->work_q);
+ nvmet_fc_tgtport_put(tgtport);
return;
}
+ nvmet_fc_tgtport_put(tgtport);
spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
}
--
2.16.4