[PATCH 4/7] nvmet-fc: use feature flag for virtual LLDD
From: Hannes Reinecke <hare at suse.de>
Date: Tue Sep 22 08:14:58 EDT 2020
Virtual LLDDs like fcloop don't need to do DMA, but might still
want to expose a device. So add a new feature flag,
NVMET_FCTGTFEAT_VIRTUAL_DMA, to mark these LLDDs instead of
relying on a non-existent struct device.
Signed-off-by: Hannes Reinecke <hare at suse.de>
---
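Note for reviewers (not part of the commit message): this swaps the
implicit "tgtport->dev == NULL means virtual" contract for an explicit
feature flag. Condensed from the fc.c hunks below, the before/after of
the map-single wrapper; all identifiers are the ones in the diff,
nothing here is new API:

    /* before: a NULL struct device doubled as the "virtual" marker */
    return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;

    /* after: the LLDD opts in via its target template, e.g. fcloop's
     * .target_features = NVMET_FCTGTFEAT_VIRTUAL_DMA, so a virtual
     * LLDD may now carry a real struct device without enabling DMA.
     */
    if (fc_lldd_is_virtual(tgtport))
        return (dma_addr_t)0L;  /* no mapping needed */
    return dma_map_single(tgtport->dev, ptr, size, dir);
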
drivers/nvme/target/fc.c | 93 +++++++++++++++++++++++-------------------
drivers/nvme/target/fcloop.c | 2 +-
include/linux/nvme-fc-driver.h | 2 +
3 files changed, 55 insertions(+), 42 deletions(-)
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 63f5deb3b68a..6f5784767d35 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -273,41 +273,50 @@ static void nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
* in the scatter list, setting all dma addresses to 0.
*/
+static inline bool fc_lldd_is_virtual(struct nvmet_fc_tgtport *tgtport)
+{
+ return !!(tgtport->ops->target_features & NVMET_FCTGTFEAT_VIRTUAL_DMA);
+}
+
static inline dma_addr_t
-fc_dma_map_single(struct device *dev, void *ptr, size_t size,
+fc_dma_map_single(struct nvmet_fc_tgtport *tgtport, void *ptr, size_t size,
enum dma_data_direction dir)
{
- return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
+ if (fc_lldd_is_virtual(tgtport))
+ return (dma_addr_t)0L;
+ return dma_map_single(tgtport->dev, ptr, size, dir);
}
static inline int
-fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+fc_dma_mapping_error(struct nvmet_fc_tgtport *tgtport, dma_addr_t dma_addr)
{
- return dev ? dma_mapping_error(dev, dma_addr) : 0;
+ if (fc_lldd_is_virtual(tgtport))
+ return 0;
+ return dma_mapping_error(tgtport->dev, dma_addr);
}
static inline void
-fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
+fc_dma_unmap_single(struct nvmet_fc_tgtport *tgtport, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
{
- if (dev)
- dma_unmap_single(dev, addr, size, dir);
+ if (!fc_lldd_is_virtual(tgtport))
+ dma_unmap_single(tgtport->dev, addr, size, dir);
}
static inline void
-fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
+fc_dma_sync_single_for_cpu(struct nvmet_fc_tgtport *tgtport, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
{
- if (dev)
- dma_sync_single_for_cpu(dev, addr, size, dir);
+ if (!fc_lldd_is_virtual(tgtport))
+ dma_sync_single_for_cpu(tgtport->dev, addr, size, dir);
}
static inline void
-fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
- enum dma_data_direction dir)
+fc_dma_sync_single_for_device(struct nvmet_fc_tgtport *tgtport, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
{
- if (dev)
- dma_sync_single_for_device(dev, addr, size, dir);
+ if (!fc_lldd_is_virtual(tgtport))
+ dma_sync_single_for_device(tgtport->dev, addr, size, dir);
}
/* pseudo dma_map_sg call */
@@ -329,18 +338,20 @@ fc_map_sg(struct scatterlist *sg, int nents)
}
static inline int
-fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+fc_dma_map_sg(struct nvmet_fc_tgtport *tgtport, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
{
- return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
+ if (fc_lldd_is_virtual(tgtport))
+ return fc_map_sg(sg, nents);
+ return dma_map_sg(tgtport->dev, sg, nents, dir);
}
static inline void
-fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
- enum dma_data_direction dir)
+fc_dma_unmap_sg(struct nvmet_fc_tgtport *tgtport, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir)
{
- if (dev)
- dma_unmap_sg(dev, sg, nents, dir);
+ if (!fc_lldd_is_virtual(tgtport))
+ dma_unmap_sg(tgtport->dev, sg, nents, dir);
}
@@ -368,7 +379,7 @@ __nvmet_fc_finish_ls_req(struct nvmet_fc_ls_req_op *lsop)
spin_unlock_irqrestore(&tgtport->lock, flags);
- fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ fc_dma_unmap_single(tgtport, lsreq->rqstdma,
(lsreq->rqstlen + lsreq->rsplen),
DMA_BIDIRECTIONAL);
@@ -391,10 +402,10 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
lsop->req_queued = false;
INIT_LIST_HEAD(&lsop->lsreq_list);
- lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr,
+ lsreq->rqstdma = fc_dma_map_single(tgtport, lsreq->rqstaddr,
lsreq->rqstlen + lsreq->rsplen,
DMA_BIDIRECTIONAL);
- if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma))
+ if (fc_dma_mapping_error(tgtport, lsreq->rqstdma))
return -EFAULT;
lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen;
@@ -420,7 +431,7 @@ __nvmet_fc_send_ls_req(struct nvmet_fc_tgtport *tgtport,
lsop->req_queued = false;
list_del(&lsop->lsreq_list);
spin_unlock_irqrestore(&tgtport->lock, flags);
- fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma,
+ fc_dma_unmap_single(tgtport, lsreq->rqstdma,
(lsreq->rqstlen + lsreq->rsplen),
DMA_BIDIRECTIONAL);
return ret;
@@ -555,10 +566,10 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1];
- iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
+ iod->rspdma = fc_dma_map_single(tgtport, iod->rspbuf,
sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
- if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
+ if (fc_dma_mapping_error(tgtport, iod->rspdma))
goto out_fail;
}
@@ -568,7 +579,7 @@ nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
kfree(iod->rqstbuf);
list_del(&iod->ls_rcv_list);
for (iod--, i--; i >= 0; iod--, i--) {
- fc_dma_unmap_single(tgtport->dev, iod->rspdma,
+ fc_dma_unmap_single(tgtport, iod->rspdma,
sizeof(*iod->rspbuf), DMA_TO_DEVICE);
kfree(iod->rqstbuf);
list_del(&iod->ls_rcv_list);
@@ -586,7 +597,7 @@ nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
int i;
for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
- fc_dma_unmap_single(tgtport->dev,
+ fc_dma_unmap_single(tgtport,
iod->rspdma, sizeof(*iod->rspbuf),
DMA_TO_DEVICE);
kfree(iod->rqstbuf);
@@ -640,12 +651,12 @@ nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
list_add_tail(&fod->fcp_list, &queue->fod_list);
spin_lock_init(&fod->flock);
- fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
+ fod->rspdma = fc_dma_map_single(tgtport, &fod->rspiubuf,
sizeof(fod->rspiubuf), DMA_TO_DEVICE);
- if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
+ if (fc_dma_mapping_error(tgtport, fod->rspdma)) {
list_del(&fod->fcp_list);
for (fod--, i--; i >= 0; fod--, i--) {
- fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ fc_dma_unmap_single(tgtport, fod->rspdma,
sizeof(fod->rspiubuf),
DMA_TO_DEVICE);
fod->rspdma = 0L;
@@ -666,7 +677,7 @@ nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
for (i = 0; i < queue->sqsize; fod++, i++) {
if (fod->rspdma)
- fc_dma_unmap_single(tgtport->dev, fod->rspdma,
+ fc_dma_unmap_single(tgtport, fod->rspdma,
sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
}
@@ -730,7 +741,7 @@ nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
struct nvmet_fc_defer_fcp_req *deferfcp;
unsigned long flags;
- fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
+ fc_dma_sync_single_for_cpu(tgtport, fod->rspdma,
sizeof(fod->rspiubuf), DMA_TO_DEVICE);
fcpreq->nvmet_fc_private = NULL;
@@ -1925,7 +1936,7 @@ nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
struct nvmet_fc_tgtport *tgtport = iod->tgtport;
- fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
+ fc_dma_sync_single_for_cpu(tgtport, iod->rspdma,
sizeof(*iod->rspbuf), DMA_TO_DEVICE);
nvmet_fc_free_ls_iod(tgtport, iod);
nvmet_fc_tgtport_put(tgtport);
@@ -1937,7 +1948,7 @@ nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
{
int ret;
- fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
+ fc_dma_sync_single_for_device(tgtport, iod->rspdma,
sizeof(*iod->rspbuf), DMA_TO_DEVICE);
ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
@@ -2091,7 +2102,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
fod->data_sg = sg;
fod->data_sg_cnt = nent;
- fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
+ fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport, sg, nent,
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
/* note: write from initiator perspective */
@@ -2109,7 +2120,7 @@ nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
if (!fod->data_sg || !fod->data_sg_cnt)
return;
- fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
+ fc_dma_unmap_sg(fod->tgtport, fod->data_sg, fod->data_sg_cnt,
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
sgl_free(fod->data_sg);
@@ -2193,7 +2204,7 @@ nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
fod->fcpreq->rsplen = sizeof(*ersp);
}
- fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
+ fc_dma_sync_single_for_device(tgtport, fod->rspdma,
sizeof(fod->rspiubuf), DMA_TO_DEVICE);
}
diff --git a/drivers/nvme/target/fcloop.c b/drivers/nvme/target/fcloop.c
index e56f323fa7d4..2ccb941efb21 100644
--- a/drivers/nvme/target/fcloop.c
+++ b/drivers/nvme/target/fcloop.c
@@ -1043,7 +1043,7 @@ static struct nvmet_fc_target_template tgttemplate = {
.max_dif_sgl_segments = FCLOOP_SGL_SEGS,
.dma_boundary = FCLOOP_DMABOUND_4G,
/* optional features */
- .target_features = 0,
+ .target_features = NVMET_FCTGTFEAT_VIRTUAL_DMA,
/* sizes of additional private data for data structures */
.target_priv_sz = sizeof(struct fcloop_tport),
.lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 2a38f2b477a5..675c7ef6df17 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -707,6 +707,8 @@ enum {
* sequence in one LLDD operation. Errors during Data
* sequence transmit must not allow RSP sequence to be sent.
*/
+ NVMET_FCTGTFEAT_VIRTUAL_DMA = (1 << 1),
+ /* Bit 1: Virtual LLDD with no DMA support */
};
--
2.16.4