1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/slab.h>
8 #include <linux/blk-mq.h>
9 #include <linux/parser.h>
10 #include <linux/random.h>
11 #include <uapi/scsi/fc/fc_fs.h>
12 #include <uapi/scsi/fc/fc_els.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
17 #include "../host/fc.h"
20 /* *************************** Data Structures/Defines ****************** */
23 #define NVMET_LS_CTX_COUNT 256
25 /* for this implementation, assume small single frame rqst/rsp */
26 #define NVME_FC_MAX_LS_BUFFER_SIZE 2048
28 struct nvmet_fc_tgtport;
29 struct nvmet_fc_tgt_assoc;
31 struct nvmet_fc_ls_iod {
32 struct nvmefc_ls_rsp *lsrsp;
33 struct nvmefc_tgt_fcp_req *fcpreq; /* only if RS */
35 struct list_head ls_list; /* tgtport->ls_list */
37 struct nvmet_fc_tgtport *tgtport;
38 struct nvmet_fc_tgt_assoc *assoc;
45 struct scatterlist sg[2];
47 struct work_struct work;
48 } __aligned(sizeof(unsigned long long));
50 /* desired maximum for a single sequence - if sg list allows it */
51 #define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
53 enum nvmet_fcp_datadir {
60 struct nvmet_fc_fcp_iod {
61 struct nvmefc_tgt_fcp_req *fcpreq;
63 struct nvme_fc_cmd_iu cmdiubuf;
64 struct nvme_fc_ersp_iu rspiubuf;
66 struct scatterlist *next_sg;
67 struct scatterlist *data_sg;
70 enum nvmet_fcp_datadir io_dir;
78 struct work_struct defer_work;
80 struct nvmet_fc_tgtport *tgtport;
81 struct nvmet_fc_tgt_queue *queue;
83 struct list_head fcp_list; /* tgtport->fcp_list */
86 struct nvmet_fc_tgtport {
88 struct nvmet_fc_target_port fc_target_port;
90 struct list_head tgt_list; /* nvmet_fc_target_list */
91 struct device *dev; /* dev for dma mapping */
92 struct nvmet_fc_target_template *ops;
94 struct nvmet_fc_ls_iod *iod;
96 struct list_head ls_list;
97 struct list_head ls_busylist;
98 struct list_head assoc_list;
100 struct nvmet_fc_port_entry *pe;
105 struct nvmet_fc_port_entry {
106 struct nvmet_fc_tgtport *tgtport;
107 struct nvmet_port *port;
110 struct list_head pe_list;
113 struct nvmet_fc_defer_fcp_req {
114 struct list_head req_list;
115 struct nvmefc_tgt_fcp_req *fcp_req;
118 struct nvmet_fc_tgt_queue {
129 struct nvmet_cq nvme_cq;
130 struct nvmet_sq nvme_sq;
131 struct nvmet_fc_tgt_assoc *assoc;
132 struct list_head fod_list;
133 struct list_head pending_cmd_list;
134 struct list_head avail_defer_list;
135 struct workqueue_struct *work_q;
137 struct nvmet_fc_fcp_iod fod[]; /* array of fcp_iods */
138 } __aligned(sizeof(unsigned long long));
140 struct nvmet_fc_tgt_assoc {
143 struct nvmet_fc_tgtport *tgtport;
144 struct list_head a_list;
145 struct nvmet_fc_tgt_queue *queues[NVMET_NR_QUEUES + 1];
147 struct work_struct del_work;
152 nvmet_fc_iodnum(struct nvmet_fc_ls_iod *iodptr)
154 return (iodptr - iodptr->tgtport->iod);
158 nvmet_fc_fodnum(struct nvmet_fc_fcp_iod *fodptr)
160 return (fodptr - fodptr->queue->fod);
165 * Association and Connection IDs:
167 * Association ID will have a random number in the upper 6 bytes and zero in the lower 2 bytes
170 * Connection IDs will be the Association ID with the QID OR'd into the lower 2 bytes
172 * note: Association ID = Connection ID for queue 0
174 #define BYTES_FOR_QID sizeof(u16)
175 #define BYTES_FOR_QID_SHIFT (BYTES_FOR_QID * 8)
176 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
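/*
 * Worked example (illustrative values, not taken from the spec): with a
 * random association_id of 0x1122334455660000, nvmet_fc_makeconnid() for
 * queue 3 yields connection id 0x1122334455660003; nvmet_fc_getassociationid()
 * recovers 0x1122334455660000 and nvmet_fc_getqueueid() recovers 3. For
 * queue 0 the connection id equals the association id itself.
 */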
179 nvmet_fc_makeconnid(struct nvmet_fc_tgt_assoc *assoc, u16 qid)
181 return (assoc->association_id | qid);
185 nvmet_fc_getassociationid(u64 connectionid)
187 return connectionid & ~NVMET_FC_QUEUEID_MASK;
191 nvmet_fc_getqueueid(u64 connectionid)
193 return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
196 static inline struct nvmet_fc_tgtport *
197 targetport_to_tgtport(struct nvmet_fc_target_port *targetport)
199 return container_of(targetport, struct nvmet_fc_tgtport,
203 static inline struct nvmet_fc_fcp_iod *
204 nvmet_req_to_fod(struct nvmet_req *nvme_req)
206 return container_of(nvme_req, struct nvmet_fc_fcp_iod, req);
210 /* *************************** Globals **************************** */
213 static DEFINE_SPINLOCK(nvmet_fc_tgtlock);
215 static LIST_HEAD(nvmet_fc_target_list);
216 static DEFINE_IDA(nvmet_fc_tgtport_cnt);
217 static LIST_HEAD(nvmet_fc_portentry_list);
220 static void nvmet_fc_handle_ls_rqst_work(struct work_struct *work);
221 static void nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work);
222 static void nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc);
223 static int nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc);
224 static void nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue);
225 static int nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue);
226 static void nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport);
227 static int nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport);
228 static void nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
229 struct nvmet_fc_fcp_iod *fod);
230 static void nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc);
233 /* *********************** FC-NVME DMA Handling **************************** */
236 * The fcloop device passes in a NULL device pointer. Real LLDDs will
237 * pass in a valid device pointer. If NULL is passed to the dma mapping
238 * routines, depending on the platform, it may or may not succeed, and
242 * Wrap all the dma routines and check the dev pointer.
244 * For simple mappings (those that return just a dma address), we'll
245 * noop them, returning a dma address of 0.
247 * On more complex mappings (dma_map_sg), a pseudo routine fills
248 * in the scatter list, setting all dma addresses to 0.
251 static inline dma_addr_t
252 fc_dma_map_single(struct device *dev, void *ptr, size_t size,
253 enum dma_data_direction dir)
255 return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
259 fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
261 return dev ? dma_mapping_error(dev, dma_addr) : 0;
265 fc_dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
266 enum dma_data_direction dir)
269 dma_unmap_single(dev, addr, size, dir);
273 fc_dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
274 enum dma_data_direction dir)
277 dma_sync_single_for_cpu(dev, addr, size, dir);
281 fc_dma_sync_single_for_device(struct device *dev, dma_addr_t addr, size_t size,
282 enum dma_data_direction dir)
285 dma_sync_single_for_device(dev, addr, size, dir);
288 /* pseudo dma_map_sg call */
290 fc_map_sg(struct scatterlist *sg, int nents)
292 struct scatterlist *s;
295 WARN_ON(nents == 0 || sg[0].length == 0);
297 for_each_sg(sg, s, nents, i) {
299 #ifdef CONFIG_NEED_SG_DMA_LENGTH
300 s->dma_length = s->length;
307 fc_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
308 enum dma_data_direction dir)
310 return dev ? dma_map_sg(dev, sg, nents, dir) : fc_map_sg(sg, nents);
314 fc_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
315 enum dma_data_direction dir)
318 dma_unmap_sg(dev, sg, nents, dir);
322 /* *********************** FC-NVME Port Management ************************ */
326 nvmet_fc_alloc_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
328 struct nvmet_fc_ls_iod *iod;
331 iod = kcalloc(NVMET_LS_CTX_COUNT, sizeof(struct nvmet_fc_ls_iod),
338 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
339 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work);
340 iod->tgtport = tgtport;
341 list_add_tail(&iod->ls_list, &tgtport->ls_list);
343 iod->rqstbuf = kcalloc(2, NVME_FC_MAX_LS_BUFFER_SIZE,
348 iod->rspbuf = iod->rqstbuf + NVME_FC_MAX_LS_BUFFER_SIZE;
350 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf,
351 NVME_FC_MAX_LS_BUFFER_SIZE,
353 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma))
361 list_del(&iod->ls_list);
362 for (iod--, i--; i >= 0; iod--, i--) {
363 fc_dma_unmap_single(tgtport->dev, iod->rspdma,
364 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
366 list_del(&iod->ls_list);
375 nvmet_fc_free_ls_iodlist(struct nvmet_fc_tgtport *tgtport)
377 struct nvmet_fc_ls_iod *iod = tgtport->iod;
380 for (i = 0; i < NVMET_LS_CTX_COUNT; iod++, i++) {
381 fc_dma_unmap_single(tgtport->dev,
382 iod->rspdma, NVME_FC_MAX_LS_BUFFER_SIZE,
385 list_del(&iod->ls_list);
390 static struct nvmet_fc_ls_iod *
391 nvmet_fc_alloc_ls_iod(struct nvmet_fc_tgtport *tgtport)
393 struct nvmet_fc_ls_iod *iod;
396 spin_lock_irqsave(&tgtport->lock, flags);
397 iod = list_first_entry_or_null(&tgtport->ls_list,
398 struct nvmet_fc_ls_iod, ls_list);
400 list_move_tail(&iod->ls_list, &tgtport->ls_busylist);
401 spin_unlock_irqrestore(&tgtport->lock, flags);
407 nvmet_fc_free_ls_iod(struct nvmet_fc_tgtport *tgtport,
408 struct nvmet_fc_ls_iod *iod)
412 spin_lock_irqsave(&tgtport->lock, flags);
413 list_move(&iod->ls_list, &tgtport->ls_list);
414 spin_unlock_irqrestore(&tgtport->lock, flags);
418 nvmet_fc_prep_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
419 struct nvmet_fc_tgt_queue *queue)
421 struct nvmet_fc_fcp_iod *fod = queue->fod;
424 for (i = 0; i < queue->sqsize; fod++, i++) {
425 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work);
426 fod->tgtport = tgtport;
430 fod->aborted = false;
432 list_add_tail(&fod->fcp_list, &queue->fod_list);
433 spin_lock_init(&fod->flock);
435 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf,
436 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
437 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) {
438 list_del(&fod->fcp_list);
439 for (fod--, i--; i >= 0; fod--, i--) {
440 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
441 sizeof(fod->rspiubuf),
444 list_del(&fod->fcp_list);
453 nvmet_fc_destroy_fcp_iodlist(struct nvmet_fc_tgtport *tgtport,
454 struct nvmet_fc_tgt_queue *queue)
456 struct nvmet_fc_fcp_iod *fod = queue->fod;
459 for (i = 0; i < queue->sqsize; fod++, i++) {
461 fc_dma_unmap_single(tgtport->dev, fod->rspdma,
462 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
466 static struct nvmet_fc_fcp_iod *
467 nvmet_fc_alloc_fcp_iod(struct nvmet_fc_tgt_queue *queue)
469 struct nvmet_fc_fcp_iod *fod;
471 lockdep_assert_held(&queue->qlock);
473 fod = list_first_entry_or_null(&queue->fod_list,
474 struct nvmet_fc_fcp_iod, fcp_list);
476 list_del(&fod->fcp_list);
479 * no queue reference is taken, as it was taken by the
480 * queue lookup just prior to the allocation. The fod
481 * will "inherit" that reference.
489 nvmet_fc_queue_fcp_req(struct nvmet_fc_tgtport *tgtport,
490 struct nvmet_fc_tgt_queue *queue,
491 struct nvmefc_tgt_fcp_req *fcpreq)
493 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
496 * put all admin cmds on hw queue id 0. All io commands go to
497 * the respective hw queue on a modulo basis
499 fcpreq->hwqid = queue->qid ?
500 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0;
502 nvmet_fc_handle_fcp_rqst(tgtport, fod);
506 nvmet_fc_fcp_rqst_op_defer_work(struct work_struct *work)
508 struct nvmet_fc_fcp_iod *fod =
509 container_of(work, struct nvmet_fc_fcp_iod, defer_work);
511 /* Submit deferred IO for processing */
512 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq);
517 nvmet_fc_free_fcp_iod(struct nvmet_fc_tgt_queue *queue,
518 struct nvmet_fc_fcp_iod *fod)
520 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
521 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
522 struct nvmet_fc_defer_fcp_req *deferfcp;
525 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma,
526 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
528 fcpreq->nvmet_fc_private = NULL;
532 fod->aborted = false;
533 fod->writedataactive = false;
536 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq);
538 /* release the queue lookup reference on the completed IO */
539 nvmet_fc_tgt_q_put(queue);
541 spin_lock_irqsave(&queue->qlock, flags);
542 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
543 struct nvmet_fc_defer_fcp_req, req_list);
545 list_add_tail(&fod->fcp_list, &fod->queue->fod_list);
546 spin_unlock_irqrestore(&queue->qlock, flags);
550 /* Re-use the fod for the next pending cmd that was deferred */
551 list_del(&deferfcp->req_list);
553 fcpreq = deferfcp->fcp_req;
555 /* deferfcp can be reused for another IO at a later date */
556 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list);
558 spin_unlock_irqrestore(&queue->qlock, flags);
560 /* Save the NVME CMD IU in the fod */
561 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen);
563 /* Setup new fcpreq to be processed */
564 fcpreq->rspaddr = NULL;
566 fcpreq->nvmet_fc_private = fod;
567 fod->fcpreq = fcpreq;
570 /* inform LLDD IO is now being processed */
571 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq);
574 * Keep the queue lookup reference that was taken when the
575 * fod was originally allocated.
578 queue_work(queue->work_q, &fod->defer_work);
581 static struct nvmet_fc_tgt_queue *
582 nvmet_fc_alloc_target_queue(struct nvmet_fc_tgt_assoc *assoc,
585 struct nvmet_fc_tgt_queue *queue;
589 if (qid > NVMET_NR_QUEUES)
592 queue = kzalloc(struct_size(queue, fod, sqsize), GFP_KERNEL);
596 if (!nvmet_fc_tgt_a_get(assoc))
599 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0,
600 assoc->tgtport->fc_target_port.port_num,
606 queue->sqsize = sqsize;
607 queue->assoc = assoc;
608 INIT_LIST_HEAD(&queue->fod_list);
609 INIT_LIST_HEAD(&queue->avail_defer_list);
610 INIT_LIST_HEAD(&queue->pending_cmd_list);
611 atomic_set(&queue->connected, 0);
612 atomic_set(&queue->sqtail, 0);
613 atomic_set(&queue->rsn, 1);
614 atomic_set(&queue->zrspcnt, 0);
615 spin_lock_init(&queue->qlock);
616 kref_init(&queue->ref);
618 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue);
620 ret = nvmet_sq_init(&queue->nvme_sq);
622 goto out_fail_iodlist;
624 WARN_ON(assoc->queues[qid]);
625 spin_lock_irqsave(&assoc->tgtport->lock, flags);
626 assoc->queues[qid] = queue;
627 spin_unlock_irqrestore(&assoc->tgtport->lock, flags);
632 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue);
633 destroy_workqueue(queue->work_q);
635 nvmet_fc_tgt_a_put(assoc);
643 nvmet_fc_tgt_queue_free(struct kref *ref)
645 struct nvmet_fc_tgt_queue *queue =
646 container_of(ref, struct nvmet_fc_tgt_queue, ref);
649 spin_lock_irqsave(&queue->assoc->tgtport->lock, flags);
650 queue->assoc->queues[queue->qid] = NULL;
651 spin_unlock_irqrestore(&queue->assoc->tgtport->lock, flags);
653 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue);
655 nvmet_fc_tgt_a_put(queue->assoc);
657 destroy_workqueue(queue->work_q);
663 nvmet_fc_tgt_q_put(struct nvmet_fc_tgt_queue *queue)
665 kref_put(&queue->ref, nvmet_fc_tgt_queue_free);
669 nvmet_fc_tgt_q_get(struct nvmet_fc_tgt_queue *queue)
671 return kref_get_unless_zero(&queue->ref);
676 nvmet_fc_delete_target_queue(struct nvmet_fc_tgt_queue *queue)
678 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport;
679 struct nvmet_fc_fcp_iod *fod = queue->fod;
680 struct nvmet_fc_defer_fcp_req *deferfcp, *tempptr;
682 int i, writedataactive;
685 disconnect = atomic_xchg(&queue->connected, 0);
687 spin_lock_irqsave(&queue->qlock, flags);
688 /* abort outstanding IOs */
689 for (i = 0; i < queue->sqsize; fod++, i++) {
691 spin_lock(&fod->flock);
693 writedataactive = fod->writedataactive;
694 spin_unlock(&fod->flock);
696 * only call the LLDD abort routine if waiting for
697 * writedata. Other outstanding ops should finish
700 if (writedataactive) {
701 spin_lock(&fod->flock);
703 spin_unlock(&fod->flock);
704 tgtport->ops->fcp_abort(
705 &tgtport->fc_target_port, fod->fcpreq);
710 /* Clean up deferred IOs in queue */
711 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list,
713 list_del(&deferfcp->req_list);
718 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list,
719 struct nvmet_fc_defer_fcp_req, req_list);
723 list_del(&deferfcp->req_list);
724 spin_unlock_irqrestore(&queue->qlock, flags);
726 tgtport->ops->defer_rcv(&tgtport->fc_target_port,
729 tgtport->ops->fcp_abort(&tgtport->fc_target_port,
732 tgtport->ops->fcp_req_release(&tgtport->fc_target_port,
735 /* release the queue lookup reference */
736 nvmet_fc_tgt_q_put(queue);
740 spin_lock_irqsave(&queue->qlock, flags);
742 spin_unlock_irqrestore(&queue->qlock, flags);
744 flush_workqueue(queue->work_q);
747 nvmet_sq_destroy(&queue->nvme_sq);
749 nvmet_fc_tgt_q_put(queue);
752 static struct nvmet_fc_tgt_queue *
753 nvmet_fc_find_target_queue(struct nvmet_fc_tgtport *tgtport,
756 struct nvmet_fc_tgt_assoc *assoc;
757 struct nvmet_fc_tgt_queue *queue;
758 u64 association_id = nvmet_fc_getassociationid(connection_id);
759 u16 qid = nvmet_fc_getqueueid(connection_id);
762 if (qid > NVMET_NR_QUEUES)
765 spin_lock_irqsave(&tgtport->lock, flags);
766 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
767 if (association_id == assoc->association_id) {
768 queue = assoc->queues[qid];
770 (!atomic_read(&queue->connected) ||
771 !nvmet_fc_tgt_q_get(queue)))
773 spin_unlock_irqrestore(&tgtport->lock, flags);
777 spin_unlock_irqrestore(&tgtport->lock, flags);
782 nvmet_fc_delete_assoc(struct work_struct *work)
784 struct nvmet_fc_tgt_assoc *assoc =
785 container_of(work, struct nvmet_fc_tgt_assoc, del_work);
787 nvmet_fc_delete_target_assoc(assoc);
788 nvmet_fc_tgt_a_put(assoc);
791 static struct nvmet_fc_tgt_assoc *
792 nvmet_fc_alloc_target_assoc(struct nvmet_fc_tgtport *tgtport)
794 struct nvmet_fc_tgt_assoc *assoc, *tmpassoc;
798 bool needrandom = true;
800 assoc = kzalloc(sizeof(*assoc), GFP_KERNEL);
804 idx = ida_simple_get(&tgtport->assoc_cnt, 0, 0, GFP_KERNEL);
808 if (!nvmet_fc_tgtport_get(tgtport))
811 assoc->tgtport = tgtport;
813 INIT_LIST_HEAD(&assoc->a_list);
814 kref_init(&assoc->ref);
815 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc);
818 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID);
819 ran = ran << BYTES_FOR_QID_SHIFT;
821 spin_lock_irqsave(&tgtport->lock, flags);
823 list_for_each_entry(tmpassoc, &tgtport->assoc_list, a_list)
824 if (ran == tmpassoc->association_id) {
829 assoc->association_id = ran;
830 list_add_tail(&assoc->a_list, &tgtport->assoc_list);
832 spin_unlock_irqrestore(&tgtport->lock, flags);
838 ida_simple_remove(&tgtport->assoc_cnt, idx);
845 nvmet_fc_target_assoc_free(struct kref *ref)
847 struct nvmet_fc_tgt_assoc *assoc =
848 container_of(ref, struct nvmet_fc_tgt_assoc, ref);
849 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
852 spin_lock_irqsave(&tgtport->lock, flags);
853 list_del(&assoc->a_list);
854 spin_unlock_irqrestore(&tgtport->lock, flags);
855 ida_simple_remove(&tgtport->assoc_cnt, assoc->a_id);
857 nvmet_fc_tgtport_put(tgtport);
861 nvmet_fc_tgt_a_put(struct nvmet_fc_tgt_assoc *assoc)
863 kref_put(&assoc->ref, nvmet_fc_target_assoc_free);
867 nvmet_fc_tgt_a_get(struct nvmet_fc_tgt_assoc *assoc)
869 return kref_get_unless_zero(&assoc->ref);
873 nvmet_fc_delete_target_assoc(struct nvmet_fc_tgt_assoc *assoc)
875 struct nvmet_fc_tgtport *tgtport = assoc->tgtport;
876 struct nvmet_fc_tgt_queue *queue;
880 spin_lock_irqsave(&tgtport->lock, flags);
881 for (i = NVMET_NR_QUEUES; i >= 0; i--) {
882 queue = assoc->queues[i];
884 if (!nvmet_fc_tgt_q_get(queue))
886 spin_unlock_irqrestore(&tgtport->lock, flags);
887 nvmet_fc_delete_target_queue(queue);
888 nvmet_fc_tgt_q_put(queue);
889 spin_lock_irqsave(&tgtport->lock, flags);
892 spin_unlock_irqrestore(&tgtport->lock, flags);
894 nvmet_fc_tgt_a_put(assoc);
897 static struct nvmet_fc_tgt_assoc *
898 nvmet_fc_find_target_assoc(struct nvmet_fc_tgtport *tgtport,
901 struct nvmet_fc_tgt_assoc *assoc;
902 struct nvmet_fc_tgt_assoc *ret = NULL;
905 spin_lock_irqsave(&tgtport->lock, flags);
906 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
907 if (association_id == assoc->association_id) {
909 nvmet_fc_tgt_a_get(assoc);
913 spin_unlock_irqrestore(&tgtport->lock, flags);
919 nvmet_fc_portentry_bind(struct nvmet_fc_tgtport *tgtport,
920 struct nvmet_fc_port_entry *pe,
921 struct nvmet_port *port)
923 lockdep_assert_held(&nvmet_fc_tgtlock);
925 pe->tgtport = tgtport;
931 pe->node_name = tgtport->fc_target_port.node_name;
932 pe->port_name = tgtport->fc_target_port.port_name;
933 INIT_LIST_HEAD(&pe->pe_list);
935 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list);
939 nvmet_fc_portentry_unbind(struct nvmet_fc_port_entry *pe)
943 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
945 pe->tgtport->pe = NULL;
946 list_del(&pe->pe_list);
947 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
951 * called when a targetport deregisters. Breaks the relationship
952 * with the nvmet port, but leaves the port_entry in place so that
953 * re-registration can resume operation.
956 nvmet_fc_portentry_unbind_tgt(struct nvmet_fc_tgtport *tgtport)
958 struct nvmet_fc_port_entry *pe;
961 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
966 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
970 * called when a new targetport is registered. Looks in the
971 * existing nvmet port_entries to see if the nvmet layer is
972 * configured for the targetport's WWNs. (the targetport existed,
973 * nvmet was configured, the lldd unregistered the tgtport, and is now
974 * re-registering the same targetport). If so, set the nvmet port
975 * port entry on the targetport.
978 nvmet_fc_portentry_rebind_tgt(struct nvmet_fc_tgtport *tgtport)
980 struct nvmet_fc_port_entry *pe;
983 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
984 list_for_each_entry(pe, &nvmet_fc_portentry_list, pe_list) {
985 if (tgtport->fc_target_port.node_name == pe->node_name &&
986 tgtport->fc_target_port.port_name == pe->port_name) {
987 WARN_ON(pe->tgtport);
989 pe->tgtport = tgtport;
993 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
997 * nvmet_fc_register_targetport - transport entry point called by an
998 * LLDD to register the existence of a local
999 * NVME subsystem FC port. (A usage sketch follows this function.)
1000 * @pinfo: pointer to information about the port to be registered
1001 * @template: LLDD entrypoints and operational parameters for the port
1002 * @dev: physical hardware device node port corresponds to. Will be
1003 * used for DMA mappings
1004 * @portptr: pointer to a local port pointer. Upon success, the routine
1005 * will allocate a nvme_fc_local_port structure and place its
1006 * address in the local port pointer. Upon failure, local port
1007 * pointer will be set to NULL.
1010 * a completion status. Must be 0 upon success; a negative errno
1011 * (ex: -ENXIO) upon failure.
1014 nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
1015 struct nvmet_fc_target_template *template,
1017 struct nvmet_fc_target_port **portptr)
1019 struct nvmet_fc_tgtport *newrec;
1020 unsigned long flags;
1023 if (!template->xmt_ls_rsp || !template->fcp_op ||
1024 !template->fcp_abort ||
1025 !template->fcp_req_release || !template->targetport_delete ||
1026 !template->max_hw_queues || !template->max_sgl_segments ||
1027 !template->max_dif_sgl_segments || !template->dma_boundary) {
1029 goto out_regtgt_failed;
1032 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz),
1036 goto out_regtgt_failed;
1039 idx = ida_simple_get(&nvmet_fc_tgtport_cnt, 0, 0, GFP_KERNEL);
1042 goto out_fail_kfree;
1045 if (!get_device(dev) && dev) {
1050 newrec->fc_target_port.node_name = pinfo->node_name;
1051 newrec->fc_target_port.port_name = pinfo->port_name;
1052 newrec->fc_target_port.private = &newrec[1];
1053 newrec->fc_target_port.port_id = pinfo->port_id;
1054 newrec->fc_target_port.port_num = idx;
1055 INIT_LIST_HEAD(&newrec->tgt_list);
1057 newrec->ops = template;
1058 spin_lock_init(&newrec->lock);
1059 INIT_LIST_HEAD(&newrec->ls_list);
1060 INIT_LIST_HEAD(&newrec->ls_busylist);
1061 INIT_LIST_HEAD(&newrec->assoc_list);
1062 kref_init(&newrec->ref);
1063 ida_init(&newrec->assoc_cnt);
1064 newrec->max_sg_cnt = template->max_sgl_segments;
1066 ret = nvmet_fc_alloc_ls_iodlist(newrec);
1069 goto out_free_newrec;
1072 nvmet_fc_portentry_rebind_tgt(newrec);
1074 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1075 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list);
1076 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1078 *portptr = &newrec->fc_target_port;
1084 ida_simple_remove(&nvmet_fc_tgtport_cnt, idx);
1091 EXPORT_SYMBOL_GPL(nvmet_fc_register_targetport);
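/*
 * Minimal usage sketch (hypothetical LLDD code, not part of this file): an
 * LLDD registers its FC port roughly as below. "example_tgt_template" and
 * the port info values are placeholders; the template must supply at least
 * the mandatory handlers and limits validated above.
 *
 *	struct nvmet_fc_port_info pinfo = {
 *		.node_name = lport_wwnn,
 *		.port_name = lport_wwpn,
 *		.port_id   = fabric_port_id,
 *	};
 *	struct nvmet_fc_target_port *targetport;
 *	int ret;
 *
 *	ret = nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
 *					   &pci_dev->dev, &targetport);
 *	if (ret)
 *		return ret;
 */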
1095 nvmet_fc_free_tgtport(struct kref *ref)
1097 struct nvmet_fc_tgtport *tgtport =
1098 container_of(ref, struct nvmet_fc_tgtport, ref);
1099 struct device *dev = tgtport->dev;
1100 unsigned long flags;
1102 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1103 list_del(&tgtport->tgt_list);
1104 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1106 nvmet_fc_free_ls_iodlist(tgtport);
1108 /* let the LLDD know we've finished tearing it down */
1109 tgtport->ops->targetport_delete(&tgtport->fc_target_port);
1111 ida_simple_remove(&nvmet_fc_tgtport_cnt,
1112 tgtport->fc_target_port.port_num);
1114 ida_destroy(&tgtport->assoc_cnt);
1122 nvmet_fc_tgtport_put(struct nvmet_fc_tgtport *tgtport)
1124 kref_put(&tgtport->ref, nvmet_fc_free_tgtport);
1128 nvmet_fc_tgtport_get(struct nvmet_fc_tgtport *tgtport)
1130 return kref_get_unless_zero(&tgtport->ref);
1134 __nvmet_fc_free_assocs(struct nvmet_fc_tgtport *tgtport)
1136 struct nvmet_fc_tgt_assoc *assoc, *next;
1137 unsigned long flags;
1139 spin_lock_irqsave(&tgtport->lock, flags);
1140 list_for_each_entry_safe(assoc, next,
1141 &tgtport->assoc_list, a_list) {
1142 if (!nvmet_fc_tgt_a_get(assoc))
1144 if (!schedule_work(&assoc->del_work))
1145 nvmet_fc_tgt_a_put(assoc);
1147 spin_unlock_irqrestore(&tgtport->lock, flags);
1151 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1152 * to remove references to a hosthandle for LS's.
1154 * The nvmet-fc layer ensures that any references to the hosthandle
1155 * on the targetport are forgotten (set to NULL). The LLDD will
1156 * typically call this when a login with a remote host port has been
1157 * lost, thus LS's for the remote host port are no longer possible.
1159 * If an LS request is outstanding to the targetport/hosthandle (or
1160 * issued concurrently with the call to invalidate the host), the
1161 * LLDD is responsible for terminating/aborting the LS and completing
1162 * the LS request. It is recommended that these terminations/aborts
1163 * occur after calling to invalidate the host handle to avoid additional
1164 * retries by the nvmet-fc transport. The nvmet-fc transport may
1165 * continue to reference host handle while it cleans up outstanding
1166 * NVME associations. The nvmet-fc transport will call the
1167 * ops->host_release() callback to notify the LLDD that all references
1168 * are complete and the related host handle can be recovered.
1169 * Note: if there are no references, the callback may be called before
1170 * the invalidate host call returns.
1172 * @target_port: pointer to the (registered) target port that a prior
1173 * LS was received on and which supplied the transport the
1175 * @hosthandle: the handle (pointer) that represents the host port
1176 * that no longer has connectivity and that LS's should
1177 * no longer be directed to.
1180 nvmet_fc_invalidate_host(struct nvmet_fc_target_port *target_port,
1184 EXPORT_SYMBOL_GPL(nvmet_fc_invalidate_host);
1187 * nvmet layer has called to terminate an association
1190 nvmet_fc_delete_ctrl(struct nvmet_ctrl *ctrl)
1192 struct nvmet_fc_tgtport *tgtport, *next;
1193 struct nvmet_fc_tgt_assoc *assoc;
1194 struct nvmet_fc_tgt_queue *queue;
1195 unsigned long flags;
1196 bool found_ctrl = false;
1198 /* this is a bit ugly, but don't want to make locks layered */
1199 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1200 list_for_each_entry_safe(tgtport, next, &nvmet_fc_target_list,
1202 if (!nvmet_fc_tgtport_get(tgtport))
1204 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1206 spin_lock_irqsave(&tgtport->lock, flags);
1207 list_for_each_entry(assoc, &tgtport->assoc_list, a_list) {
1208 queue = assoc->queues[0];
1209 if (queue && queue->nvme_sq.ctrl == ctrl) {
1210 if (nvmet_fc_tgt_a_get(assoc))
1215 spin_unlock_irqrestore(&tgtport->lock, flags);
1217 nvmet_fc_tgtport_put(tgtport);
1220 if (!schedule_work(&assoc->del_work))
1221 nvmet_fc_tgt_a_put(assoc);
1225 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
1227 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
1231 * nvmet_fc_unregister_targetport - transport entry point called by an
1232 * LLDD to deregister/remove a previously
1233 * registered local NVME subsystem FC port.
1234 * @target_port: pointer to the (registered) target port that is to be
1238 * a completion status. Must be 0 upon success; a negative errno
1239 * (ex: -ENXIO) upon failure.
1242 nvmet_fc_unregister_targetport(struct nvmet_fc_target_port *target_port)
1244 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1246 nvmet_fc_portentry_unbind_tgt(tgtport);
1248 /* terminate any outstanding associations */
1249 __nvmet_fc_free_assocs(tgtport);
1251 nvmet_fc_tgtport_put(tgtport);
1255 EXPORT_SYMBOL_GPL(nvmet_fc_unregister_targetport);
1258 /* *********************** FC-NVME LS Handling **************************** */
1262 nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport,
1263 struct nvmet_fc_ls_iod *iod)
1265 struct fcnvme_ls_cr_assoc_rqst *rqst =
1266 (struct fcnvme_ls_cr_assoc_rqst *)iod->rqstbuf;
1267 struct fcnvme_ls_cr_assoc_acc *acc =
1268 (struct fcnvme_ls_cr_assoc_acc *)iod->rspbuf;
1269 struct nvmet_fc_tgt_queue *queue;
1272 memset(acc, 0, sizeof(*acc));
1275 * FC-NVME spec changes. There are initiators sending different
1276 * lengths as padding sizes for the Create Association Cmd descriptor list.
1278 * Accept anything of "minimum" length. Assume format per 1.15
1279 * spec (with HOSTID reduced to 16 bytes), ignore how long the
1280 * trailing pad length is.
1282 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN)
1283 ret = VERR_CR_ASSOC_LEN;
1284 else if (be32_to_cpu(rqst->desc_list_len) <
1285 FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN)
1286 ret = VERR_CR_ASSOC_RQST_LEN;
1287 else if (rqst->assoc_cmd.desc_tag !=
1288 cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD))
1289 ret = VERR_CR_ASSOC_CMD;
1290 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) <
1291 FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN)
1292 ret = VERR_CR_ASSOC_CMD_LEN;
1293 else if (!rqst->assoc_cmd.ersp_ratio ||
1294 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >=
1295 be16_to_cpu(rqst->assoc_cmd.sqsize)))
1296 ret = VERR_ERSP_RATIO;
1299 /* new association w/ admin queue */
1300 iod->assoc = nvmet_fc_alloc_target_assoc(tgtport);
1302 ret = VERR_ASSOC_ALLOC_FAIL;
1304 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0,
1305 be16_to_cpu(rqst->assoc_cmd.sqsize));
1307 ret = VERR_QUEUE_ALLOC_FAIL;
1312 dev_err(tgtport->dev,
1313 "Create Association LS failed: %s\n",
1314 validation_errors[ret]);
1315 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1316 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1317 FCNVME_RJT_RC_LOGIC,
1318 FCNVME_RJT_EXP_NONE, 0);
1322 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio);
1323 atomic_set(&queue->connected, 1);
1324 queue->sqhd = 0; /* best place to init value */
1326 /* format a response */
1328 iod->lsrsp->rsplen = sizeof(*acc);
1330 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1332 sizeof(struct fcnvme_ls_cr_assoc_acc)),
1333 FCNVME_LS_CREATE_ASSOCIATION);
1334 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID);
1335 acc->associd.desc_len =
1337 sizeof(struct fcnvme_lsdesc_assoc_id));
1338 acc->associd.association_id =
1339 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0));
1340 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1341 acc->connectid.desc_len =
1343 sizeof(struct fcnvme_lsdesc_conn_id));
1344 acc->connectid.connection_id = acc->associd.association_id;
1348 nvmet_fc_ls_create_connection(struct nvmet_fc_tgtport *tgtport,
1349 struct nvmet_fc_ls_iod *iod)
1351 struct fcnvme_ls_cr_conn_rqst *rqst =
1352 (struct fcnvme_ls_cr_conn_rqst *)iod->rqstbuf;
1353 struct fcnvme_ls_cr_conn_acc *acc =
1354 (struct fcnvme_ls_cr_conn_acc *)iod->rspbuf;
1355 struct nvmet_fc_tgt_queue *queue;
1358 memset(acc, 0, sizeof(*acc));
1360 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst))
1361 ret = VERR_CR_CONN_LEN;
1362 else if (rqst->desc_list_len !=
1364 sizeof(struct fcnvme_ls_cr_conn_rqst)))
1365 ret = VERR_CR_CONN_RQST_LEN;
1366 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1367 ret = VERR_ASSOC_ID;
1368 else if (rqst->associd.desc_len !=
1370 sizeof(struct fcnvme_lsdesc_assoc_id)))
1371 ret = VERR_ASSOC_ID_LEN;
1372 else if (rqst->connect_cmd.desc_tag !=
1373 cpu_to_be32(FCNVME_LSDESC_CREATE_CONN_CMD))
1374 ret = VERR_CR_CONN_CMD;
1375 else if (rqst->connect_cmd.desc_len !=
1377 sizeof(struct fcnvme_lsdesc_cr_conn_cmd)))
1378 ret = VERR_CR_CONN_CMD_LEN;
1379 else if (!rqst->connect_cmd.ersp_ratio ||
1380 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >=
1381 be16_to_cpu(rqst->connect_cmd.sqsize)))
1382 ret = VERR_ERSP_RATIO;
1386 iod->assoc = nvmet_fc_find_target_assoc(tgtport,
1387 be64_to_cpu(rqst->associd.association_id));
1389 ret = VERR_NO_ASSOC;
1391 queue = nvmet_fc_alloc_target_queue(iod->assoc,
1392 be16_to_cpu(rqst->connect_cmd.qid),
1393 be16_to_cpu(rqst->connect_cmd.sqsize));
1395 ret = VERR_QUEUE_ALLOC_FAIL;
1397 /* release get taken in nvmet_fc_find_target_assoc */
1398 nvmet_fc_tgt_a_put(iod->assoc);
1403 dev_err(tgtport->dev,
1404 "Create Connection LS failed: %s\n",
1405 validation_errors[ret]);
1406 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1407 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1408 (ret == VERR_NO_ASSOC) ?
1409 FCNVME_RJT_RC_INV_ASSOC :
1410 FCNVME_RJT_RC_LOGIC,
1411 FCNVME_RJT_EXP_NONE, 0);
1415 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio);
1416 atomic_set(&queue->connected, 1);
1417 queue->sqhd = 0; /* best place to init value */
1419 /* format a response */
1421 iod->lsrsp->rsplen = sizeof(*acc);
1423 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1424 fcnvme_lsdesc_len(sizeof(struct fcnvme_ls_cr_conn_acc)),
1425 FCNVME_LS_CREATE_CONNECTION);
1426 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID);
1427 acc->connectid.desc_len =
1429 sizeof(struct fcnvme_lsdesc_conn_id));
1430 acc->connectid.connection_id =
1431 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc,
1432 be16_to_cpu(rqst->connect_cmd.qid)));
1436 nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
1437 struct nvmet_fc_ls_iod *iod)
1439 struct fcnvme_ls_disconnect_assoc_rqst *rqst =
1440 (struct fcnvme_ls_disconnect_assoc_rqst *)iod->rqstbuf;
1441 struct fcnvme_ls_disconnect_assoc_acc *acc =
1442 (struct fcnvme_ls_disconnect_assoc_acc *)iod->rspbuf;
1443 struct nvmet_fc_tgt_assoc *assoc;
1446 memset(acc, 0, sizeof(*acc));
1448 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_disconnect_assoc_rqst))
1449 ret = VERR_DISCONN_LEN;
1450 else if (rqst->desc_list_len !=
1452 sizeof(struct fcnvme_ls_disconnect_assoc_rqst)))
1453 ret = VERR_DISCONN_RQST_LEN;
1454 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID))
1455 ret = VERR_ASSOC_ID;
1456 else if (rqst->associd.desc_len !=
1458 sizeof(struct fcnvme_lsdesc_assoc_id)))
1459 ret = VERR_ASSOC_ID_LEN;
1460 else if (rqst->discon_cmd.desc_tag !=
1461 cpu_to_be32(FCNVME_LSDESC_DISCONN_CMD))
1462 ret = VERR_DISCONN_CMD;
1463 else if (rqst->discon_cmd.desc_len !=
1465 sizeof(struct fcnvme_lsdesc_disconn_cmd)))
1466 ret = VERR_DISCONN_CMD_LEN;
1468 * As the standard changed on the LS, check if this is the old format
1469 * with a scope value other than Association (e.g. 0).
1471 else if (rqst->discon_cmd.rsvd8[0])
1472 ret = VERR_DISCONN_SCOPE;
1474 /* match an active association */
1475 assoc = nvmet_fc_find_target_assoc(tgtport,
1476 be64_to_cpu(rqst->associd.association_id));
1479 ret = VERR_NO_ASSOC;
1483 dev_err(tgtport->dev,
1484 "Disconnect LS failed: %s\n",
1485 validation_errors[ret]);
1486 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc,
1487 NVME_FC_MAX_LS_BUFFER_SIZE, rqst->w0.ls_cmd,
1488 (ret == VERR_NO_ASSOC) ?
1489 FCNVME_RJT_RC_INV_ASSOC :
1490 FCNVME_RJT_RC_LOGIC,
1491 FCNVME_RJT_EXP_NONE, 0);
1495 /* format a response */
1497 iod->lsrsp->rsplen = sizeof(*acc);
1499 nvme_fc_format_rsp_hdr(acc, FCNVME_LS_ACC,
1501 sizeof(struct fcnvme_ls_disconnect_assoc_acc)),
1502 FCNVME_LS_DISCONNECT_ASSOC);
1504 /* release get taken in nvmet_fc_find_target_assoc */
1505 nvmet_fc_tgt_a_put(iod->assoc);
1507 nvmet_fc_delete_target_assoc(iod->assoc);
1511 /* *********************** NVME Ctrl Routines **************************** */
1514 static void nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req);
1516 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops;
1519 nvmet_fc_xmt_ls_rsp_done(struct nvmefc_ls_rsp *lsrsp)
1521 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private;
1522 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1524 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma,
1525 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1526 nvmet_fc_free_ls_iod(tgtport, iod);
1527 nvmet_fc_tgtport_put(tgtport);
1531 nvmet_fc_xmt_ls_rsp(struct nvmet_fc_tgtport *tgtport,
1532 struct nvmet_fc_ls_iod *iod)
1536 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma,
1537 NVME_FC_MAX_LS_BUFFER_SIZE, DMA_TO_DEVICE);
1539 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp);
1541 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp);
1545 * Actual processing routine for received FC-NVME LS Requests from the LLD
1548 nvmet_fc_handle_ls_rqst(struct nvmet_fc_tgtport *tgtport,
1549 struct nvmet_fc_ls_iod *iod)
1551 struct fcnvme_ls_rqst_w0 *w0 =
1552 (struct fcnvme_ls_rqst_w0 *)iod->rqstbuf;
1554 iod->lsrsp->nvme_fc_private = iod;
1555 iod->lsrsp->rspbuf = iod->rspbuf;
1556 iod->lsrsp->rspdma = iod->rspdma;
1557 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done;
1558 /* Be preventative. Handlers will later set a valid length */
1559 iod->lsrsp->rsplen = 0;
1565 * parse request input, execute the request, and format the
1568 switch (w0->ls_cmd) {
1569 case FCNVME_LS_CREATE_ASSOCIATION:
1570 /* Creates Association and initial Admin Queue/Connection */
1571 nvmet_fc_ls_create_association(tgtport, iod);
1573 case FCNVME_LS_CREATE_CONNECTION:
1574 /* Creates an IO Queue/Connection */
1575 nvmet_fc_ls_create_connection(tgtport, iod);
1577 case FCNVME_LS_DISCONNECT_ASSOC:
1578 /* Terminate a Queue/Connection or the Association */
1579 nvmet_fc_ls_disconnect(tgtport, iod);
1582 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf,
1583 NVME_FC_MAX_LS_BUFFER_SIZE, w0->ls_cmd,
1584 FCNVME_RJT_RC_INVAL, FCNVME_RJT_EXP_NONE, 0);
1587 nvmet_fc_xmt_ls_rsp(tgtport, iod);
1591 * Actual processing routine for received FC-NVME LS Requests from the LLD
1594 nvmet_fc_handle_ls_rqst_work(struct work_struct *work)
1596 struct nvmet_fc_ls_iod *iod =
1597 container_of(work, struct nvmet_fc_ls_iod, work);
1598 struct nvmet_fc_tgtport *tgtport = iod->tgtport;
1600 nvmet_fc_handle_ls_rqst(tgtport, iod);
1605 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
1606 * upon the reception of a NVME LS request.
1608 * The nvmet-fc layer will copy payload to an internal structure for
1609 * processing. As such, upon completion of the routine, the LLDD may
1610 * immediately free/reuse the LS request buffer passed in the call.
1612 * If this routine returns an error, the LLDD should abort the exchange.
1614 * @target_port: pointer to the (registered) target port the LS was
1616 * @lsrsp: pointer to a lsrsp structure to be used to reference
1617 * the exchange corresponding to the LS.
1618 * @lsreqbuf: pointer to the buffer containing the LS Request
1619 * @lsreqbuf_len: length, in bytes, of the received LS request
1622 nvmet_fc_rcv_ls_req(struct nvmet_fc_target_port *target_port,
1624 struct nvmefc_ls_rsp *lsrsp,
1625 void *lsreqbuf, u32 lsreqbuf_len)
1627 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
1628 struct nvmet_fc_ls_iod *iod;
1630 if (lsreqbuf_len > NVME_FC_MAX_LS_BUFFER_SIZE)
1633 if (!nvmet_fc_tgtport_get(tgtport))
1636 iod = nvmet_fc_alloc_ls_iod(tgtport);
1638 nvmet_fc_tgtport_put(tgtport);
1644 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len);
1645 iod->rqstdatalen = lsreqbuf_len;
1647 schedule_work(&iod->work);
1651 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_ls_req);
1655 * **********************
1656 * Start of FCP handling
1657 * **********************
1661 nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1663 struct scatterlist *sg;
1666 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent);
1671 fod->data_sg_cnt = nent;
1672 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent,
1673 ((fod->io_dir == NVMET_FCP_WRITE) ?
1674 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1675 /* note: write from initiator perspective */
1676 fod->next_sg = fod->data_sg;
1681 return NVME_SC_INTERNAL;
1685 nvmet_fc_free_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
1687 if (!fod->data_sg || !fod->data_sg_cnt)
1690 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt,
1691 ((fod->io_dir == NVMET_FCP_WRITE) ?
1692 DMA_FROM_DEVICE : DMA_TO_DEVICE));
1693 sgl_free(fod->data_sg);
1694 fod->data_sg = NULL;
1695 fod->data_sg_cnt = 0;
1700 queue_90percent_full(struct nvmet_fc_tgt_queue *q, u32 sqhd)
1704 /* egad, this is ugly. And sqtail is just a best guess */
1705 sqtail = atomic_read(&q->sqtail) % q->sqsize;
1707 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd);
1708 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9)));
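/*
 * Worked example (illustrative numbers): with sqsize = 32, sqhd = 2 and a
 * guessed sqtail of 31, used = 29, and 29 * 10 >= 31 * 9 (290 >= 279), so
 * the queue is treated as 90% or more full and an ersp will be forced.
 */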
1713 * May be a NVMET_FCOP_RSP or NVMET_FCOP_READDATA_RSP op
1716 nvmet_fc_prep_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1717 struct nvmet_fc_fcp_iod *fod)
1719 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf;
1720 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
1721 struct nvme_completion *cqe = &ersp->cqe;
1722 u32 *cqewd = (u32 *)cqe;
1723 bool send_ersp = false;
1724 u32 rsn, rspcnt, xfr_length;
1726 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP)
1727 xfr_length = fod->req.transfer_len;
1729 xfr_length = fod->offset;
1732 * check to see if we can send a 0's rsp.
1733 * Note: to send a 0's response, the NVME-FC host transport will
1734 * recreate the CQE. The host transport knows: sq id, SQHD (last
1735 * seen in an ersp), and command_id. Thus it will create a
1736 * zero-filled CQE with those known fields filled in. Transport
1737 * must send an ersp for any condition where the cqe won't match
1740 * Here are the FC-NVME mandated cases where we must send an ersp:
1741 * every N responses, where N=ersp_ratio
1742 * force fabric commands to send ersp's (not in FC-NVME but good
1744 * normal cmds: any time status is non-zero, or status is zero
1745 * but words 0 or 1 are non-zero.
1746 * the SQ is 90% or more full
1747 * the cmd is a fused command
1748 * transferred data length not equal to cmd iu length
1750 rspcnt = atomic_inc_return(&fod->queue->zrspcnt);
1751 if (!(rspcnt % fod->queue->ersp_ratio) ||
1752 nvme_is_fabrics((struct nvme_command *) sqe) ||
1753 xfr_length != fod->req.transfer_len ||
1754 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] ||
1755 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) ||
1756 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head)))
1759 /* re-set the fields */
1760 fod->fcpreq->rspaddr = ersp;
1761 fod->fcpreq->rspdma = fod->rspdma;
1764 memset(ersp, 0, NVME_FC_SIZEOF_ZEROS_RSP);
1765 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP;
1767 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32));
1768 rsn = atomic_inc_return(&fod->queue->rsn);
1769 ersp->rsn = cpu_to_be32(rsn);
1770 ersp->xfrd_len = cpu_to_be32(xfr_length);
1771 fod->fcpreq->rsplen = sizeof(*ersp);
1774 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma,
1775 sizeof(fod->rspiubuf), DMA_TO_DEVICE);
1778 static void nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq);
1781 nvmet_fc_abort_op(struct nvmet_fc_tgtport *tgtport,
1782 struct nvmet_fc_fcp_iod *fod)
1784 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1786 /* data no longer needed */
1787 nvmet_fc_free_tgt_pgs(fod);
1790 * if an ABTS was received or we issued the fcp_abort early,
1791 * don't call the abort routine again.
1793 /* no need to take lock - lock was taken earlier to get here */
1795 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq);
1797 nvmet_fc_free_fcp_iod(fod->queue, fod);
1801 nvmet_fc_xmt_fcp_rsp(struct nvmet_fc_tgtport *tgtport,
1802 struct nvmet_fc_fcp_iod *fod)
1806 fod->fcpreq->op = NVMET_FCOP_RSP;
1807 fod->fcpreq->timeout = 0;
1809 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1811 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1813 nvmet_fc_abort_op(tgtport, fod);
1817 nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
1818 struct nvmet_fc_fcp_iod *fod, u8 op)
1820 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1821 struct scatterlist *sg = fod->next_sg;
1822 unsigned long flags;
1823 u32 remaininglen = fod->req.transfer_len - fod->offset;
1828 fcpreq->offset = fod->offset;
1829 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
1832 * for next sequence:
1833 * break at a sg element boundary
1834 * attempt to keep sequence length capped at
1835 * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
1836 * be longer if a single sg element is larger
1837 * than that amount. This is done to avoid creating
1838 * a new sg list to use for the tgtport api.
1842 while (tlen < remaininglen &&
1843 fcpreq->sg_cnt < tgtport->max_sg_cnt &&
1844 tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
1846 tlen += sg_dma_len(sg);
1849 if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
1851 tlen += min_t(u32, sg_dma_len(sg), remaininglen);
1854 if (tlen < remaininglen)
1857 fod->next_sg = NULL;
1859 fcpreq->transfer_length = tlen;
1860 fcpreq->transferred_length = 0;
1861 fcpreq->fcp_error = 0;
1865 * If the last READDATA request: check if LLDD supports
1866 * combined xfr with response.
1868 if ((op == NVMET_FCOP_READDATA) &&
1869 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) &&
1870 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) {
1871 fcpreq->op = NVMET_FCOP_READDATA_RSP;
1872 nvmet_fc_prep_fcp_rsp(tgtport, fod);
1875 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq);
1878 * should be ok to set w/o lock as it's in the thread of
1879 * execution (not an async timer routine) and doesn't
1880 * contend with any clearing action
1884 if (op == NVMET_FCOP_WRITEDATA) {
1885 spin_lock_irqsave(&fod->flock, flags);
1886 fod->writedataactive = false;
1887 spin_unlock_irqrestore(&fod->flock, flags);
1888 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1889 } else /* NVMET_FCOP_READDATA or NVMET_FCOP_READDATA_RSP */ {
1890 fcpreq->fcp_error = ret;
1891 fcpreq->transferred_length = 0;
1892 nvmet_fc_xmt_fcp_op_done(fod->fcpreq);
1898 __nvmet_fc_fod_op_abort(struct nvmet_fc_fcp_iod *fod, bool abort)
1900 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1901 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1903 /* if in the middle of an io and we need to tear down */
1905 if (fcpreq->op == NVMET_FCOP_WRITEDATA) {
1906 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1910 nvmet_fc_abort_op(tgtport, fod);
1918 * actual done handler for FCP operations when completed by the lldd
1921 nvmet_fc_fod_op_done(struct nvmet_fc_fcp_iod *fod)
1923 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
1924 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
1925 unsigned long flags;
1928 spin_lock_irqsave(&fod->flock, flags);
1930 fod->writedataactive = false;
1931 spin_unlock_irqrestore(&fod->flock, flags);
1933 switch (fcpreq->op) {
1935 case NVMET_FCOP_WRITEDATA:
1936 if (__nvmet_fc_fod_op_abort(fod, abort))
1938 if (fcpreq->fcp_error ||
1939 fcpreq->transferred_length != fcpreq->transfer_length) {
1940 spin_lock(&fod->flock);
1942 spin_unlock(&fod->flock);
1944 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL);
1948 fod->offset += fcpreq->transferred_length;
1949 if (fod->offset != fod->req.transfer_len) {
1950 spin_lock_irqsave(&fod->flock, flags);
1951 fod->writedataactive = true;
1952 spin_unlock_irqrestore(&fod->flock, flags);
1954 /* transfer the next chunk */
1955 nvmet_fc_transfer_fcp_data(tgtport, fod,
1956 NVMET_FCOP_WRITEDATA);
1960 /* data transfer complete, resume with nvmet layer */
1961 fod->req.execute(&fod->req);
1964 case NVMET_FCOP_READDATA:
1965 case NVMET_FCOP_READDATA_RSP:
1966 if (__nvmet_fc_fod_op_abort(fod, abort))
1968 if (fcpreq->fcp_error ||
1969 fcpreq->transferred_length != fcpreq->transfer_length) {
1970 nvmet_fc_abort_op(tgtport, fod);
1976 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) {
1977 /* data no longer needed */
1978 nvmet_fc_free_tgt_pgs(fod);
1979 nvmet_fc_free_fcp_iod(fod->queue, fod);
1983 fod->offset += fcpreq->transferred_length;
1984 if (fod->offset != fod->req.transfer_len) {
1985 /* transfer the next chunk */
1986 nvmet_fc_transfer_fcp_data(tgtport, fod,
1987 NVMET_FCOP_READDATA);
1991 /* data transfer complete, send response */
1993 /* data no longer needed */
1994 nvmet_fc_free_tgt_pgs(fod);
1996 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2000 case NVMET_FCOP_RSP:
2001 if (__nvmet_fc_fod_op_abort(fod, abort))
2003 nvmet_fc_free_fcp_iod(fod->queue, fod);
2012 nvmet_fc_xmt_fcp_op_done(struct nvmefc_tgt_fcp_req *fcpreq)
2014 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2016 nvmet_fc_fod_op_done(fod);
2020 * actual completion handler after execution by the nvmet layer
2023 __nvmet_fc_fcp_nvme_cmd_done(struct nvmet_fc_tgtport *tgtport,
2024 struct nvmet_fc_fcp_iod *fod, int status)
2026 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common;
2027 struct nvme_completion *cqe = &fod->rspiubuf.cqe;
2028 unsigned long flags;
2031 spin_lock_irqsave(&fod->flock, flags);
2033 spin_unlock_irqrestore(&fod->flock, flags);
2035 /* if we have a CQE, snoop the last sq_head value */
2037 fod->queue->sqhd = cqe->sq_head;
2040 nvmet_fc_abort_op(tgtport, fod);
2044 /* if an error occurred handling the cmd after initial parsing */
2046 /* fudge up a failed CQE status for our transport error */
2047 memset(cqe, 0, sizeof(*cqe));
2048 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */
2049 cqe->sq_id = cpu_to_le16(fod->queue->qid);
2050 cqe->command_id = sqe->command_id;
2051 cqe->status = cpu_to_le16(status);
2055 * try to push the data even if the SQE status is non-zero.
2056 * There may be a status where data still was intended to
2059 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) {
2060 /* push the data over before sending rsp */
2061 nvmet_fc_transfer_fcp_data(tgtport, fod,
2062 NVMET_FCOP_READDATA);
2066 /* writes & no data - fall thru */
2069 /* data no longer needed */
2070 nvmet_fc_free_tgt_pgs(fod);
2072 nvmet_fc_xmt_fcp_rsp(tgtport, fod);
2077 nvmet_fc_fcp_nvme_cmd_done(struct nvmet_req *nvme_req)
2079 struct nvmet_fc_fcp_iod *fod = nvmet_req_to_fod(nvme_req);
2080 struct nvmet_fc_tgtport *tgtport = fod->tgtport;
2082 __nvmet_fc_fcp_nvme_cmd_done(tgtport, fod, 0);
2087 * Actual processing routine for received FC-NVME I/O Requests from the LLD
2090 nvmet_fc_handle_fcp_rqst(struct nvmet_fc_tgtport *tgtport,
2091 struct nvmet_fc_fcp_iod *fod)
2093 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf;
2094 u32 xfrlen = be32_to_cpu(cmdiu->data_len);
2098 * if there is no nvmet mapping to the targetport, there
2099 * shouldn't be requests. Just terminate them.
2102 goto transport_error;
2105 * Fused commands are currently not supported in the linux
2108 * As such, the implementation of the FC transport does not
2109 * look at the fused commands and order delivery to the upper
2110 * layer until we have both based on csn.
2113 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done;
2115 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) {
2116 fod->io_dir = NVMET_FCP_WRITE;
2117 if (!nvme_is_write(&cmdiu->sqe))
2118 goto transport_error;
2119 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) {
2120 fod->io_dir = NVMET_FCP_READ;
2121 if (nvme_is_write(&cmdiu->sqe))
2122 goto transport_error;
2124 fod->io_dir = NVMET_FCP_NODATA;
2126 goto transport_error;
2129 fod->req.cmd = &fod->cmdiubuf.sqe;
2130 fod->req.cqe = &fod->rspiubuf.cqe;
2131 fod->req.port = tgtport->pe->port;
2133 /* clear any response payload */
2134 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf));
2136 fod->data_sg = NULL;
2137 fod->data_sg_cnt = 0;
2139 ret = nvmet_req_init(&fod->req,
2140 &fod->queue->nvme_cq,
2141 &fod->queue->nvme_sq,
2142 &nvmet_fc_tgt_fcp_ops);
2144 /* bad SQE content or invalid ctrl state */
2145 /* nvmet layer has already called op done to send rsp. */
2149 fod->req.transfer_len = xfrlen;
2151 /* keep a running counter of tail position */
2152 atomic_inc(&fod->queue->sqtail);
2154 if (fod->req.transfer_len) {
2155 ret = nvmet_fc_alloc_tgt_pgs(fod);
2157 nvmet_req_complete(&fod->req, ret);
2161 fod->req.sg = fod->data_sg;
2162 fod->req.sg_cnt = fod->data_sg_cnt;
2165 if (fod->io_dir == NVMET_FCP_WRITE) {
2166 /* pull the data over before invoking nvmet layer */
2167 nvmet_fc_transfer_fcp_data(tgtport, fod, NVMET_FCOP_WRITEDATA);
2174 * can invoke the nvmet layer now. If read data, cmd completion will
2177 fod->req.execute(&fod->req);
2181 nvmet_fc_abort_op(tgtport, fod);
2185 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2186 * upon the reception of a NVME FCP CMD IU.
2188 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2189 * layer for processing.
2191 * The nvmet_fc layer allocates a local job structure (struct
2192 * nvmet_fc_fcp_iod) from the queue for the io and copies the
2193 * CMD IU buffer to the job structure. As such, on a successful
2194 * completion (returns 0), the LLDD may immediately free/reuse
2195 * the CMD IU buffer passed in the call.
2197 * However, in some circumstances, due to the packetized nature of FC
2198 * and the api of the FC LLDD - which may issue a hw command to send the
2199 * response but may not get the hw completion for that command, and thus
2200 * not upcall the nvmet_fc layer, before a new command is
2201 * asynchronously received - it's possible for a command to be received
2202 * before the LLDD and nvmet_fc have recycled the job structure. It gives
2203 * the appearance of more commands received than fit in the sq.
2204 * To alleviate this scenario, a temporary queue is maintained in the
2205 * transport for pending LLDD requests waiting for a queue job structure.
2206 * In these "overrun" cases, a temporary queue element is allocated,
2207 * the LLDD request and CMD iu buffer information remembered, and the
2208 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2209 * structure is freed, it is immediately reallocated for anything on the
2210 * pending request list. The LLDDs defer_rcv() callback is called,
2211 * informing the LLDD that it may reuse the CMD IU buffer, and the io
2212 * is then started normally with the transport.
2214 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2215 * the completion as successful but must not reuse the CMD IU buffer
2216 * until the LLDD's defer_rcv() callback has been called for the
2217 * corresponding struct nvmefc_tgt_fcp_req pointer. (A usage sketch follows this routine.)
2219 * If there is any other condition in which an error occurs, the
2220 * transport will return a non-zero status indicating the error.
2221 * In all cases other than -EOVERFLOW, the transport has not accepted the
2222 * request and the LLDD should abort the exchange.
2224 * @target_port: pointer to the (registered) target port the FCP CMD IU
2226 * @fcpreq: pointer to a fcpreq request structure to be used to reference
2227 * the exchange corresponding to the FCP Exchange.
2228 * @cmdiubuf: pointer to the buffer containing the FCP CMD IU
2229 * @cmdiubuf_len: length, in bytes, of the received FCP CMD IU
2232 nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *target_port,
2233 struct nvmefc_tgt_fcp_req *fcpreq,
2234 void *cmdiubuf, u32 cmdiubuf_len)
2236 struct nvmet_fc_tgtport *tgtport = targetport_to_tgtport(target_port);
2237 struct nvme_fc_cmd_iu *cmdiu = cmdiubuf;
2238 struct nvmet_fc_tgt_queue *queue;
2239 struct nvmet_fc_fcp_iod *fod;
2240 struct nvmet_fc_defer_fcp_req *deferfcp;
2241 unsigned long flags;
2243 /* validate iu, so the connection id can be used to find the queue */
2244 if ((cmdiubuf_len != sizeof(*cmdiu)) ||
2245 (cmdiu->format_id != NVME_CMD_FORMAT_ID) ||
2246 (cmdiu->fc_id != NVME_CMD_FC_ID) ||
2247 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4)))
2250 queue = nvmet_fc_find_target_queue(tgtport,
2251 be64_to_cpu(cmdiu->connection_id));
2256 * note: reference taken by find_target_queue
2257 * After successful fod allocation, the fod will inherit the
2258 * ownership of that reference and will remove the reference
2259 * when the fod is freed.
2262 spin_lock_irqsave(&queue->qlock, flags);
2264 fod = nvmet_fc_alloc_fcp_iod(queue);
2266 spin_unlock_irqrestore(&queue->qlock, flags);
2268 fcpreq->nvmet_fc_private = fod;
2269 fod->fcpreq = fcpreq;
2271 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len);
2273 nvmet_fc_queue_fcp_req(tgtport, queue, fcpreq);
2278 if (!tgtport->ops->defer_rcv) {
2279 spin_unlock_irqrestore(&queue->qlock, flags);
2280 /* release the queue lookup reference */
2281 nvmet_fc_tgt_q_put(queue);
2285 deferfcp = list_first_entry_or_null(&queue->avail_defer_list,
2286 struct nvmet_fc_defer_fcp_req, req_list);
2288 /* Just re-use one that was previously allocated */
2289 list_del(&deferfcp->req_list);
2291 spin_unlock_irqrestore(&queue->qlock, flags);
2293 /* Now we need to dynamically allocate one */
2294 deferfcp = kmalloc(sizeof(*deferfcp), GFP_KERNEL);
2296 /* release the queue lookup reference */
2297 nvmet_fc_tgt_q_put(queue);
2300 spin_lock_irqsave(&queue->qlock, flags);
2303 /* For now, use rspaddr / rsplen to save payload information */
2304 fcpreq->rspaddr = cmdiubuf;
2305 fcpreq->rsplen = cmdiubuf_len;
2306 deferfcp->fcp_req = fcpreq;
2308 /* defer processing till a fod becomes available */
2309 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list);
2311 /* NOTE: the queue lookup reference is still valid */
2313 spin_unlock_irqrestore(&queue->qlock, flags);
2317 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_req);
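/*
 * Minimal usage sketch (hypothetical LLDD code, placeholders only): the
 * LLDD passes each received FCP CMD IU to the transport. On -EOVERFLOW the
 * CMD IU buffer must be preserved until the defer_rcv() callback fires; on
 * any other non-zero status the exchange should be aborted.
 *
 *	ret = nvmet_fc_rcv_fcp_req(targetport, &io_ctx->tgt_fcp_req,
 *				   cmd_iu_buf, cmd_iu_len);
 *	if (ret == 0 || ret == -EOVERFLOW)
 *		return;			// transport owns the io now
 *	// otherwise: abort the exchange
 */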
2320 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2321 * upon the reception of an ABTS for a FCP command
2323 * Notify the transport that an ABTS has been received for a FCP command
2324 * that had been given to the transport via nvmet_fc_rcv_fcp_req(). The
2325 * LLDD believes the command is still being worked on
2326 * (template_ops->fcp_req_release() has not been called).
2328 * The transport will wait for any outstanding work (an op to the LLDD,
2329 * which the lldd should complete with error due to the ABTS; or the
2330 * completion from the nvmet layer of the nvme command), then will
2331 * stop processing and call the LLDD's fcp_req_release() callback to
2332 * return the i/o context to the LLDD. The LLDD may send the BA_ACC
2333 * to the ABTS either after return from this function (assuming any
2334 * outstanding op work has been terminated) or upon the callback being
2337 * @target_port: pointer to the (registered) target port the FCP CMD IU
2339 * @fcpreq: pointer to the fcpreq request structure that corresponds
2340 * to the exchange that received the ABTS.
2343 nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *target_port,
2344 struct nvmefc_tgt_fcp_req *fcpreq)
2346 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private;
2347 struct nvmet_fc_tgt_queue *queue;
2348 unsigned long flags;
2350 if (!fod || fod->fcpreq != fcpreq)
2351 /* job appears to have already completed, ignore abort */
2356 spin_lock_irqsave(&queue->qlock, flags);
2359 * mark as abort. The abort handler, invoked upon completion
2360 * of any work, will detect the aborted status and do the
2363 spin_lock(&fod->flock);
2365 fod->aborted = true;
2366 spin_unlock(&fod->flock);
2368 spin_unlock_irqrestore(&queue->qlock, flags);
2370 EXPORT_SYMBOL_GPL(nvmet_fc_rcv_fcp_abort);
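/*
 * Minimal usage sketch (hypothetical LLDD code): when an ABTS arrives for
 * an exchange still owned by the transport, the LLDD simply notifies it;
 * the BA_ACC may be sent once any outstanding op work has been terminated.
 *
 *	nvmet_fc_rcv_fcp_abort(targetport, &io_ctx->tgt_fcp_req);
 */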
2373 struct nvmet_fc_traddr {
2379 __nvme_fc_parse_u64(substring_t *sstr, u64 *val)
2383 if (match_u64(sstr, &token64))
2391 * This routine validates and extracts the WWNs from the TRADDR string.
2392 * As kernel parsers need the 0x to determine number base, universally
2393 * build string to parse with 0x prefix before parsing name strings.
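 *
 * Example (assumed format, matching the length checks below): a full-length
 * traddr looks like
 *	nn-0x20000090fa942779:pn-0x10000090fa942779
 * and the short form omits the "0x" prefixes; either way each 16-hex-digit
 * name is rebuilt here with a "0x" prefix before being handed to match_u64().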
2396 nvme_fc_parse_traddr(struct nvmet_fc_traddr *traddr, char *buf, size_t blen)
2398 char name[2 + NVME_FC_TRADDR_HEXNAMELEN + 1];
2399 substring_t wwn = { name, &name[sizeof(name)-1] };
2400 int nnoffset, pnoffset;
2402 /* validate if string is one of the 2 allowed formats */
2403 if (strnlen(buf, blen) == NVME_FC_TRADDR_MAXLENGTH &&
2404 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) &&
2405 !strncmp(&buf[NVME_FC_TRADDR_MAX_PN_OFFSET],
2406 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) {
2407 nnoffset = NVME_FC_TRADDR_OXNNLEN;
2408 pnoffset = NVME_FC_TRADDR_MAX_PN_OFFSET +
2409 NVME_FC_TRADDR_OXNNLEN;
2410 } else if ((strnlen(buf, blen) == NVME_FC_TRADDR_MINLENGTH &&
2411 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) &&
2412 !strncmp(&buf[NVME_FC_TRADDR_MIN_PN_OFFSET],
2413 "pn-", NVME_FC_TRADDR_NNLEN))) {
2414 nnoffset = NVME_FC_TRADDR_NNLEN;
2415 pnoffset = NVME_FC_TRADDR_MIN_PN_OFFSET + NVME_FC_TRADDR_NNLEN;
2421 name[2 + NVME_FC_TRADDR_HEXNAMELEN] = 0;
2423 memcpy(&name[2], &buf[nnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2424 if (__nvme_fc_parse_u64(&wwn, &traddr->nn))
2427 memcpy(&name[2], &buf[pnoffset], NVME_FC_TRADDR_HEXNAMELEN);
2428 if (__nvme_fc_parse_u64(&wwn, &traddr->pn))
2434 pr_warn("%s: bad traddr string\n", __func__);
2439 nvmet_fc_add_port(struct nvmet_port *port)
2441 struct nvmet_fc_tgtport *tgtport;
2442 struct nvmet_fc_port_entry *pe;
2443 struct nvmet_fc_traddr traddr = { 0L, 0L };
2444 unsigned long flags;
2447 /* validate the address info */
2448 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) ||
2449 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC))
2452 /* map the traddr address info to a target port */
2454 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr,
2455 sizeof(port->disc_addr.traddr));
2459 pe = kzalloc(sizeof(*pe), GFP_KERNEL);
2464 spin_lock_irqsave(&nvmet_fc_tgtlock, flags);
2465 list_for_each_entry(tgtport, &nvmet_fc_target_list, tgt_list) {
2466 if ((tgtport->fc_target_port.node_name == traddr.nn) &&
2467 (tgtport->fc_target_port.port_name == traddr.pn)) {
2468 /* a FC port can only be 1 nvmet port id */
2470 nvmet_fc_portentry_bind(tgtport, pe, port);
2477 spin_unlock_irqrestore(&nvmet_fc_tgtlock, flags);
2486 nvmet_fc_remove_port(struct nvmet_port *port)
2488 struct nvmet_fc_port_entry *pe = port->priv;
2490 nvmet_fc_portentry_unbind(pe);
2496 nvmet_fc_discovery_chg(struct nvmet_port *port)
2498 struct nvmet_fc_port_entry *pe = port->priv;
2499 struct nvmet_fc_tgtport *tgtport = pe->tgtport;
2501 if (tgtport && tgtport->ops->discovery_event)
2502 tgtport->ops->discovery_event(&tgtport->fc_target_port);
2505 static const struct nvmet_fabrics_ops nvmet_fc_tgt_fcp_ops = {
2506 .owner = THIS_MODULE,
2507 .type = NVMF_TRTYPE_FC,
2509 .add_port = nvmet_fc_add_port,
2510 .remove_port = nvmet_fc_remove_port,
2511 .queue_response = nvmet_fc_fcp_nvme_cmd_done,
2512 .delete_ctrl = nvmet_fc_delete_ctrl,
2513 .discovery_chg = nvmet_fc_discovery_chg,
2516 static int __init nvmet_fc_init_module(void)
2518 return nvmet_register_transport(&nvmet_fc_tgt_fcp_ops);
2521 static void __exit nvmet_fc_exit_module(void)
2523 /* sanity check - all targetports should be removed */
2524 if (!list_empty(&nvmet_fc_target_list))
2525 pr_warn("%s: targetport list not empty\n", __func__);
2527 nvmet_unregister_transport(&nvmet_fc_tgt_fcp_ops);
2529 ida_destroy(&nvmet_fc_tgtport_cnt);
2532 module_init(nvmet_fc_init_module);
2533 module_exit(nvmet_fc_exit_module);
2535 MODULE_LICENSE("GPL v2");