1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2016 Avago Technologies. All rights reserved.
5 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
6 #include <linux/module.h>
7 #include <linux/parser.h>
8 #include <uapi/scsi/fc/fc_fs.h>
10 #include "../host/nvme.h"
11 #include "../target/nvmet.h"
12 #include <linux/nvme-fc-driver.h>
13 #include <linux/nvme-fc.h>
/* option flag bits recorded in fcloop_ctrl_options.mask while parsing */
18 NVMF_OPT_WWNN = 1 << 0,
19 NVMF_OPT_WWPN = 1 << 1,
20 NVMF_OPT_ROLES = 1 << 2,
21 NVMF_OPT_FCADDR = 1 << 3,
22 NVMF_OPT_LPWWNN = 1 << 4,
23 NVMF_OPT_LPWWPN = 1 << 5,
/* values parsed from a write to an add_*_port attribute */
26 struct fcloop_ctrl_options {
/* token table for match_token(); the "key=value" forms the parsers accept */
36 static const match_table_t opt_tokens = {
37 { NVMF_OPT_WWNN, "wwnn=%s" },
38 { NVMF_OPT_WWPN, "wwpn=%s" },
39 { NVMF_OPT_ROLES, "roles=%d" },
40 { NVMF_OPT_FCADDR, "fcaddr=%x" },
41 { NVMF_OPT_LPWWNN, "lpwwnn=%s" },
42 { NVMF_OPT_LPWWPN, "lpwwpn=%s" },
43 { NVMF_OPT_ERR, NULL }
/*
 * fcloop_parse_options() - parse a comma/newline separated list of
 * "key=value" options (keys per opt_tokens) written to a sysfs
 * attribute into *opts, setting the corresponding NVMF_OPT_* bits.
 * Operates on a kstrdup()'d copy of @buf because strsep() consumes it.
 * Returns 0 on success or a negative errno on an unknown key, a bad
 * value, or allocation failure.
 */
47 fcloop_parse_options(struct fcloop_ctrl_options *opts,
50 substring_t args[MAX_OPT_ARGS];
51 char *options, *o, *p;
/* duplicate the caller's buffer; strsep() below modifies it in place */
55 options = o = kstrdup(buf, GFP_KERNEL);
59 while ((p = strsep(&o, ",\n")) != NULL) {
63 token = match_token(p, opt_tokens, args);
67 if (match_u64(args, &token64)) {
69 goto out_free_options;
74 if (match_u64(args, &token64)) {
76 goto out_free_options;
81 if (match_int(args, &token)) {
83 goto out_free_options;
88 if (match_hex(args, &token)) {
90 goto out_free_options;
95 if (match_u64(args, &token64)) {
97 goto out_free_options;
99 opts->lpwwnn = token64;
101 case NVMF_OPT_LPWWPN:
102 if (match_u64(args, &token64)) {
104 goto out_free_options;
106 opts->lpwwpn = token64;
/* unrecognized key: reject the whole write */
109 pr_warn("unknown parameter or missing value '%s'\n", p);
111 goto out_free_options;
/*
 * fcloop_parse_nm_options() - parse only the wwnn/wwpn pair out of @buf.
 * Used by the del_*_port attributes to identify which port to delete;
 * the parsed 64-bit names are stored through @nname/@pname.
 */
122 fcloop_parse_nm_options(struct device *dev, u64 *nname, u64 *pname,
125 substring_t args[MAX_OPT_ARGS];
126 char *options, *o, *p;
/* work on a modifiable copy of the caller's buffer */
133 options = o = kstrdup(buf, GFP_KERNEL);
137 while ((p = strsep(&o, ",\n")) != NULL) {
141 token = match_token(p, opt_tokens, args);
144 if (match_u64(args, &token64)) {
146 goto out_free_options;
151 if (match_u64(args, &token64)) {
153 goto out_free_options;
/* only wwnn/wwpn are meaningful here; anything else is an error */
158 pr_warn("unknown parameter or missing value '%s'\n", p);
160 goto out_free_options;
/*
 * Required-option masks per port type: local and target ports need
 * wwnn+wwpn; a remote port additionally needs the names of the local
 * port (lpwwnn/lpwwpn) it attaches to.
 */
178 #define LPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
180 #define RPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN | \
181 NVMF_OPT_LPWWNN | NVMF_OPT_LPWWPN)
183 #define TGTPORT_OPTS (NVMF_OPT_WWNN | NVMF_OPT_WWPN)
/* fcloop_lock protects both global port lists below */
186 static DEFINE_SPINLOCK(fcloop_lock);
187 static LIST_HEAD(fcloop_lports);
188 static LIST_HEAD(fcloop_nports);
/* local (initiator-side) FC port, created via the add_local_port attribute */
190 struct fcloop_lport {
191 struct nvme_fc_local_port *localport;
192 struct list_head lport_list;
/* completed by fcloop_localport_delete() when unregistration finishes */
193 struct completion unreg_done;
/* private data hung off the nvme-fc local port; points back at the lport */
196 struct fcloop_lport_priv {
197 struct fcloop_lport *lport;
/* initiator's view of the peer; paired with a fcloop_tport via the nport */
200 struct fcloop_rport {
201 struct nvme_fc_remote_port *remoteport;
202 struct nvmet_fc_target_port *targetport;
203 struct fcloop_nport *nport;
204 struct fcloop_lport *lport;
/* target's view of the peer; mirror image of fcloop_rport */
207 struct fcloop_tport {
208 struct nvmet_fc_target_port *targetport;
209 struct nvme_fc_remote_port *remoteport;
210 struct fcloop_nport *nport;
211 struct fcloop_lport *lport;
/*
 * One wwnn/wwpn identity that may carry both a remote port and a
 * target port.  Refcounted; linked on the global fcloop_nports list.
 */
214 struct fcloop_nport {
215 struct fcloop_rport *rport;
216 struct fcloop_tport *tport;
217 struct fcloop_lport *lport;
218 struct list_head nport_list;
/* per-LS-request loopback context (lives in lsreq->private) */
226 struct fcloop_lsreq {
227 struct fcloop_tport *tport;
228 struct nvmefc_ls_req *lsreq;
229 struct work_struct work;
230 struct nvmefc_tgt_ls_req tgt_ls_req;
/* deferred RSCN (discovery event) delivery context */
235 struct fcloop_tport *tport;
236 struct work_struct work;
/* initiator i/o state value (see fcloop_fcp_recv_work and friends) */
243 INI_IO_COMPLETED = 3,
/* per-FCP-request loopback context; lifetime managed by 'ref' (kref) */
246 struct fcloop_fcpreq {
247 struct fcloop_tport *tport;
248 struct nvmefc_fcp_req *fcpreq;
255 struct work_struct fcp_rcv_work;
256 struct work_struct abort_rcv_work;
257 struct work_struct tio_done_work;
258 struct nvmefc_tgt_fcp_req tgt_fcp_req;
/* initiator-side per-request private data (fcpreq->private) */
261 struct fcloop_ini_fcpreq {
262 struct nvmefc_fcp_req *fcpreq;
263 struct fcloop_fcpreq *tfcp_req;
/* map an embedded nvmefc_tgt_ls_req back to its fcloop_lsreq container */
267 static inline struct fcloop_lsreq *
268 tgt_ls_req_to_lsreq(struct nvmefc_tgt_ls_req *tgt_lsreq)
270 return container_of(tgt_lsreq, struct fcloop_lsreq, tgt_ls_req);
/* map an embedded nvmefc_tgt_fcp_req back to its fcloop_fcpreq container */
273 static inline struct fcloop_fcpreq *
274 tgt_fcp_req_to_fcpreq(struct nvmefc_tgt_fcp_req *tgt_fcpreq)
276 return container_of(tgt_fcpreq, struct fcloop_fcpreq, tgt_fcp_req);
/* queue create/delete are no-ops for the loopback transport */
281 fcloop_create_queue(struct nvme_fc_local_port *localport,
282 unsigned int qidx, u16 qsize,
290 fcloop_delete_queue(struct nvme_fc_local_port *localport,
291 unsigned int idx, void *handle)
/*
297 * Transmit of LS RSP done (e.g. buffers all set). call back up
298 * initiator "done" flows.
 */
301 fcloop_tgt_lsrqst_done_work(struct work_struct *work)
303 struct fcloop_lsreq *tls_req =
304 container_of(work, struct fcloop_lsreq, work);
305 struct fcloop_tport *tport = tls_req->tport;
306 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
/*
 * Complete the LS back to the initiator: either no target was ever
 * involved (the -ECONNREFUSED path sets tport to NULL) or the target's
 * paired remote port must still exist.
 */
308 if (!tport || tport->remoteport)
309 lsreq->done(lsreq, tls_req->status);
/*
 * Initiator LS entry point: hand the request buffer to the paired
 * nvmet-fc target port, or fail the LS with -ECONNREFUSED via the
 * done work item when no target port is connected.
 */
313 fcloop_ls_req(struct nvme_fc_local_port *localport,
314 struct nvme_fc_remote_port *remoteport,
315 struct nvmefc_ls_req *lsreq)
317 struct fcloop_lsreq *tls_req = lsreq->private;
318 struct fcloop_rport *rport = remoteport->private;
321 tls_req->lsreq = lsreq;
322 INIT_WORK(&tls_req->work, fcloop_tgt_lsrqst_done_work);
324 if (!rport->targetport) {
325 tls_req->status = -ECONNREFUSED;
326 tls_req->tport = NULL;
/* defer completion to work context rather than calling done() inline */
327 schedule_work(&tls_req->work);
332 tls_req->tport = rport->targetport->private;
333 ret = nvmet_fc_rcv_ls_req(rport->targetport, &tls_req->tgt_ls_req,
334 lsreq->rqstaddr, lsreq->rqstlen);
/*
 * Target transmits its LS response: copy the payload (truncated to the
 * smaller of the two buffers) straight into the initiator's response
 * buffer, complete the target side, then schedule initiator completion.
 */
340 fcloop_xmt_ls_rsp(struct nvmet_fc_target_port *tport,
341 struct nvmefc_tgt_ls_req *tgt_lsreq)
343 struct fcloop_lsreq *tls_req = tgt_ls_req_to_lsreq(tgt_lsreq);
344 struct nvmefc_ls_req *lsreq = tls_req->lsreq;
346 memcpy(lsreq->rspaddr, tgt_lsreq->rspbuf,
347 ((lsreq->rsplen < tgt_lsreq->rsplen) ?
348 lsreq->rsplen : tgt_lsreq->rsplen));
349 tgt_lsreq->done(tgt_lsreq);
351 schedule_work(&tls_req->work);
/*
357 * Simulate reception of RSCN and converting it to a initiator transport
358 * call to rescan a remote port.
 */
361 fcloop_tgt_rscn_work(struct work_struct *work)
363 struct fcloop_rscn *tgt_rscn =
364 container_of(work, struct fcloop_rscn, work);
365 struct fcloop_tport *tport = tgt_rscn->tport;
/* only rescan if the paired remote port still exists */
367 if (tport->remoteport)
368 nvme_fc_rescan_remoteport(tport->remoteport);
/*
 * nvmet-fc "discovery event" upcall: allocate a small context and punt
 * the remote-port rescan to work context.
 */
373 fcloop_tgt_discovery_evt(struct nvmet_fc_target_port *tgtport)
375 struct fcloop_rscn *tgt_rscn;
377 tgt_rscn = kzalloc(sizeof(*tgt_rscn), GFP_KERNEL);
381 tgt_rscn->tport = tgtport->private;
382 INIT_WORK(&tgt_rscn->work, fcloop_tgt_rscn_work);
384 schedule_work(&tgt_rscn->work);
/* kref release callback for a fcloop_fcpreq */
388 fcloop_tfcp_req_free(struct kref *ref)
390 struct fcloop_fcpreq *tfcp_req =
391 container_of(ref, struct fcloop_fcpreq, ref);
397 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
399 kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
/* returns 0 if the request is already on its way to being freed */
403 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
405 return kref_get_unless_zero(&tfcp_req->ref);
/*
 * Complete an initiator FCP request: sever the initiator->target link
 * under inilock (so a racing abort cannot chase a stale pointer), call
 * the initiator's done() callback, then drop the i/o's reference on
 * the target-side struct.
 */
409 fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
410 struct fcloop_fcpreq *tfcp_req, int status)
412 struct fcloop_ini_fcpreq *inireq = NULL;
415 inireq = fcpreq->private;
416 spin_lock(&inireq->inilock);
417 inireq->tfcp_req = NULL;
418 spin_unlock(&inireq->inilock);
420 fcpreq->status = status;
421 fcpreq->done(fcpreq);
424 /* release original io reference on tgt struct */
425 fcloop_tfcp_req_put(tfcp_req);
/*
 * Work: deliver a looped-back FCP command to the nvmet-fc target.
 * Transitions INI_IO_START -> INI_IO_ACTIVE under reqlock; if the i/o
 * was already aborted, complete it straight back to the host instead.
 */
429 fcloop_fcp_recv_work(struct work_struct *work)
431 struct fcloop_fcpreq *tfcp_req =
432 container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
433 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
435 bool aborted = false;
437 spin_lock_irq(&tfcp_req->reqlock);
438 switch (tfcp_req->inistate) {
440 tfcp_req->inistate = INI_IO_ACTIVE;
446 spin_unlock_irq(&tfcp_req->reqlock);
450 spin_unlock_irq(&tfcp_req->reqlock);
452 if (unlikely(aborted))
/* hand the command to the target side; on error complete to the host */
455 ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
456 &tfcp_req->tgt_fcp_req,
457 fcpreq->cmdaddr, fcpreq->cmdlen);
459 fcloop_call_host_done(fcpreq, tfcp_req, ret);
/*
 * Work: propagate a host-side abort to the target.  If the i/o has
 * already completed, just drop the abort's reference; otherwise tell
 * nvmet-fc to abort, break the fcpreq link under reqlock, and complete
 * the host request with -ECANCELED.
 */
465 fcloop_fcp_abort_recv_work(struct work_struct *work)
467 struct fcloop_fcpreq *tfcp_req =
468 container_of(work, struct fcloop_fcpreq, abort_rcv_work);
469 struct nvmefc_fcp_req *fcpreq;
470 bool completed = false;
472 spin_lock_irq(&tfcp_req->reqlock);
473 fcpreq = tfcp_req->fcpreq;
474 switch (tfcp_req->inistate) {
477 case INI_IO_COMPLETED:
481 spin_unlock_irq(&tfcp_req->reqlock);
485 spin_unlock_irq(&tfcp_req->reqlock);
487 if (unlikely(completed)) {
488 /* remove reference taken in original abort downcall */
489 fcloop_tfcp_req_put(tfcp_req);
493 if (tfcp_req->tport->targetport)
494 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
495 &tfcp_req->tgt_fcp_req);
497 spin_lock_irq(&tfcp_req->reqlock);
498 tfcp_req->fcpreq = NULL;
499 spin_unlock_irq(&tfcp_req->reqlock);
501 fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
502 /* call_host_done releases reference for abort downcall */
/*
506 * FCP IO operation done by target completion.
507 * call back up initiator "done" flows.
 */
510 fcloop_tgt_fcprqst_done_work(struct work_struct *work)
512 struct fcloop_fcpreq *tfcp_req =
513 container_of(work, struct fcloop_fcpreq, tio_done_work);
514 struct nvmefc_fcp_req *fcpreq;
516 spin_lock_irq(&tfcp_req->reqlock);
517 fcpreq = tfcp_req->fcpreq;
/* mark completed so a late host abort takes the short path */
518 tfcp_req->inistate = INI_IO_COMPLETED;
519 spin_unlock_irq(&tfcp_req->reqlock);
521 fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
/*
 * Initiator FCP i/o entry point: allocate the loopback request context
 * (GFP_ATOMIC - may run in the host's submission context), link the
 * initiator and target sides together, and kick the receive work.
 */
526 fcloop_fcp_req(struct nvme_fc_local_port *localport,
527 struct nvme_fc_remote_port *remoteport,
528 void *hw_queue_handle,
529 struct nvmefc_fcp_req *fcpreq)
531 struct fcloop_rport *rport = remoteport->private;
532 struct fcloop_ini_fcpreq *inireq = fcpreq->private;
533 struct fcloop_fcpreq *tfcp_req;
/* no target connected -> reject immediately */
535 if (!rport->targetport)
536 return -ECONNREFUSED;
538 tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);
542 inireq->fcpreq = fcpreq;
543 inireq->tfcp_req = tfcp_req;
544 spin_lock_init(&inireq->inilock);
546 tfcp_req->fcpreq = fcpreq;
547 tfcp_req->tport = rport->targetport->private;
548 tfcp_req->inistate = INI_IO_START;
549 spin_lock_init(&tfcp_req->reqlock);
550 INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
551 INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
552 INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
/* initial reference belongs to the i/o; dropped in call_host_done */
553 kref_init(&tfcp_req->ref);
555 schedule_work(&tfcp_req->fcp_rcv_work);
/*
 * Copy @length bytes between the target SG list (@data_sg) and the
 * initiator SG list (@io_sg), honouring @offset into the io list.
 * Direction depends on @op: NVMET_FCOP_WRITEDATA copies io->data,
 * otherwise data->io.  Both lists are walked element by element.
 */
561 fcloop_fcp_copy_data(u8 op, struct scatterlist *data_sg,
562 struct scatterlist *io_sg, u32 offset, u32 length)
565 u32 data_len, io_len, tlen;
567 io_p = sg_virt(io_sg);
568 io_len = io_sg->length;
/* skip @offset bytes into the io SG list before copying */
571 tlen = min_t(u32, offset, io_len);
575 io_sg = sg_next(io_sg);
576 io_p = sg_virt(io_sg);
577 io_len = io_sg->length;
582 data_p = sg_virt(data_sg);
583 data_len = data_sg->length;
/* copy the largest chunk both current elements (and @length) allow */
586 tlen = min_t(u32, io_len, data_len);
587 tlen = min_t(u32, tlen, length);
589 if (op == NVMET_FCOP_WRITEDATA)
590 memcpy(data_p, io_p, tlen);
592 memcpy(io_p, data_p, tlen);
/* advance to the next io element when the current one is drained */
597 if ((!io_len) && (length)) {
598 io_sg = sg_next(io_sg);
599 io_p = sg_virt(io_sg);
600 io_len = io_sg->length;
/* likewise for the data-side element */
605 if ((!data_len) && (length)) {
606 data_sg = sg_next(data_sg);
607 data_p = sg_virt(data_sg);
608 data_len = data_sg->length;
/*
 * Target data-movement / response op.  Simulates the wire with memcpy
 * between the target and initiator SG lists, then completes the op.
 * If fcpreq is NULL the initiator has aborted: act as if all is well
 * but move no data (see comment below).
 */
615 fcloop_fcp_op(struct nvmet_fc_target_port *tgtport,
616 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
618 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
619 struct nvmefc_fcp_req *fcpreq;
620 u32 rsplen = 0, xfrlen = 0;
621 int fcp_err = 0, active, aborted;
622 u8 op = tgt_fcpreq->op;
/* snapshot state and claim the single "active" op slot atomically */
624 spin_lock_irq(&tfcp_req->reqlock);
625 fcpreq = tfcp_req->fcpreq;
626 active = tfcp_req->active;
627 aborted = tfcp_req->aborted;
628 tfcp_req->active = true;
629 spin_unlock_irq(&tfcp_req->reqlock);
631 if (unlikely(active))
632 /* illegal - call while i/o active */
635 if (unlikely(aborted)) {
636 /* target transport has aborted i/o prior */
637 spin_lock_irq(&tfcp_req->reqlock);
638 tfcp_req->active = false;
639 spin_unlock_irq(&tfcp_req->reqlock);
640 tgt_fcpreq->transferred_length = 0;
641 tgt_fcpreq->fcp_error = -ECANCELED;
642 tgt_fcpreq->done(tgt_fcpreq);
/*
647 * if fcpreq is NULL, the I/O has been aborted (from
648 * initiator side). For the target side, act as if all is well
649 * but don't actually move data.
 */
653 case NVMET_FCOP_WRITEDATA:
654 xfrlen = tgt_fcpreq->transfer_length;
656 fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
657 fcpreq->first_sgl, tgt_fcpreq->offset,
659 fcpreq->transferred_length += xfrlen;
663 case NVMET_FCOP_READDATA:
664 case NVMET_FCOP_READDATA_RSP:
665 xfrlen = tgt_fcpreq->transfer_length;
667 fcloop_fcp_copy_data(op, tgt_fcpreq->sg,
668 fcpreq->first_sgl, tgt_fcpreq->offset,
670 fcpreq->transferred_length += xfrlen;
672 if (op == NVMET_FCOP_READDATA)
675 /* Fall-Thru to RSP handling */
/* copy the response, truncated to the smaller of the two buffers */
680 rsplen = ((fcpreq->rsplen < tgt_fcpreq->rsplen) ?
681 fcpreq->rsplen : tgt_fcpreq->rsplen);
682 memcpy(fcpreq->rspaddr, tgt_fcpreq->rspaddr, rsplen);
683 if (rsplen < tgt_fcpreq->rsplen)
685 fcpreq->rcv_rsplen = rsplen;
688 tfcp_req->status = 0;
/* op no longer in flight; report outcome to the target transport */
696 spin_lock_irq(&tfcp_req->reqlock);
697 tfcp_req->active = false;
698 spin_unlock_irq(&tfcp_req->reqlock);
700 tgt_fcpreq->transferred_length = xfrlen;
701 tgt_fcpreq->fcp_error = fcp_err;
702 tgt_fcpreq->done(tgt_fcpreq);
/*
 * Target-side abort upcall: flag the request aborted so later ops fail,
 * and record an internal error status for eventual host completion.
 */
708 fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
709 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
711 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
/*
714 * mark aborted only in case there were 2 threads in transport
715 * (one doing io, other doing abort) and only kills ops posted
716 * after the abort request
 */
718 spin_lock_irq(&tfcp_req->reqlock);
719 tfcp_req->aborted = true;
720 spin_unlock_irq(&tfcp_req->reqlock);
722 tfcp_req->status = NVME_SC_INTERNAL;
/*
725 * nothing more to do. If io wasn't active, the transport should
726 * immediately call the req_release. If it was active, the op
727 * will complete, and the lldd should call req_release.
 */
/* final target-side release: defer host completion to work context */
732 fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
733 struct nvmefc_tgt_fcp_req *tgt_fcpreq)
735 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
737 schedule_work(&tfcp_req->tio_done_work);
/* LS aborts are not simulated by the loopback transport - no-op */
741 fcloop_ls_abort(struct nvme_fc_local_port *localport,
742 struct nvme_fc_remote_port *remoteport,
743 struct nvmefc_ls_req *lsreq)
/*
 * Initiator abort entry point.  Look up the target-side request under
 * inilock and take an extra reference, then either schedule the abort
 * work (i/o still in flight) or drop the reference (already done).
 */
748 fcloop_fcp_abort(struct nvme_fc_local_port *localport,
749 struct nvme_fc_remote_port *remoteport,
750 void *hw_queue_handle,
751 struct nvmefc_fcp_req *fcpreq)
753 struct fcloop_ini_fcpreq *inireq = fcpreq->private;
754 struct fcloop_fcpreq *tfcp_req;
757 spin_lock(&inireq->inilock);
758 tfcp_req = inireq->tfcp_req;
/*
 * NOTE(review): the return of fcloop_tfcp_req_get() (a wrapper around
 * kref_get_unless_zero()) is ignored here; a zero return means the
 * request is already being freed and must not be used afterwards.
 * Verify against upstream, which checks this return value.
 */
760 fcloop_tfcp_req_get(tfcp_req);
761 spin_unlock(&inireq->inilock);
764 /* abort has already been called */
767 /* break initiator/target relationship for io */
768 spin_lock_irq(&tfcp_req->reqlock);
769 switch (tfcp_req->inistate) {
772 tfcp_req->inistate = INI_IO_ABORTED;
774 case INI_IO_COMPLETED:
778 spin_unlock_irq(&tfcp_req->reqlock);
782 spin_unlock_irq(&tfcp_req->reqlock);
785 /* leave the reference while the work item is scheduled */
786 WARN_ON(!schedule_work(&tfcp_req->abort_rcv_work));
/*
789 * as the io has already had the done callback made,
790 * nothing more to do. So release the reference taken above
 */
792 fcloop_tfcp_req_put(tfcp_req);
/* kref release: unlink the nport from the global list and free it */
797 fcloop_nport_free(struct kref *ref)
799 struct fcloop_nport *nport =
800 container_of(ref, struct fcloop_nport, ref);
803 spin_lock_irqsave(&fcloop_lock, flags);
804 list_del(&nport->nport_list);
805 spin_unlock_irqrestore(&fcloop_lock, flags);
811 fcloop_nport_put(struct fcloop_nport *nport)
813 kref_put(&nport->ref, fcloop_nport_free);
/* returns 0 if the nport is already being torn down */
817 fcloop_nport_get(struct fcloop_nport *nport)
819 return kref_get_unless_zero(&nport->ref);
/* nvme-fc upcall: the local port is fully unregistered */
823 fcloop_localport_delete(struct nvme_fc_local_port *localport)
825 struct fcloop_lport_priv *lport_priv = localport->private;
826 struct fcloop_lport *lport = lport_priv->lport;
828 /* release any threads waiting for the unreg to complete */
829 complete(&lport->unreg_done);
/* nvme-fc upcall: remote port gone - drop its nport reference */
833 fcloop_remoteport_delete(struct nvme_fc_remote_port *remoteport)
835 struct fcloop_rport *rport = remoteport->private;
837 fcloop_nport_put(rport->nport);
/* nvmet-fc upcall: target port gone - drop its nport reference */
841 fcloop_targetport_delete(struct nvmet_fc_target_port *targetport)
843 struct fcloop_tport *tport = targetport->private;
845 fcloop_nport_put(tport->nport);
/* fixed capabilities advertised to both transports */
848 #define FCLOOP_HW_QUEUES 4
849 #define FCLOOP_SGL_SEGS 256
850 #define FCLOOP_DMABOUND_4G 0xFFFFFFFF
/* initiator-side (nvme-fc) LLDD operations for the loopback transport */
852 static struct nvme_fc_port_template fctemplate = {
853 .localport_delete = fcloop_localport_delete,
854 .remoteport_delete = fcloop_remoteport_delete,
855 .create_queue = fcloop_create_queue,
856 .delete_queue = fcloop_delete_queue,
857 .ls_req = fcloop_ls_req,
858 .fcp_io = fcloop_fcp_req,
859 .ls_abort = fcloop_ls_abort,
860 .fcp_abort = fcloop_fcp_abort,
861 .max_hw_queues = FCLOOP_HW_QUEUES,
862 .max_sgl_segments = FCLOOP_SGL_SEGS,
863 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
864 .dma_boundary = FCLOOP_DMABOUND_4G,
865 /* sizes of additional private data for data structures */
866 .local_priv_sz = sizeof(struct fcloop_lport_priv),
867 .remote_priv_sz = sizeof(struct fcloop_rport),
868 .lsrqst_priv_sz = sizeof(struct fcloop_lsreq),
869 .fcprqst_priv_sz = sizeof(struct fcloop_ini_fcpreq),
/* target-side (nvmet-fc) LLDD operations for the loopback transport */
872 static struct nvmet_fc_target_template tgttemplate = {
873 .targetport_delete = fcloop_targetport_delete,
874 .xmt_ls_rsp = fcloop_xmt_ls_rsp,
875 .fcp_op = fcloop_fcp_op,
876 .fcp_abort = fcloop_tgt_fcp_abort,
877 .fcp_req_release = fcloop_fcp_req_release,
878 .discovery_event = fcloop_tgt_discovery_evt,
879 .max_hw_queues = FCLOOP_HW_QUEUES,
880 .max_sgl_segments = FCLOOP_SGL_SEGS,
881 .max_dif_sgl_segments = FCLOOP_SGL_SEGS,
882 .dma_boundary = FCLOOP_DMABOUND_4G,
883 /* optional features */
884 .target_features = 0,
885 /* sizes of additional private data for data structures */
886 .target_priv_sz = sizeof(struct fcloop_tport),
/*
 * "add_local_port" attribute: parse wwnn/wwpn/roles/fcaddr, register an
 * nvme-fc local port, and track it on the fcloop_lports list.
 */
890 fcloop_create_local_port(struct device *dev, struct device_attribute *attr,
891 const char *buf, size_t count)
893 struct nvme_fc_port_info pinfo;
894 struct fcloop_ctrl_options *opts;
895 struct nvme_fc_local_port *localport;
896 struct fcloop_lport *lport;
897 struct fcloop_lport_priv *lport_priv;
901 lport = kzalloc(sizeof(*lport), GFP_KERNEL);
905 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
909 ret = fcloop_parse_options(opts, buf);
913 /* everything there ? */
914 if ((opts->mask & LPORT_OPTS) != LPORT_OPTS) {
919 memset(&pinfo, 0, sizeof(pinfo));
920 pinfo.node_name = opts->wwnn;
921 pinfo.port_name = opts->wwpn;
922 pinfo.port_role = opts->roles;
923 pinfo.port_id = opts->fcaddr;
925 ret = nvme_fc_register_localport(&pinfo, &fctemplate, NULL, &localport);
/* wire the nvme-fc private area back to our tracking struct */
928 lport_priv = localport->private;
929 lport_priv->lport = lport;
931 lport->localport = localport;
932 INIT_LIST_HEAD(&lport->lport_list);
934 spin_lock_irqsave(&fcloop_lock, flags);
935 list_add_tail(&lport->lport_list, &fcloop_lports);
936 spin_unlock_irqrestore(&fcloop_lock, flags);
942 /* free only if we're going to fail */
946 return ret ? ret : count;
/* must be called with fcloop_lock held */
951 __unlink_local_port(struct fcloop_lport *lport)
953 list_del(&lport->lport_list);
/* unregister and block until the localport_delete upcall fires */
957 __wait_localport_unreg(struct fcloop_lport *lport)
961 init_completion(&lport->unreg_done);
963 ret = nvme_fc_unregister_localport(lport->localport);
965 wait_for_completion(&lport->unreg_done);
/*
 * "del_local_port" attribute: locate the lport by wwnn/wwpn, unlink it
 * under fcloop_lock, then unregister it and wait for completion.
 */
974 fcloop_delete_local_port(struct device *dev, struct device_attribute *attr,
975 const char *buf, size_t count)
977 struct fcloop_lport *tlport, *lport = NULL;
978 u64 nodename, portname;
982 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
986 spin_lock_irqsave(&fcloop_lock, flags);
988 list_for_each_entry(tlport, &fcloop_lports, lport_list) {
989 if (tlport->localport->node_name == nodename &&
990 tlport->localport->port_name == portname) {
992 __unlink_local_port(lport);
996 spin_unlock_irqrestore(&fcloop_lock, flags);
1001 ret = __wait_localport_unreg(lport);
1003 return ret ? ret : count;
/*
 * Allocate (or reuse) the fcloop_nport for an add_remote_port /
 * add_target_port write.  Validates the option mask for the port type,
 * requires that the wwnn/wwpn not collide with a local port, and - if
 * an nport with the same identity already exists - updates it in place
 * instead of adding a duplicate.  Returns the nport or NULL on error.
 */
1006 static struct fcloop_nport *
1007 fcloop_alloc_nport(const char *buf, size_t count, bool remoteport)
1009 struct fcloop_nport *newnport, *nport = NULL;
1010 struct fcloop_lport *tmplport, *lport = NULL;
1011 struct fcloop_ctrl_options *opts;
1012 unsigned long flags;
1013 u32 opts_mask = (remoteport) ? RPORT_OPTS : TGTPORT_OPTS;
1016 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
1020 ret = fcloop_parse_options(opts, buf);
1024 /* everything there ? */
1025 if ((opts->mask & opts_mask) != opts_mask) {
1030 newnport = kzalloc(sizeof(*newnport), GFP_KERNEL);
1034 INIT_LIST_HEAD(&newnport->nport_list);
1035 newnport->node_name = opts->wwnn;
1036 newnport->port_name = opts->wwpn;
1037 if (opts->mask & NVMF_OPT_ROLES)
1038 newnport->port_role = opts->roles;
1039 if (opts->mask & NVMF_OPT_FCADDR)
1040 newnport->port_id = opts->fcaddr;
1041 kref_init(&newnport->ref);
1043 spin_lock_irqsave(&fcloop_lock, flags);
/* the new identity must not clash with any local port; for a remote
 * port, also resolve the local port named by lpwwnn/lpwwpn */
1045 list_for_each_entry(tmplport, &fcloop_lports, lport_list) {
1046 if (tmplport->localport->node_name == opts->wwnn &&
1047 tmplport->localport->port_name == opts->wwpn)
1048 goto out_invalid_opts;
1050 if (tmplport->localport->node_name == opts->lpwwnn &&
1051 tmplport->localport->port_name == opts->lpwwpn)
1057 goto out_invalid_opts;
1058 newnport->lport = lport;
/* existing nport with the same identity: update it and reuse */
1061 list_for_each_entry(nport, &fcloop_nports, nport_list) {
1062 if (nport->node_name == opts->wwnn &&
1063 nport->port_name == opts->wwpn) {
1064 if ((remoteport && nport->rport) ||
1065 (!remoteport && nport->tport)) {
1067 goto out_invalid_opts;
1070 fcloop_nport_get(nport);
1072 spin_unlock_irqrestore(&fcloop_lock, flags);
1075 nport->lport = lport;
1076 if (opts->mask & NVMF_OPT_ROLES)
1077 nport->port_role = opts->roles;
1078 if (opts->mask & NVMF_OPT_FCADDR)
1079 nport->port_id = opts->fcaddr;
1080 goto out_free_newnport;
1084 list_add_tail(&newnport->nport_list, &fcloop_nports);
1086 spin_unlock_irqrestore(&fcloop_lock, flags);
1092 spin_unlock_irqrestore(&fcloop_lock, flags);
/*
 * "add_remote_port" attribute: allocate/reuse the nport, register an
 * nvme-fc remote port on the named local port, and cross-link it with
 * any target port already present on the same nport.
 */
1101 fcloop_create_remote_port(struct device *dev, struct device_attribute *attr,
1102 const char *buf, size_t count)
1104 struct nvme_fc_remote_port *remoteport;
1105 struct fcloop_nport *nport;
1106 struct fcloop_rport *rport;
1107 struct nvme_fc_port_info pinfo;
1110 nport = fcloop_alloc_nport(buf, count, true);
1114 memset(&pinfo, 0, sizeof(pinfo));
1115 pinfo.node_name = nport->node_name;
1116 pinfo.port_name = nport->port_name;
1117 pinfo.port_role = nport->port_role;
1118 pinfo.port_id = nport->port_id;
1120 ret = nvme_fc_register_remoteport(nport->lport->localport,
1121 &pinfo, &remoteport);
1122 if (ret || !remoteport) {
/* drop the reference taken by fcloop_alloc_nport() */
1123 fcloop_nport_put(nport);
/* cross-link initiator and target views of this nport */
1128 rport = remoteport->private;
1129 rport->remoteport = remoteport;
1130 rport->targetport = (nport->tport) ? nport->tport->targetport : NULL;
1132 nport->tport->remoteport = remoteport;
1133 nport->tport->lport = nport->lport;
1135 rport->nport = nport;
1136 rport->lport = nport->lport;
1137 nport->rport = rport;
/* detach the rport from its nport; caller must hold fcloop_lock */
1143 static struct fcloop_rport *
1144 __unlink_remote_port(struct fcloop_nport *nport)
1146 struct fcloop_rport *rport = nport->rport;
1148 if (rport && nport->tport)
1149 nport->tport->remoteport = NULL;
1150 nport->rport = NULL;
/* unregister the remote port from nvme-fc (no-op checks elided here) */
1156 __remoteport_unreg(struct fcloop_nport *nport, struct fcloop_rport *rport)
1161 return nvme_fc_unregister_remoteport(rport->remoteport);
1165 fcloop_delete_remote_port(struct device *dev, struct device_attribute *attr,
1166 const char *buf, size_t count)
1168 struct fcloop_nport *nport = NULL, *tmpport;
1169 static struct fcloop_rport *rport;
1170 u64 nodename, portname;
1171 unsigned long flags;
1174 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1178 spin_lock_irqsave(&fcloop_lock, flags);
1180 list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1181 if (tmpport->node_name == nodename &&
1182 tmpport->port_name == portname && tmpport->rport) {
1184 rport = __unlink_remote_port(nport);
1189 spin_unlock_irqrestore(&fcloop_lock, flags);
1194 ret = __remoteport_unreg(nport, rport);
1196 return ret ? ret : count;
/*
 * "add_target_port" attribute: allocate/reuse the nport, register an
 * nvmet-fc target port, and cross-link it with any remote port already
 * present on the same nport.
 */
1200 fcloop_create_target_port(struct device *dev, struct device_attribute *attr,
1201 const char *buf, size_t count)
1203 struct nvmet_fc_target_port *targetport;
1204 struct fcloop_nport *nport;
1205 struct fcloop_tport *tport;
1206 struct nvmet_fc_port_info tinfo;
1209 nport = fcloop_alloc_nport(buf, count, false);
1213 tinfo.node_name = nport->node_name;
1214 tinfo.port_name = nport->port_name;
1215 tinfo.port_id = nport->port_id;
1217 ret = nvmet_fc_register_targetport(&tinfo, &tgttemplate, NULL,
/* registration failed: drop the fcloop_alloc_nport() reference */
1220 fcloop_nport_put(nport);
/* cross-link target and initiator views of this nport */
1225 tport = targetport->private;
1226 tport->targetport = targetport;
1227 tport->remoteport = (nport->rport) ? nport->rport->remoteport : NULL;
1229 nport->rport->targetport = targetport;
1230 tport->nport = nport;
1231 tport->lport = nport->lport;
1232 nport->tport = tport;
/* detach the tport from its nport; caller must hold fcloop_lock */
1238 static struct fcloop_tport *
1239 __unlink_target_port(struct fcloop_nport *nport)
1241 struct fcloop_tport *tport = nport->tport;
1243 if (tport && nport->rport)
1244 nport->rport->targetport = NULL;
1245 nport->tport = NULL;
/* unregister the target port from nvmet-fc */
1251 __targetport_unreg(struct fcloop_nport *nport, struct fcloop_tport *tport)
1256 return nvmet_fc_unregister_targetport(tport->targetport);
/*
 * "del_target_port" attribute: find the nport carrying a target port by
 * wwnn/wwpn, unlink it under fcloop_lock, then unregister it.
 */
1260 fcloop_delete_target_port(struct device *dev, struct device_attribute *attr,
1261 const char *buf, size_t count)
1263 struct fcloop_nport *nport = NULL, *tmpport;
1264 struct fcloop_tport *tport = NULL;
1265 u64 nodename, portname;
1266 unsigned long flags;
1269 ret = fcloop_parse_nm_options(dev, &nodename, &portname, buf);
1273 spin_lock_irqsave(&fcloop_lock, flags);
1275 list_for_each_entry(tmpport, &fcloop_nports, nport_list) {
1276 if (tmpport->node_name == nodename &&
1277 tmpport->port_name == portname && tmpport->tport) {
1279 tport = __unlink_target_port(nport);
1284 spin_unlock_irqrestore(&fcloop_lock, flags);
1289 ret = __targetport_unreg(nport, tport);
1291 return ret ? ret : count;
/* write-only control attributes exposed on the fcloop "ctl" device */
1295 static DEVICE_ATTR(add_local_port, 0200, NULL, fcloop_create_local_port);
1296 static DEVICE_ATTR(del_local_port, 0200, NULL, fcloop_delete_local_port);
1297 static DEVICE_ATTR(add_remote_port, 0200, NULL, fcloop_create_remote_port);
1298 static DEVICE_ATTR(del_remote_port, 0200, NULL, fcloop_delete_remote_port);
1299 static DEVICE_ATTR(add_target_port, 0200, NULL, fcloop_create_target_port);
1300 static DEVICE_ATTR(del_target_port, 0200, NULL, fcloop_delete_target_port);
1302 static struct attribute *fcloop_dev_attrs[] = {
1303 &dev_attr_add_local_port.attr,
1304 &dev_attr_del_local_port.attr,
1305 &dev_attr_add_remote_port.attr,
1306 &dev_attr_del_remote_port.attr,
1307 &dev_attr_add_target_port.attr,
1308 &dev_attr_del_target_port.attr,
1312 static struct attribute_group fclopp_dev_attrs_group = {
1313 .attrs = fcloop_dev_attrs,
1316 static const struct attribute_group *fcloop_dev_attr_groups[] = {
1317 &fclopp_dev_attrs_group,
1321 static struct class *fcloop_class;
1322 static struct device *fcloop_device;
/*
 * Module init: create the "fcloop" class and its "ctl" device carrying
 * the add/del port attributes.
 */
1325 static int __init fcloop_init(void)
1329 fcloop_class = class_create(THIS_MODULE, "fcloop");
1330 if (IS_ERR(fcloop_class)) {
1331 pr_err("couldn't register class fcloop\n");
1332 ret = PTR_ERR(fcloop_class);
1336 fcloop_device = device_create_with_groups(
1337 fcloop_class, NULL, MKDEV(0, 0), NULL,
1338 fcloop_dev_attr_groups, "ctl");
1339 if (IS_ERR(fcloop_device)) {
1340 pr_err("couldn't create ctl device!\n");
1341 ret = PTR_ERR(fcloop_device);
1342 goto out_destroy_class;
/* hold an extra device reference, dropped in fcloop_exit() */
1345 get_device(fcloop_device);
1350 class_destroy(fcloop_class);
/*
 * Module exit: tear down every remaining nport (target then remote
 * side) and every local port, re-taking fcloop_lock between the
 * sleeping unregister calls, then remove the ctl device and class.
 */
1354 static void __exit fcloop_exit(void)
1356 struct fcloop_lport *lport;
1357 struct fcloop_nport *nport;
1358 struct fcloop_tport *tport;
1359 struct fcloop_rport *rport;
1360 unsigned long flags;
1363 spin_lock_irqsave(&fcloop_lock, flags);
1366 nport = list_first_entry_or_null(&fcloop_nports,
1367 typeof(*nport), nport_list);
/* unlink both halves under the lock, unregister outside it */
1371 tport = __unlink_target_port(nport);
1372 rport = __unlink_remote_port(nport);
1374 spin_unlock_irqrestore(&fcloop_lock, flags);
1376 ret = __targetport_unreg(nport, tport);
1378 pr_warn("%s: Failed deleting target port\n", __func__);
1380 ret = __remoteport_unreg(nport, rport);
1382 pr_warn("%s: Failed deleting remote port\n", __func__);
1384 spin_lock_irqsave(&fcloop_lock, flags);
1388 lport = list_first_entry_or_null(&fcloop_lports,
1389 typeof(*lport), lport_list);
1393 __unlink_local_port(lport);
1395 spin_unlock_irqrestore(&fcloop_lock, flags);
1397 ret = __wait_localport_unreg(lport);
1399 pr_warn("%s: Failed deleting local port\n", __func__);
1401 spin_lock_irqsave(&fcloop_lock, flags);
1404 spin_unlock_irqrestore(&fcloop_lock, flags);
/* drop the reference taken in fcloop_init(), then remove the device */
1406 put_device(fcloop_device);
1408 device_destroy(fcloop_class, MKDEV(0, 0));
1409 class_destroy(fcloop_class);
/* module entry/exit points and license */
1412 module_init(fcloop_init);
1413 module_exit(fcloop_exit);
1415 MODULE_LICENSE("GPL v2");