/*
 * Xen SCSI backend driver
 *
 * Copyright (c) 2008, FUJITSU Limited
 *
 * Based on the blkback driver code.
 * Adaptation to kernel target core infrastructure taken from vhost/scsi.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#define pr_fmt(fmt) "xen-pvscsi: " fmt

#include <linux/module.h>
#include <linux/utsname.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/configfs.h>

#include <generated/utsrelease.h>

#include <scsi/scsi_host.h> /* SG_ALL */

#include <target/target_core_base.h>
#include <target/target_core_fabric.h>

#include <asm/hypervisor.h>

#include <xen/xen.h>
#include <xen/balloon.h>
#include <xen/events.h>
#include <xen/xenbus.h>
#include <xen/grant_table.h>
#include <xen/page.h>

#include <xen/interface/grant_table.h>
#include <xen/interface/io/vscsiif.h>
#define VSCSI_VERSION	"v0.1"
#define VSCSI_NAMELEN	32

struct ids_tuple {
	unsigned int hst;	/* host    */
	unsigned int chn;	/* channel */
	unsigned int tgt;	/* target  */
	unsigned int lun;	/* LUN     */
};

struct v2p_entry {
	struct ids_tuple v;		/* translate from */
	struct scsiback_tpg *tpg;	/* translate to   */
	unsigned int lun;
	struct kref kref;
	struct list_head l;
};

struct vscsibk_info {
	struct xenbus_device *dev;

	domid_t domid;
	unsigned int irq;

	struct vscsiif_back_ring ring;

	spinlock_t ring_lock;
	atomic_t nr_unreplied_reqs;

	spinlock_t v2p_lock;
	struct list_head v2p_entry_lists;

	wait_queue_head_t waiting_to_free;
};
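
/*
 * One vscsibk_info instance exists per connected frontend (one xenbus
 * device); its v2p_entry list holds the virtual-to-physical LUN
 * translations configured for that frontend.
 */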
/* theoretical maximum of grants for one request */
#define VSCSI_MAX_GRANTS	(SG_ALL + VSCSIIF_SG_TABLESIZE)

/*
 * VSCSI_GRANT_BATCH is the maximum number of grants to be processed in one
 * call to map/unmap grants. Don't choose it too large, as there are arrays
 * with VSCSI_GRANT_BATCH elements allocated on the stack.
 */
#define VSCSI_GRANT_BATCH	16
struct vscsibk_pend {
	uint16_t rqid;

	uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
	uint8_t cmd_len;

	uint8_t sc_data_direction;
	uint16_t n_sg;		/* real length of SG list */
	uint16_t n_grants;	/* SG pages and potentially SG list */
	uint32_t data_len;
	uint32_t result;

	struct vscsibk_info *info;
	struct v2p_entry *v2p;
	struct scatterlist *sgl;

	uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];

	grant_handle_t grant_handles[VSCSI_MAX_GRANTS];
	struct page *pages[VSCSI_MAX_GRANTS];

	struct se_cmd se_cmd;

	struct completion tmr_done;
};
#define VSCSI_DEFAULT_SESSION_TAGS	128

struct scsiback_nexus {
	/* Pointer to TCM session for I_T Nexus */
	struct se_session *tvn_se_sess;
};
struct scsiback_tport {
	/* SCSI protocol the tport is providing */
	u16 tport_proto_id;
	/* Binary World Wide unique Port Name for pvscsi Target port */
	u64 tport_wwpn;
	/* ASCII formatted WWPN for pvscsi Target port */
	char tport_name[VSCSI_NAMELEN];
	/* Returned by scsiback_make_tport() */
	struct se_wwn tport_wwn;
};
struct scsiback_tpg {
	/* scsiback port target portal group tag for TCM */
	u16 tport_tpgt;
	/* track number of TPG Port/Lun Links wrt explicit I_T Nexus shutdown */
	int tv_tpg_port_count;
	/* xen-pvscsi references to tpg_nexus, protected by tv_tpg_mutex */
	int tv_tpg_fe_count;
	/* list for scsiback_list */
	struct list_head tv_tpg_list;
	/* Used to protect access for tpg_nexus */
	struct mutex tv_tpg_mutex;
	/* Pointer to the TCM pvscsi I_T Nexus for this TPG endpoint */
	struct scsiback_nexus *tpg_nexus;
	/* Pointer back to scsiback_tport */
	struct scsiback_tport *tport;
	/* Returned by scsiback_make_tpg() */
	struct se_portal_group se_tpg;
	/* alias used in xenstore */
	char param_alias[VSCSI_NAMELEN];
	/* list of info structures related to this target portal group */
	struct list_head info_list;
};
#define SCSIBACK_INVALID_HANDLE (~0)

static bool log_print_stat;
module_param(log_print_stat, bool, 0644);

static int scsiback_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, scsiback_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
	"Maximum number of free pages to keep in backend buffer");
static DEFINE_SPINLOCK(free_pages_lock);
static int free_pages_num;
static LIST_HEAD(scsiback_free_pages);

/* Global mutex to protect the scsiback TPG list */
static DEFINE_MUTEX(scsiback_mutex);
static LIST_HEAD(scsiback_list);
static void scsiback_get(struct vscsibk_info *info)
{
	atomic_inc(&info->nr_unreplied_reqs);
}

static void scsiback_put(struct vscsibk_info *info)
{
	if (atomic_dec_and_test(&info->nr_unreplied_reqs))
		wake_up(&info->waiting_to_free);
}
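
/*
 * Pool of pages used for grant mappings. Instead of returning every page
 * to the allocator when a request completes, up to max_buffer_pages are
 * kept on scsiback_free_pages for reuse by later requests.
 */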
static void put_free_pages(struct page **page, int num)
{
	unsigned long flags;
	int i = free_pages_num + num, n = num;

	if (num == 0)
		return;
	if (i > scsiback_max_buffer_pages) {
		n = min(num, i - scsiback_max_buffer_pages);
		gnttab_free_pages(n, page + num - n);
		n = num - n;
	}
	spin_lock_irqsave(&free_pages_lock, flags);
	for (i = 0; i < n; i++)
		list_add(&page[i]->lru, &scsiback_free_pages);
	free_pages_num += n;
	spin_unlock_irqrestore(&free_pages_lock, flags);
}
static int get_free_page(struct page **page)
{
	unsigned long flags;

	spin_lock_irqsave(&free_pages_lock, flags);
	if (list_empty(&scsiback_free_pages)) {
		spin_unlock_irqrestore(&free_pages_lock, flags);
		return gnttab_alloc_pages(1, page);
	}
	page[0] = list_first_entry(&scsiback_free_pages, struct page, lru);
	list_del(&page[0]->lru);
	free_pages_num--;
	spin_unlock_irqrestore(&free_pages_lock, flags);

	return 0;
}
static unsigned long vaddr_page(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	return (unsigned long)pfn_to_kaddr(pfn);
}

static unsigned long vaddr(struct vscsibk_pend *req, int seg)
{
	return vaddr_page(req->pages[seg]);
}
static void scsiback_print_status(char *sense_buffer, int errors,
				  struct vscsibk_pend *pending_req)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;

	pr_err("[%s:%d] cmnd[0]=%02x -> st=%02x msg=%02x host=%02x drv=%02x\n",
	       tpg->tport->tport_name, pending_req->v2p->lun,
	       pending_req->cmnd[0], status_byte(errors), msg_byte(errors),
	       host_byte(errors), driver_byte(errors));
}
static void scsiback_fast_flush_area(struct vscsibk_pend *req)
{
	struct gnttab_unmap_grant_ref unmap[VSCSI_GRANT_BATCH];
	struct page *pages[VSCSI_GRANT_BATCH];
	unsigned int i, invcount = 0;
	grant_handle_t handle;
	int err;

	kfree(req->sgl);
	req->sgl = NULL;
	req->n_sg = 0;

	if (!req->n_grants)
		return;

	for (i = 0; i < req->n_grants; i++) {
		handle = req->grant_handles[i];
		if (handle == SCSIBACK_INVALID_HANDLE)
			continue;
		gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
				    GNTMAP_host_map, handle);
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;
		pages[invcount] = req->pages[i];
		put_page(pages[invcount]);
		invcount++;
		if (invcount < VSCSI_GRANT_BATCH)
			continue;
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
		invcount = 0;
	}

	if (invcount) {
		err = gnttab_unmap_refs(unmap, NULL, pages, invcount);
		BUG_ON(err);
	}

	put_free_pages(req->pages, req->n_grants);
	req->n_grants = 0;
}
static void scsiback_free_translation_entry(struct kref *kref)
{
	struct v2p_entry *entry = container_of(kref, struct v2p_entry, kref);
	struct scsiback_tpg *tpg = entry->tpg;

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_fe_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(entry);
}
static void scsiback_send_response(struct vscsibk_info *info,
			char *sense_buffer, int32_t result, uint32_t resid,
			uint16_t rqid)
{
	struct vscsiif_response *ring_res;
	int notify;
	struct scsi_sense_hdr sshdr;
	unsigned long flags;
	unsigned len;

	spin_lock_irqsave(&info->ring_lock, flags);

	ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
	info->ring.rsp_prod_pvt++;

	ring_res->rslt   = result;
	ring_res->rqid   = rqid;

	if (sense_buffer != NULL &&
	    scsi_normalize_sense(sense_buffer, VSCSIIF_SENSE_BUFFERSIZE,
				 &sshdr)) {
		/* fixed-format sense: 8 header bytes + additional length */
		len = min_t(unsigned, 8 + sense_buffer[7],
			    VSCSIIF_SENSE_BUFFERSIZE);
		memcpy(ring_res->sense_buffer, sense_buffer, len);
		ring_res->sense_len = len;
	} else {
		ring_res->sense_len = 0;
	}

	ring_res->residual_len = resid;

	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
	spin_unlock_irqrestore(&info->ring_lock, flags);

	if (notify)
		notify_remote_via_irq(info->irq);
}
static void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
			uint32_t resid, struct vscsibk_pend *pending_req)
{
	scsiback_send_response(pending_req->info, sense_buffer, result,
			       resid, pending_req->rqid);

	if (pending_req->v2p)
		kref_put(&pending_req->v2p->kref,
			 scsiback_free_translation_entry);
}
static void scsiback_cmd_done(struct vscsibk_pend *pending_req)
{
	struct vscsibk_info *info = pending_req->info;
	unsigned char *sense_buffer;
	unsigned int resid;
	int errors;

	sense_buffer = pending_req->sense_buffer;
	resid        = pending_req->se_cmd.residual_count;
	errors       = pending_req->result;

	if (errors && log_print_stat)
		scsiback_print_status(sense_buffer, errors, pending_req);

	scsiback_fast_flush_area(pending_req);
	scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
	scsiback_put(info);
	/*
	 * Drop the extra KREF_ACK reference taken by
	 * target_submit_cmd_map_sgls() ahead of scsiback_check_stop_free()
	 * -> transport_generic_free_cmd() final se_cmd->cmd_kref put.
	 */
	target_put_sess_cmd(&pending_req->se_cmd);
}
static void scsiback_cmd_exec(struct vscsibk_pend *pending_req)
{
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	struct se_session *sess = pending_req->v2p->tpg->tpg_nexus->tvn_se_sess;
	int rc;

	scsiback_get(pending_req->info);
	se_cmd->tag = pending_req->rqid;
	rc = target_submit_cmd_map_sgls(se_cmd, sess, pending_req->cmnd,
			pending_req->sense_buffer, pending_req->v2p->lun,
			pending_req->data_len, 0,
			pending_req->sc_data_direction, TARGET_SCF_ACK_KREF,
			pending_req->sgl, pending_req->n_sg,
			NULL, 0, NULL, 0);
	if (rc < 0) {
		transport_send_check_condition_and_sense(se_cmd,
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
		transport_generic_free_cmd(se_cmd, 0);
	}
}
static int scsiback_gnttab_data_map_batch(struct gnttab_map_grant_ref *map,
			struct page **pg, grant_handle_t *grant, int cnt)
{
	int err, i;

	if (!cnt)
		return 0;

	err = gnttab_map_refs(map, NULL, pg, cnt);
	for (i = 0; i < cnt; i++) {
		if (unlikely(map[i].status != GNTST_okay)) {
			pr_err("invalid buffer -- could not remap it\n");
			map[i].handle = SCSIBACK_INVALID_HANDLE;
			if (!err)
				err = -ENOMEM;
		} else {
			get_page(pg[i]);
		}
		grant[i] = map[i].handle;
	}
	return err;
}
static int scsiback_gnttab_data_map_list(struct vscsibk_pend *pending_req,
			struct scsiif_request_segment *seg, struct page **pg,
			grant_handle_t *grant, int cnt, u32 flags)
{
	int mapcount = 0, i, err = 0;
	struct gnttab_map_grant_ref map[VSCSI_GRANT_BATCH];
	struct vscsibk_info *info = pending_req->info;

	for (i = 0; i < cnt; i++) {
		if (get_free_page(pg + mapcount)) {
			put_free_pages(pg, mapcount);
			pr_err("no grant page\n");
			return -ENOMEM;
		}
		gnttab_set_map_op(&map[mapcount], vaddr_page(pg[mapcount]),
				  flags, seg[i].gref, info->domid);
		mapcount++;
		if (mapcount < VSCSI_GRANT_BATCH)
			continue;
		err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
		pg += mapcount;
		grant += mapcount;
		pending_req->n_grants += mapcount;
		if (err)
			return err;
		mapcount = 0;
	}
	err = scsiback_gnttab_data_map_batch(map, pg, grant, mapcount);
	pending_req->n_grants += mapcount;
	return err;
}
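
/*
 * Map the data pages of one request. The frontend either passes up to
 * VSCSIIF_SG_TABLESIZE segments directly in ring_req->seg[], or, with
 * VSCSIIF_SG_GRANT set in nr_segments, ring_req->seg[] references granted
 * pages that themselves contain arrays of struct scsiif_request_segment
 * (up to SG_ALL data segments in total).
 */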
static int scsiback_gnttab_data_map(struct vscsiif_request *ring_req,
					struct vscsibk_pend *pending_req)
{
	u32 flags;
	int i, err, n_segs, i_seg = 0;
	struct page **pg;
	struct scsiif_request_segment *seg;
	unsigned long end_seg = 0;
	unsigned int nr_segments = (unsigned int)ring_req->nr_segments;
	unsigned int nr_sgl = 0;
	struct scatterlist *sg;
	grant_handle_t *grant;

	pending_req->n_sg = 0;
	pending_req->n_grants = 0;
	pending_req->data_len = 0;

	nr_segments &= ~VSCSIIF_SG_GRANT;
	if (!nr_segments)
		return 0;

	if (nr_segments > VSCSIIF_SG_TABLESIZE) {
		pr_debug("invalid parameter nr_seg = %d\n",
			 ring_req->nr_segments);
		return -EINVAL;
	}

	if (ring_req->nr_segments & VSCSIIF_SG_GRANT) {
		err = scsiback_gnttab_data_map_list(pending_req, ring_req->seg,
			pending_req->pages, pending_req->grant_handles,
			nr_segments, GNTMAP_host_map | GNTMAP_readonly);
		if (err)
			return err;
		nr_sgl = nr_segments;
		nr_segments = 0;
		for (i = 0; i < nr_sgl; i++) {
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			if ((unsigned)ring_req->seg[i].offset +
			    (unsigned)ring_req->seg[i].length > PAGE_SIZE ||
			    n_segs * sizeof(struct scsiif_request_segment) !=
			    ring_req->seg[i].length)
				return -EINVAL;
			nr_segments += n_segs;
		}
		if (nr_segments > SG_ALL) {
			pr_debug("invalid nr_seg = %d\n", nr_segments);
			return -EINVAL;
		}
	}

	/* free of (sgl) in fast_flush_area() */
	pending_req->sgl = kmalloc_array(nr_segments,
					 sizeof(struct scatterlist), GFP_KERNEL);
	if (!pending_req->sgl)
		return -ENOMEM;

	sg_init_table(pending_req->sgl, nr_segments);
	pending_req->n_sg = nr_segments;

	flags = GNTMAP_host_map;
	if (pending_req->sc_data_direction == DMA_TO_DEVICE)
		flags |= GNTMAP_readonly;

	pg = pending_req->pages + nr_sgl;
	grant = pending_req->grant_handles + nr_sgl;
	if (!nr_sgl) {
		seg = ring_req->seg;
		err = scsiback_gnttab_data_map_list(pending_req, seg,
			pg, grant, nr_segments, flags);
		if (err)
			return err;
	} else {
		for (i = 0; i < nr_sgl; i++) {
			seg = (struct scsiif_request_segment *)(
			      vaddr(pending_req, i) + ring_req->seg[i].offset);
			n_segs = ring_req->seg[i].length /
				 sizeof(struct scsiif_request_segment);
			err = scsiback_gnttab_data_map_list(pending_req, seg,
				pg, grant, n_segs, flags);
			if (err)
				return err;
			pg += n_segs;
			grant += n_segs;
		}
		end_seg = vaddr(pending_req, 0) + ring_req->seg[0].offset;
		seg = (struct scsiif_request_segment *)end_seg;
		end_seg += ring_req->seg[0].length;
		pg = pending_req->pages + nr_sgl;
	}

	for_each_sg(pending_req->sgl, sg, nr_segments, i) {
		sg_set_page(sg, pg[i], seg->length, seg->offset);
		pending_req->data_len += seg->length;
		seg++;
		if (nr_sgl && (unsigned long)seg >= end_seg) {
			i_seg++;
			end_seg = vaddr(pending_req, i_seg) +
				  ring_req->seg[i_seg].offset;
			seg = (struct scsiif_request_segment *)end_seg;
			end_seg += ring_req->seg[i_seg].length;
		}
		if (sg->offset >= PAGE_SIZE ||
		    sg->length > PAGE_SIZE ||
		    sg->offset + sg->length > PAGE_SIZE)
			return -EINVAL;
	}

	return 0;
}
static void scsiback_disconnect(struct vscsibk_info *info)
{
	wait_event(info->waiting_to_free,
		   atomic_read(&info->nr_unreplied_reqs) == 0);

	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
	xenbus_unmap_ring_vfree(info->dev, info->ring.sring);
}
static void scsiback_device_action(struct vscsibk_pend *pending_req,
	enum tcm_tmreq_table act, int tag)
{
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_cmd *se_cmd = &pending_req->se_cmd;
	u64 unpacked_lun = pending_req->v2p->lun;
	int rc, err = FAILED;

	init_completion(&pending_req->tmr_done);

	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
			       &pending_req->sense_buffer[0],
			       unpacked_lun, NULL, act, GFP_KERNEL,
			       tag, TARGET_SCF_ACK_KREF);
	if (rc)
		goto err;

	wait_for_completion(&pending_req->tmr_done);

	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
		SUCCESS : FAILED;

	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
	transport_generic_free_cmd(&pending_req->se_cmd, 0);
	return;

err:
	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
}
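
/*
 * Each v2p_entry is reference counted: scsiback_do_translation() takes a
 * reference for the request being prepared, which is dropped again in
 * scsiback_do_resp_with_sense() once the response has been sent.
 */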
/*
 * Perform virtual to physical translation
 */
static struct v2p_entry *scsiback_do_translation(struct vscsibk_info *info,
			struct ids_tuple *v)
{
	struct v2p_entry *entry;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);
	list_for_each_entry(entry, head, l) {
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun)) {
			kref_get(&entry->kref);
			goto out;
		}
	}
	entry = NULL;

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return entry;
}
static struct vscsibk_pend *scsiback_get_pend_req(struct vscsiif_back_ring *ring,
				struct v2p_entry *v2p)
{
	struct scsiback_tpg *tpg = v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_session *se_sess = nexus->tvn_se_sess;
	struct vscsibk_pend *req;
	int tag, cpu, i;

	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
	if (tag < 0) {
		pr_err("Unable to obtain tag for vscsiif_request\n");
		return ERR_PTR(-ENOMEM);
	}

	req = &((struct vscsibk_pend *)se_sess->sess_cmd_map)[tag];
	memset(req, 0, sizeof(*req));
	req->se_cmd.map_tag = tag;
	req->se_cmd.map_cpu = cpu;

	for (i = 0; i < VSCSI_MAX_GRANTS; i++)
		req->grant_handles[i] = SCSIBACK_INVALID_HANDLE;

	return req;
}
static struct vscsibk_pend *prepare_pending_reqs(struct vscsibk_info *info,
				struct vscsiif_back_ring *ring,
				struct vscsiif_request *ring_req)
{
	struct vscsibk_pend *pending_req;
	struct v2p_entry *v2p;
	struct ids_tuple vir;

	/* request range check from frontend */
	if ((ring_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
	    (ring_req->sc_data_direction != DMA_TO_DEVICE) &&
	    (ring_req->sc_data_direction != DMA_FROM_DEVICE) &&
	    (ring_req->sc_data_direction != DMA_NONE)) {
		pr_debug("invalid parameter data_dir = %d\n",
			 ring_req->sc_data_direction);
		return ERR_PTR(-EINVAL);
	}
	if (ring_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
		pr_debug("invalid parameter cmd_len = %d\n",
			 ring_req->cmd_len);
		return ERR_PTR(-EINVAL);
	}

	vir.chn = ring_req->channel;
	vir.tgt = ring_req->id;
	vir.lun = ring_req->lun;

	v2p = scsiback_do_translation(info, &vir);
	if (!v2p) {
		pr_debug("the v2p of (chn:%d, tgt:%d, lun:%d) doesn't exist.\n",
			 vir.chn, vir.tgt, vir.lun);
		return ERR_PTR(-ENODEV);
	}

	pending_req = scsiback_get_pend_req(ring, v2p);
	if (IS_ERR(pending_req)) {
		kref_put(&v2p->kref, scsiback_free_translation_entry);
		return ERR_PTR(-ENOMEM);
	}
	pending_req->rqid = ring_req->rqid;
	pending_req->info = info;
	pending_req->v2p = v2p;
	pending_req->sc_data_direction = ring_req->sc_data_direction;
	pending_req->cmd_len = ring_req->cmd_len;
	memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);

	return pending_req;
}
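
/*
 * Consume requests from the ring. Returns > 0 if more work is pending,
 * 0 when the ring is empty, and < 0 on a ring error, in which case the
 * caller keeps the event channel masked.
 */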
static int scsiback_do_cmd_fn(struct vscsibk_info *info,
			      unsigned int *eoi_flags)
{
	struct vscsiif_back_ring *ring = &info->ring;
	struct vscsiif_request ring_req;
	struct vscsibk_pend *pending_req;
	RING_IDX rc, rp;
	int more_to_do;
	uint32_t result;

	rc = ring->req_cons;
	rp = ring->sring->req_prod;
	rmb();	/* guest system is accessing ring, too */

	if (RING_REQUEST_PROD_OVERFLOW(ring, rp)) {
		rc = ring->rsp_prod_pvt;
		pr_warn("Dom%d provided bogus ring requests (%#x - %#x = %u). Halting ring processing\n",
			info->domid, rp, rc, rp - rc);
		return -EINVAL;
	}

	while ((rc != rp)) {
		*eoi_flags &= ~XEN_EOI_FLAG_SPURIOUS;

		if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
			break;

		RING_COPY_REQUEST(ring, rc, &ring_req);
		ring->req_cons = ++rc;

		pending_req = prepare_pending_reqs(info, ring, &ring_req);
		if (IS_ERR(pending_req)) {
			switch (PTR_ERR(pending_req)) {
			case -ENODEV:
				result = DID_NO_CONNECT;
				break;
			default:
				result = DRIVER_ERROR;
				break;
			}
			scsiback_send_response(info, NULL, result << 24, 0,
					       ring_req.rqid);
			return 1;
		}

		switch (ring_req.act) {
		case VSCSIIF_ACT_SCSI_CDB:
			if (scsiback_gnttab_data_map(&ring_req, pending_req)) {
				scsiback_fast_flush_area(pending_req);
				scsiback_do_resp_with_sense(NULL,
						DRIVER_ERROR << 24, 0, pending_req);
				transport_generic_free_cmd(&pending_req->se_cmd, 0);
			} else {
				scsiback_cmd_exec(pending_req);
			}
			break;
		case VSCSIIF_ACT_SCSI_ABORT:
			scsiback_device_action(pending_req, TMR_ABORT_TASK,
					       ring_req.ref_rqid);
			break;
		case VSCSIIF_ACT_SCSI_RESET:
			scsiback_device_action(pending_req, TMR_LUN_RESET, 0);
			break;
		default:
			pr_err_ratelimited("invalid request\n");
			scsiback_do_resp_with_sense(NULL, DRIVER_ERROR << 24, 0,
						    pending_req);
			transport_generic_free_cmd(&pending_req->se_cmd, 0);
			break;
		}

		/* Yield point for this unbounded loop. */
		cond_resched();
	}

	RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
	return more_to_do;
}
static irqreturn_t scsiback_irq_fn(int irq, void *dev_id)
{
	struct vscsibk_info *info = dev_id;
	int rc;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	while ((rc = scsiback_do_cmd_fn(info, &eoi_flags)) > 0)
		cond_resched();

	/* In case of a ring error we keep the event channel masked. */
	if (!rc)
		xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}
static int scsiback_init_sring(struct vscsibk_info *info, grant_ref_t ring_ref,
			evtchn_port_t evtchn)
{
	void *area;
	struct vscsiif_sring *sring;
	int err;

	if (info->irq)
		return -1;

	err = xenbus_map_ring_valloc(info->dev, &ring_ref, 1, &area);
	if (err)
		return err;

	sring = (struct vscsiif_sring *)area;
	BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);

	err = bind_interdomain_evtchn_to_irq_lateeoi(info->domid, evtchn);
	if (err < 0)
		goto unmap_page;

	info->irq = err;

	err = request_threaded_irq(info->irq, NULL, scsiback_irq_fn,
				   IRQF_ONESHOT, "vscsiif-backend", info);
	if (err)
		goto free_irq;

	return 0;

free_irq:
	unbind_from_irqhandler(info->irq, info);
	info->irq = 0;
unmap_page:
	xenbus_unmap_ring_vfree(info->dev, area);

	return err;
}
static int scsiback_map(struct vscsibk_info *info)
{
	struct xenbus_device *dev = info->dev;
	unsigned int ring_ref;
	evtchn_port_t evtchn;
	int err;

	err = xenbus_gather(XBT_NIL, dev->otherend,
			    "ring-ref", "%u", &ring_ref,
			    "event-channel", "%u", &evtchn, NULL);
	if (err) {
		xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
		return err;
	}

	return scsiback_init_sring(info, ring_ref, evtchn);
}
/*
 * Check for a translation entry being present
 */
static struct v2p_entry *scsiback_chk_translation_entry(
	struct vscsibk_info *info, struct ids_tuple *v)
{
	struct list_head *head = &(info->v2p_entry_lists);
	struct v2p_entry *entry;

	list_for_each_entry(entry, head, l)
		if ((entry->v.chn == v->chn) &&
		    (entry->v.tgt == v->tgt) &&
		    (entry->v.lun == v->lun))
			return entry;

	return NULL;
}
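
/*
 * The physical device string ("phy") handed in from xenstore has the form
 * "<target port name or alias>:<lun>", e.g. a hypothetical
 * "naa.60014054ac780582:0"; the part before the last ':' must match an
 * already configured target port name or its configfs alias.
 */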
/*
 * Add a new translation entry
 */
static int scsiback_add_translation_entry(struct vscsibk_info *info,
					  char *phy, struct ids_tuple *v)
{
	int err = 0;
	struct v2p_entry *new;
	unsigned long flags;
	char *lunp;
	unsigned long long unpacked_lun;
	struct se_lun *se_lun;
	struct scsiback_tpg *tpg_entry, *tpg = NULL;
	char *error = "doesn't exist";

	lunp = strrchr(phy, ':');
	if (!lunp) {
		pr_err("illegal format of physical device %s\n", phy);
		return -EINVAL;
	}
	*lunp = 0;
	lunp++;
	err = kstrtoull(lunp, 10, &unpacked_lun);
	if (err < 0) {
		pr_err("lun number not valid: %s\n", lunp);
		return err;
	}

	mutex_lock(&scsiback_mutex);
	list_for_each_entry(tpg_entry, &scsiback_list, tv_tpg_list) {
		if (!strcmp(phy, tpg_entry->tport->tport_name) ||
		    !strcmp(phy, tpg_entry->param_alias)) {
			mutex_lock(&tpg_entry->se_tpg.tpg_lun_mutex);
			hlist_for_each_entry(se_lun, &tpg_entry->se_tpg.tpg_lun_hlist, link) {
				if (se_lun->unpacked_lun == unpacked_lun) {
					if (!tpg_entry->tpg_nexus)
						error = "nexus undefined";
					else
						tpg = tpg_entry;
					break;
				}
			}
			mutex_unlock(&tpg_entry->se_tpg.tpg_lun_mutex);
			break;
		}
	}
	if (tpg) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count++;
		mutex_unlock(&tpg->tv_tpg_mutex);
	}
	mutex_unlock(&scsiback_mutex);

	if (!tpg) {
		pr_err("%s:%llu %s\n", phy, unpacked_lun, error);
		return -ENODEV;
	}

	new = kmalloc(sizeof(struct v2p_entry), GFP_KERNEL);
	if (new == NULL) {
		err = -ENOMEM;
		goto out_free;
	}

	spin_lock_irqsave(&info->v2p_lock, flags);

	/* Check double assignment to identical virtual ID */
	if (scsiback_chk_translation_entry(info, v)) {
		pr_warn("Virtual ID is already used. Assignment was not performed.\n");
		err = -EEXIST;
		goto out;
	}

	/* Create a new translation entry and add to the list */
	kref_init(&new->kref);
	new->v = *v;
	new->tpg = tpg;
	new->lun = unpacked_lun;
	list_add_tail(&new->l, &info->v2p_entry_lists);

out:
	spin_unlock_irqrestore(&info->v2p_lock, flags);

out_free:
	if (err) {
		mutex_lock(&tpg->tv_tpg_mutex);
		tpg->tv_tpg_fe_count--;
		mutex_unlock(&tpg->tv_tpg_mutex);
		kfree(new);
	}

	return err;
}
static void __scsiback_del_translation_entry(struct v2p_entry *entry)
{
	list_del(&entry->l);
	kref_put(&entry->kref, scsiback_free_translation_entry);
}
/*
 * Delete the translation entry specified
 */
static int scsiback_del_translation_entry(struct vscsibk_info *info,
					  struct ids_tuple *v)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&info->v2p_lock, flags);
	/* Find out the translation entry specified */
	entry = scsiback_chk_translation_entry(info, v);
	if (entry)
		__scsiback_del_translation_entry(entry);
	else
		ret = -ENOENT;

	spin_unlock_irqrestore(&info->v2p_lock, flags);
	return ret;
}
static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
				char *phy, struct ids_tuple *vir, int try)
{
	struct v2p_entry *entry;
	unsigned long flags;
	int err;

	if (try) {
		spin_lock_irqsave(&info->v2p_lock, flags);
		entry = scsiback_chk_translation_entry(info, vir);
		spin_unlock_irqrestore(&info->v2p_lock, flags);
		if (entry)
			return;
	}
	if (!scsiback_add_translation_entry(info, phy, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateInitialised)) {
			pr_err("xenbus_printf error %s\n", state);
			scsiback_del_translation_entry(info, vir);
		}
	} else if (!try) {
		err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
				"%s: writing %s", __func__, state);
	}
}
static void scsiback_do_del_lun(struct vscsibk_info *info, const char *state,
				struct ids_tuple *vir)
{
	if (!scsiback_del_translation_entry(info, vir)) {
		if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
				  "%d", XenbusStateClosed))
			pr_err("xenbus_printf error %s\n", state);
	}
}
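
/*
 * LUN hotplug is driven by xenstore entries below the backend device node,
 * one directory per virtual device; a sketch with example values:
 *
 *   vscsi-devs/dev-0/p-dev = "naa.60014054ac780582:0"  (physical target)
 *   vscsi-devs/dev-0/v-dev = "0:0:0:0"                 (host:chn:tgt:lun)
 *   vscsi-devs/dev-0/state = "1"                       (XenbusState value)
 */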
#define VSCSIBACK_OP_ADD_OR_DEL_LUN	1
#define VSCSIBACK_OP_UPDATEDEV_STATE	2

static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
				     char *ent)
{
	int err;
	struct ids_tuple vir;
	char *val;
	int device_state;
	char phy[VSCSI_NAMELEN];
	char str[64];
	char state[64];
	struct xenbus_device *dev = info->dev;

	/* read status */
	snprintf(state, sizeof(state), "vscsi-devs/%s/state", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, state, "%u", &device_state);
	if (XENBUS_EXIST_ERR(err))
		return;

	/* physical SCSI device */
	snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
	val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
	if (IS_ERR(val)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
				"%s: writing %s", __func__, state);
		return;
	}
	strlcpy(phy, val, VSCSI_NAMELEN);
	kfree(val);

	/* virtual SCSI device */
	snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", ent);
	err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
			   &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
	if (XENBUS_EXIST_ERR(err)) {
		err = xenbus_printf(XBT_NIL, dev->nodename, state,
				    "%d", XenbusStateClosed);
		if (err)
			xenbus_dev_error(info->dev, err,
				"%s: writing %s", __func__, state);
		return;
	}

	switch (op) {
	case VSCSIBACK_OP_ADD_OR_DEL_LUN:
		switch (device_state) {
		case XenbusStateInitialising:
			scsiback_do_add_lun(info, state, phy, &vir, 0);
			break;
		case XenbusStateConnected:
			scsiback_do_add_lun(info, state, phy, &vir, 1);
			break;
		case XenbusStateClosing:
			scsiback_do_del_lun(info, state, &vir);
			break;
		default:
			break;
		}
		break;

	case VSCSIBACK_OP_UPDATEDEV_STATE:
		if (device_state == XenbusStateInitialised) {
			/* modify vscsi-devs/dev-x/state */
			if (xenbus_printf(XBT_NIL, dev->nodename, state,
					  "%d", XenbusStateConnected)) {
				pr_err("xenbus_printf error %s\n", str);
				scsiback_del_translation_entry(info, &vir);
				xenbus_printf(XBT_NIL, dev->nodename, state,
					      "%d", XenbusStateClosed);
			}
		}
		break;
	/* When it is necessary, processing is added here. */
	default:
		break;
	}
}
static void scsiback_do_lun_hotplug(struct vscsibk_info *info, int op)
{
	int i;
	char **dir;
	unsigned int ndir = 0;

	dir = xenbus_directory(XBT_NIL, info->dev->nodename, "vscsi-devs",
			       &ndir);
	if (IS_ERR(dir))
		return;

	for (i = 0; i < ndir; i++)
		scsiback_do_1lun_hotplug(info, op, dir[i]);

	kfree(dir);
}
static void scsiback_frontend_changed(struct xenbus_device *dev,
					enum xenbus_state frontend_state)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	switch (frontend_state) {
	case XenbusStateInitialising:
		break;

	case XenbusStateInitialised:
		if (scsiback_map(info))
			break;

		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateConnected:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_UPDATEDEV_STATE);

		if (dev->state == XenbusStateConnected)
			break;

		xenbus_switch_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		if (info->irq)
			scsiback_disconnect(info);

		xenbus_switch_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		xenbus_switch_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		fallthrough;	/* if not online */
	case XenbusStateUnknown:
		device_unregister(&dev->dev);
		break;

	case XenbusStateReconfiguring:
		scsiback_do_lun_hotplug(info, VSCSIBACK_OP_ADD_OR_DEL_LUN);
		xenbus_switch_state(dev, XenbusStateReconfigured);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}
/*
 * Release the translation entries of one frontend
 */
static void scsiback_release_translation_entry(struct vscsibk_info *info)
{
	struct v2p_entry *entry, *tmp;
	struct list_head *head = &(info->v2p_entry_lists);
	unsigned long flags;

	spin_lock_irqsave(&info->v2p_lock, flags);

	list_for_each_entry_safe(entry, tmp, head, l)
		__scsiback_del_translation_entry(entry);

	spin_unlock_irqrestore(&info->v2p_lock, flags);
}
static int scsiback_remove(struct xenbus_device *dev)
{
	struct vscsibk_info *info = dev_get_drvdata(&dev->dev);

	if (info->irq)
		scsiback_disconnect(info);

	scsiback_release_translation_entry(info);

	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
static int scsiback_probe(struct xenbus_device *dev,
			  const struct xenbus_device_id *id)
{
	int err;

	struct vscsibk_info *info = kzalloc(sizeof(struct vscsibk_info),
					    GFP_KERNEL);

	pr_debug("%s %p %d\n", __func__, dev, dev->otherend_id);

	if (!info) {
		xenbus_dev_fatal(dev, -ENOMEM, "allocating backend structure");
		return -ENOMEM;
	}
	info->dev = dev;
	dev_set_drvdata(&dev->dev, info);

	info->domid = dev->otherend_id;
	spin_lock_init(&info->ring_lock);
	atomic_set(&info->nr_unreplied_reqs, 0);
	init_waitqueue_head(&info->waiting_to_free);
	info->irq = 0;
	INIT_LIST_HEAD(&info->v2p_entry_lists);
	spin_lock_init(&info->v2p_lock);

	err = xenbus_printf(XBT_NIL, dev->nodename, "feature-sg-grant", "%u",
			    SG_ALL);
	if (err)
		xenbus_dev_error(dev, err, "writing feature-sg-grant");

	err = xenbus_switch_state(dev, XenbusStateInitWait);
	if (err)
		goto fail;

	return 0;

fail:
	pr_warn("%s failed\n", __func__);
	scsiback_remove(dev);

	return err;
}
static char *scsiback_dump_proto_id(struct scsiback_tport *tport)
{
	switch (tport->tport_proto_id) {
	case SCSI_PROTOCOL_SAS:
		return "SAS";
	case SCSI_PROTOCOL_FCP:
		return "FCP";
	case SCSI_PROTOCOL_ISCSI:
		return "iSCSI";
	default:
		break;
	}

	return "Unknown";
}
static char *scsiback_get_fabric_wwn(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport = tpg->tport;

	return &tport->tport_name[0];
}

static u16 scsiback_get_tag(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	return tpg->tport_tpgt;
}
static struct se_wwn *
scsiback_make_tport(struct target_fabric_configfs *tf,
		    struct config_group *group,
		    const char *name)
{
	struct scsiback_tport *tport;
	char *ptr;
	u64 wwpn = 0;
	int off = 0;

	tport = kzalloc(sizeof(struct scsiback_tport), GFP_KERNEL);
	if (!tport)
		return ERR_PTR(-ENOMEM);

	tport->tport_wwpn = wwpn;
	/*
	 * Determine the emulated Protocol Identifier and Target Port Name
	 * based on the incoming configfs directory name.
	 */
	ptr = strstr(name, "naa.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_SAS;
		goto check_len;
	}
	ptr = strstr(name, "fc.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_FCP;
		off = 3; /* Skip over "fc." */
		goto check_len;
	}
	ptr = strstr(name, "iqn.");
	if (ptr) {
		tport->tport_proto_id = SCSI_PROTOCOL_ISCSI;
		goto check_len;
	}

	pr_err("Unable to locate prefix for emulated Target Port: %s\n", name);
	kfree(tport);
	return ERR_PTR(-EINVAL);

check_len:
	if (strlen(name) >= VSCSI_NAMELEN) {
		pr_err("Emulated %s Address: %s, exceeds max: %d\n", name,
		       scsiback_dump_proto_id(tport), VSCSI_NAMELEN);
		kfree(tport);
		return ERR_PTR(-EINVAL);
	}
	snprintf(&tport->tport_name[0], VSCSI_NAMELEN, "%s", &name[off]);

	pr_debug("Allocated emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), name);

	return &tport->tport_wwn;
}
static void scsiback_drop_tport(struct se_wwn *wwn)
{
	struct scsiback_tport *tport = container_of(wwn,
				struct scsiback_tport, tport_wwn);

	pr_debug("Deallocating emulated Target %s Address: %s\n",
		 scsiback_dump_proto_id(tport), tport->tport_name);

	kfree(tport);
}
static u32 scsiback_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_stop_free(struct se_cmd *se_cmd)
{
	return transport_generic_free_cmd(se_cmd, 0);
}

static void scsiback_release_cmd(struct se_cmd *se_cmd)
{
	target_free_tag(se_cmd->se_sess, se_cmd);
}

static u32 scsiback_sess_get_index(struct se_session *se_sess)
{
	return 0;
}

static int scsiback_write_pending(struct se_cmd *se_cmd)
{
	/* Go ahead and process the write immediately */
	target_execute_cmd(se_cmd);

	return 0;
}

static void scsiback_set_default_node_attrs(struct se_node_acl *nacl)
{
}

static int scsiback_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}

static int scsiback_queue_data_in(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	pending_req->result = SAM_STAT_GOOD;
	scsiback_cmd_done(pending_req);
	return 0;
}

static int scsiback_queue_status(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	if (se_cmd->sense_buffer &&
	    ((se_cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) ||
	     (se_cmd->se_cmd_flags & SCF_EMULATED_TASK_SENSE)))
		pending_req->result = (DRIVER_SENSE << 24) |
				      SAM_STAT_CHECK_CONDITION;
	else
		pending_req->result = se_cmd->scsi_status;

	scsiback_cmd_done(pending_req);
	return 0;
}

static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
{
	struct vscsibk_pend *pending_req = container_of(se_cmd,
				struct vscsibk_pend, se_cmd);

	complete(&pending_req->tmr_done);
}

static void scsiback_aborted_task(struct se_cmd *se_cmd)
{
}
static ssize_t scsiback_tpg_param_alias_show(struct config_item *item,
					     char *page)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	ssize_t rb;

	mutex_lock(&tpg->tv_tpg_mutex);
	rb = snprintf(page, PAGE_SIZE, "%s\n", tpg->param_alias);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return rb;
}

static ssize_t scsiback_tpg_param_alias_store(struct config_item *item,
					      const char *page, size_t count)
{
	struct se_portal_group *se_tpg = param_to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg, struct scsiback_tpg,
						se_tpg);
	int len;

	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("param alias: %s, exceeds max: %d\n", page,
		       VSCSI_NAMELEN);
		return -EINVAL;
	}

	mutex_lock(&tpg->tv_tpg_mutex);
	len = snprintf(tpg->param_alias, VSCSI_NAMELEN, "%s", page);
	if (tpg->param_alias[len - 1] == '\n')
		tpg->param_alias[len - 1] = '\0';
	mutex_unlock(&tpg->tv_tpg_mutex);

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_param_, alias);

static struct configfs_attribute *scsiback_param_attrs[] = {
	&scsiback_tpg_param_attr_alias,
	NULL,
};
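
/*
 * The "alias" attribute above provides an alternative name for the target
 * port: scsiback_add_translation_entry() accepts it in place of the full
 * WWN when matching the p-dev string from xenstore.
 */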
static int scsiback_alloc_sess_cb(struct se_portal_group *se_tpg,
				  struct se_session *se_sess, void *p)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	tpg->tpg_nexus = p;
	return 0;
}

static int scsiback_make_nexus(struct scsiback_tpg *tpg,
			       const char *name)
{
	struct scsiback_nexus *tv_nexus;
	int ret = 0;

	mutex_lock(&tpg->tv_tpg_mutex);
	if (tpg->tpg_nexus) {
		pr_debug("tpg->tpg_nexus already exists\n");
		ret = -EEXIST;
		goto out_unlock;
	}

	tv_nexus = kzalloc(sizeof(struct scsiback_nexus), GFP_KERNEL);
	if (!tv_nexus) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	tv_nexus->tvn_se_sess = target_setup_session(&tpg->se_tpg,
						     VSCSI_DEFAULT_SESSION_TAGS,
						     sizeof(struct vscsibk_pend),
						     TARGET_PROT_NORMAL, name,
						     tv_nexus, scsiback_alloc_sess_cb);
	if (IS_ERR(tv_nexus->tvn_se_sess)) {
		kfree(tv_nexus);
		ret = -ENOMEM;
	}

out_unlock:
	mutex_unlock(&tpg->tv_tpg_mutex);
	return ret;
}
static int scsiback_drop_nexus(struct scsiback_tpg *tpg)
{
	struct se_session *se_sess;
	struct scsiback_nexus *tv_nexus;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	se_sess = tv_nexus->tvn_se_sess;
	if (!se_sess) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}

	if (tpg->tv_tpg_port_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG port count: %d\n",
		       tpg->tv_tpg_port_count);
		return -EBUSY;
	}

	if (tpg->tv_tpg_fe_count != 0) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		pr_err("Unable to remove xen-pvscsi I_T Nexus with active TPG frontend count: %d\n",
		       tpg->tv_tpg_fe_count);
		return -EBUSY;
	}

	pr_debug("Removing I_T Nexus to emulated %s Initiator Port: %s\n",
		 scsiback_dump_proto_id(tpg->tport),
		 tv_nexus->tvn_se_sess->se_node_acl->initiatorname);

	/*
	 * Release the SCSI I_T Nexus to the emulated xen-pvscsi Target Port
	 */
	target_remove_session(se_sess);
	tpg->tpg_nexus = NULL;
	mutex_unlock(&tpg->tv_tpg_mutex);

	kfree(tv_nexus);
	return 0;
}
static ssize_t scsiback_tpg_nexus_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_nexus *tv_nexus;
	ssize_t ret;

	mutex_lock(&tpg->tv_tpg_mutex);
	tv_nexus = tpg->tpg_nexus;
	if (!tv_nexus) {
		mutex_unlock(&tpg->tv_tpg_mutex);
		return -ENODEV;
	}
	ret = snprintf(page, PAGE_SIZE, "%s\n",
		       tv_nexus->tvn_se_sess->se_node_acl->initiatorname);
	mutex_unlock(&tpg->tv_tpg_mutex);

	return ret;
}
static ssize_t scsiback_tpg_nexus_store(struct config_item *item,
					const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);
	struct scsiback_tport *tport_wwn = tpg->tport;
	unsigned char i_port[VSCSI_NAMELEN], *ptr, *port_ptr;
	int ret;
	/*
	 * Shutdown the active I_T nexus if 'NULL' is passed.
	 */
	if (!strncmp(page, "NULL", 4)) {
		ret = scsiback_drop_nexus(tpg);
		return (!ret) ? count : ret;
	}
	/*
	 * Otherwise make sure the passed virtual Initiator port WWN matches
	 * the fabric protocol_id set in scsiback_make_tport(), and call
	 * scsiback_make_nexus().
	 */
	if (strlen(page) >= VSCSI_NAMELEN) {
		pr_err("Emulated NAA Sas Address: %s, exceeds max: %d\n",
		       page, VSCSI_NAMELEN);
		return -EINVAL;
	}
	snprintf(&i_port[0], VSCSI_NAMELEN, "%s", page);

	ptr = strstr(i_port, "naa.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_SAS) {
			pr_err("Passed SAS Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	ptr = strstr(i_port, "fc.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_FCP) {
			pr_err("Passed FCP Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[3]; /* Skip over "fc." */
		goto check_newline;
	}
	ptr = strstr(i_port, "iqn.");
	if (ptr) {
		if (tport_wwn->tport_proto_id != SCSI_PROTOCOL_ISCSI) {
			pr_err("Passed iSCSI Initiator Port %s does not match target port protoid: %s\n",
			       i_port, scsiback_dump_proto_id(tport_wwn));
			return -EINVAL;
		}
		port_ptr = &i_port[0];
		goto check_newline;
	}
	pr_err("Unable to locate prefix for emulated Initiator Port: %s\n",
	       page);
	return -EINVAL;
	/*
	 * Clear any trailing newline for the NAA WWN
	 */
check_newline:
	if (i_port[strlen(i_port) - 1] == '\n')
		i_port[strlen(i_port) - 1] = '\0';

	ret = scsiback_make_nexus(tpg, port_ptr);
	if (ret < 0)
		return ret;

	return count;
}

CONFIGFS_ATTR(scsiback_tpg_, nexus);

static struct configfs_attribute *scsiback_tpg_attrs[] = {
	&scsiback_tpg_attr_nexus,
	NULL,
};
static ssize_t
scsiback_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "xen-pvscsi fabric module %s on %s/%s on "
		UTS_RELEASE"\n",
		VSCSI_VERSION, utsname()->sysname, utsname()->machine);
}

CONFIGFS_ATTR_RO(scsiback_wwn_, version);

static struct configfs_attribute *scsiback_wwn_attrs[] = {
	&scsiback_wwn_attr_version,
	NULL,
};
static int scsiback_port_link(struct se_portal_group *se_tpg,
			      struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count++;
	mutex_unlock(&tpg->tv_tpg_mutex);

	return 0;
}

static void scsiback_port_unlink(struct se_portal_group *se_tpg,
				 struct se_lun *lun)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&tpg->tv_tpg_mutex);
	tpg->tv_tpg_port_count--;
	mutex_unlock(&tpg->tv_tpg_mutex);
}
static struct se_portal_group *
scsiback_make_tpg(struct se_wwn *wwn, const char *name)
{
	struct scsiback_tport *tport = container_of(wwn,
			struct scsiback_tport, tport_wwn);
	struct scsiback_tpg *tpg;
	u16 tpgt;
	int ret;

	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	ret = kstrtou16(name + 5, 10, &tpgt);
	if (ret)
		return ERR_PTR(ret);

	tpg = kzalloc(sizeof(struct scsiback_tpg), GFP_KERNEL);
	if (!tpg)
		return ERR_PTR(-ENOMEM);

	mutex_init(&tpg->tv_tpg_mutex);
	INIT_LIST_HEAD(&tpg->tv_tpg_list);
	INIT_LIST_HEAD(&tpg->info_list);
	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;

	ret = core_tpg_register(wwn, &tpg->se_tpg, tport->tport_proto_id);
	if (ret < 0) {
		kfree(tpg);
		return NULL;
	}
	mutex_lock(&scsiback_mutex);
	list_add_tail(&tpg->tv_tpg_list, &scsiback_list);
	mutex_unlock(&scsiback_mutex);

	return &tpg->se_tpg;
}
static void scsiback_drop_tpg(struct se_portal_group *se_tpg)
{
	struct scsiback_tpg *tpg = container_of(se_tpg,
				struct scsiback_tpg, se_tpg);

	mutex_lock(&scsiback_mutex);
	list_del(&tpg->tv_tpg_list);
	mutex_unlock(&scsiback_mutex);
	/*
	 * Release the virtual I_T Nexus for this xen-pvscsi TPG
	 */
	scsiback_drop_nexus(tpg);
	/*
	 * Deregister the se_tpg from TCM.
	 */
	core_tpg_deregister(se_tpg);
	kfree(tpg);
}

static int scsiback_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}

static int scsiback_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
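
/*
 * Rough configfs flow for exporting a LUN through this fabric (a sketch;
 * the WWN and backstore names are examples, and the layout is the standard
 * target_core_fabric_configfs one rooted at the fabric name below):
 *
 *   mkdir -p /sys/kernel/config/target/xen-pvscsi/naa.60014054ac780582/tpgt_1
 *   echo naa.60014054ac780582 > .../naa.60014054ac780582/tpgt_1/nexus
 *   mkdir .../naa.60014054ac780582/tpgt_1/lun/lun_0
 *   ln -s /sys/kernel/config/target/core/iblock_0/disk1 \
 *         .../naa.60014054ac780582/tpgt_1/lun/lun_0
 */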
static const struct target_core_fabric_ops scsiback_ops = {
	.module				= THIS_MODULE,
	.fabric_name			= "xen-pvscsi",
	.tpg_get_wwn			= scsiback_get_fabric_wwn,
	.tpg_get_tag			= scsiback_get_tag,
	.tpg_check_demo_mode		= scsiback_check_true,
	.tpg_check_demo_mode_cache	= scsiback_check_true,
	.tpg_check_demo_mode_write_protect = scsiback_check_false,
	.tpg_check_prod_mode_write_protect = scsiback_check_false,
	.tpg_get_inst_index		= scsiback_tpg_get_inst_index,
	.check_stop_free		= scsiback_check_stop_free,
	.release_cmd			= scsiback_release_cmd,
	.sess_get_index			= scsiback_sess_get_index,
	.sess_get_initiator_sid		= NULL,
	.write_pending			= scsiback_write_pending,
	.set_default_node_attributes	= scsiback_set_default_node_attrs,
	.get_cmd_state			= scsiback_get_cmd_state,
	.queue_data_in			= scsiback_queue_data_in,
	.queue_status			= scsiback_queue_status,
	.queue_tm_rsp			= scsiback_queue_tm_rsp,
	.aborted_task			= scsiback_aborted_task,
	/*
	 * Setup callers for generic logic in target_core_fabric_configfs.c
	 */
	.fabric_make_wwn		= scsiback_make_tport,
	.fabric_drop_wwn		= scsiback_drop_tport,
	.fabric_make_tpg		= scsiback_make_tpg,
	.fabric_drop_tpg		= scsiback_drop_tpg,
	.fabric_post_link		= scsiback_port_link,
	.fabric_pre_unlink		= scsiback_port_unlink,

	.tfc_wwn_attrs			= scsiback_wwn_attrs,
	.tfc_tpg_base_attrs		= scsiback_tpg_attrs,
	.tfc_tpg_param_attrs		= scsiback_param_attrs,
};
static const struct xenbus_device_id scsiback_ids[] = {
	{ "vscsi" },
	{ "" }
};

static struct xenbus_driver scsiback_driver = {
	.ids			= scsiback_ids,
	.probe			= scsiback_probe,
	.remove			= scsiback_remove,
	.otherend_changed	= scsiback_frontend_changed
};
static int __init scsiback_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	pr_debug("xen-pvscsi: fabric module %s on %s/%s on "UTS_RELEASE"\n",
		 VSCSI_VERSION, utsname()->sysname, utsname()->machine);

	ret = xenbus_register_backend(&scsiback_driver);
	if (ret)
		goto out;

	ret = target_register_template(&scsiback_ops);
	if (ret)
		goto out_unregister_xenbus;

	return 0;

out_unregister_xenbus:
	xenbus_unregister_driver(&scsiback_driver);
out:
	pr_err("%s: error %d\n", __func__, ret);
	return ret;
}
static void __exit scsiback_exit(void)
{
	struct page *page;

	while (free_pages_num) {
		if (get_free_page(&page))
			BUG();
		gnttab_free_pages(1, &page);
	}
	target_unregister_template(&scsiback_ops);
	xenbus_unregister_driver(&scsiback_driver);
}
module_init(scsiback_init);
module_exit(scsiback_exit);

MODULE_DESCRIPTION("Xen SCSI backend driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vscsi");
MODULE_AUTHOR("Juergen Gross <jgross@suse.com>");