// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_device.c (based on iscsi_target_device.c)
 *
 * This file contains the TCM Virtual Device and Disk Transport
 * agnostic related functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/export.h>
#include <linux/t10-pi.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_common.h>
#include <scsi/scsi_proto.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

static DEFINE_MUTEX(device_mutex);
static LIST_HEAD(device_list);
static DEFINE_IDR(devices_idr);

static struct se_hba *lun0_hba;
/* not static, needed by tpg.c */
struct se_device *g_lun0_dev;
sense_reason_t
transport_lookup_cmd_lun(struct se_cmd *se_cmd)
{
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_dev_entry *deve;
        sense_reason_t ret = TCM_NO_SENSE;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
        if (deve) {
                atomic_long_inc(&deve->total_cmds);

                if (se_cmd->data_direction == DMA_TO_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->write_bytes);
                else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                        atomic_long_add(se_cmd->data_length,
                                        &deve->read_bytes);

                if ((se_cmd->data_direction == DMA_TO_DEVICE) &&
                    deve->lun_access_ro) {
                        pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
                                " Access for 0x%08llx\n",
                                se_cmd->se_tfo->fabric_name,
                                se_cmd->orig_fe_lun);
                        rcu_read_unlock();
                        return TCM_WRITE_PROTECTED;
                }

                se_lun = deve->se_lun;

                if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
                        se_lun = NULL;
                        goto out_unlock;
                }

                se_cmd->se_lun = se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                se_cmd->lun_ref_active = true;
        }
out_unlock:
        rcu_read_unlock();

        if (!se_lun) {
                /*
                 * Use the se_portal_group->tpg_virt_lun0 to allow for
                 * REPORT_LUNS, et al to be returned when no active
                 * MappedLUN=0 exists for this Initiator Port.
                 */
                if (se_cmd->orig_fe_lun != 0) {
                        pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                                " Access for 0x%08llx from %s\n",
                                se_cmd->se_tfo->fabric_name,
                                se_cmd->orig_fe_lun,
                                nacl->initiatorname);
                        return TCM_NON_EXISTENT_LUN;
                }

                /*
                 * Force WRITE PROTECT for virtual LUN 0
                 */
                if ((se_cmd->data_direction != DMA_FROM_DEVICE) &&
                    (se_cmd->data_direction != DMA_NONE))
                        return TCM_WRITE_PROTECTED;

                se_lun = se_sess->se_tpg->tpg_virt_lun0;
                if (!percpu_ref_tryget_live(&se_lun->lun_ref))
                        return TCM_NON_EXISTENT_LUN;

                se_cmd->se_lun = se_sess->se_tpg->tpg_virt_lun0;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                se_cmd->lun_ref_active = true;
        }
        /*
         * RCU reference protected by percpu se_lun->lun_ref taken above that
         * must drop to zero (including initial reference) before this se_lun
         * pointer can be kfree_rcu() by the final se_lun->lun_group put via
         * target_core_fabric_configfs.c:target_fabric_port_release
         */
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        atomic_long_inc(&se_cmd->se_dev->num_cmds);

        if (se_cmd->data_direction == DMA_TO_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->write_bytes);
        else if (se_cmd->data_direction == DMA_FROM_DEVICE)
                atomic_long_add(se_cmd->data_length,
                                &se_cmd->se_dev->read_bytes);

        return ret;
}
EXPORT_SYMBOL(transport_lookup_cmd_lun);
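
/*
 * Illustrative calling-context sketch (hypothetical, not part of this
 * file): fabric drivers do not normally call this directly; the generic
 * submission path resolves the LUN during command setup, roughly:
 *
 *        rc = transport_lookup_cmd_lun(se_cmd);
 *        if (rc)
 *                transport_send_check_condition_and_sense(se_cmd, rc, 0);
 */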
int transport_lookup_tmr_lun(struct se_cmd *se_cmd)
{
        struct se_dev_entry *deve;
        struct se_lun *se_lun = NULL;
        struct se_session *se_sess = se_cmd->se_sess;
        struct se_node_acl *nacl = se_sess->se_node_acl;
        struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;

        rcu_read_lock();
        deve = target_nacl_find_deve(nacl, se_cmd->orig_fe_lun);
        if (deve) {
                se_lun = deve->se_lun;

                if (!percpu_ref_tryget_live(&se_lun->lun_ref)) {
                        se_lun = NULL;
                        goto out_unlock;
                }

                se_cmd->se_lun = se_lun;
                se_cmd->pr_res_key = deve->pr_res_key;
                se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD;
                se_cmd->lun_ref_active = true;
        }
out_unlock:
        rcu_read_unlock();

        if (!se_lun) {
                pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
                        " Access for 0x%08llx for %s\n",
                        se_cmd->se_tfo->fabric_name,
                        se_cmd->orig_fe_lun,
                        nacl->initiatorname);
                return -ENODEV;
        }
        se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev);
        se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev);

        return 0;
}
EXPORT_SYMBOL(transport_lookup_tmr_lun);
bool target_lun_is_rdonly(struct se_cmd *cmd)
{
        struct se_session *se_sess = cmd->se_sess;
        struct se_dev_entry *deve;
        bool ret;

        rcu_read_lock();
        deve = target_nacl_find_deve(se_sess->se_node_acl, cmd->orig_fe_lun);
        ret = deve && deve->lun_access_ro;
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL(target_lun_is_rdonly);
/*
 * This function is called from core_scsi3_emulate_pro_register_and_move()
 * and core_scsi3_decode_spec_i_port(), and will increment &deve->pr_kref
 * when a matching rtpi is found.
 */
struct se_dev_entry *core_get_se_deve_from_rtpi(
        struct se_node_acl *nacl,
        u16 rtpi)
{
        struct se_dev_entry *deve;
        struct se_lun *lun;
        struct se_portal_group *tpg = nacl->se_tpg;

        rcu_read_lock();
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                lun = deve->se_lun;
                if (!lun) {
                        pr_err("%s device entries device pointer is"
                                " NULL, but Initiator has access.\n",
                                tpg->se_tpg_tfo->fabric_name);
                        continue;
                }
                if (lun->lun_tpg->tpg_rtpi != rtpi)
                        continue;

                kref_get(&deve->pr_kref);
                rcu_read_unlock();

                return deve;
        }
        rcu_read_unlock();

        return NULL;
}
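
/*
 * Sketch of the expected caller pairing (hypothetical, for documentation
 * only): the pr_kref reference taken on a match must be dropped once the
 * PR logic is done with the entry:
 *
 *        deve = core_get_se_deve_from_rtpi(nacl, rtpi);
 *        if (deve)
 *                kref_put(&deve->pr_kref, target_pr_kref_release);
 */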
void core_free_device_list_for_node(
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                core_disable_device_list_for_node(deve->se_lun, deve, nacl, tpg);
        mutex_unlock(&nacl->lun_entry_mutex);
}
void core_update_device_list_access(
        u64 mapped_lun,
        bool lun_access_ro,
        struct se_node_acl *nacl)
{
        struct se_dev_entry *deve;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, mapped_lun);
        if (deve)
                deve->lun_access_ro = lun_access_ro;
        mutex_unlock(&nacl->lun_entry_mutex);
}
/*
 * Called with rcu_read_lock or nacl->lun_entry_mutex held.
 */
struct se_dev_entry *target_nacl_find_deve(struct se_node_acl *nacl,
                                           u64 mapped_lun)
{
        struct se_dev_entry *deve;

        hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link)
                if (deve->mapped_lun == mapped_lun)
                        return deve;

        return NULL;
}
EXPORT_SYMBOL(target_nacl_find_deve);
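
/*
 * Minimal lookup sketch (hypothetical) showing the required locking:
 *
 *        rcu_read_lock();
 *        deve = target_nacl_find_deve(nacl, mapped_lun);
 *        if (deve)
 *                ro = deve->lun_access_ro;
 *        rcu_read_unlock();
 *
 * The returned entry is only stable within the RCU read-side critical
 * section unless an additional reference (e.g. deve->pr_kref) is taken.
 */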
void target_pr_kref_release(struct kref *kref)
{
        struct se_dev_entry *deve = container_of(kref, struct se_dev_entry,
                                                 pr_kref);
        complete(&deve->pr_comp);
}
/*
 * Establish UA condition on SCSI device - all LUNs
 */
void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq)
{
        struct se_dev_entry *se_deve;
        struct se_lun *lun;

        spin_lock(&dev->se_port_lock);
        list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) {
                spin_lock(&lun->lun_deve_lock);
                list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link)
                        core_scsi3_ua_allocate(se_deve, asc, ascq);
                spin_unlock(&lun->lun_deve_lock);
        }
        spin_unlock(&dev->se_port_lock);
}
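
/*
 * Illustrative use (hypothetical call site): report the SPC "CAPACITY
 * DATA HAS CHANGED" unit attention (ASC 0x2A, ASCQ 0x09) to every I_T
 * nexus with a mapping to @dev:
 *
 *        target_dev_ua_allocate(dev, 0x2a, 0x09);
 */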
static void
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new,
                             bool skip_new)
{
        struct se_dev_entry *tmp;

        rcu_read_lock();
        hlist_for_each_entry_rcu(tmp, &nacl->lun_entry_hlist, link) {
                if (skip_new && tmp == new)
                        continue;
                core_scsi3_ua_allocate(tmp, 0x3F,
                                       ASCQ_3FH_REPORTED_LUNS_DATA_HAS_CHANGED);
        }
        rcu_read_unlock();
}
int core_enable_device_list_for_node(
        struct se_lun *lun,
        struct se_lun_acl *lun_acl,
        u64 mapped_lun,
        bool lun_access_ro,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        struct se_dev_entry *orig, *new;

        new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new) {
                pr_err("Unable to allocate se_dev_entry memory\n");
                return -ENOMEM;
        }

        spin_lock_init(&new->ua_lock);
        INIT_LIST_HEAD(&new->ua_list);
        INIT_LIST_HEAD(&new->lun_link);

        new->mapped_lun = mapped_lun;
        kref_init(&new->pr_kref);
        init_completion(&new->pr_comp);

        new->lun_access_ro = lun_access_ro;
        new->creation_time = get_jiffies_64();
        new->attach_count++;

        mutex_lock(&nacl->lun_entry_mutex);
        orig = target_nacl_find_deve(nacl, mapped_lun);
        if (orig && orig->se_lun) {
                struct se_lun *orig_lun = orig->se_lun;

                if (orig_lun != lun) {
                        pr_err("Existing orig->se_lun doesn't match new lun"
                                " for dynamic -> explicit NodeACL conversion:"
                                " %s\n", nacl->initiatorname);
                        mutex_unlock(&nacl->lun_entry_mutex);
                        kfree(new);
                        return -EINVAL;
                }
                if (orig->se_lun_acl != NULL) {
                        pr_warn_ratelimited("Detected existing explicit"
                                " se_lun_acl->se_lun_group reference for %s"
                                " mapped_lun: %llu, failing\n",
                                nacl->initiatorname, mapped_lun);
                        mutex_unlock(&nacl->lun_entry_mutex);
                        kfree(new);
                        return -EINVAL;
                }

                new->se_lun = lun;
                new->se_lun_acl = lun_acl;
                hlist_del_rcu(&orig->link);
                hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
                mutex_unlock(&nacl->lun_entry_mutex);

                spin_lock(&lun->lun_deve_lock);
                list_del(&orig->lun_link);
                list_add_tail(&new->lun_link, &lun->lun_deve_list);
                spin_unlock(&lun->lun_deve_lock);

                kref_put(&orig->pr_kref, target_pr_kref_release);
                wait_for_completion(&orig->pr_comp);

                target_luns_data_has_changed(nacl, new, true);
                kfree_rcu(orig, rcu_head);
                return 0;
        }

        new->se_lun = lun;
        new->se_lun_acl = lun_acl;
        hlist_add_head_rcu(&new->link, &nacl->lun_entry_hlist);
        mutex_unlock(&nacl->lun_entry_mutex);

        spin_lock(&lun->lun_deve_lock);
        list_add_tail(&new->lun_link, &lun->lun_deve_list);
        spin_unlock(&lun->lun_deve_lock);

        target_luns_data_has_changed(nacl, new, true);
        return 0;
}
void core_disable_device_list_for_node(
        struct se_lun *lun,
        struct se_dev_entry *orig,
        struct se_node_acl *nacl,
        struct se_portal_group *tpg)
{
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        lockdep_assert_held(&nacl->lun_entry_mutex);

        /*
         * If the MappedLUN entry is being disabled, the entry in
         * lun->lun_deve_list must be removed now before clearing the
         * struct se_dev_entry pointers below as logic in
         * core_alua_do_transition_tg_pt() depends on these being present.
         *
         * deve->se_lun_acl will be NULL for demo-mode created LUNs
         * that have not been explicitly converted to MappedLUNs ->
         * struct se_lun_acl, but we remove deve->lun_link from
         * lun->lun_deve_list. This also means that active UAs and
         * NodeACL context specific PR metadata for demo-mode
         * MappedLUN *deve will be released below.
         */
        spin_lock(&lun->lun_deve_lock);
        list_del(&orig->lun_link);
        spin_unlock(&lun->lun_deve_lock);
        /*
         * Disable struct se_dev_entry LUN ACL mapping
         */
        core_scsi3_ua_release_all(orig);

        hlist_del_rcu(&orig->link);
        clear_bit(DEF_PR_REG_ACTIVE, &orig->deve_flags);
        orig->lun_access_ro = false;
        orig->creation_time = 0;
        orig->attach_count--;
        /*
         * Before firing off RCU callback, wait for any in process SPEC_I_PT=1
         * or REGISTER_AND_MOVE PR operation to complete.
         */
        kref_put(&orig->pr_kref, target_pr_kref_release);
        wait_for_completion(&orig->pr_comp);

        kfree_rcu(orig, rcu_head);

        core_scsi3_free_pr_reg_from_nacl(dev, nacl);
        target_luns_data_has_changed(nacl, NULL, false);
}
/*	core_clear_lun_from_tpg():
 *
 *	Remove all existing MappedLUN entries for @lun from every NodeACL
 *	in @tpg.
 */
void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
{
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        mutex_lock(&tpg->acl_node_mutex);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
                mutex_lock(&nacl->lun_entry_mutex);
                hlist_for_each_entry_rcu(deve, &nacl->lun_entry_hlist, link) {
                        if (lun != deve->se_lun)
                                continue;

                        core_disable_device_list_for_node(lun, deve, nacl, tpg);
                }
                mutex_unlock(&nacl->lun_entry_mutex);
        }
        mutex_unlock(&tpg->acl_node_mutex);
}
static void se_release_vpd_for_dev(struct se_device *dev)
{
        struct t10_vpd *vpd, *vpd_tmp;

        spin_lock(&dev->t10_wwn.t10_vpd_lock);
        list_for_each_entry_safe(vpd, vpd_tmp,
                        &dev->t10_wwn.t10_vpd_list, vpd_list) {
                list_del(&vpd->vpd_list);
                kfree(vpd);
        }
        spin_unlock(&dev->t10_wwn.t10_vpd_lock);
}
static u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
{
        u32 aligned_max_sectors;
        u32 alignment;
        /*
         * Limit max_sectors to a PAGE_SIZE aligned value for modern
         * transport_allocate_data_tasks() operation.
         */
        alignment = max(1ul, PAGE_SIZE / block_size);
        aligned_max_sectors = rounddown(max_sectors, alignment);

        if (max_sectors != aligned_max_sectors)
                pr_info("Rounding down aligned max_sectors from %u to %u\n",
                        max_sectors, aligned_max_sectors);

        return aligned_max_sectors;
}
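
/*
 * Worked example of the rounding above: with PAGE_SIZE = 4096 and a
 * 512-byte block size, alignment = 8 sectors, so max_sectors = 1023
 * rounds down to 1016; with a 4096-byte block size, alignment = 1 and
 * the value is returned unchanged.
 */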
int core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_device *dev,
        struct se_lun *lun)
{
        int rc;

        rc = core_tpg_add_lun(tpg, lun, false, dev);
        if (rc < 0)
                return rc;

        pr_debug("%s_TPG[%u]_LUN[%llu] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->fabric_name, dev->se_hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
         * generate_node_acl is enabled.
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;

                mutex_lock(&tpg->acl_node_mutex);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
                        if (acl->dynamic_node_acl &&
                            (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
                             !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
                                core_tpg_add_node_to_devs(acl, tpg, lun);
                        }
                }
                mutex_unlock(&tpg->acl_node_mutex);
        }

        return 0;
}
/*	core_dev_del_lun():
 *
 *	Deactivate @lun and remove it from @tpg.
 */
void core_dev_del_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun)
{
        pr_debug("%s_TPG[%u]_LUN[%llu] - Deactivating %s Logical Unit from"
                " device object\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->fabric_name);

        core_tpg_remove_lun(tpg, lun);
}
struct se_lun_acl *core_dev_init_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_node_acl *nacl,
        u64 mapped_lun,
        int *ret)
{
        struct se_lun_acl *lacl;

        if (strlen(nacl->initiatorname) >= TRANSPORT_IQN_LEN) {
                pr_err("%s InitiatorName exceeds maximum size.\n",
                        tpg->se_tpg_tfo->fabric_name);
                *ret = -EOVERFLOW;
                return NULL;
        }
        lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
        if (!lacl) {
                pr_err("Unable to allocate memory for struct se_lun_acl.\n");
                *ret = -ENOMEM;
                return NULL;
        }

        lacl->mapped_lun = mapped_lun;
        lacl->se_lun_nacl = nacl;

        return lacl;
}
int core_dev_add_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl,
        struct se_lun *lun,
        bool lun_access_ro)
{
        struct se_node_acl *nacl = lacl->se_lun_nacl;
        /*
         * rcu_dereference_raw protected by se_lun->lun_group symlink
         * reference to se_device->dev_group.
         */
        struct se_device *dev = rcu_dereference_raw(lun->lun_se_dev);

        if (!nacl)
                return -EINVAL;

        if (lun->lun_access_ro)
                lun_access_ro = true;

        lacl->se_lun = lun;

        if (core_enable_device_list_for_node(lun, lacl, lacl->mapped_lun,
                        lun_access_ro, nacl, tpg) < 0)
                return -EINVAL;

        pr_debug("%s_TPG[%hu]_LUN[%llu->%llu] - Added %s ACL for"
                " InitiatorNode: %s\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun, lacl->mapped_lun,
                lun_access_ro ? "RO" : "RW",
                nacl->initiatorname);
        /*
         * Check to see if there are any existing persistent reservation APTPL
         * pre-registrations that need to be enabled for this LUN ACL.
         */
        core_scsi3_check_aptpl_registration(dev, tpg, lun, nacl,
                                            lacl->mapped_lun);
        return 0;
}
int core_dev_del_initiator_node_lun_acl(
        struct se_lun *lun,
        struct se_lun_acl *lacl)
{
        struct se_portal_group *tpg = lun->lun_tpg;
        struct se_node_acl *nacl;
        struct se_dev_entry *deve;

        nacl = lacl->se_lun_nacl;
        if (!nacl)
                return -EINVAL;

        mutex_lock(&nacl->lun_entry_mutex);
        deve = target_nacl_find_deve(nacl, lacl->mapped_lun);
        if (deve)
                core_disable_device_list_for_node(lun, deve, nacl, tpg);
        mutex_unlock(&nacl->lun_entry_mutex);

        pr_debug("%s_TPG[%hu]_LUN[%llu] - Removed ACL for"
                " InitiatorNode: %s Mapped LUN: %llu\n",
                tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                nacl->initiatorname, lacl->mapped_lun);

        return 0;
}
void core_dev_free_initiator_node_lun_acl(
        struct se_portal_group *tpg,
        struct se_lun_acl *lacl)
{
        pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
                " Mapped LUN: %llu\n", tpg->se_tpg_tfo->fabric_name,
                tpg->se_tpg_tfo->tpg_get_tag(tpg),
                tpg->se_tpg_tfo->fabric_name,
                lacl->se_lun_nacl->initiatorname, lacl->mapped_lun);

        kfree(lacl);
}
static void scsi_dump_inquiry(struct se_device *dev)
{
        struct t10_wwn *wwn = &dev->t10_wwn;
        int device_type = dev->transport->get_device_type(dev);

        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
        pr_debug("  Vendor: %-" __stringify(INQUIRY_VENDOR_LEN) "s\n",
                wwn->vendor);
        pr_debug("  Model: %-" __stringify(INQUIRY_MODEL_LEN) "s\n",
                wwn->model);
        pr_debug("  Revision: %-" __stringify(INQUIRY_REVISION_LEN) "s\n",
                wwn->revision);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
}
struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
{
        struct se_device *dev;
        struct se_lun *xcopy_lun;
        int i;

        dev = hba->backend->ops->alloc_device(hba, name);
        if (!dev)
                return NULL;

        dev->queues = kcalloc(nr_cpu_ids, sizeof(*dev->queues), GFP_KERNEL);
        if (!dev->queues) {
                /*
                 * dev->transport has not been assigned yet, so release the
                 * partially initialized device through the backend ops.
                 */
                hba->backend->ops->free_device(dev);
                return NULL;
        }

        dev->queue_cnt = nr_cpu_ids;
        for (i = 0; i < dev->queue_cnt; i++) {
                struct se_device_queue *q;

                q = &dev->queues[i];
                INIT_LIST_HEAD(&q->state_list);
                spin_lock_init(&q->lock);

                init_llist_head(&q->sq.cmd_list);
                INIT_WORK(&q->sq.work, target_queued_submit_work);
        }

        dev->se_hba = hba;
        dev->transport = hba->backend->ops;
        dev->transport_flags = dev->transport->transport_flags_default;
        dev->prot_length = sizeof(struct t10_pi_tuple);
        dev->hba_index = hba->hba_index;

        INIT_LIST_HEAD(&dev->dev_sep_list);
        INIT_LIST_HEAD(&dev->dev_tmr_list);
        INIT_LIST_HEAD(&dev->delayed_cmd_list);
        INIT_LIST_HEAD(&dev->qf_cmd_list);
        spin_lock_init(&dev->delayed_cmd_lock);
        spin_lock_init(&dev->dev_reservation_lock);
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
        sema_init(&dev->caw_sem, 1);
        INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
        spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
        INIT_LIST_HEAD(&dev->t10_pr.registration_list);
        INIT_LIST_HEAD(&dev->t10_pr.aptpl_reg_list);
        spin_lock_init(&dev->t10_pr.registration_lock);
        spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
        INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
        spin_lock_init(&dev->t10_alua.lba_map_lock);

        INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);
        mutex_init(&dev->lun_reset_mutex);

        dev->t10_wwn.t10_dev = dev;
        /*
         * Use OpenFabrics IEEE Company ID: 00 14 05
         */
        dev->t10_wwn.company_id = 0x001405;

        dev->t10_alua.t10_dev = dev;

        dev->dev_attrib.da_dev = dev;
        dev->dev_attrib.emulate_model_alias = DA_EMULATE_MODEL_ALIAS;
        dev->dev_attrib.emulate_dpo = 1;
        dev->dev_attrib.emulate_fua_write = 1;
        dev->dev_attrib.emulate_fua_read = 1;
        dev->dev_attrib.emulate_write_cache = DA_EMULATE_WRITE_CACHE;
        dev->dev_attrib.emulate_ua_intlck_ctrl = TARGET_UA_INTLCK_CTRL_CLEAR;
        dev->dev_attrib.emulate_tas = DA_EMULATE_TAS;
        dev->dev_attrib.emulate_tpu = DA_EMULATE_TPU;
        dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
        dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
        dev->dev_attrib.emulate_pr = DA_EMULATE_PR;
        dev->dev_attrib.emulate_rsoc = DA_EMULATE_RSOC;
        dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
        dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
        dev->dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
        dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
        dev->dev_attrib.max_unmap_block_desc_count =
                DA_MAX_UNMAP_BLOCK_DESC_COUNT;
        dev->dev_attrib.unmap_granularity = DA_UNMAP_GRANULARITY_DEFAULT;
        dev->dev_attrib.unmap_granularity_alignment =
                DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
        dev->dev_attrib.unmap_zeroes_data =
                DA_UNMAP_ZEROES_DATA_DEFAULT;
        dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
        dev->dev_attrib.submit_type = TARGET_FABRIC_DEFAULT_SUBMIT;

        xcopy_lun = &dev->xcopy_lun;
        rcu_assign_pointer(xcopy_lun->lun_se_dev, dev);
        init_completion(&xcopy_lun->lun_shutdown_comp);
        INIT_LIST_HEAD(&xcopy_lun->lun_deve_list);
        INIT_LIST_HEAD(&xcopy_lun->lun_dev_link);
        mutex_init(&xcopy_lun->lun_tg_pt_md_mutex);
        xcopy_lun->lun_tpg = &xcopy_pt_tpg;

        /* Preload the default INQUIRY const values */
        strscpy(dev->t10_wwn.vendor, "LIO-ORG", sizeof(dev->t10_wwn.vendor));
        strscpy(dev->t10_wwn.model, dev->transport->inquiry_prod,
                sizeof(dev->t10_wwn.model));
        strscpy(dev->t10_wwn.revision, dev->transport->inquiry_rev,
                sizeof(dev->t10_wwn.revision));

        return dev;
}
/*
 * Check if the underlying struct block_device supports discard and if yes
 * configure the UNMAP parameters.
 */
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
                                       struct block_device *bdev)
{
        int block_size = bdev_logical_block_size(bdev);

        if (!bdev_max_discard_sectors(bdev))
                return false;

        attrib->max_unmap_lba_count =
                bdev_max_discard_sectors(bdev) >> (ilog2(block_size) - 9);
        /*
         * Currently hardcoded to 1 in Linux/SCSI code.
         */
        attrib->max_unmap_block_desc_count = 1;
        attrib->unmap_granularity = bdev_discard_granularity(bdev) / block_size;
        attrib->unmap_granularity_alignment =
                bdev_discard_alignment(bdev) / block_size;
        return true;
}
EXPORT_SYMBOL(target_configure_unmap_from_queue);
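
/*
 * Worked example of the unit conversion above: bdev_max_discard_sectors()
 * counts 512-byte sectors while max_unmap_lba_count is in logical blocks,
 * so the shift divides by (block_size / 512). With a 4096-byte block size,
 * ilog2(4096) - 9 = 3, and 8192 discard sectors become 1024 LBAs.
 */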
/*
 * Convert from blocksize advertised to the initiator to the 512 byte
 * units unconditionally used by the Linux block layer.
 */
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
{
        switch (dev->dev_attrib.block_size) {
        case 4096:
                return lb << 3;
        case 2048:
                return lb << 2;
        case 1024:
                return lb << 1;
        default:
                return lb;
        }
}
EXPORT_SYMBOL(target_to_linux_sector);
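
/*
 * Example: with a 4096-byte block size, LBA 10 becomes Linux sector 80
 * (10 << 3); with a 512-byte block size the LBA is returned unchanged.
 */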
struct devices_idr_iter {
        int (*fn)(struct se_device *dev, void *data);
        void *data;
};

static int target_devices_idr_iter(int id, void *p, void *data)
         __must_hold(&device_mutex)
{
        struct devices_idr_iter *iter = data;
        struct se_device *dev = p;
        struct config_item *item;
        int ret;

        /*
         * We add the device early to the idr, so it can be used
         * by backend modules during configuration. We do not want
         * to allow other callers to access partially setup devices,
         * so we skip them here.
         */
        if (!target_dev_configured(dev))
                return 0;

        item = config_item_get_unless_zero(&dev->dev_group.cg_item);
        if (!item)
                return 0;
        mutex_unlock(&device_mutex);

        ret = iter->fn(dev, iter->data);
        config_item_put(item);

        mutex_lock(&device_mutex);
        return ret;
}

/**
 * target_for_each_device - iterate over configured devices
 * @fn: iterator function
 * @data: pointer to data that will be passed to fn
 *
 * fn must return 0 to continue looping over devices. non-zero will break
 * from the loop and return that value to the caller.
 */
int target_for_each_device(int (*fn)(struct se_device *dev, void *data),
                           void *data)
{
        struct devices_idr_iter iter = { .fn = fn, .data = data };
        int ret;

        mutex_lock(&device_mutex);
        ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter);
        mutex_unlock(&device_mutex);
        return ret;
}
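
/*
 * Minimal usage sketch (hypothetical helper, for documentation only):
 *
 *        static int target_count_devs(struct se_device *dev, void *data)
 *        {
 *                (*(int *)data)++;
 *                return 0;
 *        }
 *
 *        int count = 0;
 *        target_for_each_device(target_count_devs, &count);
 */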
int target_configure_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;
        int ret, id;

        if (target_dev_configured(dev)) {
                pr_err("se_dev->se_dev_ptr already set for storage"
                                " object\n");
                return -EEXIST;
        }

        /*
         * Add early so modules like tcmu can use during its
         * configuration.
         */
        mutex_lock(&device_mutex);
        /*
         * Use cyclic to try and avoid collisions with devices
         * that were recently removed.
         */
        id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL);
        mutex_unlock(&device_mutex);
        if (id < 0) {
                ret = -ENOMEM;
                goto out;
        }
        dev->dev_index = id;

        ret = dev->transport->configure_device(dev);
        if (ret)
                goto out_free_index;

        if (dev->transport->configure_unmap &&
            dev->transport->configure_unmap(dev)) {
                pr_debug("Discard support available, but disabled by default.\n");
        }

        /*
         * XXX: there is not much point to have two different values here..
         */
        dev->dev_attrib.block_size = dev->dev_attrib.hw_block_size;
        dev->dev_attrib.queue_depth = dev->dev_attrib.hw_queue_depth;

        /*
         * Align max_hw_sectors down to PAGE_SIZE I/O transfers
         */
        dev->dev_attrib.hw_max_sectors =
                se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
                                         dev->dev_attrib.hw_block_size);
        dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;

        dev->creation_time = get_jiffies_64();

        ret = core_setup_alua(dev);
        if (ret)
                goto out_destroy_device;

        /*
         * Setup work_queue for QUEUE_FULL
         */
        INIT_WORK(&dev->qf_work_queue, target_qf_do_work);

        scsi_dump_inquiry(dev);

        spin_lock(&hba->device_lock);
        hba->dev_count++;
        spin_unlock(&hba->device_lock);

        dev->dev_flags |= DF_CONFIGURED;

        return 0;

out_destroy_device:
        dev->transport->destroy_device(dev);
out_free_index:
        mutex_lock(&device_mutex);
        idr_remove(&devices_idr, dev->dev_index);
        mutex_unlock(&device_mutex);
out:
        se_release_vpd_for_dev(dev);
        return ret;
}
void target_free_device(struct se_device *dev)
{
        struct se_hba *hba = dev->se_hba;

        WARN_ON(!list_empty(&dev->dev_sep_list));

        if (target_dev_configured(dev)) {
                dev->transport->destroy_device(dev);

                mutex_lock(&device_mutex);
                idr_remove(&devices_idr, dev->dev_index);
                mutex_unlock(&device_mutex);

                spin_lock(&hba->device_lock);
                hba->dev_count--;
                spin_unlock(&hba->device_lock);
        }

        core_alua_free_lu_gp_mem(dev);
        core_alua_set_lba_map(dev, NULL, 0, 0);
        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);

        if (dev->transport->free_prot)
                dev->transport->free_prot(dev);

        kfree(dev->queues);
        dev->transport->free_device(dev);
}
int core_dev_setup_virtual_lun0(void)
{
        struct se_hba *hba;
        struct se_device *dev;
        char buf[] = "rd_pages=8,rd_nullio=1,rd_dummy=1";
        int ret;

        hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
        if (IS_ERR(hba))
                return PTR_ERR(hba);

        dev = target_alloc_device(hba, "virt_lun0");
        if (!dev) {
                ret = -ENOMEM;
                goto out_free_hba;
        }

        hba->backend->ops->set_configfs_dev_params(dev, buf, sizeof(buf));

        ret = target_configure_device(dev);
        if (ret)
                goto out_free_se_dev;

        lun0_hba = hba;
        g_lun0_dev = dev;
        return 0;

out_free_se_dev:
        target_free_device(dev);
out_free_hba:
        core_delete_hba(hba);
        return ret;
}
void core_dev_release_virtual_lun0(void)
{
        struct se_hba *hba = lun0_hba;

        if (!hba)
                return;

        if (g_lun0_dev)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
}
/*
 * Common CDB parsing for kernel and user passthrough.
 */
sense_reason_t
passthrough_parse_cdb(struct se_cmd *cmd,
        sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
{
        unsigned char *cdb = cmd->t_task_cdb;
        struct se_device *dev = cmd->se_dev;
        unsigned int size;

        /*
         * For REPORT LUNS we always need to emulate the response, for everything
         * else we pass it on.
         */
        if (cdb[0] == REPORT_LUNS) {
                cmd->execute_cmd = spc_emulate_report_luns;
                return TCM_NO_SENSE;
        }

        /*
         * With emulate_pr disabled, all reservation requests should fail,
         * regardless of whether or not TRANSPORT_FLAG_PASSTHROUGH_PGR is set.
         */
        if (!dev->dev_attrib.emulate_pr &&
            ((cdb[0] == PERSISTENT_RESERVE_IN) ||
             (cdb[0] == PERSISTENT_RESERVE_OUT) ||
             (cdb[0] == RELEASE || cdb[0] == RELEASE_10) ||
             (cdb[0] == RESERVE || cdb[0] == RESERVE_10))) {
                return TCM_UNSUPPORTED_SCSI_OPCODE;
        }

        /*
         * For PERSISTENT RESERVE IN/OUT, RELEASE, and RESERVE we need to
         * emulate the response, since tcmu does not have the information
         * required to process these commands.
         */
        if (!(dev->transport_flags &
              TRANSPORT_FLAG_PASSTHROUGH_PGR)) {
                if (cdb[0] == PERSISTENT_RESERVE_IN) {
                        cmd->execute_cmd = target_scsi3_emulate_pr_in;
                        size = get_unaligned_be16(&cdb[7]);
                        return target_cmd_size_check(cmd, size);
                }
                if (cdb[0] == PERSISTENT_RESERVE_OUT) {
                        cmd->execute_cmd = target_scsi3_emulate_pr_out;
                        size = get_unaligned_be32(&cdb[5]);
                        return target_cmd_size_check(cmd, size);
                }

                if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) {
                        cmd->execute_cmd = target_scsi2_reservation_release;
                        if (cdb[0] == RELEASE_10)
                                size = get_unaligned_be16(&cdb[7]);
                        else
                                size = cmd->data_length;
                        return target_cmd_size_check(cmd, size);
                }
                if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) {
                        cmd->execute_cmd = target_scsi2_reservation_reserve;
                        if (cdb[0] == RESERVE_10)
                                size = get_unaligned_be16(&cdb[7]);
                        else
                                size = cmd->data_length;
                        return target_cmd_size_check(cmd, size);
                }
        }

        /* Set DATA_CDB flag for ops that should have it */
        switch (cdb[0]) {
        case READ_6:
        case READ_10:
        case READ_12:
        case READ_16:
        case WRITE_6:
        case WRITE_10:
        case WRITE_12:
        case WRITE_16:
        case WRITE_VERIFY:
        case WRITE_VERIFY_12:
        case WRITE_VERIFY_16:
        case COMPARE_AND_WRITE:
        case XDWRITEREAD_10:
                cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                break;
        case VARIABLE_LENGTH_CMD:
                switch (get_unaligned_be16(&cdb[8])) {
                case READ_32:
                case WRITE_32:
                case WRITE_VERIFY_32:
                case XDWRITEREAD_32:
                        cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
                        break;
                }
        }

        cmd->execute_cmd = exec_cmd;

        return TCM_NO_SENSE;
}
EXPORT_SYMBOL(passthrough_parse_cdb);
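
/*
 * Illustrative wiring sketch (hypothetical backend): a passthrough
 * backend routes all CDB parsing through this helper from its own
 * parse_cdb() callback, supplying its execution hook:
 *
 *        static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
 *        {
 *                return passthrough_parse_cdb(cmd, my_exec_cmd);
 *        }
 */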