// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session"
				" failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration"
				" failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
				" failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
				"cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
				"cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_"
				"cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
				"cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique mib indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}
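
/*
 * Example (editorial sketch): other target-core code uses scsi_get_new_index()
 * to hand out unique statistics/MIB row indexes, e.g. when configuring a new
 * device. A hedged illustration, assuming SCSI_DEVICE_INDEX is one of the
 * scsi_index_t values defined in target_core_base.h:
 *
 *	dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
 *
 * Each call increments the per-type counter under scsi_mib_index_lock, so
 * concurrent callers never observe a duplicate index.
 */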

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_count_wq);
}

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_count_wq);
	init_completion(&se_sess->stop_done);
	atomic_set(&se_sess->stopped, 0);
	return percpu_ref_init(&se_sess->cmd_count,
			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);

void transport_uninit_session(struct se_session *se_sess)
{
	/*
	 * Drivers like iscsi and loop do not call target_stop_session
	 * during session shutdown so we have to drop the ref taken at init
	 * time here.
	 */
	if (!atomic_read(&se_sess->stopped))
		percpu_ref_put(&se_sess->cmd_count);

	percpu_ref_exit(&se_sess->cmd_count);
}

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from"
				" se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);
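
/*
 * Example (editorial sketch): a fabric driver that does not need a
 * pre-allocated tag pool can pair transport_alloc_session() with
 * transport_free_session(). Hypothetical caller, for illustration only:
 *
 *	struct se_session *sess = transport_alloc_session(TARGET_PROT_NORMAL);
 *
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 *	...
 *	transport_free_session(sess);
 */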

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
			         unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool,"
			" tag_num: %u\n", tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);
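
/*
 * Example (editorial sketch): once the tag pool is set up, a fabric driver
 * typically grabs a tag from sess_tag_pool and indexes into sess_cmd_map to
 * find its per-command private data. "struct my_cmd" is a hypothetical fabric
 * descriptor embedding a struct se_cmd; target_free_tag() is the
 * target_core_fabric.h helper that returns the tag:
 *
 *	struct my_cmd *cmd;
 *	unsigned int cpu;
 *	int tag;
 *
 *	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
 *	if (tag < 0)
 *		return NULL;
 *	cmd = &((struct my_cmd *)se_sess->sess_cmd_map)[tag];
 *	cmd->se_cmd.map_tag = tag;
 *	cmd->se_cmd.map_cpu = cpu;
 *	...
 *	target_free_tag(se_sess, &cmd->se_cmd);
 */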

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *	      each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num:"
		       " %u, but zero tag_size\n", tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size:"
		       " %u, but zero tag_num\n", tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with struct se_portal_group->session_lock held
 * (spin_lock_irqsave).
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O.
	 * eg: *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if fabric allows for T10-PI feature bits exposed to
		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl node
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre allocation
	 * of I/O descriptor tags, go ahead and perform that setup now..
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);
	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);
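
/*
 * Example (editorial sketch): a typical fabric login path builds its nexus
 * with target_setup_session() and later tears it down with
 * target_remove_session(). The callback runs after the node ACL lookup but
 * before registration. All my_* names are hypothetical:
 *
 *	static int my_fabric_session_cb(struct se_portal_group *se_tpg,
 *					struct se_session *se_sess, void *p)
 *	{
 *		struct my_fabric_conn *conn = p;
 *
 *		conn->se_sess = se_sess;
 *		return 0;
 *	}
 *
 *	sess = target_setup_session(&tpg->se_tpg, 128, sizeof(struct my_cmd),
 *				    TARGET_PROT_NORMAL, initiator_wwn, conn,
 *				    my_fabric_session_cb);
 */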

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include NULL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	transport_uninit_session(se_sess);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

static int target_release_res(struct se_device *dev, void *data)
{
	struct se_session *sess = data;

	if (dev->reservation_holder == sess)
		target_release_reservation(dev);
	return 0;
}

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Since the session is being removed, release SPC-2
	 * reservations held by the session that is disappearing.
	 */
	target_for_each_device(target_release_res, se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If the last kref is dropping now for an explicit NodeACL, wake the
	 * sleeping ->acl_free_comp caller, i.e. the configfs
	 * se_node_acl->acl_group removal context, from within
	 * transport_free_session() code.
	 *
	 * For dynamic ACLs, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}

/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
		TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
		  target_complete_failure_work);
	queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
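
/*
 * Example (editorial sketch): a backend calls target_complete_cmd() from its
 * I/O completion handler, often in interrupt context, which is why this
 * function only queues work instead of sleeping. Hypothetical bio-based
 * backend, for illustration only:
 *
 *	static void my_backend_bio_done(struct bio *bio)
 *	{
 *		struct se_cmd *cmd = bio->bi_private;
 *
 *		target_complete_cmd(cmd, bio->bi_status ?
 *				SAM_STAT_CHECK_CONDITION : SAM_STAT_GOOD);
 *		bio_put(bio);
 *	}
 */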

void target_set_cmd_data_length(struct se_cmd *cmd, int length)
{
	if (length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}
}
EXPORT_SYMBOL(target_set_cmd_data_length);

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if (scsi_status == SAM_STAT_GOOD ||
	    cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
		target_set_cmd_data_length(cmd, length);
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);
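
/*
 * Example (editorial sketch): an emulated INQUIRY that only produced 36 bytes
 * into a 256-byte buffer completes with an explicit length so the residual
 * gets reported to the initiator:
 *
 *	target_complete_cmd_with_length(cmd, SAM_STAT_GOOD, 36);
 *
 * With cmd->data_length == 256 this sets SCF_UNDERFLOW_BIT, residual_count =
 * 220, and shrinks data_length to 36 before the normal completion path runs.
 */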

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list,
			      &dev->queues[cmd->cpuid].state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access"
				" Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport"
				" Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set..
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type..
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
			" 0x%02x", vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);
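
/*
 * Example (editorial sketch): the transport_set_vpd_*() helpers above are
 * meant to be fed one INQUIRY VPD page 0x83 designation descriptor at a time,
 * e.g. when a backend scans an attached device:
 *
 *	struct t10_vpd *vpd = kzalloc(sizeof(*vpd), GFP_KERNEL);
 *
 *	if (!vpd)
 *		return -ENOMEM;
 *	transport_set_vpd_proto_id(vpd, page_83);
 *	transport_set_vpd_assoc(vpd, page_83);
 *	transport_set_vpd_ident_type(vpd, page_83);
 *	transport_set_vpd_ident(vpd, page_83);
 *
 * where page_83 points at the start of a single descriptor within the
 * returned page.
 */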

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check if fabric enforced maximum SGL entries per I/O descriptor
	 * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT +
	 * residual_count and reduce original cmd->data_length to maximum
	 * length based on single PAGE_SIZE entry scatter-lists.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from the transport
 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
 *
 * Return: TCM_NO_SENSE
 */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
			" 0x%02x\n", cmd->se_tfo->fabric_name,
				cmd->data_length, size, cmd->t_task_cdb[0]);
		/*
		 * For READ command for the overflow case keep the existing
		 * fabric provided ->data_length. Otherwise for the underflow
		 * case, reset ->data_length to the smaller SCSI expected data
		 * transfer length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			/*
			 * Do not truncate ->data_length for WRITE command to
			 * dump all payload
			 */
			if (cmd->data_direction == DMA_FROM_DEVICE) {
				cmd->data_length = size;
			}
		}

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow"
						   " for WRITE data CDB\n");
				return TCM_INVALID_FIELD_IN_COMMAND_IU;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes. Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for"
						   " WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}
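
/*
 * Worked example (editorial): a READ(10) whose CDB transfer length works out
 * to size == 8192, arriving from a fabric that only posted cmd->data_length
 * == 4096, takes the overflow branch above: SCF_OVERFLOW_BIT is set and
 * residual_count = 8192 - 4096 = 4096, while data_length stays 4096. The
 * reverse case (size == 4096, data_length == 8192) sets SCF_UNDERFLOW_BIT
 * with residual_count = 4096 and, for a READ, truncates data_length down to
 * the expected 4096.
 */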

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer, u64 unpacked_lun)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;
	cmd->orig_fe_lun = unpacked_lun;

	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
		cmd->cpuid = smp_processor_id();

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA"
			" emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	sense_reason_t ret;

	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that"
			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		ret = TCM_INVALID_CDB_FIELD;
		goto err;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
	 * allocate the additional extended CDB buffer now.. Otherwise
	 * setup the pointer from __t_task_cdb to t_task_cdb.
	 */
	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
						GFP_KERNEL);
		if (!cmd->t_task_cdb) {
			pr_err("Unable to allocate cmd->t_task_cdb"
				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
				scsi_command_size(cdb),
				(unsigned long)sizeof(cmd->__t_task_cdb));
			ret = TCM_OUT_OF_RESOURCES;
			goto err;
		}
	}
	/*
	 * Copy the original CDB into cmd->t_task_cdb.
	 */
	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));

	trace_target_sequencer_start(cmd);

	return 0;

err:
	/*
	 * Copy the CDB here to allow trace_target_cmd_complete() to
	 * print the cdb to the trace buffers.
	 */
	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
					 (unsigned int)TCM_MAX_COMMAND_SIZE));
	return ret;
}
EXPORT_SYMBOL(target_cmd_init_cdb);

sense_reason_t
target_cmd_parse_cdb(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	ret = dev->transport->parse_cdb(cmd);
	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
		pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
				    cmd->se_tfo->fabric_name,
				    cmd->se_sess->se_node_acl->initiatorname,
				    cmd->t_task_cdb[0]);
	if (ret)
		return ret;

	ret = transport_check_alloc_task_attr(cmd);
	if (ret)
		return ret;

	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
	return 0;
}
EXPORT_SYMBOL(target_cmd_parse_cdb);

/*
 * Used by fabric module frontends to queue tasks directly.
 * May only be used from process context.
 */
int transport_handle_cdb_direct(
	struct se_cmd *cmd)
{
	sense_reason_t ret;

	if (!cmd->se_lun) {
		dump_stack();
		pr_err("cmd->se_lun is NULL\n");
		return -EINVAL;
	}

	/*
	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
	 * outstanding descriptors are handled correctly during shutdown via
	 * transport_wait_for_tasks()
	 *
	 * Also, we don't take cmd->t_state_lock here as we only expect
	 * this to be called for initial descriptor submission.
	 */
	cmd->t_state = TRANSPORT_NEW_CMD;
	cmd->transport_state |= CMD_T_ACTIVE;

	/*
	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
	 * so follow TRANSPORT_NEW_CMD processing thread context usage
	 * and call transport_generic_request_failure() if necessary..
	 */
	ret = transport_generic_new_cmd(cmd);
	if (ret)
		transport_generic_request_failure(cmd, ret);
	return 0;
}
EXPORT_SYMBOL(transport_handle_cdb_direct);

static sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
	if (!sgl || !sgl_count)
		return 0;

	/*
	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
	 * scatterlists already have been set to follow what the fabric
	 * passes for the original expected data transfer length.
	 */
	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
		pr_warn("Rejecting SCSI DATA overflow for fabric using"
			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
		return TCM_INVALID_CDB_FIELD;
	}

	cmd->t_data_sg = sgl;
	cmd->t_data_nents = sgl_count;
	cmd->t_bidi_data_sg = sgl_bidi;
	cmd->t_bidi_data_nents = sgl_bidi_count;

	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
	return 0;
}

/**
 * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
 *			 se_cmd + use pre-allocated SGL memory.
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 * @sgl: struct scatterlist memory for unidirectional mapping
 * @sgl_count: scatterlist count for unidirectional mapping
 * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
 * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
 * @sgl_prot: struct scatterlist memory protection information
 * @sgl_prot_count: scatterlist count for protection information
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 */
int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags,
		struct scatterlist *sgl, u32 sgl_count,
		struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
		struct scatterlist *sgl_prot, u32 sgl_prot_count)
{
	struct se_portal_group *se_tpg;
	sense_reason_t rc;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);
	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);

	if (flags & TARGET_SCF_USE_CPUID)
		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
	/*
	 * Initialize se_cmd for target operation. From this point
	 * exceptions are handled by sending exception status via
	 * target_core_fabric_ops->queue_status() callback
	 */
	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
				data_length, data_dir, task_attr, sense,
				unpacked_lun);

	if (flags & TARGET_SCF_UNKNOWN_SIZE)
		se_cmd->unknown_data_length = 1;
	/*
	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
	 * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
	 * kref_put() to happen during fabric packet acknowledgement.
	 */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret)
		return ret;
	/*
	 * Signal bidirectional data payloads to target-core
	 */
	if (flags & TARGET_SCF_BIDI_OP)
		se_cmd->se_cmd_flags |= SCF_BIDI;

	rc = target_cmd_init_cdb(se_cmd, cdb);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	/*
	 * Locate se_lun pointer and attach it to struct se_cmd
	 */
	rc = transport_lookup_cmd_lun(se_cmd);
	if (rc) {
		transport_send_check_condition_and_sense(se_cmd, rc, 0);
		target_put_sess_cmd(se_cmd);
		return 0;
	}

	rc = target_cmd_parse_cdb(se_cmd);
	if (rc != 0) {
		transport_generic_request_failure(se_cmd, rc);
		return 0;
	}

	/*
	 * Save pointers for SGLs containing protection information,
	 * if present.
	 */
	if (sgl_prot_count) {
		se_cmd->t_prot_sg = sgl_prot;
		se_cmd->t_prot_nents = sgl_prot_count;
		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
	}

	/*
	 * When a non zero sgl_count has been passed perform SGL passthrough
	 * mapping for pre-allocated fabric memory instead of having target
	 * core perform an internal SGL allocation..
	 */
	if (sgl_count != 0) {
		BUG_ON(!sgl);

		/*
		 * A work-around for tcm_loop as some userspace code via
		 * scsi-generic do not memset their associated read buffers,
		 * so go ahead and do that here for type non-data CDBs. Also
		 * note that this is currently guaranteed to be a single SGL
		 * for this case by target core in target_setup_cmd_from_cdb()
		 * -> transport_generic_cmd_sequencer().
		 */
		if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
		     se_cmd->data_direction == DMA_FROM_DEVICE) {
			unsigned char *buf = NULL;

			if (sgl)
				buf = kmap(sg_page(sgl)) + sgl->offset;

			if (buf) {
				memset(buf, 0, sgl->length);
				kunmap(sg_page(sgl));
			}
		}

		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
				sgl_bidi, sgl_bidi_count);
		if (rc != 0) {
			transport_generic_request_failure(se_cmd, rc);
			return 0;
		}
	}

	/*
	 * Check if we need to delay processing because of ALUA
	 * Active/NonOptimized primary access state..
	 */
	core_alua_check_nonop_delay(se_cmd);

	transport_handle_cdb_direct(se_cmd);
	return 0;
}
EXPORT_SYMBOL(target_submit_cmd_map_sgls);

/**
 * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @cdb: pointer to SCSI CDB
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @data_length: fabric expected data transfer length
 * @task_attr: SAM task attribute
 * @data_dir: DMA data direction
 * @flags: flags for command submission from target_sc_flags_tables
 *
 * Task tags are supported if the caller has set @se_cmd->tag.
 *
 * Returns non zero to signal active I/O shutdown failure. All other
 * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
 * but still return zero here.
 *
 * This may only be called from process context, and also currently
 * assumes internal allocation of fabric payload buffer by target-core.
 *
 * It also assumes internal target core SGL memory allocation.
 */
int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
		u32 data_length, int task_attr, int data_dir, int flags)
{
	return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
			unpacked_lun, data_length, task_attr, data_dir,
			flags, NULL, 0, NULL, 0, NULL, 0);
}
EXPORT_SYMBOL(target_submit_cmd);
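
/*
 * Example (editorial sketch): a fabric receive path hands a decoded SCSI
 * request to target-core. After this call, all further status is delivered
 * through the fabric's queue_status()/queue_data_in() callbacks. The my_*
 * names are hypothetical:
 *
 *	rc = target_submit_cmd(&cmd->se_cmd, conn->se_sess, cdb,
 *			       cmd->sense_buf, scsilun_to_int(&lun),
 *			       data_len, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			       TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		my_fabric_drop_cmd(cmd);  (active I/O shutdown case only)
 */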

static void target_complete_tmr_failure(struct work_struct *work)
{
	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);

	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
	se_cmd->se_tfo->queue_tm_rsp(se_cmd);

	transport_lun_remove_cmd(se_cmd);
	transport_cmd_check_stop_to_fabric(se_cmd);
}

/**
 * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
 *		       for TMR CDBs
 *
 * @se_cmd: command descriptor to submit
 * @se_sess: associated se_sess for endpoint
 * @sense: pointer to SCSI sense buffer
 * @unpacked_lun: unpacked LUN to reference for struct se_lun
 * @fabric_tmr_ptr: fabric context for TMR req
 * @tm_type: Type of TM request
 * @gfp: gfp type for caller
 * @tag: referenced task tag for TMR_ABORT_TASK
 * @flags: submit cmd flags
 *
 * Callable from all contexts.
 */
int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
		unsigned char *sense, u64 unpacked_lun,
		void *fabric_tmr_ptr, unsigned char tm_type,
		gfp_t gfp, u64 tag, int flags)
{
	struct se_portal_group *se_tpg;
	int ret;

	se_tpg = se_sess->se_tpg;
	BUG_ON(!se_tpg);

	transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
			      0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
	/*
	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
	 * allocation failure.
	 */
	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
	if (ret < 0)
		return -ENOMEM;

	if (tm_type == TMR_ABORT_TASK)
		se_cmd->se_tmr_req->ref_task_tag = tag;

	/* See target_submit_cmd for commentary */
	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
	if (ret) {
		core_tmr_release_req(se_cmd->se_tmr_req);
		return ret;
	}

	ret = transport_lookup_tmr_lun(se_cmd);
	if (ret)
		goto failure;

	transport_generic_handle_tmr(se_cmd);
	return 0;

	/*
	 * For callback during failure handling, push this work off
	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
	 */
failure:
	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
	schedule_work(&se_cmd->work);
	return 0;
}
EXPORT_SYMBOL(target_submit_tmr);
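
/*
 * Example (editorial sketch): submitting a LUN RESET on behalf of an
 * initiator. The TMR response later arrives via the fabric's queue_tm_rsp()
 * callback; the tag argument is only meaningful for TMR_ABORT_TASK and is
 * passed as 0 here. my_* names are hypothetical:
 *
 *	rc = target_submit_tmr(&tmr_cmd->se_cmd, conn->se_sess, NULL,
 *			       unpacked_lun, tmr_cmd, TMR_LUN_RESET,
 *			       GFP_KERNEL, 0, TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		my_fabric_drop_tmr(tmr_cmd);
 */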

/*
 * Handle SAM-esque emulation for generic transport request failures.
 */
void transport_generic_request_failure(struct se_cmd *cmd,
		sense_reason_t sense_reason)
{
	int ret = 0, post_ret;

	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
		 sense_reason);
	target_show_cmd("-----[ ", cmd);

	/*
	 * For SAM Task Attribute emulation for failed struct se_cmd
	 */
	transport_complete_task_attr(cmd);

	if (cmd->transport_complete_callback)
		cmd->transport_complete_callback(cmd, false, &post_ret);

	if (cmd->transport_state & CMD_T_ABORTED) {
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return;
	}

	switch (sense_reason) {
	case TCM_NON_EXISTENT_LUN:
	case TCM_UNSUPPORTED_SCSI_OPCODE:
	case TCM_INVALID_CDB_FIELD:
	case TCM_INVALID_PARAMETER_LIST:
	case TCM_PARAMETER_LIST_LENGTH_ERROR:
	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
	case TCM_UNKNOWN_MODE_PAGE:
	case TCM_WRITE_PROTECTED:
	case TCM_ADDRESS_OUT_OF_RANGE:
	case TCM_CHECK_CONDITION_ABORT_CMD:
	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
	case TCM_CHECK_CONDITION_NOT_READY:
	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
	case TCM_TOO_MANY_TARGET_DESCS:
	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
	case TCM_TOO_MANY_SEGMENT_DESCS:
	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
	case TCM_INVALID_FIELD_IN_COMMAND_IU:
		break;
	case TCM_OUT_OF_RESOURCES:
		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
		goto queue_status;
	case TCM_LUN_BUSY:
		cmd->scsi_status = SAM_STAT_BUSY;
		goto queue_status;
	case TCM_RESERVATION_CONFLICT:
		/*
		 * No SENSE Data payload for this case, set SCSI Status
		 * and queue the response to $FABRIC_MOD.
		 *
		 * Uses linux/include/scsi/scsi.h SAM status codes defs
		 */
		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
		/*
		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
		 * CONFLICT STATUS.
		 *
		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
		 */
		if (cmd->se_sess &&
		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
					== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x2C,
					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
		}

		goto queue_status;
	default:
		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
			cmd->t_task_cdb[0], sense_reason);
		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
		break;
	}

	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
	if (ret)
		goto queue_full;

check_stop:
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_status:
	trace_target_cmd_complete(cmd);
	ret = cmd->se_tfo->queue_status(cmd);
	if (!ret)
		goto check_stop;
queue_full:
	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
EXPORT_SYMBOL(transport_generic_request_failure);

void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
{
	sense_reason_t ret;

	if (!cmd->execute_cmd) {
		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		goto err;
	}
	if (do_checks) {
		/*
		 * Check for an existing UNIT ATTENTION condition after
		 * target_handle_task_attr() has done SAM task attr
		 * checking, and possibly have already deferred execution
		 * out to target_restart_delayed_cmds() context.
		 */
		ret = target_scsi3_ua_check(cmd);
		if (ret)
			goto err;

		ret = target_alua_state_check(cmd);
		if (ret)
			goto err;

		ret = target_check_reservation(cmd);
		if (ret) {
			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
			goto err;
		}
	}

	ret = cmd->execute_cmd(cmd);
	if (!ret)
		return;
err:
	spin_lock_irq(&cmd->t_state_lock);
	cmd->transport_state &= ~CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	transport_generic_request_failure(cmd, ret);
}

static int target_write_prot_action(struct se_cmd *cmd)
{
	u32 sectors;
	/*
	 * Perform WRITE_INSERT of PI using software emulation when backend
	 * device has PI enabled, if the transport has not already generated
	 * PI using hardware WRITE_INSERT offload.
	 */
	switch (cmd->prot_op) {
	case TARGET_PROT_DOUT_INSERT:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
			sbc_dif_generate(cmd);
		break;
	case TARGET_PROT_DOUT_STRIP:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
			break;

		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
					     sectors, 0, cmd->t_prot_sg, 0);
		if (unlikely(cmd->pi_err)) {
			spin_lock_irq(&cmd->t_state_lock);
			cmd->transport_state &= ~CMD_T_SENT;
			spin_unlock_irq(&cmd->t_state_lock);
			transport_generic_request_failure(cmd, cmd->pi_err);
			return -1;
		}
		break;
	default:
		break;
	}

	return 0;
}

static bool target_handle_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return false;

	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;

	/*
	 * Check for the existence of HEAD_OF_QUEUE, and if true return 1
	 * to allow the passed struct se_cmd list of tasks to the front of the list.
	 */
	switch (cmd->sam_task_attr) {
	case TCM_HEAD_TAG:
		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
			 cmd->t_task_cdb[0]);
		return false;
	case TCM_ORDERED_TAG:
		atomic_inc_mb(&dev->dev_ordered_sync);

		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
			 cmd->t_task_cdb[0]);

		/*
		 * Execute an ORDERED command if no other older commands
		 * exist that need to be completed first.
		 */
		if (!atomic_read(&dev->simple_cmds))
			return false;
		break;
	default:
		/*
		 * For SIMPLE and UNTAGGED Task Attribute commands
		 */
		atomic_inc_mb(&dev->simple_cmds);
		break;
	}

	if (atomic_read(&dev->dev_ordered_sync) == 0)
		return false;

	spin_lock(&dev->delayed_cmd_lock);
	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
	spin_unlock(&dev->delayed_cmd_lock);

	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
		cmd->t_task_cdb[0], cmd->sam_task_attr);
	return true;
}

void target_execute_cmd(struct se_cmd *cmd)
{
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 *
	 * If the received CDB has already been aborted stop processing it here.
	 */
	if (target_cmd_interrupted(cmd))
		return;

	spin_lock_irq(&cmd->t_state_lock);
	cmd->t_state = TRANSPORT_PROCESSING;
	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
	spin_unlock_irq(&cmd->t_state_lock);

	if (target_write_prot_action(cmd))
		return;

	if (target_handle_task_attr(cmd)) {
		spin_lock_irq(&cmd->t_state_lock);
		cmd->transport_state &= ~CMD_T_SENT;
		spin_unlock_irq(&cmd->t_state_lock);
		return;
	}

	__target_execute_cmd(cmd, true);
}
EXPORT_SYMBOL(target_execute_cmd);

/*
 * Process all commands up to the last received ORDERED task attribute which
 * requires another blocking boundary
 */
static void target_restart_delayed_cmds(struct se_device *dev)
{
	for (;;) {
		struct se_cmd *cmd;

		spin_lock(&dev->delayed_cmd_lock);
		if (list_empty(&dev->delayed_cmd_list)) {
			spin_unlock(&dev->delayed_cmd_lock);
			break;
		}

		cmd = list_entry(dev->delayed_cmd_list.next,
				 struct se_cmd, se_delayed_node);
		list_del(&cmd->se_delayed_node);
		spin_unlock(&dev->delayed_cmd_lock);

		cmd->transport_state |= CMD_T_SENT;

		__target_execute_cmd(cmd, true);

		if (cmd->sam_task_attr == TCM_ORDERED_TAG)
			break;
	}
}

/*
 * Called from I/O completion to determine which dormant/delayed
 * and ordered cmds need to have their tasks added to the execution queue.
 */
static void transport_complete_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return;

	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
		goto restart;

	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
		atomic_dec_mb(&dev->simple_cmds);
		dev->dev_cur_ordered_id++;
	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
			 dev->dev_cur_ordered_id);
	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
		atomic_dec_mb(&dev->dev_ordered_sync);

		dev->dev_cur_ordered_id++;
		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
			 dev->dev_cur_ordered_id);
	}
	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;

restart:
	target_restart_delayed_cmds(dev);
}

static void transport_complete_qf(struct se_cmd *cmd)
{
	int ret = 0;

	transport_complete_task_attr(cmd);
	/*
	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
	 * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and
	 * the same callbacks should not be retried.  Return CHECK_CONDITION
	 * if a scsi_status is not already set.
	 *
	 * If a fabric driver ->queue_status() has returned non zero, always
	 * keep retrying no matter what..
	 */
	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
		if (cmd->scsi_status)
			goto queue_status;

		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
		goto queue_status;
	}

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question. We do NOT want
	 * to take this path if the IO has been marked as
	 * needing to be treated like a "normal read". This
	 * is the case if it's a tape read, and either the
	 * FM, EOM, or ILI bits are set, but there is no
	 * sense data.
	 */
	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
		goto queue_status;

	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		/* queue status if not treating this as a normal read */
		if (cmd->scsi_status &&
		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
			goto queue_status;

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		break;
	case DMA_TO_DEVICE:
		if (cmd->se_cmd_flags & SCF_BIDI) {
			ret = cmd->se_tfo->queue_data_in(cmd);
			break;
		}
		fallthrough;
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		break;
	default:
		break;
	}

	if (ret) {
		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
		return;
	}
	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
}
static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
					int err, bool write_pending)
{
	/*
	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
	 * ->queue_data_in() callbacks from new process context.
	 *
	 * Otherwise for other errors, transport_complete_qf() will send
	 * CHECK_CONDITION via ->queue_status() instead of attempting to
	 * retry associated fabric driver data-transfer callbacks.
	 */
	if (err == -EAGAIN || err == -ENOMEM) {
		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
						 TRANSPORT_COMPLETE_QF_OK;
	} else {
		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
	}

	spin_lock_irq(&dev->qf_cmd_lock);
	list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
	atomic_inc_mb(&dev->dev_qf_count);
	spin_unlock_irq(&dev->qf_cmd_lock);

	schedule_work(&dev->qf_work_queue);
}
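/*
 * Illustrative fabric-side sketch (not upstream code): a ->queue_data_in()
 * implementation that reports transient back-pressure.  Only -EAGAIN and
 * -ENOMEM make the core requeue the callback via
 * transport_handle_queue_full(); any other non-zero value is treated as
 * fatal and ends in CHECK CONDITION.  The my_fabric_*() helpers are
 * hypothetical.
 */
extern bool my_fabric_tx_has_space(struct se_session *se_sess);
extern int my_fabric_send_data(struct se_cmd *cmd);

static int my_fabric_queue_data_in(struct se_cmd *cmd)
{
	if (!my_fabric_tx_has_space(cmd->se_sess))
		return -EAGAIN;	/* retried later from dev->qf_work_queue */

	return my_fabric_send_data(cmd);	/* 0 on success */
}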
static bool target_read_prot_action(struct se_cmd *cmd)
{
	switch (cmd->prot_op) {
	case TARGET_PROT_DIN_STRIP:
		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
			u32 sectors = cmd->data_length >>
				      ilog2(cmd->se_dev->dev_attrib.block_size);

			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
						     sectors, 0, cmd->t_prot_sg,
						     0);
			if (cmd->pi_err)
				return true;
		}
		break;
	case TARGET_PROT_DIN_INSERT:
		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
			break;

		sbc_dif_generate(cmd);
		break;
	default:
		break;
	}

	return false;
}
static void target_complete_ok_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	int ret;

	/*
	 * Check if we need to move delayed/dormant tasks from cmds on the
	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
	 * Attribute.
	 */
	transport_complete_task_attr(cmd);

	/*
	 * Check to schedule QUEUE_FULL work, or execute an existing
	 * cmd->transport_qf_callback()
	 */
	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
		schedule_work(&cmd->se_dev->qf_work_queue);

	/*
	 * Check if we need to send a sense buffer from
	 * the struct se_cmd in question. We do NOT want
	 * to take this path if the IO has been marked as
	 * needing to be treated like a "normal read". This
	 * is the case if it's a tape read, and either the
	 * FM, EOM, or ILI bits are set, but there is no
	 * sense data.
	 */
	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
		WARN_ON(!cmd->scsi_status);
		ret = transport_send_check_condition_and_sense(
					cmd, 0, 1);
		if (ret)
			goto queue_full;

		transport_lun_remove_cmd(cmd);
		transport_cmd_check_stop_to_fabric(cmd);
		return;
	}
	/*
	 * Check for a callback, used by, amongst other things,
	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
	 */
	if (cmd->transport_complete_callback) {
		sense_reason_t rc;
		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
		bool zero_dl = !(cmd->data_length);
		int post_ret = 0;

		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
		if (!rc && !post_ret) {
			if (caw && zero_dl)
				goto queue_rsp;

			return;
		} else if (rc) {
			ret = transport_send_check_condition_and_sense(cmd,
						rc, 0);
			if (ret)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}
	}

queue_rsp:
	switch (cmd->data_direction) {
	case DMA_FROM_DEVICE:
		/*
		 * if this is a READ-type IO, but SCSI status
		 * is set, then skip returning data and just
		 * return the status -- unless this IO is marked
		 * as needing to be treated as a normal read,
		 * in which case we want to go ahead and return
		 * the data. This happens, for example, for tape
		 * reads with the FM, EOM, or ILI bits set, with
		 * no sense data.
		 */
		if (cmd->scsi_status &&
		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
			goto queue_status;

		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.tx_data_octets);
		/*
		 * Perform READ_STRIP of PI using software emulation when
		 * backend had PI enabled, if the transport will not be
		 * performing hardware READ_STRIP offload.
		 */
		if (target_read_prot_action(cmd)) {
			ret = transport_send_check_condition_and_sense(cmd,
						cmd->pi_err, 0);
			if (ret)
				goto queue_full;

			transport_lun_remove_cmd(cmd);
			transport_cmd_check_stop_to_fabric(cmd);
			return;
		}

		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_data_in(cmd);
		if (ret)
			goto queue_full;
		break;
	case DMA_TO_DEVICE:
		atomic_long_add(cmd->data_length,
				&cmd->se_lun->lun_stats.rx_data_octets);
		/*
		 * Check if we need to send READ payload for BIDI-COMMAND
		 */
		if (cmd->se_cmd_flags & SCF_BIDI) {
			atomic_long_add(cmd->data_length,
					&cmd->se_lun->lun_stats.tx_data_octets);
			ret = cmd->se_tfo->queue_data_in(cmd);
			if (ret)
				goto queue_full;
			break;
		}
		fallthrough;
	case DMA_NONE:
queue_status:
		trace_target_cmd_complete(cmd);
		ret = cmd->se_tfo->queue_status(cmd);
		if (ret)
			goto queue_full;
		break;
	default:
		break;
	}

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

queue_full:
	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
		 " data_direction: %d\n", cmd, cmd->data_direction);

	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
}
void target_free_sgl(struct scatterlist *sgl, int nents)
{
	sgl_free_n_order(sgl, nents, 0);
}
EXPORT_SYMBOL(target_free_sgl);
static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
	/*
	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
	 * emulation, and free + reset pointers if necessary.
	 */
	if (!cmd->t_data_sg_orig)
		return;

	kfree(cmd->t_data_sg);
	cmd->t_data_sg = cmd->t_data_sg_orig;
	cmd->t_data_sg_orig = NULL;
	cmd->t_data_nents = cmd->t_data_nents_orig;
	cmd->t_data_nents_orig = 0;
}
static inline void transport_free_pages(struct se_cmd *cmd)
{
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
		cmd->t_prot_sg = NULL;
		cmd->t_prot_nents = 0;
	}

	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
		/*
		 * Release special case READ buffer payload required for
		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
		 */
		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
			target_free_sgl(cmd->t_bidi_data_sg,
					cmd->t_bidi_data_nents);
			cmd->t_bidi_data_sg = NULL;
			cmd->t_bidi_data_nents = 0;
		}
		transport_reset_sgl_orig(cmd);
		return;
	}
	transport_reset_sgl_orig(cmd);

	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
	cmd->t_data_sg = NULL;
	cmd->t_data_nents = 0;

	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
	cmd->t_bidi_data_sg = NULL;
	cmd->t_bidi_data_nents = 0;
}
void *transport_kmap_data_sg(struct se_cmd *cmd)
{
	struct scatterlist *sg = cmd->t_data_sg;
	struct page **pages;
	int i;

	/*
	 * We need to take into account a possible offset here for fabrics like
	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
	 */
	if (!cmd->t_data_nents)
		return NULL;

	BUG_ON(!sg);
	if (cmd->t_data_nents == 1)
		return kmap(sg_page(sg)) + sg->offset;

	/* >1 page. use vmap */
	pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* convert sg[] to pages[] */
	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
		pages[i] = sg_page(sg);
	}

	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (!cmd->t_data_vmap)
		return NULL;

	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}
EXPORT_SYMBOL(transport_kmap_data_sg);
void transport_kunmap_data_sg(struct se_cmd *cmd)
{
	if (!cmd->t_data_nents) {
		return;
	} else if (cmd->t_data_nents == 1) {
		kunmap(sg_page(cmd->t_data_sg));
		return;
	}

	vunmap(cmd->t_data_vmap);
	cmd->t_data_vmap = NULL;
}
EXPORT_SYMBOL(transport_kunmap_data_sg);
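/*
 * Minimal usage sketch (hypothetical helper): code that needs a linear view
 * of the payload, e.g. to parse a parameter list, maps the scatterlist and
 * must always balance the call with transport_kunmap_data_sg().
 */
static void example_peek_payload(struct se_cmd *cmd)
{
	unsigned char *buf = transport_kmap_data_sg(cmd);

	if (!buf)
		return;
	if (cmd->data_length >= 2)
		pr_debug("payload starts with %02x %02x\n", buf[0], buf[1]);
	transport_kunmap_data_sg(cmd);
}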
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
		 bool zero_page, bool chainable)
{
	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);

	*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
	return *sgl ? 0 : -ENOMEM;
}
EXPORT_SYMBOL(target_alloc_sgl);
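/*
 * Usage sketch (hypothetical function): allocate a zeroed, non-chainable
 * scatterlist for a 4 KiB buffer and, when done with it, release it with
 * target_free_sgl().
 */
static int example_alloc_bounce_sgl(void)
{
	struct scatterlist *sgl;
	unsigned int nents;
	int ret = target_alloc_sgl(&sgl, &nents, 4096, true, false);

	if (ret < 0)
		return ret;	/* -ENOMEM */
	/* ... use sgl/nents ... */
	target_free_sgl(sgl, nents);
	return 0;
}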
/*
 * Allocate any required resources to execute the command.  For writes we
 * might not have the payload yet, so notify the fabric via a call to
 * ->write_pending instead. Otherwise place it on the execution queue.
 */
sense_reason_t
transport_generic_new_cmd(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret = 0;
	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);

	if (cmd->prot_op != TARGET_PROT_NORMAL &&
	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
				       cmd->prot_length, true, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	/*
	 * Determine if the TCM fabric module has already allocated physical
	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
	 * beforehand.
	 */
	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
	    cmd->data_length) {

		if ((cmd->se_cmd_flags & SCF_BIDI) ||
		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
			u32 bidi_length;

			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
				bidi_length = cmd->t_task_nolb *
					      cmd->se_dev->dev_attrib.block_size;
			else
				bidi_length = cmd->data_length;

			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
					       &cmd->t_bidi_data_nents,
					       bidi_length, zero_flag, false);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
				       cmd->data_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
		   cmd->data_length) {
		/*
		 * Special case for COMPARE_AND_WRITE with fabrics
		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
		 */
		u32 caw_length = cmd->t_task_nolb *
				 cmd->se_dev->dev_attrib.block_size;

		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
				       &cmd->t_bidi_data_nents,
				       caw_length, zero_flag, false);
		if (ret < 0)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	/*
	 * If this command is not a write we can execute it right here,
	 * for write buffers we need to notify the fabric driver first
	 * and let it call back once the write buffers are ready.
	 */
	target_add_to_state_list(cmd);
	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
		target_execute_cmd(cmd);
		return 0;
	}

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd->t_state = TRANSPORT_WRITE_PENDING;
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP &&
	    !cmd->se_tfo->write_pending_must_be_called) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 0;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret)
		goto queue_full;

	return 0;

queue_full:
	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
	return 0;
}
EXPORT_SYMBOL(transport_generic_new_cmd);
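/*
 * Fabric-side sketch (not upstream code): a ->write_pending() callback that
 * solicits the WRITE payload from the initiator.  Once the data-out transfer
 * completes, the fabric resumes the command by calling target_execute_cmd().
 * my_fabric_request_data_out() is a hypothetical helper; returning
 * -EAGAIN/-ENOMEM here feeds the queue-full retry path above.
 */
extern int my_fabric_request_data_out(struct se_cmd *se_cmd);

static int my_fabric_write_pending(struct se_cmd *se_cmd)
{
	return my_fabric_request_data_out(se_cmd);
}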
static void transport_write_pending_qf(struct se_cmd *cmd)
{
	unsigned long flags;
	int ret;
	bool stop;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (stop) {
		pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
			 __func__, __LINE__, cmd->tag);
		complete_all(&cmd->t_transport_stop_comp);
		return;
	}

	ret = cmd->se_tfo->write_pending(cmd);
	if (ret) {
		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
			 cmd);
		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
	}
}
static bool
__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
			   unsigned long *flags);
static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
/*
 * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
 * finished.
 */
void target_put_cmd_and_wait(struct se_cmd *cmd)
{
	DECLARE_COMPLETION_ONSTACK(compl);

	WARN_ON_ONCE(cmd->abrt_compl);
	cmd->abrt_compl = &compl;
	target_put_sess_cmd(cmd);
	wait_for_completion(&compl);
}
/*
 * This function is called by frontend drivers after processing of a command
 * has finished.
 *
 * The protocol for ensuring that either the regular frontend command
 * processing flow or target_handle_abort() code drops one reference is as
 * follows:
 * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause
 *   the frontend driver to call this function synchronously or asynchronously.
 *   That will cause one reference to be dropped.
 * - During regular command processing the target core sets CMD_T_COMPLETE
 *   before invoking one of the .queue_*() functions.
 * - The code that aborts commands skips commands and TMFs for which
 *   CMD_T_COMPLETE has been set.
 * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
 *   commands that will be aborted.
 * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set,
 *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
 * - For aborted commands for which CMD_T_TAS has been set, .queue_status()
 *   will be called and will drop a reference.
 * - For aborted commands for which CMD_T_TAS has not been set, .aborted_task()
 *   will be called. target_handle_abort() will drop the final reference.
 */
int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
{
	DECLARE_COMPLETION_ONSTACK(compl);
	int ret = 0;
	bool aborted = false, tas = false;

	if (wait_for_tasks)
		target_wait_free_cmd(cmd, &aborted, &tas);

	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
		/*
		 * Handle WRITE failure case where transport_generic_new_cmd()
		 * has already added se_cmd to state_list, but fabric has
		 * failed command before I/O submission.
		 */
		if (cmd->state_active)
			target_remove_from_state_list(cmd);

		if (cmd->se_lun)
			transport_lun_remove_cmd(cmd);
	}
	if (aborted)
		cmd->free_compl = &compl;
	ret = target_put_sess_cmd(cmd);
	if (aborted) {
		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
		wait_for_completion(&compl);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL(transport_generic_free_cmd);
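/*
 * Sketch of the typical fabric response-done path (hypothetical function
 * name): after the response has been sent and acknowledged, the fabric drops
 * its reference.  Passing wait_for_tasks = true instead would block until
 * the core has stopped internal processing of the command.
 */
static void my_fabric_release_cmd(struct se_cmd *se_cmd)
{
	transport_generic_free_cmd(se_cmd, false);
}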
/**
 * target_get_sess_cmd - Verify the session is accepting cmds and take ref
 * @se_cmd: command descriptor to add
 * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd()
 */
int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
{
	struct se_session *se_sess = se_cmd->se_sess;
	int ret = 0;

	/*
	 * Add a second kref if the fabric caller is expecting to handle
	 * fabric acknowledgement that requires two target_put_sess_cmd()
	 * invocations before se_cmd descriptor release.
	 */
	if (ack_kref) {
		if (!kref_get_unless_zero(&se_cmd->cmd_kref))
			return -EINVAL;

		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
	}

	if (!percpu_ref_tryget_live(&se_sess->cmd_count))
		ret = -ESHUTDOWN;

	if (ret && ack_kref)
		target_put_sess_cmd(se_cmd);

	return ret;
}
EXPORT_SYMBOL(target_get_sess_cmd);
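/*
 * Sketch of the SCF_ACK_KREF contract (hypothetical fabric code): with
 * ack_kref == true the fabric owns a second reference, so two
 * target_put_sess_cmd() calls are needed -- one from the normal completion
 * path and one from the fabric's response-ack handler.
 */
static void my_fabric_handle_resp_ack(struct se_cmd *se_cmd)
{
	/* initiator acknowledged the response: drop the fabric's kref */
	target_put_sess_cmd(se_cmd);
}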
static void target_free_cmd_mem(struct se_cmd *cmd)
{
	transport_free_pages(cmd);

	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
		core_tmr_release_req(cmd->se_tmr_req);
	if (cmd->t_task_cdb != cmd->__t_task_cdb)
		kfree(cmd->t_task_cdb);
}
static void target_release_cmd_kref(struct kref *kref)
{
	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
	struct se_session *se_sess = se_cmd->se_sess;
	struct completion *free_compl = se_cmd->free_compl;
	struct completion *abrt_compl = se_cmd->abrt_compl;

	target_free_cmd_mem(se_cmd);
	se_cmd->se_tfo->release_cmd(se_cmd);
	if (free_compl)
		complete(free_compl);
	if (abrt_compl)
		complete(abrt_compl);

	percpu_ref_put(&se_sess->cmd_count);
}
/**
 * target_put_sess_cmd - decrease the command reference count
 * @se_cmd: command to drop a reference from
 *
 * Returns 1 if and only if this target_put_sess_cmd() call caused the
 * refcount to drop to zero. Returns zero otherwise.
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
static const char *data_dir_name(enum dma_data_direction d)
{
	switch (d) {
	case DMA_BIDIRECTIONAL:	return "BIDI";
	case DMA_TO_DEVICE:	return "WRITE";
	case DMA_FROM_DEVICE:	return "READ";
	case DMA_NONE:		return "NONE";
	}

	return "(?)";
}
static const char *cmd_state_name(enum transport_state_table t)
{
	switch (t) {
	case TRANSPORT_NO_STATE:	return "NO_STATE";
	case TRANSPORT_NEW_CMD:		return "NEW_CMD";
	case TRANSPORT_WRITE_PENDING:	return "WRITE_PENDING";
	case TRANSPORT_PROCESSING:	return "PROCESSING";
	case TRANSPORT_COMPLETE:	return "COMPLETE";
	case TRANSPORT_ISTATE_PROCESSING:
					return "ISTATE_PROCESSING";
	case TRANSPORT_COMPLETE_QF_WP:	return "COMPLETE_QF_WP";
	case TRANSPORT_COMPLETE_QF_OK:	return "COMPLETE_QF_OK";
	case TRANSPORT_COMPLETE_QF_ERR:	return "COMPLETE_QF_ERR";
	}

	return "(?)";
}
static void target_append_str(char **str, const char *txt)
{
	char *prev = *str;

	*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
		kstrdup(txt, GFP_ATOMIC);
	kfree(prev);
}
/*
 * Convert a transport state bitmask into a string. The caller is
 * responsible for freeing the returned pointer.
 */
static char *target_ts_to_str(u32 ts)
{
	char *str = NULL;

	if (ts & CMD_T_ABORTED)
		target_append_str(&str, "aborted");
	if (ts & CMD_T_ACTIVE)
		target_append_str(&str, "active");
	if (ts & CMD_T_COMPLETE)
		target_append_str(&str, "complete");
	if (ts & CMD_T_SENT)
		target_append_str(&str, "sent");
	if (ts & CMD_T_STOP)
		target_append_str(&str, "stop");
	if (ts & CMD_T_FABRIC_STOP)
		target_append_str(&str, "fabric_stop");

	return str;
}
static const char *target_tmf_name(enum tcm_tmreq_table tmf)
{
	switch (tmf) {
	case TMR_ABORT_TASK:		return "ABORT_TASK";
	case TMR_ABORT_TASK_SET:	return "ABORT_TASK_SET";
	case TMR_CLEAR_ACA:		return "CLEAR_ACA";
	case TMR_CLEAR_TASK_SET:	return "CLEAR_TASK_SET";
	case TMR_LUN_RESET:		return "LUN_RESET";
	case TMR_TARGET_WARM_RESET:	return "TARGET_WARM_RESET";
	case TMR_TARGET_COLD_RESET:	return "TARGET_COLD_RESET";
	case TMR_LUN_RESET_PRO:		return "LUN_RESET_PRO";
	case TMR_UNKNOWN:		break;
	}

	return "(?)";
}
void target_show_cmd(const char *pfx, struct se_cmd *cmd)
{
	char *ts_str = target_ts_to_str(cmd->transport_state);
	const u8 *cdb = cmd->t_task_cdb;
	struct se_tmr_req *tmf = cmd->se_tmr_req;

	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
		pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
			 pfx, cdb[0], cdb[1], cmd->tag,
			 data_dir_name(cmd->data_direction),
			 cmd->se_tfo->get_cmd_state(cmd),
			 cmd_state_name(cmd->t_state), cmd->data_length,
			 kref_read(&cmd->cmd_kref), ts_str);
	} else {
		pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
			 pfx, target_tmf_name(tmf->function), cmd->tag,
			 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
			 cmd_state_name(cmd->t_state),
			 kref_read(&cmd->cmd_kref), ts_str);
	}
	kfree(ts_str);
}
EXPORT_SYMBOL(target_show_cmd);
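/*
 * Usage sketch (hypothetical caller): fabric timeout handlers commonly dump
 * a stuck command before escalating, e.g.:
 *
 *	target_show_cmd("my_fabric timeout: ", cmd);
 *
 * which logs the CDB, tag, direction, states and refcount shown above.
 */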
static void target_stop_session_confirm(struct percpu_ref *ref)
{
	struct se_session *se_sess = container_of(ref, struct se_session,
						  cmd_count);
	complete_all(&se_sess->stop_done);
}
/**
 * target_stop_session - Stop new IO from being queued on the session.
 * @se_sess: session to stop
 */
void target_stop_session(struct se_session *se_sess)
{
	pr_debug("Stopping session queue.\n");
	if (atomic_cmpxchg(&se_sess->stopped, 0, 1) == 0)
		percpu_ref_kill_and_confirm(&se_sess->cmd_count,
					    target_stop_session_confirm);
}
EXPORT_SYMBOL(target_stop_session);
/**
 * target_wait_for_sess_cmds - Wait for outstanding commands
 * @se_sess: session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	int ret;

	WARN_ON_ONCE(!atomic_read(&se_sess->stopped));

	do {
		pr_debug("Waiting for running cmds to complete.\n");
		ret = wait_event_timeout(se_sess->cmd_count_wq,
				percpu_ref_is_zero(&se_sess->cmd_count),
				180 * HZ);
	} while (ret <= 0);

	wait_for_completion(&se_sess->stop_done);
	pr_debug("Waiting for cmds done.\n");
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
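/*
 * Typical session-teardown ordering (sketch; my_fabric_close_session() is a
 * made-up wrapper): stop new I/O first, wait for outstanding commands to
 * drain, then deregister the session.
 */
static void my_fabric_close_session(struct se_session *se_sess)
{
	target_stop_session(se_sess);		/* fail new submissions */
	target_wait_for_sess_cmds(se_sess);	/* drain cmd_count */
	target_remove_session(se_sess);		/* deregister and free */
}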
/*
 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
 * all references to the LUN have been released. Called during LUN shutdown.
 */
void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_shutdown_comp);
}
static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
			   bool *aborted, bool *tas, unsigned long *flags)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{
	assert_spin_locked(&cmd->t_state_lock);
	WARN_ON_ONCE(!irqs_disabled());

	if (fabric_stop)
		cmd->transport_state |= CMD_T_FABRIC_STOP;

	if (cmd->transport_state & CMD_T_ABORTED)
		*aborted = true;

	if (cmd->transport_state & CMD_T_TAS)
		*tas = true;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->transport_state & CMD_T_ACTIVE))
		return false;

	if (fabric_stop && *aborted)
		return false;

	cmd->transport_state |= CMD_T_STOP;

	target_show_cmd("wait_for_tasks: Stopping ", cmd);

	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

	while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
					    180 * HZ))
		target_show_cmd("wait for tasks: ", cmd);

	spin_lock_irqsave(&cmd->t_state_lock, *flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);

	return true;
}
/**
 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
 * @cmd: command to wait on
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;
	bool ret, aborted = false, tas = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
struct sense_detail {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sense_info;
};

static const struct sense_detail sense_detail_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
	},
	[TCM_TOO_MANY_TARGET_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
	},
	[TCM_TOO_MANY_SEGMENT_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
		.ascq = 0x05, /* N/A */
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11, /* READ ERROR */
		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27, /* WRITE PROTECTED */
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_CHECK_CONDITION_NOT_READY] = {
		.key = NOT_READY,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
		.ascq = 0x00,
		.add_sense_info = true,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
		.add_sense_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		.add_sense_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		.add_sense_info = true,
	},
	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
		.key = COPY_ABORTED,
		.asc = 0x0d,
		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators. Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
		/*
		 * From spc4r22, sections 5.7.7 and 5.7.8:
		 * If a PERSISTENT RESERVE OUT command with a REGISTER service
		 * action, a REGISTER AND IGNORE EXISTING KEY service action,
		 * or a REGISTER AND MOVE service action is attempted, but
		 * there are insufficient device server resources to complete
		 * the operation, then the command shall be terminated with
		 * CHECK CONDITION status, with the sense key set to ILLEGAL
		 * REQUEST, and the additional sense code set to INSUFFICIENT
		 * REGISTRATION RESOURCES.
		 */
		.key = ILLEGAL_REQUEST,
		.asc = 0x55,
		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
	},
	[TCM_INVALID_FIELD_IN_COMMAND_IU] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0e,
		.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
	},
};
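/*
 * Sketch of how this table is typically fed (hypothetical backend code): a
 * sense_reason_t returned from a parse/execute path is later translated by
 * translate_sense_reason() below into key/asc/ascq for the CHECK CONDITION
 * response.
 */
static sense_reason_t my_backend_check_opcode(struct se_cmd *cmd)
{
	if (cmd->t_task_cdb[0] != READ_10)
		return TCM_UNSUPPORTED_SCSI_OPCODE; /* asc 0x20 per the table */

	return TCM_NO_SENSE;
}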
/**
 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
 *   be stored.
 * @reason: LIO sense reason code. If this argument has the value
 * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
 * dequeuing a unit attention fails due to multiple commands being processed
 * concurrently, set the command status to BUSY.
 */
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_detail *sd;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 key, asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
		sd = &sense_detail_table[r];
	else
		sd = &sense_detail_table[(__force int)
					 TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	key = sd->key;
	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
						       &ascq)) {
			cmd->scsi_status = SAM_STAT_BUSY;
			return;
		}
	} else if (sd->asc == 0) {
		WARN_ON_ONCE(cmd->scsi_asc == 0);
		asc = cmd->scsi_asc;
		ascq = cmd->scsi_ascq;
	} else {
		asc = sd->asc;
		ascq = sd->ascq;
	}

	cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
	scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
	if (sd->add_sense_info)
		WARN_ON_ONCE(scsi_set_sense_information(buffer,
							cmd->scsi_sense_length,
							cmd->sense_info) < 0);
}
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport)
		translate_sense_reason(cmd, reason);

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
/**
 * target_send_busy - Send SCSI BUSY status back to the initiator
 * @cmd: SCSI command for which to send a BUSY reply.
 *
 * Note: Only call this function if target_submit_cmd*() failed.
 */
int target_send_busy(struct se_cmd *cmd)
{
	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

	cmd->scsi_status = SAM_STAT_BUSY;
	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(target_send_busy);
static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	if (cmd->transport_state & CMD_T_ABORTED)
		goto aborted;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
					       cmd->orig_fe_lun, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
		       tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	if (cmd->transport_state & CMD_T_ABORTED)
		goto aborted;

	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

aborted:
	target_handle_abort(cmd);
}
int transport_generic_handle_tmr(
	struct se_cmd *cmd)
{
	unsigned long flags;
	bool aborted = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		aborted = true;
	} else {
		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
		cmd->transport_state |= CMD_T_ACTIVE;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (aborted) {
		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
				    cmd->se_tmr_req->function,
				    cmd->se_tmr_req->ref_task_tag, cmd->tag);
		target_handle_abort(cmd);
		return 0;
	}

	INIT_WORK(&cmd->work, target_tmr_work);
	schedule_work(&cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);
bool
target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

bool
target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}
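/*
 * Usage sketch (hypothetical function): a caching mode-page emulation can
 * consume these helpers; assuming the standard SBC caching page layout, WCE
 * is reported as bit 2 of byte 2 of the page.
 */
static u8 example_caching_page_byte2(struct se_device *dev)
{
	return target_check_wce(dev) ? 0x04 : 0x00;	/* WCE bit */
}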