// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_transport.c
 *
 * This file contains the Generic Target Engine Core.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/net.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/in.h>
#include <linux/cdrom.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <asm/unaligned.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_common.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"

#define CREATE_TRACE_POINTS
#include <trace/events/target.h>

static struct workqueue_struct *target_completion_wq;
static struct kmem_cache *se_sess_cache;
struct kmem_cache *se_ua_cache;
struct kmem_cache *t10_pr_reg_cache;
struct kmem_cache *t10_alua_lu_gp_cache;
struct kmem_cache *t10_alua_lu_gp_mem_cache;
struct kmem_cache *t10_alua_tg_pt_gp_cache;
struct kmem_cache *t10_alua_lba_map_cache;
struct kmem_cache *t10_alua_lba_map_mem_cache;

static void transport_complete_task_attr(struct se_cmd *cmd);
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
static void transport_handle_queue_full(struct se_cmd *cmd,
		struct se_device *dev, int err, bool write_pending);
static void target_complete_ok_work(struct work_struct *work);

int init_se_kmem_caches(void)
{
	se_sess_cache = kmem_cache_create("se_sess_cache",
			sizeof(struct se_session), __alignof__(struct se_session),
			0, NULL);
	if (!se_sess_cache) {
		pr_err("kmem_cache_create() for struct se_session failed\n");
		goto out;
	}
	se_ua_cache = kmem_cache_create("se_ua_cache",
			sizeof(struct se_ua), __alignof__(struct se_ua),
			0, NULL);
	if (!se_ua_cache) {
		pr_err("kmem_cache_create() for struct se_ua failed\n");
		goto out_free_sess_cache;
	}
	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
			sizeof(struct t10_pr_registration),
			__alignof__(struct t10_pr_registration), 0, NULL);
	if (!t10_pr_reg_cache) {
		pr_err("kmem_cache_create() for struct t10_pr_registration failed\n");
		goto out_free_ua_cache;
	}
	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
			0, NULL);
	if (!t10_alua_lu_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache failed\n");
		goto out_free_pr_reg_cache;
	}
	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
			sizeof(struct t10_alua_lu_gp_member),
			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
	if (!t10_alua_lu_gp_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_cache failed\n");
		goto out_free_lu_gp_cache;
	}
	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
			sizeof(struct t10_alua_tg_pt_gp),
			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
	if (!t10_alua_tg_pt_gp_cache) {
		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_cache failed\n");
		goto out_free_lu_gp_mem_cache;
	}
	t10_alua_lba_map_cache = kmem_cache_create(
			"t10_alua_lba_map_cache",
			sizeof(struct t10_alua_lba_map),
			__alignof__(struct t10_alua_lba_map), 0, NULL);
	if (!t10_alua_lba_map_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_cache failed\n");
		goto out_free_tg_pt_gp_cache;
	}
	t10_alua_lba_map_mem_cache = kmem_cache_create(
			"t10_alua_lba_map_mem_cache",
			sizeof(struct t10_alua_lba_map_member),
			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
	if (!t10_alua_lba_map_mem_cache) {
		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_cache failed\n");
		goto out_free_lba_map_cache;
	}

	target_completion_wq = alloc_workqueue("target_completion",
					       WQ_MEM_RECLAIM, 0);
	if (!target_completion_wq)
		goto out_free_lba_map_mem_cache;

	return 0;

out_free_lba_map_mem_cache:
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
out_free_lba_map_cache:
	kmem_cache_destroy(t10_alua_lba_map_cache);
out_free_tg_pt_gp_cache:
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
out_free_lu_gp_mem_cache:
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
out_free_lu_gp_cache:
	kmem_cache_destroy(t10_alua_lu_gp_cache);
out_free_pr_reg_cache:
	kmem_cache_destroy(t10_pr_reg_cache);
out_free_ua_cache:
	kmem_cache_destroy(se_ua_cache);
out_free_sess_cache:
	kmem_cache_destroy(se_sess_cache);
out:
	return -ENOMEM;
}

void release_se_kmem_caches(void)
{
	destroy_workqueue(target_completion_wq);
	kmem_cache_destroy(se_sess_cache);
	kmem_cache_destroy(se_ua_cache);
	kmem_cache_destroy(t10_pr_reg_cache);
	kmem_cache_destroy(t10_alua_lu_gp_cache);
	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
	kmem_cache_destroy(t10_alua_lba_map_cache);
	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
}

/* This code ensures unique MIB indexes are handed out. */
static DEFINE_SPINLOCK(scsi_mib_index_lock);
static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];

/*
 * Allocate a new row index for the entry type specified.
 */
u32 scsi_get_new_index(scsi_index_t type)
{
	u32 new_index;

	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));

	spin_lock(&scsi_mib_index_lock);
	new_index = ++scsi_mib_index[type];
	spin_unlock(&scsi_mib_index_lock);

	return new_index;
}

void transport_subsystem_check_init(void)
{
	int ret;
	static int sub_api_initialized;

	if (sub_api_initialized)
		return;

	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
	if (ret != 0)
		pr_err("Unable to load target_core_iblock\n");

	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
	if (ret != 0)
		pr_err("Unable to load target_core_file\n");

	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
	if (ret != 0)
		pr_err("Unable to load target_core_pscsi\n");

	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
	if (ret != 0)
		pr_err("Unable to load target_core_user\n");

	sub_api_initialized = 1;
}

static void target_release_sess_cmd_refcnt(struct percpu_ref *ref)
{
	struct se_session *sess = container_of(ref, typeof(*sess), cmd_count);

	wake_up(&sess->cmd_list_wq);
}

/**
 * transport_init_session - initialize a session object
 * @se_sess: Session object pointer.
 *
 * The caller must have zero-initialized @se_sess before calling this function.
 */
int transport_init_session(struct se_session *se_sess)
{
	INIT_LIST_HEAD(&se_sess->sess_list);
	INIT_LIST_HEAD(&se_sess->sess_acl_list);
	INIT_LIST_HEAD(&se_sess->sess_cmd_list);
	spin_lock_init(&se_sess->sess_cmd_lock);
	init_waitqueue_head(&se_sess->cmd_list_wq);
	return percpu_ref_init(&se_sess->cmd_count,
			       target_release_sess_cmd_refcnt, 0, GFP_KERNEL);
}
EXPORT_SYMBOL(transport_init_session);
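
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * fabric driver that embeds a struct se_session inside its own per-connection
 * structure could use transport_init_session(). kzalloc() satisfies the
 * zero-initialization requirement documented above; all my_* names are
 * made up for this example.
 *
 *	struct my_fabric_conn {
 *		struct se_session sess;
 *	};
 *
 *	struct my_fabric_conn *conn = kzalloc(sizeof(*conn), GFP_KERNEL);
 *	int rc;
 *
 *	if (!conn)
 *		return -ENOMEM;
 *	rc = transport_init_session(&conn->sess);
 *	if (rc) {
 *		kfree(conn);
 *		return rc;
 *	}
 */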

/**
 * transport_alloc_session - allocate a session object and initialize it
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int ret;

	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
	if (!se_sess) {
		pr_err("Unable to allocate struct se_session from se_sess_cache\n");
		return ERR_PTR(-ENOMEM);
	}
	ret = transport_init_session(se_sess);
	if (ret < 0) {
		kmem_cache_free(se_sess_cache, se_sess);
		return ERR_PTR(ret);
	}
	se_sess->sup_prot_ops = sup_prot_ops;

	return se_sess;
}
EXPORT_SYMBOL(transport_alloc_session);

/**
 * transport_alloc_session_tags - allocate target driver private data
 * @se_sess:  Session pointer.
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *            each command.
 */
int transport_alloc_session_tags(struct se_session *se_sess,
				 unsigned int tag_num, unsigned int tag_size)
{
	int rc;

	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
	if (!se_sess->sess_cmd_map) {
		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
		return -ENOMEM;
	}

	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
			false, GFP_KERNEL, NUMA_NO_NODE);
	if (rc < 0) {
		pr_err("Unable to init se_sess->sess_tag_pool, tag_num: %u\n",
			tag_num);
		kvfree(se_sess->sess_cmd_map);
		se_sess->sess_cmd_map = NULL;
		return -ENOMEM;
	}

	return 0;
}
EXPORT_SYMBOL(transport_alloc_session_tags);
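
/*
 * Illustrative sketch (not part of the original file): pre-allocating tags
 * for a hypothetical fabric that supports up to 128 outstanding commands and
 * wraps each command in its own (made-up) my_fabric_cmd structure. Each tag
 * handed out by sess_tag_pool then indexes a tag_size slice of sess_cmd_map.
 *
 *	rc = transport_alloc_session_tags(se_sess, 128,
 *					  sizeof(struct my_fabric_cmd));
 *	if (rc < 0)
 *		return rc;
 */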

/**
 * transport_init_session_tags - allocate a session and target driver private data
 * @tag_num:  Maximum number of in-flight commands between initiator and target.
 * @tag_size: Size in bytes of the private data a target driver associates with
 *            each command.
 * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
 */
static struct se_session *
transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
			    enum target_prot_op sup_prot_ops)
{
	struct se_session *se_sess;
	int rc;

	if (tag_num != 0 && !tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_num: %u, but zero tag_size\n",
		       tag_num);
		return ERR_PTR(-EINVAL);
	}
	if (!tag_num && tag_size) {
		pr_err("init_session_tags called with percpu-ida tag_size: %u, but zero tag_num\n",
		       tag_size);
		return ERR_PTR(-EINVAL);
	}

	se_sess = transport_alloc_session(sup_prot_ops);
	if (IS_ERR(se_sess))
		return se_sess;

	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
	if (rc < 0) {
		transport_free_session(se_sess);
		return ERR_PTR(-ENOMEM);
	}

	return se_sess;
}

/*
 * Called with spin_lock_irqsave(&se_tpg->session_lock) held.
 */
void __transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
	unsigned char buf[PR_REG_ISID_LEN];
	unsigned long flags;

	se_sess->se_tpg = se_tpg;
	se_sess->fabric_sess_ptr = fabric_sess_ptr;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate the active
	 * struct se_session.
	 *
	 * Only set for struct se_session's that will actually be moving I/O,
	 * e.g. *NOT* discovery sessions.
	 */
	if (se_nacl) {
		/*
		 * Determine if the fabric allows T10-PI feature bits to be
		 * exposed to initiators for device backends with
		 * !dev->dev_attrib.pi_prot_type.
		 *
		 * If so, then always save prot_type on a per se_node_acl
		 * basis and re-instate the previous sess_prot_type to avoid
		 * disabling PI from below any previously initiator side
		 * registered LUNs.
		 */
		if (se_nacl->saved_prot_type)
			se_sess->sess_prot_type = se_nacl->saved_prot_type;
		else if (tfo->tpg_check_prot_fabric_only)
			se_sess->sess_prot_type = se_nacl->saved_prot_type =
					tfo->tpg_check_prot_fabric_only(se_tpg);
		/*
		 * If the fabric module supports an ISID based TransportID,
		 * save this value in binary from the fabric I_T Nexus now.
		 */
		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
			memset(&buf[0], 0, PR_REG_ISID_LEN);
			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
					&buf[0], PR_REG_ISID_LEN);
			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
		}

		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		/*
		 * The se_nacl->nacl_sess pointer will be set to the
		 * last active I_T Nexus for each struct se_node_acl.
		 */
		se_nacl->nacl_sess = se_sess;

		list_add_tail(&se_sess->sess_acl_list,
			      &se_nacl->acl_sess_list);
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);

	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
}
EXPORT_SYMBOL(__transport_register_session);

void transport_register_session(
	struct se_portal_group *se_tpg,
	struct se_node_acl *se_nacl,
	struct se_session *se_sess,
	void *fabric_sess_ptr)
{
	unsigned long flags;

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
}
EXPORT_SYMBOL(transport_register_session);

struct se_session *
target_setup_session(struct se_portal_group *tpg,
		     unsigned int tag_num, unsigned int tag_size,
		     enum target_prot_op prot_op,
		     const char *initiatorname, void *private,
		     int (*callback)(struct se_portal_group *,
				     struct se_session *, void *))
{
	struct se_session *sess;

	/*
	 * If the fabric driver is using percpu-ida based pre-allocation
	 * of I/O descriptor tags, go ahead and perform that setup now.
	 */
	if (tag_num != 0)
		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
	else
		sess = transport_alloc_session(prot_op);

	if (IS_ERR(sess))
		return sess;

	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
					(unsigned char *)initiatorname);
	if (!sess->se_node_acl) {
		transport_free_session(sess);
		return ERR_PTR(-EACCES);
	}
	/*
	 * Go ahead and perform any remaining fabric setup that is
	 * required before transport_register_session().
	 */
	if (callback != NULL) {
		int rc = callback(tpg, sess, private);
		if (rc) {
			transport_free_session(sess);
			return ERR_PTR(rc);
		}
	}

	transport_register_session(tpg, sess->se_node_acl, sess, private);
	return sess;
}
EXPORT_SYMBOL(target_setup_session);
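
/*
 * Illustrative sketch (not part of the original file): a hypothetical fabric
 * driver creating a session during login. Passing tag_num == 0 skips the tag
 * pre-allocation path; the callback runs after the dynamic/explicit ACL
 * lookup but before the session is registered with the TPG. my_fabric_sess_cb
 * and conn are made up for this example.
 *
 *	static int my_fabric_sess_cb(struct se_portal_group *tpg,
 *				     struct se_session *sess, void *priv)
 *	{
 *		// hypothetical: stash the fabric connection pointer
 *		return 0;
 *	}
 *
 *	sess = target_setup_session(tpg, 0, 0, TARGET_PROT_NORMAL,
 *				    initiator_name, conn, my_fabric_sess_cb);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */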

ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
{
	struct se_session *se_sess;
	ssize_t len = 0;

	spin_lock_bh(&se_tpg->session_lock);
	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
		if (!se_sess->se_node_acl)
			continue;
		if (!se_sess->se_node_acl->dynamic_node_acl)
			continue;
		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
			break;

		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
				se_sess->se_node_acl->initiatorname);
		len += 1; /* Include the NUL terminator */
	}
	spin_unlock_bh(&se_tpg->session_lock);

	return len;
}
EXPORT_SYMBOL(target_show_dynamic_sessions);

static void target_complete_nacl(struct kref *kref)
{
	struct se_node_acl *nacl = container_of(kref,
				struct se_node_acl, acl_kref);
	struct se_portal_group *se_tpg = nacl->se_tpg;

	if (!nacl->dynamic_stop) {
		complete(&nacl->acl_free_comp);
		return;
	}

	mutex_lock(&se_tpg->acl_node_mutex);
	list_del_init(&nacl->acl_list);
	mutex_unlock(&se_tpg->acl_node_mutex);

	core_tpg_wait_for_nacl_pr_ref(nacl);
	core_free_device_list_for_node(nacl, se_tpg);
	kfree(nacl);
}

void target_put_nacl(struct se_node_acl *nacl)
{
	kref_put(&nacl->acl_kref, target_complete_nacl);
}
EXPORT_SYMBOL(target_put_nacl);

void transport_deregister_session_configfs(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl;
	unsigned long flags;
	/*
	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
	 */
	se_nacl = se_sess->se_node_acl;
	if (se_nacl) {
		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
		if (!list_empty(&se_sess->sess_acl_list))
			list_del_init(&se_sess->sess_acl_list);
		/*
		 * If the session list is empty, then clear the pointer.
		 * Otherwise, set the struct se_session pointer from the tail
		 * element of the per struct se_node_acl active session list.
		 */
		if (list_empty(&se_nacl->acl_sess_list))
			se_nacl->nacl_sess = NULL;
		else {
			se_nacl->nacl_sess = container_of(
					se_nacl->acl_sess_list.prev,
					struct se_session, sess_acl_list);
		}
		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
	}
}
EXPORT_SYMBOL(transport_deregister_session_configfs);

void transport_free_session(struct se_session *se_sess)
{
	struct se_node_acl *se_nacl = se_sess->se_node_acl;

	/*
	 * Drop the se_node_acl->nacl_kref obtained from within
	 * core_tpg_get_initiator_node_acl().
	 */
	if (se_nacl) {
		struct se_portal_group *se_tpg = se_nacl->se_tpg;
		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
		unsigned long flags;

		se_sess->se_node_acl = NULL;

		/*
		 * Also determine if we need to drop the extra ->cmd_kref if
		 * it had been previously dynamically generated, and
		 * the endpoint is not caching dynamic ACLs.
		 */
		mutex_lock(&se_tpg->acl_node_mutex);
		if (se_nacl->dynamic_node_acl &&
		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
			if (list_empty(&se_nacl->acl_sess_list))
				se_nacl->dynamic_stop = true;
			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);

			if (se_nacl->dynamic_stop)
				list_del_init(&se_nacl->acl_list);
		}
		mutex_unlock(&se_tpg->acl_node_mutex);

		if (se_nacl->dynamic_stop)
			target_put_nacl(se_nacl);

		target_put_nacl(se_nacl);
	}
	if (se_sess->sess_cmd_map) {
		sbitmap_queue_free(&se_sess->sess_tag_pool);
		kvfree(se_sess->sess_cmd_map);
	}
	percpu_ref_exit(&se_sess->cmd_count);
	kmem_cache_free(se_sess_cache, se_sess);
}
EXPORT_SYMBOL(transport_free_session);

static int target_release_res(struct se_device *dev, void *data)
{
	struct se_session *sess = data;

	if (dev->reservation_holder == sess)
		target_release_reservation(dev);
	return 0;
}

void transport_deregister_session(struct se_session *se_sess)
{
	struct se_portal_group *se_tpg = se_sess->se_tpg;
	unsigned long flags;

	if (!se_tpg) {
		transport_free_session(se_sess);
		return;
	}

	spin_lock_irqsave(&se_tpg->session_lock, flags);
	list_del(&se_sess->sess_list);
	se_sess->se_tpg = NULL;
	se_sess->fabric_sess_ptr = NULL;
	spin_unlock_irqrestore(&se_tpg->session_lock, flags);

	/*
	 * Since the session is being removed, release SPC-2
	 * reservations held by the session that is disappearing.
	 */
	target_for_each_device(target_release_res, se_sess);

	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
		se_tpg->se_tpg_tfo->fabric_name);
	/*
	 * If the last kref is being dropped now for an explicit NodeACL, wake
	 * up the sleeping ->acl_free_comp caller so that the configfs
	 * se_node_acl->acl_group removal context can continue from within
	 * transport_free_session() code.
	 *
	 * For a dynamic ACL, target_put_nacl() uses target_complete_nacl()
	 * to release all remaining generate_node_acl=1 created ACL resources.
	 */

	transport_free_session(se_sess);
}
EXPORT_SYMBOL(transport_deregister_session);

void target_remove_session(struct se_session *se_sess)
{
	transport_deregister_session_configfs(se_sess);
	transport_deregister_session(se_sess);
}
EXPORT_SYMBOL(target_remove_session);

static void target_remove_from_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	if (!dev)
		return;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (cmd->state_active) {
		list_del(&cmd->state_list);
		cmd->state_active = false;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * This function is called by the target core after the target core has
 * finished processing a SCSI command or SCSI TMF. Both the regular command
 * processing code and the code for aborting commands can call this
 * function. CMD_T_STOP is set if and only if another thread is waiting
 * inside transport_wait_for_tasks() for t_transport_stop_comp.
 */
static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
	unsigned long flags;

	target_remove_from_state_list(cmd);

	/*
	 * Clear struct se_cmd->se_lun before the handoff to FE.
	 */
	cmd->se_lun = NULL;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	/*
	 * Determine if frontend context caller is requesting the stopping of
	 * this command for frontend exceptions.
	 */
	if (cmd->transport_state & CMD_T_STOP) {
		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
			__func__, __LINE__, cmd->tag);

		spin_unlock_irqrestore(&cmd->t_state_lock, flags);

		complete_all(&cmd->t_transport_stop_comp);
		return 1;
	}
	cmd->transport_state &= ~CMD_T_ACTIVE;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	/*
	 * Some fabric modules like tcm_loop can release their internally
	 * allocated I/O reference and struct se_cmd now.
	 *
	 * Fabric modules are expected to return '1' here if the se_cmd being
	 * passed is released at this point, or zero if not being released.
	 */
	return cmd->se_tfo->check_stop_free(cmd);
}

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
	struct se_lun *lun = cmd->se_lun;

	if (!lun)
		return;

	if (cmpxchg(&cmd->lun_ref_active, true, false))
		percpu_ref_put(&lun->lun_ref);
}

static void target_complete_failure_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	transport_generic_request_failure(cmd,
			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
}

/*
 * Used when asking transport to copy Sense Data from the underlying
 * Linux/SCSI struct scsi_cmnd
 */
static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	WARN_ON(!cmd->se_lun);

	if (!dev)
		return NULL;

	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
		return NULL;

	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;

	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
	return cmd->sense_buffer;
}

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
	unsigned char *cmd_sense_buf;
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	cmd_sense_buf = transport_get_sense_buffer(cmd);
	if (!cmd_sense_buf) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return;
	}

	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
EXPORT_SYMBOL(transport_copy_sense_to_cmd);

static void target_handle_abort(struct se_cmd *cmd)
{
	bool tas = cmd->transport_state & CMD_T_TAS;
	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
	int ret;

	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);

	if (tas) {
		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
				 cmd->t_task_cdb[0], cmd->tag);
			trace_target_cmd_complete(cmd);
			ret = cmd->se_tfo->queue_status(cmd);
			if (ret) {
				transport_handle_queue_full(cmd, cmd->se_dev,
							    ret, false);
				return;
			}
		} else {
			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
			cmd->se_tfo->queue_tm_rsp(cmd);
		}
	} else {
		/*
		 * Allow the fabric driver to unmap any resources before
		 * releasing the descriptor via TFO->release_cmd().
		 */
		cmd->se_tfo->aborted_task(cmd);
		if (ack_kref)
			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
		/*
		 * To do: establish a unit attention condition on the I_T
		 * nexus associated with cmd. See also the paragraph "Aborting
		 * commands" in SAM.
		 */
	}

	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);

	transport_lun_remove_cmd(cmd);

	transport_cmd_check_stop_to_fabric(cmd);
}

static void target_abort_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);

	target_handle_abort(cmd);
}

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
	int post_ret;

	if (cmd->transport_state & CMD_T_ABORTED) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		INIT_WORK(&cmd->work, target_abort_work);
		queue_work(target_completion_wq, &cmd->work);
		return true;
	} else if (cmd->transport_state & CMD_T_STOP) {
		if (cmd->transport_complete_callback)
			cmd->transport_complete_callback(cmd, false, &post_ret);
		complete_all(&cmd->t_transport_stop_comp);
		return true;
	}

	return false;
}

/* May be called from interrupt context so must not sleep. */
void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
{
	int success;
	unsigned long flags;

	if (target_cmd_interrupted(cmd))
		return;

	cmd->scsi_status = scsi_status;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	switch (cmd->scsi_status) {
	case SAM_STAT_CHECK_CONDITION:
		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
			success = 1;
		else
			success = 0;
		break;
	default:
		success = 1;
		break;
	}

	cmd->t_state = TRANSPORT_COMPLETE;
	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
		  target_complete_failure_work);
	if (cmd->se_cmd_flags & SCF_USE_CPUID)
		queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
	else
		queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
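
/*
 * Illustrative sketch (not part of the original file): a hypothetical backend
 * driver completing a command from its hardware interrupt handler. This is
 * allowed per the comment above because target_complete_cmd() only takes
 * spinlocks and defers the rest of completion processing to
 * target_completion_wq. my_backend_pop_completed() is made up for this
 * example.
 *
 *	static irqreturn_t my_backend_irq(int irq, void *data)
 *	{
 *		struct se_cmd *cmd = my_backend_pop_completed(data);
 *
 *		target_complete_cmd(cmd, SAM_STAT_GOOD);
 *		return IRQ_HANDLED;
 *	}
 */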

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
	if ((scsi_status == SAM_STAT_GOOD ||
	     cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
	    length < cmd->data_length) {
		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			cmd->residual_count += cmd->data_length - length;
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = cmd->data_length - length;
		}

		cmd->data_length = length;
	}

	target_complete_cmd(cmd, scsi_status);
}
EXPORT_SYMBOL(target_complete_cmd_with_length);

static void target_add_to_state_list(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	unsigned long flags;

	spin_lock_irqsave(&dev->execute_task_lock, flags);
	if (!cmd->state_active) {
		list_add_tail(&cmd->state_list, &dev->state_list);
		cmd->state_active = true;
	}
	spin_unlock_irqrestore(&dev->execute_task_lock, flags);
}

/*
 * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
 */
static void transport_write_pending_qf(struct se_cmd *cmd);
static void transport_complete_qf(struct se_cmd *cmd);

void target_qf_do_work(struct work_struct *work)
{
	struct se_device *dev = container_of(work, struct se_device,
					qf_work_queue);
	LIST_HEAD(qf_cmd_list);
	struct se_cmd *cmd, *cmd_tmp;

	spin_lock_irq(&dev->qf_cmd_lock);
	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
	spin_unlock_irq(&dev->qf_cmd_lock);

	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
		list_del(&cmd->se_qf_node);
		atomic_dec_mb(&dev->dev_qf_count);

		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue context: %s\n",
			cmd->se_tfo->fabric_name, cmd,
			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
			: "UNKNOWN");

		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
			transport_write_pending_qf(cmd);
		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
			transport_complete_qf(cmd);
	}
}

unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
{
	switch (cmd->data_direction) {
	case DMA_NONE:
		return "NONE";
	case DMA_FROM_DEVICE:
		return "READ";
	case DMA_TO_DEVICE:
		return "WRITE";
	case DMA_BIDIRECTIONAL:
		return "BIDI";
	default:
		break;
	}

	return "UNKNOWN";
}

void transport_dump_dev_state(
	struct se_device *dev,
	char *b,
	int *bl)
{
	*bl += sprintf(b + *bl, "Status: ");
	if (dev->export_count)
		*bl += sprintf(b + *bl, "ACTIVATED");
	else
		*bl += sprintf(b + *bl, "DEACTIVATED");

	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
		dev->dev_attrib.block_size,
		dev->dev_attrib.hw_max_sectors);
	*bl += sprintf(b + *bl, "        ");
}

void transport_dump_vpd_proto_id(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Protocol Identifier: ");

	switch (vpd->protocol_identifier) {
	case 0x00:
		sprintf(buf+len, "Fibre Channel\n");
		break;
	case 0x10:
		sprintf(buf+len, "Parallel SCSI\n");
		break;
	case 0x20:
		sprintf(buf+len, "SSA\n");
		break;
	case 0x30:
		sprintf(buf+len, "IEEE 1394\n");
		break;
	case 0x40:
		sprintf(buf+len, "SCSI Remote Direct Memory Access Protocol\n");
		break;
	case 0x50:
		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
		break;
	case 0x60:
		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
		break;
	case 0x70:
		sprintf(buf+len, "Automation/Drive Interface Transport Protocol\n");
		break;
	case 0x80:
		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n",
				vpd->protocol_identifier);
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);
}

void
transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * Check if the Protocol Identifier Valid (PIV) bit is set.
	 *
	 * from spc3r23.pdf section 7.5.1
	 */
	if (page_83[1] & 0x80) {
		vpd->protocol_identifier = (page_83[0] & 0xf0);
		vpd->protocol_identifier_set = 1;
		transport_dump_vpd_proto_id(vpd, NULL, 0);
	}
}
EXPORT_SYMBOL(transport_set_vpd_proto_id);

int transport_dump_vpd_assoc(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Association: ");

	switch (vpd->association) {
	case 0x00:
		sprintf(buf+len, "addressed logical unit\n");
		break;
	case 0x10:
		sprintf(buf+len, "target port\n");
		break;
	case 0x20:
		sprintf(buf+len, "SCSI target device\n");
		break;
	default:
		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identification association.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 297
	 */
	vpd->association = (page_83[1] & 0x30);
	return transport_dump_vpd_assoc(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_assoc);

int transport_dump_vpd_ident_type(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;
	int len;

	memset(buf, 0, VPD_TMP_BUF_SIZE);
	len = sprintf(buf, "T10 VPD Identifier Type: ");

	switch (vpd->device_identifier_type) {
	case 0x00:
		sprintf(buf+len, "Vendor specific\n");
		break;
	case 0x01:
		sprintf(buf+len, "T10 Vendor ID based\n");
		break;
	case 0x02:
		sprintf(buf+len, "EUI-64 based\n");
		break;
	case 0x03:
		sprintf(buf+len, "NAA\n");
		break;
	case 0x04:
		sprintf(buf+len, "Relative target port identifier\n");
		break;
	case 0x08:
		sprintf(buf+len, "SCSI name string\n");
		break;
	default:
		sprintf(buf+len, "Unsupported: 0x%02x\n",
				vpd->device_identifier_type);
		ret = -EINVAL;
		break;
	}

	if (p_buf) {
		if (p_buf_len < strlen(buf)+1)
			return -EINVAL;
		strncpy(p_buf, buf, p_buf_len);
	} else {
		pr_debug("%s", buf);
	}

	return ret;
}

int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
{
	/*
	 * The VPD identifier type.
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 298
	 */
	vpd->device_identifier_type = (page_83[1] & 0x0f);
	return transport_dump_vpd_ident_type(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident_type);

int transport_dump_vpd_ident(
	struct t10_vpd *vpd,
	unsigned char *p_buf,
	int p_buf_len)
{
	unsigned char buf[VPD_TMP_BUF_SIZE];
	int ret = 0;

	memset(buf, 0, VPD_TMP_BUF_SIZE);

	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		snprintf(buf, sizeof(buf),
			"T10 VPD Binary Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x02: /* ASCII */
		snprintf(buf, sizeof(buf),
			"T10 VPD ASCII Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	case 0x03: /* UTF-8 */
		snprintf(buf, sizeof(buf),
			"T10 VPD UTF-8 Device Identifier: %s\n",
			&vpd->device_identifier[0]);
		break;
	default:
		sprintf(buf, "T10 VPD Device Identifier encoding unsupported: 0x%02x",
			vpd->device_identifier_code_set);
		ret = -EINVAL;
		break;
	}

	if (p_buf)
		strncpy(p_buf, buf, p_buf_len);
	else
		pr_debug("%s", buf);

	return ret;
}

int
transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
{
	static const char hex_str[] = "0123456789abcdef";
	int j = 0, i = 4; /* offset to start of the identifier */

	/*
	 * The VPD Code Set (encoding)
	 *
	 * from spc3r23.pdf Section 7.6.3.1 Table 296
	 */
	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
	switch (vpd->device_identifier_code_set) {
	case 0x01: /* Binary */
		vpd->device_identifier[j++] =
				hex_str[vpd->device_identifier_type];
		while (i < (4 + page_83[3])) {
			vpd->device_identifier[j++] =
				hex_str[(page_83[i] & 0xf0) >> 4];
			vpd->device_identifier[j++] =
				hex_str[page_83[i] & 0x0f];
			i++;
		}
		break;
	case 0x02: /* ASCII */
	case 0x03: /* UTF-8 */
		while (i < (4 + page_83[3]))
			vpd->device_identifier[j++] = page_83[i++];
		break;
	default:
		break;
	}

	return transport_dump_vpd_ident(vpd, NULL, 0);
}
EXPORT_SYMBOL(transport_set_vpd_ident);

static sense_reason_t
target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
			       unsigned int size)
{
	u32 mtl;

	if (!cmd->se_tfo->max_data_sg_nents)
		return TCM_NO_SENSE;
	/*
	 * Check whether se_cmd->data_length exceeds the fabric enforced
	 * maximum transfer length, i.e. the maximum number of SGL entries
	 * per I/O descriptor times PAGE_SIZE for single PAGE_SIZE entry
	 * scatter-lists.  If so, set SCF_UNDERFLOW_BIT + residual_count and
	 * reduce the original cmd->data_length to that maximum length.
	 */
	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
	if (cmd->data_length > mtl) {
		/*
		 * If an existing CDB overflow is present, calculate new residual
		 * based on CDB size minus fabric maximum transfer length.
		 *
		 * If an existing CDB underflow is present, calculate new residual
		 * based on original cmd->data_length minus fabric maximum transfer
		 * length.
		 *
		 * Otherwise, set the underflow residual based on cmd->data_length
		 * minus fabric maximum transfer length.
		 */
		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
			cmd->residual_count = (size - mtl);
		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
			u32 orig_dl = size + cmd->residual_count;
			cmd->residual_count = (orig_dl - mtl);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - mtl);
		}
		cmd->data_length = mtl;
		/*
		 * Reset sbc_check_prot() calculated protection payload
		 * length based upon the new smaller MTL.
		 */
		if (cmd->prot_length) {
			u32 sectors = (mtl / dev->dev_attrib.block_size);
			cmd->prot_length = dev->prot_length * sectors;
		}
	}
	return TCM_NO_SENSE;
}

/**
 * target_cmd_size_check - Check whether there will be a residual.
 * @cmd: SCSI command.
 * @size: Data buffer size derived from CDB. The data buffer size provided by
 *   the SCSI transport driver is available in @cmd->data_length.
 *
 * Compare the data buffer size from the CDB with the data buffer limit from the transport
 * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
 *
 * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
 *
 * Return: TCM_NO_SENSE
 */
sense_reason_t
target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
{
	struct se_device *dev = cmd->se_dev;

	if (cmd->unknown_data_length) {
		cmd->data_length = size;
	} else if (size != cmd->data_length) {
		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length: %u does not match SCSI CDB Length: %u for SAM Opcode: 0x%02x\n",
			cmd->se_tfo->fabric_name,
			cmd->data_length, size, cmd->t_task_cdb[0]);

		if (cmd->data_direction == DMA_TO_DEVICE) {
			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
				pr_err_ratelimited("Rejecting underflow/overflow for WRITE data CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
			/*
			 * Some fabric drivers like iscsi-target still expect to
			 * always reject overflow writes.  Reject this case until
			 * full fabric driver level support for overflow writes
			 * is introduced tree-wide.
			 */
			if (size > cmd->data_length) {
				pr_err_ratelimited("Rejecting overflow for WRITE control CDB\n");
				return TCM_INVALID_CDB_FIELD;
			}
		}
		/*
		 * Reject READ_* or WRITE_* with overflow/underflow for
		 * type SCF_SCSI_DATA_CDB.
		 */
		if (dev->dev_attrib.block_size != 512)  {
			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op CDB on non 512-byte sector setup subsystem plugin: %s\n",
				dev->transport->name);
			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
			return TCM_INVALID_CDB_FIELD;
		}
		/*
		 * For the overflow case keep the existing fabric provided
		 * ->data_length.  Otherwise for the underflow case, reset
		 * ->data_length to the smaller SCSI expected data transfer
		 * length.
		 */
		if (size > cmd->data_length) {
			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
			cmd->residual_count = (size - cmd->data_length);
		} else {
			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
			cmd->residual_count = (cmd->data_length - size);
			cmd->data_length = size;
		}
	}

	return target_check_max_data_sg_nents(cmd, dev, size);
}
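
/*
 * Worked example (illustrative only, derived from the logic above): for a
 * CDB that requests 8 sectors of 512 bytes (size == 4096) while the fabric
 * header only described a 2048-byte buffer (cmd->data_length == 2048), the
 * overflow branch keeps data_length at 2048 and sets
 * residual_count = 4096 - 2048 = 2048 with SCF_OVERFLOW_BIT. In the reverse
 * case (size == 2048, cmd->data_length == 4096) the underflow branch sets
 * residual_count = 2048, sets SCF_UNDERFLOW_BIT, and shrinks data_length
 * to 2048.
 */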

/*
 * Used by fabric modules containing a local struct se_cmd within their
 * fabric dependent per I/O descriptor.
 *
 * Preserves the value of @cmd->tag.
 */
void transport_init_se_cmd(
	struct se_cmd *cmd,
	const struct target_core_fabric_ops *tfo,
	struct se_session *se_sess,
	u32 data_length,
	int data_direction,
	int task_attr,
	unsigned char *sense_buffer)
{
	INIT_LIST_HEAD(&cmd->se_delayed_node);
	INIT_LIST_HEAD(&cmd->se_qf_node);
	INIT_LIST_HEAD(&cmd->se_cmd_list);
	INIT_LIST_HEAD(&cmd->state_list);
	init_completion(&cmd->t_transport_stop_comp);
	cmd->free_compl = NULL;
	cmd->abrt_compl = NULL;
	spin_lock_init(&cmd->t_state_lock);
	INIT_WORK(&cmd->work, NULL);
	kref_init(&cmd->cmd_kref);

	cmd->se_tfo = tfo;
	cmd->se_sess = se_sess;
	cmd->data_length = data_length;
	cmd->data_direction = data_direction;
	cmd->sam_task_attr = task_attr;
	cmd->sense_buffer = sense_buffer;

	cmd->state_active = false;
}
EXPORT_SYMBOL(transport_init_se_cmd);

static sense_reason_t
transport_check_alloc_task_attr(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * Check if SAM Task Attribute emulation is enabled for this
	 * struct se_device storage object
	 */
	if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
		return 0;

	if (cmd->sam_task_attr == TCM_ACA_TAG) {
		pr_debug("SAM Task Attribute ACA emulation is not supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	return 0;
}

sense_reason_t
target_setup_cmd_from_cdb(struct se_cmd *cmd, unsigned char *cdb)
{
	struct se_device *dev = cmd->se_dev;
	sense_reason_t ret;

	/*
	 * Ensure that the received CDB is less than the max (252 + 8) bytes
	 * for VARIABLE_LENGTH_CMD
	 */
	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
		pr_err("Received SCSI CDB with command_size: %d that exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
		return TCM_INVALID_CDB_FIELD;
	}
	/*
	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, allocate
	 * the additional extended CDB buffer now.  Otherwise point t_task_cdb
	 * at the built-in __t_task_cdb buffer.
	 */
1433         if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1434                 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
1435                                                 GFP_KERNEL);
1436                 if (!cmd->t_task_cdb) {
1437                         pr_err("Unable to allocate cmd->t_task_cdb"
1438                                 " %u > sizeof(cmd->__t_task_cdb): %lu\n",
1439                                 scsi_command_size(cdb),
1440                                 (unsigned long)sizeof(cmd->__t_task_cdb));
1441                         return TCM_OUT_OF_RESOURCES;
1442                 }
1443         } else
1444                 cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1445         /*
1446          * Copy the original CDB into cmd->t_task_cdb.
1447          */
1448         memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1449
1450         trace_target_sequencer_start(cmd);
1451
1452         ret = dev->transport->parse_cdb(cmd);
1453         if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1454                 pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1455                                     cmd->se_tfo->fabric_name,
1456                                     cmd->se_sess->se_node_acl->initiatorname,
1457                                     cmd->t_task_cdb[0]);
1458         if (ret)
1459                 return ret;
1460
1461         ret = transport_check_alloc_task_attr(cmd);
1462         if (ret)
1463                 return ret;
1464
1465         cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1466         atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1467         return 0;
1468 }
1469 EXPORT_SYMBOL(target_setup_cmd_from_cdb);
1470
1471 /*
1472  * Used by fabric module frontends to queue tasks directly.
1473  * May only be used from process context.
1474  */
1475 int transport_handle_cdb_direct(
1476         struct se_cmd *cmd)
1477 {
1478         sense_reason_t ret;
1479
1480         if (!cmd->se_lun) {
1481                 dump_stack();
1482                 pr_err("cmd->se_lun is NULL\n");
1483                 return -EINVAL;
1484         }
1485         if (in_interrupt()) {
1486                 dump_stack();
1487                 pr_err("transport_handle_cdb_direct cannot be called"
1488                                 " from interrupt context\n");
1489                 return -EINVAL;
1490         }
1491         /*
1492          * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1493          * outstanding descriptors are handled correctly during shutdown via
1494          * transport_wait_for_tasks()
1495          *
1496          * Also, we don't take cmd->t_state_lock here as we only expect
1497          * this to be called for initial descriptor submission.
1498          */
1499         cmd->t_state = TRANSPORT_NEW_CMD;
1500         cmd->transport_state |= CMD_T_ACTIVE;
1501
1502         /*
1503          * transport_generic_new_cmd() is already handling QUEUE_FULL,
1504          * so follow TRANSPORT_NEW_CMD processing thread context usage
1505          * and call transport_generic_request_failure() if necessary.
1506          */
1507         ret = transport_generic_new_cmd(cmd);
1508         if (ret)
1509                 transport_generic_request_failure(cmd, ret);
1510         return 0;
1511 }
1512 EXPORT_SYMBOL(transport_handle_cdb_direct);
1513
1514 sense_reason_t
1515 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1516                 u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1517 {
1518         if (!sgl || !sgl_count)
1519                 return 0;
1520
1521         /*
1522          * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1523          * scatterlists already have been set to follow what the fabric
1524          * passes for the original expected data transfer length.
1525          */
1526         if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1527                 pr_warn("Rejecting SCSI DATA overflow for fabric using"
1528                         " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1529                 return TCM_INVALID_CDB_FIELD;
1530         }
1531
1532         cmd->t_data_sg = sgl;
1533         cmd->t_data_nents = sgl_count;
1534         cmd->t_bidi_data_sg = sgl_bidi;
1535         cmd->t_bidi_data_nents = sgl_bidi_count;
1536
1537         cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1538         return 0;
1539 }
1540
1541 /**
1542  * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
1543  *                       se_cmd + use pre-allocated SGL memory.
1544  *
1545  * @se_cmd: command descriptor to submit
1546  * @se_sess: associated se_sess for endpoint
1547  * @cdb: pointer to SCSI CDB
1548  * @sense: pointer to SCSI sense buffer
1549  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1550  * @data_length: fabric expected data transfer length
1551  * @task_attr: SAM task attribute
1552  * @data_dir: DMA data direction
1553  * @flags: flags for command submission from target_sc_flags_tables
1554  * @sgl: struct scatterlist memory for unidirectional mapping
1555  * @sgl_count: scatterlist count for unidirectional mapping
1556  * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1557  * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1558  * @sgl_prot: struct scatterlist memory protection information
1559  * @sgl_prot_count: scatterlist count for protection information
1560  *
1561  * Task tags are supported if the caller has set @se_cmd->tag.
1562  *
1563  * Returns non-zero to signal active I/O shutdown failure.  All other
1564  * setup exceptions are reported as a SCSI CHECK_CONDITION response,
1565  * while this function still returns zero.
1566  *
1567  * This may only be called from process context, and also currently
1568  * assumes internal allocation of fabric payload buffer by target-core.
1569  */
1570 int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
1571                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1572                 u32 data_length, int task_attr, int data_dir, int flags,
1573                 struct scatterlist *sgl, u32 sgl_count,
1574                 struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1575                 struct scatterlist *sgl_prot, u32 sgl_prot_count)
1576 {
1577         struct se_portal_group *se_tpg;
1578         sense_reason_t rc;
1579         int ret;
1580
1581         se_tpg = se_sess->se_tpg;
1582         BUG_ON(!se_tpg);
1583         BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1584         BUG_ON(in_interrupt());
1585         /*
1586          * Initialize se_cmd for target operation.  From this point
1587          * exceptions are handled by sending exception status via
1588          * target_core_fabric_ops->queue_status() callback
1589          */
1590         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1591                                 data_length, data_dir, task_attr, sense);
1592
1593         if (flags & TARGET_SCF_USE_CPUID)
1594                 se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1595         else
1596                 se_cmd->cpuid = WORK_CPU_UNBOUND;
1597
1598         if (flags & TARGET_SCF_UNKNOWN_SIZE)
1599                 se_cmd->unknown_data_length = 1;
1600         /*
1601          * Obtain struct se_cmd->cmd_kref reference and add new cmd to
1602          * se_sess->sess_cmd_list.  A second kref_get here is necessary
1603          * for fabrics using TARGET_SCF_ACK_KREF that expect a second
1604          * kref_put() to happen during fabric packet acknowledgement.
1605          */
1606         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1607         if (ret)
1608                 return ret;
1609         /*
1610          * Signal bidirectional data payloads to target-core
1611          */
1612         if (flags & TARGET_SCF_BIDI_OP)
1613                 se_cmd->se_cmd_flags |= SCF_BIDI;
1614         /*
1615          * Locate se_lun pointer and attach it to struct se_cmd
1616          */
1617         rc = transport_lookup_cmd_lun(se_cmd, unpacked_lun);
1618         if (rc) {
1619                 transport_send_check_condition_and_sense(se_cmd, rc, 0);
1620                 target_put_sess_cmd(se_cmd);
1621                 return 0;
1622         }
1623
1624         rc = target_setup_cmd_from_cdb(se_cmd, cdb);
1625         if (rc != 0) {
1626                 transport_generic_request_failure(se_cmd, rc);
1627                 return 0;
1628         }
1629
1630         /*
1631          * Save pointers for SGLs containing protection information,
1632          * if present.
1633          */
1634         if (sgl_prot_count) {
1635                 se_cmd->t_prot_sg = sgl_prot;
1636                 se_cmd->t_prot_nents = sgl_prot_count;
1637                 se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1638         }
1639
1640         /*
1641          * When a non-zero sgl_count has been passed, perform SGL passthrough
1642          * mapping for pre-allocated fabric memory instead of having target
1643          * core perform an internal SGL allocation.
1644          */
1645         if (sgl_count != 0) {
1646                 BUG_ON(!sgl);
1647
1648                 /*
1649                  * A work-around for tcm_loop, as some userspace code using
1650                  * scsi-generic does not memset its associated read buffers,
1651                  * so go ahead and do that here for type non-data CDBs.  Also
1652                  * note that this is currently guaranteed to be a single SGL
1653                  * for this case by target core in target_setup_cmd_from_cdb()
1654                  * -> transport_generic_cmd_sequencer().
1655                  */
1656                 if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1657                      se_cmd->data_direction == DMA_FROM_DEVICE) {
1658                         unsigned char *buf = NULL;
1659
1660                         if (sgl)
1661                                 buf = kmap(sg_page(sgl)) + sgl->offset;
1662
1663                         if (buf) {
1664                                 memset(buf, 0, sgl->length);
1665                                 kunmap(sg_page(sgl));
1666                         }
1667                 }
1668
1669                 rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1670                                 sgl_bidi, sgl_bidi_count);
1671                 if (rc != 0) {
1672                         transport_generic_request_failure(se_cmd, rc);
1673                         return 0;
1674                 }
1675         }
1676
1677         /*
1678          * Check if we need to delay processing because of ALUA
1679          * Active/NonOptimized primary access state.
1680          */
1681         core_alua_check_nonop_delay(se_cmd);
1682
1683         transport_handle_cdb_direct(se_cmd);
1684         return 0;
1685 }
1686 EXPORT_SYMBOL(target_submit_cmd_map_sgls);
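
/*
 * Editor's sketch (hypothetical my_* names, reusing the my_fabric_cmd
 * example above): submitting a READ that maps fabric pre-allocated SGLs
 * instead of having target-core allocate payload memory.  Setup failures
 * surface as CHECK_CONDITION via ->queue_status(), not as a return code.
 * Guarded out so it is never compiled.
 */
#if 0
static int my_fabric_queue_read(struct my_fabric_cmd *fc,
				struct se_session *sess, unsigned char *cdb,
				u64 unpacked_lun, u32 len,
				struct scatterlist *sgl, u32 sgl_count)
{
	return target_submit_cmd_map_sgls(&fc->se_cmd, sess, cdb, fc->sense,
					  unpacked_lun, len, TCM_SIMPLE_TAG,
					  DMA_FROM_DEVICE, TARGET_SCF_ACK_KREF,
					  sgl, sgl_count, NULL, 0, NULL, 0);
}
#endif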
1687
1688 /**
1689  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1690  *
1691  * @se_cmd: command descriptor to submit
1692  * @se_sess: associated se_sess for endpoint
1693  * @cdb: pointer to SCSI CDB
1694  * @sense: pointer to SCSI sense buffer
1695  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1696  * @data_length: fabric expected data transfer length
1697  * @task_attr: SAM task attribute
1698  * @data_dir: DMA data direction
1699  * @flags: flags for command submission from target_sc_flags_tables
1700  *
1701  * Task tags are supported if the caller has set @se_cmd->tag.
1702  *
1703  * Returns non-zero to signal active I/O shutdown failure.  All other
1704  * setup exceptions are reported as a SCSI CHECK_CONDITION response,
1705  * while this function still returns zero.
1706  *
1707  * This may only be called from process context, and also currently
1708  * assumes internal allocation of fabric payload buffer by target-core.
1709  *
1710  * It also assumes internal target-core SGL memory allocation.
1711  */
1712 int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1713                 unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1714                 u32 data_length, int task_attr, int data_dir, int flags)
1715 {
1716         return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
1717                         unpacked_lun, data_length, task_attr, data_dir,
1718                         flags, NULL, 0, NULL, 0, NULL, 0);
1719 }
1720 EXPORT_SYMBOL(target_submit_cmd);
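
/*
 * Editor's sketch (hypothetical my_* names): the common submission path for
 * fabrics that let target-core allocate the data SGL internally.  Guarded
 * out so it is never compiled.
 */
#if 0
static int my_fabric_submit(struct my_fabric_cmd *fc, struct se_session *sess,
			    unsigned char *cdb, u64 unpacked_lun, u32 len,
			    int dir)
{
	return target_submit_cmd(&fc->se_cmd, sess, cdb, fc->sense,
				 unpacked_lun, len, TCM_SIMPLE_TAG, dir,
				 TARGET_SCF_ACK_KREF);
}
#endif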
1721
1722 static void target_complete_tmr_failure(struct work_struct *work)
1723 {
1724         struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1725
1726         se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1727         se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1728
1729         transport_lun_remove_cmd(se_cmd);
1730         transport_cmd_check_stop_to_fabric(se_cmd);
1731 }
1732
1733 static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag,
1734                                        u64 *unpacked_lun)
1735 {
1736         struct se_cmd *se_cmd;
1737         unsigned long flags;
1738         bool ret = false;
1739
1740         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
1741         list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
1742                 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
1743                         continue;
1744
1745                 if (se_cmd->tag == tag) {
1746                         *unpacked_lun = se_cmd->orig_fe_lun;
1747                         ret = true;
1748                         break;
1749                 }
1750         }
1751         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
1752
1753         return ret;
1754 }
1755
1756 /**
1757  * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1758  *                     for TMR CDBs
1759  *
1760  * @se_cmd: command descriptor to submit
1761  * @se_sess: associated se_sess for endpoint
1762  * @sense: pointer to SCSI sense buffer
1763  * @unpacked_lun: unpacked LUN to reference for struct se_lun
1764  * @fabric_tmr_ptr: fabric context for TMR req
1765  * @tm_type: Type of TM request
1766  * @gfp: gfp type for caller
1767  * @tag: referenced task tag for TMR_ABORT_TASK
1768  * @flags: submit cmd flags
1769  *
1770  * Callable from all contexts.
1771  */
1772
1773 int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1774                 unsigned char *sense, u64 unpacked_lun,
1775                 void *fabric_tmr_ptr, unsigned char tm_type,
1776                 gfp_t gfp, u64 tag, int flags)
1777 {
1778         struct se_portal_group *se_tpg;
1779         int ret;
1780
1781         se_tpg = se_sess->se_tpg;
1782         BUG_ON(!se_tpg);
1783
1784         transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1785                               0, DMA_NONE, TCM_SIMPLE_TAG, sense);
1786         /*
1787          * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1788          * allocation failure.
1789          */
1790         ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1791         if (ret < 0)
1792                 return -ENOMEM;
1793
1794         if (tm_type == TMR_ABORT_TASK)
1795                 se_cmd->se_tmr_req->ref_task_tag = tag;
1796
1797         /* See target_submit_cmd for commentary */
1798         ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1799         if (ret) {
1800                 core_tmr_release_req(se_cmd->se_tmr_req);
1801                 return ret;
1802         }
1803         /*
1804          * If this is ABORT_TASK with no explicit fabric provided LUN,
1805          * go ahead and search active session tags for a match to figure
1806          * out unpacked_lun for the original se_cmd.
1807          */
1808         if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) {
1809                 if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun))
1810                         goto failure;
1811         }
1812
1813         ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun);
1814         if (ret)
1815                 goto failure;
1816
1817         transport_generic_handle_tmr(se_cmd);
1818         return 0;
1819
1820         /*
1821          * For callback during failure handling, push this work off
1822          * to process context with TMR_LUN_DOES_NOT_EXIST status.
1823          */
1824 failure:
1825         INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
1826         schedule_work(&se_cmd->work);
1827         return 0;
1828 }
1829 EXPORT_SYMBOL(target_submit_tmr);
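
/*
 * Editor's sketch (hypothetical my_* names): submitting an ABORT_TASK TMR.
 * Passing TARGET_SCF_LOOKUP_LUN_FROM_TAG lets target-core resolve the LUN
 * from the referenced task tag, so unpacked_lun 0 is only a placeholder.
 * Guarded out so it is never compiled.
 */
#if 0
static int my_fabric_abort_task(struct my_fabric_cmd *fc,
				struct se_session *sess, u64 ref_tag)
{
	return target_submit_tmr(&fc->se_cmd, sess, fc->sense, 0, NULL,
				 TMR_ABORT_TASK, GFP_KERNEL, ref_tag,
				 TARGET_SCF_ACK_KREF |
				 TARGET_SCF_LOOKUP_LUN_FROM_TAG);
}
#endif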
1830
1831 /*
1832  * Handle SAM-esque emulation for generic transport request failures.
1833  */
1834 void transport_generic_request_failure(struct se_cmd *cmd,
1835                 sense_reason_t sense_reason)
1836 {
1837         int ret = 0, post_ret;
1838
1839         pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
1840                  sense_reason);
1841         target_show_cmd("-----[ ", cmd);
1842
1843         /*
1844          * For SAM Task Attribute emulation for failed struct se_cmd
1845          */
1846         transport_complete_task_attr(cmd);
1847
1848         if (cmd->transport_complete_callback)
1849                 cmd->transport_complete_callback(cmd, false, &post_ret);
1850
1851         if (cmd->transport_state & CMD_T_ABORTED) {
1852                 INIT_WORK(&cmd->work, target_abort_work);
1853                 queue_work(target_completion_wq, &cmd->work);
1854                 return;
1855         }
1856
1857         switch (sense_reason) {
1858         case TCM_NON_EXISTENT_LUN:
1859         case TCM_UNSUPPORTED_SCSI_OPCODE:
1860         case TCM_INVALID_CDB_FIELD:
1861         case TCM_INVALID_PARAMETER_LIST:
1862         case TCM_PARAMETER_LIST_LENGTH_ERROR:
1863         case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
1864         case TCM_UNKNOWN_MODE_PAGE:
1865         case TCM_WRITE_PROTECTED:
1866         case TCM_ADDRESS_OUT_OF_RANGE:
1867         case TCM_CHECK_CONDITION_ABORT_CMD:
1868         case TCM_CHECK_CONDITION_UNIT_ATTENTION:
1869         case TCM_CHECK_CONDITION_NOT_READY:
1870         case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
1871         case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
1872         case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
1873         case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
1874         case TCM_TOO_MANY_TARGET_DESCS:
1875         case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
1876         case TCM_TOO_MANY_SEGMENT_DESCS:
1877         case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
1878                 break;
1879         case TCM_OUT_OF_RESOURCES:
1880                 cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
1881                 goto queue_status;
1882         case TCM_LUN_BUSY:
1883                 cmd->scsi_status = SAM_STAT_BUSY;
1884                 goto queue_status;
1885         case TCM_RESERVATION_CONFLICT:
1886                 /*
1887                  * No SENSE Data payload for this case, set SCSI Status
1888                  * and queue the response to $FABRIC_MOD.
1889                  *
1890                  * Uses linux/include/scsi/scsi.h SAM status codes defs
1891                  */
1892                 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1893                 /*
1894                  * For UA Interlock Code 11b, a RESERVATION CONFLICT will
1895                  * establish a UNIT ATTENTION with PREVIOUS RESERVATION
1896                  * CONFLICT STATUS.
1897                  *
1898                  * See spc4r17, section 7.4.6 Control Mode Page, Table 349
1899                  */
1900                 if (cmd->se_sess &&
1901                     cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
1902                                         == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
1903                         target_ua_allocate_lun(cmd->se_sess->se_node_acl,
1904                                                cmd->orig_fe_lun, 0x2C,
1905                                         ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
1906                 }
1907
1908                 goto queue_status;
1909         default:
1910                 pr_err("Unknown transport error for CDB 0x%02x: %d\n",
1911                         cmd->t_task_cdb[0], sense_reason);
1912                 sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
1913                 break;
1914         }
1915
1916         ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
1917         if (ret)
1918                 goto queue_full;
1919
1920 check_stop:
1921         transport_lun_remove_cmd(cmd);
1922         transport_cmd_check_stop_to_fabric(cmd);
1923         return;
1924
1925 queue_status:
1926         trace_target_cmd_complete(cmd);
1927         ret = cmd->se_tfo->queue_status(cmd);
1928         if (!ret)
1929                 goto check_stop;
1930 queue_full:
1931         transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
1932 }
1933 EXPORT_SYMBOL(transport_generic_request_failure);
1934
1935 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
1936 {
1937         sense_reason_t ret;
1938
1939         if (!cmd->execute_cmd) {
1940                 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1941                 goto err;
1942         }
1943         if (do_checks) {
1944                 /*
1945                  * Check for an existing UNIT ATTENTION condition after
1946                  * target_handle_task_attr() has done SAM task attr
1947          * checking, and may have already deferred execution
1948                  * out to target_restart_delayed_cmds() context.
1949                  */
1950                 ret = target_scsi3_ua_check(cmd);
1951                 if (ret)
1952                         goto err;
1953
1954                 ret = target_alua_state_check(cmd);
1955                 if (ret)
1956                         goto err;
1957
1958                 ret = target_check_reservation(cmd);
1959                 if (ret) {
1960                         cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
1961                         goto err;
1962                 }
1963         }
1964
1965         ret = cmd->execute_cmd(cmd);
1966         if (!ret)
1967                 return;
1968 err:
1969         spin_lock_irq(&cmd->t_state_lock);
1970         cmd->transport_state &= ~CMD_T_SENT;
1971         spin_unlock_irq(&cmd->t_state_lock);
1972
1973         transport_generic_request_failure(cmd, ret);
1974 }
1975
1976 static int target_write_prot_action(struct se_cmd *cmd)
1977 {
1978         u32 sectors;
1979         /*
1980          * Perform WRITE_INSERT of PI using software emulation when backend
1981          * device has PI enabled, if the transport has not already generated
1982          * PI using hardware WRITE_INSERT offload.
1983          */
1984         switch (cmd->prot_op) {
1985         case TARGET_PROT_DOUT_INSERT:
1986                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
1987                         sbc_dif_generate(cmd);
1988                 break;
1989         case TARGET_PROT_DOUT_STRIP:
1990                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
1991                         break;
1992
1993                 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
1994                 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
1995                                              sectors, 0, cmd->t_prot_sg, 0);
1996                 if (unlikely(cmd->pi_err)) {
1997                         spin_lock_irq(&cmd->t_state_lock);
1998                         cmd->transport_state &= ~CMD_T_SENT;
1999                         spin_unlock_irq(&cmd->t_state_lock);
2000                         transport_generic_request_failure(cmd, cmd->pi_err);
2001                         return -1;
2002                 }
2003                 break;
2004         default:
2005                 break;
2006         }
2007
2008         return 0;
2009 }
2010
2011 static bool target_handle_task_attr(struct se_cmd *cmd)
2012 {
2013         struct se_device *dev = cmd->se_dev;
2014
2015         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2016                 return false;
2017
2018         cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
2019
2020         /*
2021          * Check for HEAD_OF_QUEUE: such commands are never delayed, which
2022          * effectively moves the passed struct se_cmd to the front of the queue.
2023          */
2024         switch (cmd->sam_task_attr) {
2025         case TCM_HEAD_TAG:
2026                 pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
2027                          cmd->t_task_cdb[0]);
2028                 return false;
2029         case TCM_ORDERED_TAG:
2030                 atomic_inc_mb(&dev->dev_ordered_sync);
2031
2032                 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
2033                          cmd->t_task_cdb[0]);
2034
2035                 /*
2036                  * Execute an ORDERED command if no other older commands
2037                  * exist that need to be completed first.
2038                  */
2039                 if (!atomic_read(&dev->simple_cmds))
2040                         return false;
2041                 break;
2042         default:
2043                 /*
2044                  * For SIMPLE and UNTAGGED Task Attribute commands
2045                  */
2046                 atomic_inc_mb(&dev->simple_cmds);
2047                 break;
2048         }
2049
2050         if (atomic_read(&dev->dev_ordered_sync) == 0)
2051                 return false;
2052
2053         spin_lock(&dev->delayed_cmd_lock);
2054         list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2055         spin_unlock(&dev->delayed_cmd_lock);
2056
2057         pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
2058                 cmd->t_task_cdb[0], cmd->sam_task_attr);
2059         return true;
2060 }
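
/*
 * Worked example (editor's note): with dev->simple_cmds == 2 when an
 * ORDERED command arrives, dev_ordered_sync is incremented and the command
 * is parked on delayed_cmd_list (target_handle_task_attr() returns true).
 * Once both SIMPLE commands finish, transport_complete_task_attr() drops
 * simple_cmds to zero and target_restart_delayed_cmds() re-issues the
 * ORDERED command.
 */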
2061
2062 void target_execute_cmd(struct se_cmd *cmd)
2063 {
2064         /*
2065          * Determine if frontend context caller is requesting the stopping of
2066          * this command for frontend exceptions.
2067          *
2068          * If the received CDB has already been aborted stop processing it here.
2069          */
2070         if (target_cmd_interrupted(cmd))
2071                 return;
2072
2073         spin_lock_irq(&cmd->t_state_lock);
2074         cmd->t_state = TRANSPORT_PROCESSING;
2075         cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2076         spin_unlock_irq(&cmd->t_state_lock);
2077
2078         if (target_write_prot_action(cmd))
2079                 return;
2080
2081         if (target_handle_task_attr(cmd)) {
2082                 spin_lock_irq(&cmd->t_state_lock);
2083                 cmd->transport_state &= ~CMD_T_SENT;
2084                 spin_unlock_irq(&cmd->t_state_lock);
2085                 return;
2086         }
2087
2088         __target_execute_cmd(cmd, true);
2089 }
2090 EXPORT_SYMBOL(target_execute_cmd);
2091
2092 /*
2093  * Process all delayed commands up to, and including, the next ORDERED
2094  * command, which establishes another blocking boundary.
2095  */
2096 static void target_restart_delayed_cmds(struct se_device *dev)
2097 {
2098         for (;;) {
2099                 struct se_cmd *cmd;
2100
2101                 spin_lock(&dev->delayed_cmd_lock);
2102                 if (list_empty(&dev->delayed_cmd_list)) {
2103                         spin_unlock(&dev->delayed_cmd_lock);
2104                         break;
2105                 }
2106
2107                 cmd = list_entry(dev->delayed_cmd_list.next,
2108                                  struct se_cmd, se_delayed_node);
2109                 list_del(&cmd->se_delayed_node);
2110                 spin_unlock(&dev->delayed_cmd_lock);
2111
2112                 cmd->transport_state |= CMD_T_SENT;
2113
2114                 __target_execute_cmd(cmd, true);
2115
2116                 if (cmd->sam_task_attr == TCM_ORDERED_TAG)
2117                         break;
2118         }
2119 }
2120
2121 /*
2122  * Called from I/O completion to determine which dormant/delayed
2123  * and ordered cmds need to have their tasks added to the execution queue.
2124  */
2125 static void transport_complete_task_attr(struct se_cmd *cmd)
2126 {
2127         struct se_device *dev = cmd->se_dev;
2128
2129         if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2130                 return;
2131
2132         if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2133                 goto restart;
2134
2135         if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2136                 atomic_dec_mb(&dev->simple_cmds);
2137                 dev->dev_cur_ordered_id++;
2138         } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2139                 dev->dev_cur_ordered_id++;
2140                 pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2141                          dev->dev_cur_ordered_id);
2142         } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2143                 atomic_dec_mb(&dev->dev_ordered_sync);
2144
2145                 dev->dev_cur_ordered_id++;
2146                 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2147                          dev->dev_cur_ordered_id);
2148         }
2149         cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2150
2151 restart:
2152         target_restart_delayed_cmds(dev);
2153 }
2154
2155 static void transport_complete_qf(struct se_cmd *cmd)
2156 {
2157         int ret = 0;
2158
2159         transport_complete_task_attr(cmd);
2160         /*
2161          * If a fabric driver ->write_pending() or ->queue_data_in() callback
2162          * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2163          * the same callbacks should not be retried.  Return CHECK_CONDITION
2164          * if a scsi_status is not already set.
2165          *
2166          * If a fabric driver ->queue_status() has returned non-zero, always
2167          * keep retrying no matter what.
2168          */
2169         if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2170                 if (cmd->scsi_status)
2171                         goto queue_status;
2172
2173                 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2174                 goto queue_status;
2175         }
2176
2177         /*
2178          * Check if we need to send a sense buffer from
2179          * the struct se_cmd in question. We do NOT want
2180          * to take this path if the IO has been marked as
2181          * needing to be treated like a "normal read". This
2182          * is the case if it's a tape read, and any of the
2183          * FM, EOM, or ILI bits are set, but there is no
2184          * sense data.
2185          */
2186         if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2187             cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2188                 goto queue_status;
2189
2190         switch (cmd->data_direction) {
2191         case DMA_FROM_DEVICE:
2192                 /* queue status if not treating this as a normal read */
2193                 if (cmd->scsi_status &&
2194                     !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2195                         goto queue_status;
2196
2197                 trace_target_cmd_complete(cmd);
2198                 ret = cmd->se_tfo->queue_data_in(cmd);
2199                 break;
2200         case DMA_TO_DEVICE:
2201                 if (cmd->se_cmd_flags & SCF_BIDI) {
2202                         ret = cmd->se_tfo->queue_data_in(cmd);
2203                         break;
2204                 }
2205                 /* fall through */
2206         case DMA_NONE:
2207 queue_status:
2208                 trace_target_cmd_complete(cmd);
2209                 ret = cmd->se_tfo->queue_status(cmd);
2210                 break;
2211         default:
2212                 break;
2213         }
2214
2215         if (ret < 0) {
2216                 transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2217                 return;
2218         }
2219         transport_lun_remove_cmd(cmd);
2220         transport_cmd_check_stop_to_fabric(cmd);
2221 }
2222
2223 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2224                                         int err, bool write_pending)
2225 {
2226         /*
2227          * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2228          * ->queue_data_in() callbacks from new process context.
2229          *
2230          * Otherwise for other errors, transport_complete_qf() will send
2231          * CHECK_CONDITION via ->queue_status() instead of attempting to
2232          * retry associated fabric driver data-transfer callbacks.
2233          */
2234         if (err == -EAGAIN || err == -ENOMEM) {
2235                 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2236                                                  TRANSPORT_COMPLETE_QF_OK;
2237         } else {
2238                 pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2239                 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2240         }
2241
2242         spin_lock_irq(&dev->qf_cmd_lock);
2243         list_add_tail(&cmd->se_qf_node, &dev->qf_cmd_list);
2244         atomic_inc_mb(&dev->dev_qf_count);
2245         spin_unlock_irq(&dev->qf_cmd_lock);
2246
2247         schedule_work(&dev->qf_work_queue);
2248 }
2249
2250 static bool target_read_prot_action(struct se_cmd *cmd)
2251 {
2252         switch (cmd->prot_op) {
2253         case TARGET_PROT_DIN_STRIP:
2254                 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2255                         u32 sectors = cmd->data_length >>
2256                                   ilog2(cmd->se_dev->dev_attrib.block_size);
2257
2258                         cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2259                                                      sectors, 0, cmd->t_prot_sg,
2260                                                      0);
2261                         if (cmd->pi_err)
2262                                 return true;
2263                 }
2264                 break;
2265         case TARGET_PROT_DIN_INSERT:
2266                 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2267                         break;
2268
2269                 sbc_dif_generate(cmd);
2270                 break;
2271         default:
2272                 break;
2273         }
2274
2275         return false;
2276 }
2277
2278 static void target_complete_ok_work(struct work_struct *work)
2279 {
2280         struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2281         int ret;
2282
2283         /*
2284          * Check if we need to move delayed/dormant tasks from cmds on the
2285          * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2286          * Attribute.
2287          */
2288         transport_complete_task_attr(cmd);
2289
2290         /*
2291          * Check to schedule QUEUE_FULL work, or execute an existing
2292          * cmd->transport_qf_callback()
2293          */
2294         if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2295                 schedule_work(&cmd->se_dev->qf_work_queue);
2296
2297         /*
2298          * Check if we need to send a sense buffer from
2299          * the struct se_cmd in question. We do NOT want
2300          * to take this path if the IO has been marked as
2301          * needing to be treated like a "normal read". This
2302          * is the case if it's a tape read, and any of the
2303          * FM, EOM, or ILI bits are set, but there is no
2304          * sense data.
2305          */
2306         if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2307             cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2308                 WARN_ON(!cmd->scsi_status);
2309                 ret = transport_send_check_condition_and_sense(
2310                                         cmd, 0, 1);
2311                 if (ret)
2312                         goto queue_full;
2313
2314                 transport_lun_remove_cmd(cmd);
2315                 transport_cmd_check_stop_to_fabric(cmd);
2316                 return;
2317         }
2318         /*
2319          * Check for a callback, used by, amongst other things,
2320          * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2321          */
2322         if (cmd->transport_complete_callback) {
2323                 sense_reason_t rc;
2324                 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2325                 bool zero_dl = !(cmd->data_length);
2326                 int post_ret = 0;
2327
2328                 rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2329                 if (!rc && !post_ret) {
2330                         if (caw && zero_dl)
2331                                 goto queue_rsp;
2332
2333                         return;
2334                 } else if (rc) {
2335                         ret = transport_send_check_condition_and_sense(cmd,
2336                                                 rc, 0);
2337                         if (ret)
2338                                 goto queue_full;
2339
2340                         transport_lun_remove_cmd(cmd);
2341                         transport_cmd_check_stop_to_fabric(cmd);
2342                         return;
2343                 }
2344         }
2345
2346 queue_rsp:
2347         switch (cmd->data_direction) {
2348         case DMA_FROM_DEVICE:
2349                 /*
2350                  * if this is a READ-type IO, but SCSI status
2351                  * is set, then skip returning data and just
2352                  * return the status -- unless this IO is marked
2353                  * as needing to be treated as a normal read,
2354                  * in which case we want to go ahead and return
2355                  * the data. This happens, for example, for tape
2356                  * reads with the FM, EOM, or ILI bits set, with
2357                  * no sense data.
2358                  */
2359                 if (cmd->scsi_status &&
2360                     !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2361                         goto queue_status;
2362
2363                 atomic_long_add(cmd->data_length,
2364                                 &cmd->se_lun->lun_stats.tx_data_octets);
2365                 /*
2366                  * Perform READ_STRIP of PI using software emulation when
2367                  * backend had PI enabled, if the transport will not be
2368                  * performing hardware READ_STRIP offload.
2369                  */
2370                 if (target_read_prot_action(cmd)) {
2371                         ret = transport_send_check_condition_and_sense(cmd,
2372                                                 cmd->pi_err, 0);
2373                         if (ret)
2374                                 goto queue_full;
2375
2376                         transport_lun_remove_cmd(cmd);
2377                         transport_cmd_check_stop_to_fabric(cmd);
2378                         return;
2379                 }
2380
2381                 trace_target_cmd_complete(cmd);
2382                 ret = cmd->se_tfo->queue_data_in(cmd);
2383                 if (ret)
2384                         goto queue_full;
2385                 break;
2386         case DMA_TO_DEVICE:
2387                 atomic_long_add(cmd->data_length,
2388                                 &cmd->se_lun->lun_stats.rx_data_octets);
2389                 /*
2390                  * Check if we need to send READ payload for BIDI-COMMAND
2391                  */
2392                 if (cmd->se_cmd_flags & SCF_BIDI) {
2393                         atomic_long_add(cmd->data_length,
2394                                         &cmd->se_lun->lun_stats.tx_data_octets);
2395                         ret = cmd->se_tfo->queue_data_in(cmd);
2396                         if (ret)
2397                                 goto queue_full;
2398                         break;
2399                 }
2400                 /* fall through */
2401         case DMA_NONE:
2402 queue_status:
2403                 trace_target_cmd_complete(cmd);
2404                 ret = cmd->se_tfo->queue_status(cmd);
2405                 if (ret)
2406                         goto queue_full;
2407                 break;
2408         default:
2409                 break;
2410         }
2411
2412         transport_lun_remove_cmd(cmd);
2413         transport_cmd_check_stop_to_fabric(cmd);
2414         return;
2415
2416 queue_full:
2417         pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2418                 " data_direction: %d\n", cmd, cmd->data_direction);
2419
2420         transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2421 }
2422
2423 void target_free_sgl(struct scatterlist *sgl, int nents)
2424 {
2425         sgl_free_n_order(sgl, nents, 0);
2426 }
2427 EXPORT_SYMBOL(target_free_sgl);
2428
2429 static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2430 {
2431         /*
2432          * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2433          * emulation, and free + reset pointers if necessary.
2434          */
2435         if (!cmd->t_data_sg_orig)
2436                 return;
2437
2438         kfree(cmd->t_data_sg);
2439         cmd->t_data_sg = cmd->t_data_sg_orig;
2440         cmd->t_data_sg_orig = NULL;
2441         cmd->t_data_nents = cmd->t_data_nents_orig;
2442         cmd->t_data_nents_orig = 0;
2443 }
2444
2445 static inline void transport_free_pages(struct se_cmd *cmd)
2446 {
2447         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2448                 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2449                 cmd->t_prot_sg = NULL;
2450                 cmd->t_prot_nents = 0;
2451         }
2452
2453         if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2454                 /*
2455                  * Release special case READ buffer payload required for
2456                  * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2457                  */
2458                 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2459                         target_free_sgl(cmd->t_bidi_data_sg,
2460                                            cmd->t_bidi_data_nents);
2461                         cmd->t_bidi_data_sg = NULL;
2462                         cmd->t_bidi_data_nents = 0;
2463                 }
2464                 transport_reset_sgl_orig(cmd);
2465                 return;
2466         }
2467         transport_reset_sgl_orig(cmd);
2468
2469         target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2470         cmd->t_data_sg = NULL;
2471         cmd->t_data_nents = 0;
2472
2473         target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2474         cmd->t_bidi_data_sg = NULL;
2475         cmd->t_bidi_data_nents = 0;
2476 }
2477
2478 void *transport_kmap_data_sg(struct se_cmd *cmd)
2479 {
2480         struct scatterlist *sg = cmd->t_data_sg;
2481         struct page **pages;
2482         int i;
2483
2484         /*
2485          * We need to take into account a possible offset here for fabrics like
2486          * tcm_loop who may be using a contig buffer from the SCSI midlayer for
2487          * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2488          */
2489         if (!cmd->t_data_nents)
2490                 return NULL;
2491
2492         BUG_ON(!sg);
2493         if (cmd->t_data_nents == 1)
2494                 return kmap(sg_page(sg)) + sg->offset;
2495
2496         /* >1 page. use vmap */
2497         pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2498         if (!pages)
2499                 return NULL;
2500
2501         /* convert sg[] to pages[] */
2502         for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2503                 pages[i] = sg_page(sg);
2504         }
2505
2506         cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
2507         kfree(pages);
2508         if (!cmd->t_data_vmap)
2509                 return NULL;
2510
2511         return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2512 }
2513 EXPORT_SYMBOL(transport_kmap_data_sg);
2514
2515 void transport_kunmap_data_sg(struct se_cmd *cmd)
2516 {
2517         if (!cmd->t_data_nents) {
2518                 return;
2519         } else if (cmd->t_data_nents == 1) {
2520                 kunmap(sg_page(cmd->t_data_sg));
2521                 return;
2522         }
2523
2524         vunmap(cmd->t_data_vmap);
2525         cmd->t_data_vmap = NULL;
2526 }
2527 EXPORT_SYMBOL(transport_kunmap_data_sg);
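
/*
 * Editor's sketch (hypothetical my_* name): the kmap/kunmap pair as used by
 * CDB emulation code that needs a linear view of the data SGL.  Guarded out
 * so it is never compiled.
 */
#if 0
static sense_reason_t my_emulate_cdb(struct se_cmd *cmd)
{
	unsigned char *buf;

	buf = transport_kmap_data_sg(cmd);	/* NULL if no data or no mem */
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	buf[0] = 0x7f;				/* fill in response payload */

	transport_kunmap_data_sg(cmd);		/* always pair with the kmap */
	return 0;
}
#endif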
2528
2529 int
2530 target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2531                  bool zero_page, bool chainable)
2532 {
2533         gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
2534
2535         *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2536         return *sgl ? 0 : -ENOMEM;
2537 }
2538 EXPORT_SYMBOL(target_alloc_sgl);
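
/*
 * Editor's sketch (hypothetical my_* name): allocating a zeroed, flat
 * (non-chainable) scatterlist with target_alloc_sgl() and releasing it with
 * target_free_sgl().  Guarded out so it is never compiled.
 */
#if 0
static int my_alloc_bounce_sgl(u32 length)
{
	struct scatterlist *sgl;
	unsigned int nents;

	if (target_alloc_sgl(&sgl, &nents, length, true, false) < 0)
		return -ENOMEM;

	/* ... hand sgl/nents to the backend ... */

	target_free_sgl(sgl, nents);
	return 0;
}
#endif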
2539
2540 /*
2541  * Allocate any required resources to execute the command.  For writes we
2542  * might not have the payload yet, so notify the fabric via a call to
2543  * ->write_pending instead. Otherwise place it on the execution queue.
2544  */
2545 sense_reason_t
2546 transport_generic_new_cmd(struct se_cmd *cmd)
2547 {
2548         unsigned long flags;
2549         int ret = 0;
2550         bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2551
2552         if (cmd->prot_op != TARGET_PROT_NORMAL &&
2553             !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2554                 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2555                                        cmd->prot_length, true, false);
2556                 if (ret < 0)
2557                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2558         }
2559
2560         /*
2561          * Determine if the TCM fabric module has already allocated physical
2562          * memory, and is directly calling transport_generic_map_mem_to_cmd()
2563          * beforehand.
2564          */
2565         if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2566             cmd->data_length) {
2567
2568                 if ((cmd->se_cmd_flags & SCF_BIDI) ||
2569                     (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2570                         u32 bidi_length;
2571
2572                         if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2573                                 bidi_length = cmd->t_task_nolb *
2574                                               cmd->se_dev->dev_attrib.block_size;
2575                         else
2576                                 bidi_length = cmd->data_length;
2577
2578                         ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2579                                                &cmd->t_bidi_data_nents,
2580                                                bidi_length, zero_flag, false);
2581                         if (ret < 0)
2582                                 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2583                 }
2584
2585                 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2586                                        cmd->data_length, zero_flag, false);
2587                 if (ret < 0)
2588                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2589         } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2590                     cmd->data_length) {
2591                 /*
2592                  * Special case for COMPARE_AND_WRITE with fabrics
2593                  * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2594                  */
2595                 u32 caw_length = cmd->t_task_nolb *
2596                                  cmd->se_dev->dev_attrib.block_size;
2597
2598                 ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2599                                        &cmd->t_bidi_data_nents,
2600                                        caw_length, zero_flag, false);
2601                 if (ret < 0)
2602                         return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2603         }
2604         /*
2605          * If this command is not a write we can execute it right here,
2606          * for write buffers we need to notify the fabric driver first
2607          * and let it call back once the write buffers are ready.
2608          */
2609         target_add_to_state_list(cmd);
2610         if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2611                 target_execute_cmd(cmd);
2612                 return 0;
2613         }
2614
2615         spin_lock_irqsave(&cmd->t_state_lock, flags);
2616         cmd->t_state = TRANSPORT_WRITE_PENDING;
2617         /*
2618          * Determine if frontend context caller is requesting the stopping of
2619          * this command for frontend exceptions.
2620          */
2621         if (cmd->transport_state & CMD_T_STOP &&
2622             !cmd->se_tfo->write_pending_must_be_called) {
2623                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2624                          __func__, __LINE__, cmd->tag);
2625
2626                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2627
2628                 complete_all(&cmd->t_transport_stop_comp);
2629                 return 0;
2630         }
2631         cmd->transport_state &= ~CMD_T_ACTIVE;
2632         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2633
2634         ret = cmd->se_tfo->write_pending(cmd);
2635         if (ret)
2636                 goto queue_full;
2637
2638         return 0;
2639
2640 queue_full:
2641         pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2642         transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2643         return 0;
2644 }
2645 EXPORT_SYMBOL(transport_generic_new_cmd);
2646
2647 static void transport_write_pending_qf(struct se_cmd *cmd)
2648 {
2649         unsigned long flags;
2650         int ret;
2651         bool stop;
2652
2653         spin_lock_irqsave(&cmd->t_state_lock, flags);
2654         stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2655         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2656
2657         if (stop) {
2658                 pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2659                         __func__, __LINE__, cmd->tag);
2660                 complete_all(&cmd->t_transport_stop_comp);
2661                 return;
2662         }
2663
2664         ret = cmd->se_tfo->write_pending(cmd);
2665         if (ret) {
2666                 pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2667                          cmd);
2668                 transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2669         }
2670 }
2671
2672 static bool
2673 __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2674                            unsigned long *flags);
2675
2676 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2677 {
2678         unsigned long flags;
2679
2680         spin_lock_irqsave(&cmd->t_state_lock, flags);
2681         __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2682         spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2683 }
2684
2685 /*
2686  * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2687  * finished.
2688  */
2689 void target_put_cmd_and_wait(struct se_cmd *cmd)
2690 {
2691         DECLARE_COMPLETION_ONSTACK(compl);
2692
2693         WARN_ON_ONCE(cmd->abrt_compl);
2694         cmd->abrt_compl = &compl;
2695         target_put_sess_cmd(cmd);
2696         wait_for_completion(&compl);
2697 }
2698
2699 /*
2700  * This function is called by frontend drivers after processing of a command
2701  * has finished.
2702  *
2703  * The protocol for ensuring that either the regular frontend command
2704  * processing flow or target_handle_abort() code drops one reference is as
2705  * follows:
2706  * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
2707  *   the frontend driver to call this function synchronously or asynchronously.
2708  *   That will cause one reference to be dropped.
2709  * - During regular command processing the target core sets CMD_T_COMPLETE
2710  *   before invoking one of the .queue_*() functions.
2711  * - The code that aborts commands skips commands and TMFs for which
2712  *   CMD_T_COMPLETE has been set.
2713  * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
2714  *   commands that will be aborted.
2715  * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set,
2716  *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
2717  * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2718  *   be called and will drop a reference.
2719  * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2720  *   will be called. target_handle_abort() will drop the final reference.
2721  */
2722 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2723 {
2724         DECLARE_COMPLETION_ONSTACK(compl);
2725         int ret = 0;
2726         bool aborted = false, tas = false;
2727
2728         if (wait_for_tasks)
2729                 target_wait_free_cmd(cmd, &aborted, &tas);
2730
2731         if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
2732                 /*
2733                  * Handle WRITE failure case where transport_generic_new_cmd()
2734                  * has already added se_cmd to state_list, but fabric has
2735                  * failed command before I/O submission.
2736                  */
2737                 if (cmd->state_active)
2738                         target_remove_from_state_list(cmd);
2739
2740                 if (cmd->se_lun)
2741                         transport_lun_remove_cmd(cmd);
2742         }
2743         if (aborted)
2744                 cmd->free_compl = &compl;
2745         ret = target_put_sess_cmd(cmd);
2746         if (aborted) {
2747                 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2748                 wait_for_completion(&compl);
2749                 ret = 1;
2750         }
2751         return ret;
2752 }
2753 EXPORT_SYMBOL(transport_generic_free_cmd);
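
/*
 * Editor's sketch (hypothetical my_* name): one possible response-ack path
 * for a fabric using TARGET_SCF_ACK_KREF, dropping the acknowledgement
 * reference first and the submission reference last.  Guarded out so it is
 * never compiled.
 */
#if 0
static void my_fabric_response_acked(struct my_fabric_cmd *fc)
{
	target_put_sess_cmd(&fc->se_cmd);		/* ACK_KREF reference */
	transport_generic_free_cmd(&fc->se_cmd, false);	/* submit reference */
}
#endif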
2754
2755 /**
2756  * target_get_sess_cmd - Add command to active ->sess_cmd_list
2757  * @se_cmd:     command descriptor to add
2758  * @ack_kref:   Signal that the fabric will perform an acknowledging target_put_sess_cmd()
2759  */
2760 int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2761 {
2762         struct se_session *se_sess = se_cmd->se_sess;
2763         unsigned long flags;
2764         int ret = 0;
2765
2766         /*
2767          * Add a second kref if the fabric caller is expecting to handle
2768          * fabric acknowledgement that requires two target_put_sess_cmd()
2769          * invocations before se_cmd descriptor release.
2770          */
2771         if (ack_kref) {
2772                 if (!kref_get_unless_zero(&se_cmd->cmd_kref))
2773                         return -EINVAL;
2774
2775                 se_cmd->se_cmd_flags |= SCF_ACK_KREF;
2776         }
2777
2778         spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
2779         if (se_sess->sess_tearing_down) {
2780                 ret = -ESHUTDOWN;
2781                 goto out;
2782         }
2783         list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
2784         percpu_ref_get(&se_sess->cmd_count);
2785 out:
2786         spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
2787
2788         if (ret && ack_kref)
2789                 target_put_sess_cmd(se_cmd);
2790
2791         return ret;
2792 }
2793 EXPORT_SYMBOL(target_get_sess_cmd);
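
/*
 * Editor's sketch of the reference pairing described above: with ack_kref
 * the command holds two references, so the fabric must balance with two
 * target_put_sess_cmd() calls overall; a non-zero return means the session
 * is tearing down and any extra reference was already dropped.  Guarded out
 * so it is never compiled.
 */
#if 0
static int my_track_cmd(struct se_cmd *se_cmd)
{
	int ret = target_get_sess_cmd(se_cmd, true);

	if (ret)	/* e.g. -ESHUTDOWN: session teardown in progress */
		return ret;
	/* ... submit to target-core; later: two target_put_sess_cmd() ... */
	return 0;
}
#endif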
2794
2795 static void target_free_cmd_mem(struct se_cmd *cmd)
2796 {
2797         transport_free_pages(cmd);
2798
2799         if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
2800                 core_tmr_release_req(cmd->se_tmr_req);
2801         if (cmd->t_task_cdb != cmd->__t_task_cdb)
2802                 kfree(cmd->t_task_cdb);
2803 }
2804
static void target_release_cmd_kref(struct kref *kref)
{
        struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
        struct se_session *se_sess = se_cmd->se_sess;
        struct completion *free_compl = se_cmd->free_compl;
        struct completion *abrt_compl = se_cmd->abrt_compl;
        unsigned long flags;

        if (se_sess) {
                spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
                list_del_init(&se_cmd->se_cmd_list);
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
        }

        target_free_cmd_mem(se_cmd);
        se_cmd->se_tfo->release_cmd(se_cmd);
        if (free_compl)
                complete(free_compl);
        if (abrt_compl)
                complete(abrt_compl);

        /* Match the NULL check above: sessionless commands hold no cmd_count ref. */
        if (se_sess)
                percpu_ref_put(&se_sess->cmd_count);
}

/**
 * target_put_sess_cmd - decrease the command reference count
 * @se_cmd:     command to drop a reference from
 *
 * Returns 1 if and only if this target_put_sess_cmd() call caused the
 * refcount to drop to zero. Returns zero otherwise.
 */
int target_put_sess_cmd(struct se_cmd *se_cmd)
{
        return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
}
EXPORT_SYMBOL(target_put_sess_cmd);
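
/*
 * Example pairing (a minimal sketch, not taken from any in-tree fabric
 * driver): a fabric that passes ack_kref = true holds an extra reference
 * that it must balance with its own target_put_sess_cmd() once the
 * transport-level acknowledgement has been received; the initial
 * reference is typically dropped via transport_generic_free_cmd():
 *
 *	ret = target_get_sess_cmd(se_cmd, true);
 *	if (ret)
 *		return ret;
 *	// ... submit se_cmd and, later, receive the fabric ack ...
 *	target_put_sess_cmd(se_cmd);            // drop the SCF_ACK_KREF reference
 *	transport_generic_free_cmd(se_cmd, 0);  // drop the initial reference
 */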

static const char *data_dir_name(enum dma_data_direction d)
{
        switch (d) {
        case DMA_BIDIRECTIONAL: return "BIDI";
        case DMA_TO_DEVICE:     return "WRITE";
        case DMA_FROM_DEVICE:   return "READ";
        case DMA_NONE:          return "NONE";
        }

        return "(?)";
}

static const char *cmd_state_name(enum transport_state_table t)
{
        switch (t) {
        case TRANSPORT_NO_STATE:        return "NO_STATE";
        case TRANSPORT_NEW_CMD:         return "NEW_CMD";
        case TRANSPORT_WRITE_PENDING:   return "WRITE_PENDING";
        case TRANSPORT_PROCESSING:      return "PROCESSING";
        case TRANSPORT_COMPLETE:        return "COMPLETE";
        case TRANSPORT_ISTATE_PROCESSING:
                                        return "ISTATE_PROCESSING";
        case TRANSPORT_COMPLETE_QF_WP:  return "COMPLETE_QF_WP";
        case TRANSPORT_COMPLETE_QF_OK:  return "COMPLETE_QF_OK";
        case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR";
        }

        return "(?)";
}

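/*
 * Append @txt to the comma-separated string at *str, reallocating with
 * GFP_ATOMIC since this can run in atomic context. On allocation failure
 * *str is set to NULL and any previously accumulated text is dropped.
 */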
static void target_append_str(char **str, const char *txt)
{
        char *prev = *str;

        *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
                kstrdup(txt, GFP_ATOMIC);
        kfree(prev);
}

/*
 * Convert a transport state bitmask into a string. The caller is
 * responsible for freeing the returned pointer.
 */
static char *target_ts_to_str(u32 ts)
{
        char *str = NULL;

        if (ts & CMD_T_ABORTED)
                target_append_str(&str, "aborted");
        if (ts & CMD_T_ACTIVE)
                target_append_str(&str, "active");
        if (ts & CMD_T_COMPLETE)
                target_append_str(&str, "complete");
        if (ts & CMD_T_SENT)
                target_append_str(&str, "sent");
        if (ts & CMD_T_STOP)
                target_append_str(&str, "stop");
        if (ts & CMD_T_FABRIC_STOP)
                target_append_str(&str, "fabric_stop");

        return str;
}

static const char *target_tmf_name(enum tcm_tmreq_table tmf)
{
        switch (tmf) {
        case TMR_ABORT_TASK:            return "ABORT_TASK";
        case TMR_ABORT_TASK_SET:        return "ABORT_TASK_SET";
        case TMR_CLEAR_ACA:             return "CLEAR_ACA";
        case TMR_CLEAR_TASK_SET:        return "CLEAR_TASK_SET";
        case TMR_LUN_RESET:             return "LUN_RESET";
        case TMR_TARGET_WARM_RESET:     return "TARGET_WARM_RESET";
        case TMR_TARGET_COLD_RESET:     return "TARGET_COLD_RESET";
        case TMR_UNKNOWN:               break;
        }
        return "(?)";
}

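/**
 * target_show_cmd - pretty-print the state of a SCSI command or TMR
 * @pfx: prefix to place in front of the log line
 * @cmd: command or task management request to show
 */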
void target_show_cmd(const char *pfx, struct se_cmd *cmd)
{
        char *ts_str = target_ts_to_str(cmd->transport_state);
        const u8 *cdb = cmd->t_task_cdb;
        struct se_tmr_req *tmf = cmd->se_tmr_req;

        if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
                pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
                         pfx, cdb[0], cdb[1], cmd->tag,
                         data_dir_name(cmd->data_direction),
                         cmd->se_tfo->get_cmd_state(cmd),
                         cmd_state_name(cmd->t_state), cmd->data_length,
                         kref_read(&cmd->cmd_kref), ts_str);
        } else {
                pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
                         pfx, target_tmf_name(tmf->function), cmd->tag,
                         tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
                         cmd_state_name(cmd->t_state),
                         kref_read(&cmd->cmd_kref), ts_str);
        }
        kfree(ts_str);
}
EXPORT_SYMBOL(target_show_cmd);

/**
 * target_sess_cmd_list_set_waiting - Set sess_tearing_down so no new commands are queued.
 * @se_sess:    session to flag
 */
void target_sess_cmd_list_set_waiting(struct se_session *se_sess)
{
        unsigned long flags;

        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        se_sess->sess_tearing_down = 1;
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);

        percpu_ref_kill(&se_sess->cmd_count);
}
EXPORT_SYMBOL(target_sess_cmd_list_set_waiting);

/**
 * target_wait_for_sess_cmds - Wait for outstanding commands
 * @se_sess:    session to wait for active I/O
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
        struct se_cmd *cmd;
        int ret;

        WARN_ON_ONCE(!se_sess->sess_tearing_down);

        do {
                ret = wait_event_timeout(se_sess->cmd_list_wq,
                                percpu_ref_is_zero(&se_sess->cmd_count),
                                180 * HZ);
                list_for_each_entry(cmd, &se_sess->sess_cmd_list, se_cmd_list)
                        target_show_cmd("session shutdown: still waiting for ",
                                        cmd);
        } while (ret <= 0);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);

/*
 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
 * all references to the LUN have been released. Called during LUN shutdown.
 */
void transport_clear_lun_ref(struct se_lun *lun)
{
        percpu_ref_kill(&lun->lun_ref);
        wait_for_completion(&lun->lun_shutdown_comp);
}

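/*
 * Core of transport_wait_for_tasks(): set CMD_T_STOP and sleep until the
 * command completes t_transport_stop_comp. Called with t_state_lock held;
 * the lock is dropped and reacquired around the wait. Returns true if this
 * function waited for the command to stop, false if no wait was needed.
 */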
static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
                           bool *aborted, bool *tas, unsigned long *flags)
        __releases(&cmd->t_state_lock)
        __acquires(&cmd->t_state_lock)
{
        assert_spin_locked(&cmd->t_state_lock);
        WARN_ON_ONCE(!irqs_disabled());

        if (fabric_stop)
                cmd->transport_state |= CMD_T_FABRIC_STOP;

        if (cmd->transport_state & CMD_T_ABORTED)
                *aborted = true;

        if (cmd->transport_state & CMD_T_TAS)
                *tas = true;

        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                return false;

        if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
            !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
                return false;

        if (!(cmd->transport_state & CMD_T_ACTIVE))
                return false;

        if (fabric_stop && *aborted)
                return false;

        cmd->transport_state |= CMD_T_STOP;

        target_show_cmd("wait_for_tasks: Stopping ", cmd);

        spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

        while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
                                            180 * HZ))
                target_show_cmd("wait for tasks: ", cmd);

        spin_lock_irqsave(&cmd->t_state_lock, *flags);
        cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

        pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
                 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);

        return true;
}

/**
 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
 * @cmd: command to wait on
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
        unsigned long flags;
        bool ret, aborted = false, tas = false;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

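/* Fixed-format sense data components for a single sense_reason_t code. */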
struct sense_info {
        u8 key;
        u8 asc;
        u8 ascq;
        bool add_sector_info;
};

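/*
 * Map each sense_reason_t code onto the T10 sense key, additional sense
 * code (ASC) and additional sense code qualifier (ASCQ) reported to the
 * initiator. Entries with .add_sector_info set also report the offending
 * LBA in the sense data.
 */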
static const struct sense_info sense_info_table[] = {
        [TCM_NO_SENSE] = {
                .key = NOT_READY
        },
        [TCM_NON_EXISTENT_LUN] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
        },
        [TCM_UNSUPPORTED_SCSI_OPCODE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
        },
        [TCM_SECTOR_COUNT_TOO_MANY] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x20, /* INVALID COMMAND OPERATION CODE */
        },
        [TCM_UNKNOWN_MODE_PAGE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x24, /* INVALID FIELD IN CDB */
        },
        [TCM_CHECK_CONDITION_ABORT_CMD] = {
                .key = ABORTED_COMMAND,
                .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
                .ascq = 0x03,
        },
        [TCM_INCORRECT_AMOUNT_OF_DATA] = {
                .key = ABORTED_COMMAND,
                .asc = 0x0c, /* WRITE ERROR */
                .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
        },
        [TCM_INVALID_CDB_FIELD] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x24, /* INVALID FIELD IN CDB */
        },
        [TCM_INVALID_PARAMETER_LIST] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
        },
        [TCM_TOO_MANY_TARGET_DESCS] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26,
                .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
        },
        [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26,
                .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
        },
        [TCM_TOO_MANY_SEGMENT_DESCS] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26,
                .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
        },
        [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x26,
                .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
        },
        [TCM_PARAMETER_LIST_LENGTH_ERROR] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
        },
        [TCM_UNEXPECTED_UNSOLICITED_DATA] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x0c, /* WRITE ERROR */
                .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
        },
        [TCM_SERVICE_CRC_ERROR] = {
                .key = ABORTED_COMMAND,
                .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
                .ascq = 0x05, /* N/A */
        },
        [TCM_SNACK_REJECTED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x11, /* READ ERROR */
                .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
        },
        [TCM_WRITE_PROTECTED] = {
                .key = DATA_PROTECT,
                .asc = 0x27, /* WRITE PROTECTED */
        },
        [TCM_ADDRESS_OUT_OF_RANGE] = {
                .key = ILLEGAL_REQUEST,
                .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
        },
        [TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
                .key = UNIT_ATTENTION,
        },
        [TCM_CHECK_CONDITION_NOT_READY] = {
                .key = NOT_READY,
        },
        [TCM_MISCOMPARE_VERIFY] = {
                .key = MISCOMPARE,
                .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
                .ascq = 0x00,
        },
        [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x10,
                .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
                .add_sector_info = true,
        },
        [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x10,
                .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
                .add_sector_info = true,
        },
        [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
                .key = ABORTED_COMMAND,
                .asc = 0x10,
                .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
                .add_sector_info = true,
        },
        [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
                .key = COPY_ABORTED,
                .asc = 0x0d,
                .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
        },
        [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
                /*
                 * Returning ILLEGAL REQUEST would cause immediate IO errors on
                 * Solaris initiators.  Returning NOT READY instead means the
                 * operations will be retried a finite number of times and we
                 * can survive intermittent errors.
                 */
                .key = NOT_READY,
                .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
        },
        [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
                /*
                 * From spc4r22 sections 5.7.7 and 5.7.8:
                 * If a PERSISTENT RESERVE OUT command with a REGISTER service
                 * action, a REGISTER AND IGNORE EXISTING KEY service action or
                 * a REGISTER AND MOVE service action is attempted, but there
                 * are insufficient device server resources to complete the
                 * operation, then the command shall be terminated with CHECK
                 * CONDITION status, with the sense key set to ILLEGAL REQUEST,
                 * and the additional sense code set to INSUFFICIENT
                 * REGISTRATION RESOURCES.
                 */
                .key = ILLEGAL_REQUEST,
                .asc = 0x55,
                .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
        },
};

/**
 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
 *   be stored.
 * @reason: LIO sense reason code. If this argument has the value
 *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
 *   dequeuing a unit attention fails due to multiple commands being processed
 *   concurrently, set the command status to BUSY.
 */
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
        const struct sense_info *si;
        u8 *buffer = cmd->sense_buffer;
        int r = (__force int)reason;
        u8 key, asc, ascq;
        bool desc_format = target_sense_desc_format(cmd->se_dev);

        if (r < ARRAY_SIZE(sense_info_table) && sense_info_table[r].key)
                si = &sense_info_table[r];
        else
                si = &sense_info_table[(__force int)
                                       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

        key = si->key;
        if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
                if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
                                                       &ascq)) {
                        cmd->scsi_status = SAM_STAT_BUSY;
                        return;
                }
        } else if (si->asc == 0) {
                WARN_ON_ONCE(cmd->scsi_asc == 0);
                asc = cmd->scsi_asc;
                ascq = cmd->scsi_ascq;
        } else {
                asc = si->asc;
                ascq = si->ascq;
        }

        cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
        cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
        cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
        scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
        if (si->add_sector_info)
                WARN_ON_ONCE(scsi_set_sense_information(buffer,
                                                        cmd->scsi_sense_length,
                                                        cmd->bad_sector) < 0);
}

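/*
 * Build sense data for @reason (unless @from_transport indicates the sense
 * data originates from the transport itself) and queue CHECK CONDITION
 * status back to the fabric. The SCF_SENT_CHECK_CONDITION flag guarantees
 * the status is sent at most once per command.
 */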
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
                sense_reason_t reason, int from_transport)
{
        unsigned long flags;

        WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return 0;
        }
        cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (!from_transport)
                translate_sense_reason(cmd, reason);

        trace_target_cmd_complete(cmd);
        return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

/**
 * target_send_busy - Send SCSI BUSY status back to the initiator
 * @cmd: SCSI command for which to send a BUSY reply.
 *
 * Note: Only call this function if target_submit_cmd*() failed.
 */
int target_send_busy(struct se_cmd *cmd)
{
        WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

        cmd->scsi_status = SAM_STAT_BUSY;
        trace_target_cmd_complete(cmd);
        return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(target_send_busy);

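/*
 * Work item that executes a task management request: dispatch on
 * tmr->function and queue the TMR response back to the fabric, unless the
 * command was aborted before or during processing.
 */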
static void target_tmr_work(struct work_struct *work)
{
        struct se_cmd *cmd = container_of(work, struct se_cmd, work);
        struct se_device *dev = cmd->se_dev;
        struct se_tmr_req *tmr = cmd->se_tmr_req;
        int ret;

        if (cmd->transport_state & CMD_T_ABORTED)
                goto aborted;

        switch (tmr->function) {
        case TMR_ABORT_TASK:
                core_tmr_abort_task(dev, tmr, cmd->se_sess);
                break;
        case TMR_ABORT_TASK_SET:
        case TMR_CLEAR_ACA:
        case TMR_CLEAR_TASK_SET:
                tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
                break;
        case TMR_LUN_RESET:
                ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
                tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
                                         TMR_FUNCTION_REJECTED;
                if (tmr->response == TMR_FUNCTION_COMPLETE) {
                        target_ua_allocate_lun(cmd->se_sess->se_node_acl,
                                               cmd->orig_fe_lun, 0x29,
                                               ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
                }
                break;
        case TMR_TARGET_WARM_RESET:
        case TMR_TARGET_COLD_RESET:
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        default:
                pr_err("Unknown TMR function: 0x%02x.\n",
                                tmr->function);
                tmr->response = TMR_FUNCTION_REJECTED;
                break;
        }

        if (cmd->transport_state & CMD_T_ABORTED)
                goto aborted;

        cmd->se_tfo->queue_tm_rsp(cmd);

        transport_lun_remove_cmd(cmd);
        transport_cmd_check_stop_to_fabric(cmd);
        return;

aborted:
        target_handle_abort(cmd);
}

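/**
 * transport_generic_handle_tmr - hand a task management request to the core
 * @cmd: command embedding the TMR to execute
 *
 * Marks the command active and schedules target_tmr_work(), or completes
 * the abort path immediately if CMD_T_ABORTED has already been set.
 */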
int transport_generic_handle_tmr(struct se_cmd *cmd)
{
        unsigned long flags;
        bool aborted = false;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->transport_state & CMD_T_ABORTED) {
                aborted = true;
        } else {
                cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
                cmd->transport_state |= CMD_T_ACTIVE;
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (aborted) {
                pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
                                    cmd->se_tmr_req->function,
                                    cmd->se_tmr_req->ref_task_tag, cmd->tag);
                target_handle_abort(cmd);
                return 0;
        }

        INIT_WORK(&cmd->work, target_tmr_work);
        schedule_work(&cmd->work);
        return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

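/*
 * Report whether a write cache is enabled for @dev, preferring the
 * backend's ->get_write_cache() callback over the emulate_write_cache
 * device attribute.
 */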
bool
target_check_wce(struct se_device *dev)
{
        bool wce = false;

        if (dev->transport->get_write_cache)
                wce = dev->transport->get_write_cache(dev);
        else if (dev->dev_attrib.emulate_write_cache > 0)
                wce = true;

        return wce;
}

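/*
 * FUA (Force Unit Access) writes are only meaningful when a write cache is
 * present, so report FUA support only if both a write cache and the
 * emulate_fua_write attribute are enabled.
 */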
bool
target_check_fua(struct se_device *dev)
{
        return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}