scsi: target: split target_submit_cmd_map_sgls into target_init_cmd/target_submit_prep/target_submit and add workqueue-based cmd submission
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 5ecb9f1..8fbfe75 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -41,6 +41,7 @@
 #include <trace/events/target.h>
 
 static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_ua_cache;
 struct kmem_cache *t10_pr_reg_cache;
@@ -129,8 +130,15 @@ int init_se_kmem_caches(void)
        if (!target_completion_wq)
                goto out_free_lba_map_mem_cache;
 
+       target_submission_wq = alloc_workqueue("target_submission",
+                                              WQ_MEM_RECLAIM, 0);
+       if (!target_submission_wq)
+               goto out_free_completion_wq;
+
        return 0;
 
+out_free_completion_wq:
+       destroy_workqueue(target_completion_wq);
 out_free_lba_map_mem_cache:
        kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 out_free_lba_map_cache:
@@ -153,6 +161,7 @@ out:
 
 void release_se_kmem_caches(void)
 {
+       destroy_workqueue(target_submission_wq);
        destroy_workqueue(target_completion_wq);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -848,7 +857,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 /* May be called from interrupt context so must not sleep. */
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-       int success;
+       struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+       int success, cpu;
        unsigned long flags;
 
        if (target_cmd_interrupted(cmd))
@@ -875,7 +885,13 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 
        INIT_WORK(&cmd->work, success ? target_complete_ok_work :
                  target_complete_failure_work);
-       queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+
+       if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+               cpu = cmd->cpuid;
+       else
+               cpu = wwn->cmd_compl_affinity;
+
+       queue_work_on(cpu, target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
@@ -1304,7 +1320,7 @@ target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
  * Compare the data buffer size from the CDB with the data buffer limit from the transport
  * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
  *
- * Note: target drivers set @cmd->data_length by calling transport_init_se_cmd().
+ * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
  *
  * Return: TCM_NO_SENSE
  */
@@ -1371,7 +1387,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
  *
  * Preserves the value of @cmd->tag.
  */
-void transport_init_se_cmd(
+void __target_init_cmd(
        struct se_cmd *cmd,
        const struct target_core_fabric_ops *tfo,
        struct se_session *se_sess,
@@ -1382,7 +1398,6 @@ void transport_init_se_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_delayed_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
-       INIT_LIST_HEAD(&cmd->se_cmd_list);
        INIT_LIST_HEAD(&cmd->state_list);
        init_completion(&cmd->t_transport_stop_comp);
        cmd->free_compl = NULL;
@@ -1391,6 +1406,7 @@ void transport_init_se_cmd(
        INIT_WORK(&cmd->work, NULL);
        kref_init(&cmd->cmd_kref);
 
+       cmd->t_task_cdb = &cmd->__t_task_cdb[0];
        cmd->se_tfo = tfo;
        cmd->se_sess = se_sess;
        cmd->data_length = data_length;
@@ -1404,7 +1420,7 @@ void transport_init_se_cmd(
 
        cmd->state_active = false;
 }
-EXPORT_SYMBOL(transport_init_se_cmd);
+EXPORT_SYMBOL(__target_init_cmd);
 
 static sense_reason_t
 transport_check_alloc_task_attr(struct se_cmd *cmd)
@@ -1428,11 +1444,10 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 }
 
 sense_reason_t
-target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
 {
        sense_reason_t ret;
 
-       cmd->t_task_cdb = &cmd->__t_task_cdb[0];
        /*
         * Ensure that the received CDB is less than the max (252 + 8) bytes
         * for VARIABLE_LENGTH_CMD
@@ -1450,8 +1465,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
         * setup the pointer from __t_task_cdb to t_task_cdb.
         */
        if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
-               cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
-                                               GFP_KERNEL);
+               cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
                if (!cmd->t_task_cdb) {
                        pr_err("Unable to allocate cmd->t_task_cdb"
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
@@ -1573,46 +1587,31 @@ transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
 }
 
 /**
- * target_submit_cmd_map_sgls - lookup unpacked lun and submit uninitialized
- *                      se_cmd + use pre-allocated SGL memory.
- *
- * @se_cmd: command descriptor to submit
+ * target_init_cmd - initialize se_cmd
+ * @se_cmd: command descriptor to init
  * @se_sess: associated se_sess for endpoint
- * @cdb: pointer to SCSI CDB
  * @sense: pointer to SCSI sense buffer
  * @unpacked_lun: unpacked LUN to reference for struct se_lun
  * @data_length: fabric expected data transfer length
  * @task_attr: SAM task attribute
  * @data_dir: DMA data direction
  * @flags: flags for command submission from target_sc_flags_tables
- * @sgl: struct scatterlist memory for unidirectional mapping
- * @sgl_count: scatterlist count for unidirectional mapping
- * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
- * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
- * @sgl_prot: struct scatterlist memory protection information
- * @sgl_prot_count: scatterlist count for protection information
  *
  * Task tags are supported if the caller has set @se_cmd->tag.
  *
- * Returns non zero to signal active I/O shutdown failure.  All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
+ * Returns:
+ *     - less than zero to signal active I/O shutdown failure.
+ *     - zero on success.
  *
- * This may only be called from process context, and also currently
- * assumes internal allocation of fabric payload buffer by target-core.
+ * If the fabric driver calls target_stop_session, then it must check the
+ * return code and handle failures. This will never fail for other drivers,
+ * and the return code can be ignored.
  */
-int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess,
-               unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
-               u32 data_length, int task_attr, int data_dir, int flags,
-               struct scatterlist *sgl, u32 sgl_count,
-               struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
-               struct scatterlist *sgl_prot, u32 sgl_prot_count)
+int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+                   unsigned char *sense, u64 unpacked_lun,
+                   u32 data_length, int task_attr, int data_dir, int flags)
 {
        struct se_portal_group *se_tpg;
-       sense_reason_t rc;
-       int ret;
-
-       might_sleep();
 
        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);
@@ -1620,53 +1619,72 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
 
        if (flags & TARGET_SCF_USE_CPUID)
                se_cmd->se_cmd_flags |= SCF_USE_CPUID;
+       /*
+        * Signal bidirectional data payloads to target-core
+        */
+       if (flags & TARGET_SCF_BIDI_OP)
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+
+       if (flags & TARGET_SCF_UNKNOWN_SIZE)
+               se_cmd->unknown_data_length = 1;
        /*
         * Initialize se_cmd for target operation.  From this point
         * exceptions are handled by sending exception status via
         * target_core_fabric_ops->queue_status() callback
         */
-       transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                               data_length, data_dir, task_attr, sense,
-                               unpacked_lun);
+       __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
+                         data_dir, task_attr, sense, unpacked_lun);
 
-       if (flags & TARGET_SCF_UNKNOWN_SIZE)
-               se_cmd->unknown_data_length = 1;
        /*
         * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
         * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
         * kref_put() to happen during fabric packet acknowledgement.
         */
-       ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
-       if (ret)
-               return ret;
-       /*
-        * Signal bidirectional data payloads to target-core
-        */
-       if (flags & TARGET_SCF_BIDI_OP)
-               se_cmd->se_cmd_flags |= SCF_BIDI;
+       return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
+}
+EXPORT_SYMBOL_GPL(target_init_cmd);
 
-       rc = target_cmd_init_cdb(se_cmd, cdb);
-       if (rc) {
-               transport_send_check_condition_and_sense(se_cmd, rc, 0);
-               target_put_sess_cmd(se_cmd);
-               return 0;
-       }
+/**
+ * target_submit_prep - prepare cmd for submission
+ * @se_cmd: command descriptor to prep
+ * @cdb: pointer to SCSI CDB
+ * @sgl: struct scatterlist memory for unidirectional mapping
+ * @sgl_count: scatterlist count for unidirectional mapping
+ * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
+ * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
+ * @sgl_prot: struct scatterlist memory protection information
+ * @sgl_prot_count: scatterlist count for protection information
+ * @gfp: gfp allocation type
+ *
+ * Returns:
+ *     - less than zero to signal failure.
+ *     - zero on success.
+ *
+ * If failure is returned, LIO will use the caller's queue_status to complete
+ * the cmd.
+ */
+int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
+                      struct scatterlist *sgl, u32 sgl_count,
+                      struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
+                      struct scatterlist *sgl_prot, u32 sgl_prot_count,
+                      gfp_t gfp)
+{
+       sense_reason_t rc;
+
+       rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
+       if (rc)
+               goto send_cc_direct;
 
        /*
         * Locate se_lun pointer and attach it to struct se_cmd
         */
        rc = transport_lookup_cmd_lun(se_cmd);
-       if (rc) {
-               transport_send_check_condition_and_sense(se_cmd, rc, 0);
-               target_put_sess_cmd(se_cmd);
-               return 0;
-       }
+       if (rc)
+               goto send_cc_direct;
 
        rc = target_cmd_parse_cdb(se_cmd);
-       if (rc != 0) {
-               transport_generic_request_failure(se_cmd, rc);
-               return 0;
-       }
+       if (rc != 0)
+               goto generic_fail;
 
        /*
         * Save pointers for SGLs containing protection information,
@@ -1686,6 +1704,41 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        if (sgl_count != 0) {
                BUG_ON(!sgl);
 
+               rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
+                               sgl_bidi, sgl_bidi_count);
+               if (rc != 0)
+                       goto generic_fail;
+       }
+
+       return 0;
+
+send_cc_direct:
+       transport_send_check_condition_and_sense(se_cmd, rc, 0);
+       target_put_sess_cmd(se_cmd);
+       return -EIO;
+
+generic_fail:
+       transport_generic_request_failure(se_cmd, rc);
+       return -EIO;
+}
+EXPORT_SYMBOL_GPL(target_submit_prep);
+
+/**
+ * target_submit - perform final initialization and submit cmd to LIO core
+ * @se_cmd: command descriptor to submit
+ *
+ * target_submit_prep must have been called on the cmd, and this must be
+ * called from process context.
+ */
+void target_submit(struct se_cmd *se_cmd)
+{
+       struct scatterlist *sgl = se_cmd->t_data_sg;
+       unsigned char *buf = NULL;
+
+       might_sleep();
+
+       if (se_cmd->t_data_nents != 0) {
+               BUG_ON(!sgl);
                /*
                 * A work-around for tcm_loop as some userspace code via
                 * scsi-generic do not memset their associated read buffers,
@@ -1696,8 +1749,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                 */
                if (!(se_cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
                     se_cmd->data_direction == DMA_FROM_DEVICE) {
-                       unsigned char *buf = NULL;
-
                        if (sgl)
                                buf = kmap(sg_page(sgl)) + sgl->offset;
 
@@ -1707,12 +1758,6 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
                        }
                }
 
-               rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
-                               sgl_bidi, sgl_bidi_count);
-               if (rc != 0) {
-                       transport_generic_request_failure(se_cmd, rc);
-                       return 0;
-               }
        }
 
        /*
@@ -1722,9 +1767,8 @@ int target_submit_cmd_map_sgls(struct se_cmd *se_cmd, struct se_session *se_sess
        core_alua_check_nonop_delay(se_cmd);
 
        transport_handle_cdb_direct(se_cmd);
-       return 0;
 }
-EXPORT_SYMBOL(target_submit_cmd_map_sgls);
+EXPORT_SYMBOL_GPL(target_submit);
 
 /**
  * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
@@ -1741,25 +1785,109 @@ EXPORT_SYMBOL(target_submit_cmd_map_sgls);
  *
  * Task tags are supported if the caller has set @se_cmd->tag.
  *
- * Returns non zero to signal active I/O shutdown failure.  All other
- * setup exceptions will be returned as a SCSI CHECK_CONDITION response,
- * but still return zero here.
- *
  * This may only be called from process context, and also currently
  * assumes internal allocation of fabric payload buffer by target-core.
  *
  * It also assumes interal target core SGL memory allocation.
+ *
+ * This function must only be used by drivers that do their own
+ * sync during shutdown and do not use target_stop_session. If there
+ * is a failure this function will call into the fabric driver's
+ * queue_status with a CHECK_CONDITION.
  */
-int target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
                unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
                u32 data_length, int task_attr, int data_dir, int flags)
 {
-       return target_submit_cmd_map_sgls(se_cmd, se_sess, cdb, sense,
-                       unpacked_lun, data_length, task_attr, data_dir,
-                       flags, NULL, 0, NULL, 0, NULL, 0);
+       int rc;
+
+       rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
+                            task_attr, data_dir, flags);
+       WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
+       if (rc)
+               return;
+
+       if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+                              GFP_KERNEL))
+               return;
+
+       target_submit(se_cmd);
 }
 EXPORT_SYMBOL(target_submit_cmd);
 
+
+static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
+{
+       struct se_dev_plug *se_plug;
+
+       if (!se_dev->transport->plug_device)
+               return NULL;
+
+       se_plug = se_dev->transport->plug_device(se_dev);
+       if (!se_plug)
+               return NULL;
+
+       se_plug->se_dev = se_dev;
+       /*
+        * We have a ref to the lun at this point, but the cmds could
+        * complete before we unplug, so grab a ref to the se_device so we
+        * can call back into the backend.
+        */
+       config_group_get(&se_dev->dev_group);
+       return se_plug;
+}
+
+static void target_unplug_device(struct se_dev_plug *se_plug)
+{
+       struct se_device *se_dev = se_plug->se_dev;
+
+       se_dev->transport->unplug_device(se_plug);
+       config_group_put(&se_dev->dev_group);
+}
+
+void target_queued_submit_work(struct work_struct *work)
+{
+       struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
+       struct se_cmd *se_cmd, *next_cmd;
+       struct se_dev_plug *se_plug = NULL;
+       struct se_device *se_dev = NULL;
+       struct llist_node *cmd_list;
+
+       cmd_list = llist_del_all(&sq->cmd_list);
+       if (!cmd_list)
+               /* Previous call took what we were queued to submit */
+               return;
+
+       cmd_list = llist_reverse_order(cmd_list);
+       llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
+               if (!se_dev) {
+                       se_dev = se_cmd->se_dev;
+                       se_plug = target_plug_device(se_dev);
+               }
+
+               target_submit(se_cmd);
+       }
+
+       if (se_plug)
+               target_unplug_device(se_plug);
+}
+
+/**
+ * target_queue_submission - queue the cmd to run on the LIO workqueue
+ * @se_cmd: command descriptor to submit
+ */
+void target_queue_submission(struct se_cmd *se_cmd)
+{
+       struct se_device *se_dev = se_cmd->se_dev;
+       int cpu = se_cmd->cpuid;
+       struct se_cmd_queue *sq;
+
+       sq = &se_dev->queues[cpu].sq;
+       llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
+       queue_work_on(cpu, target_submission_wq, &sq->work);
+}
+EXPORT_SYMBOL_GPL(target_queue_submission);
+
 static void target_complete_tmr_failure(struct work_struct *work)
 {
        struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
@@ -1799,8 +1927,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
        se_tpg = se_sess->se_tpg;
        BUG_ON(!se_tpg);
 
-       transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
-                             0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
+       __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+                         0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun);
        /*
         * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
         * allocation failure.
@@ -2778,9 +2906,7 @@ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
         * invocations before se_cmd descriptor release.
         */
        if (ack_kref) {
-               if (!kref_get_unless_zero(&se_cmd->cmd_kref))
-                       return -EINVAL;
-
+               kref_get(&se_cmd->cmd_kref);
                se_cmd->se_cmd_flags |= SCF_ACK_KREF;
        }