diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 560daf9..8fbfe75 100644
@@ -41,6 +41,7 @@
 #include <trace/events/target.h>
 
 static struct workqueue_struct *target_completion_wq;
+static struct workqueue_struct *target_submission_wq;
 static struct kmem_cache *se_sess_cache;
 struct kmem_cache *se_ua_cache;
 struct kmem_cache *t10_pr_reg_cache;
@@ -129,8 +130,15 @@ int init_se_kmem_caches(void)
        if (!target_completion_wq)
                goto out_free_lba_map_mem_cache;
 
+       target_submission_wq = alloc_workqueue("target_submission",
+                                              WQ_MEM_RECLAIM, 0);
+       if (!target_submission_wq)
+               goto out_free_completion_wq;
+
        return 0;
 
+out_free_completion_wq:
+       destroy_workqueue(target_completion_wq);
 out_free_lba_map_mem_cache:
        kmem_cache_destroy(t10_alua_lba_map_mem_cache);
 out_free_lba_map_cache:
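
The new submission workqueue slots into the existing goto unwind in
init_se_kmem_caches(): each label releases exactly what the steps before
the failure allocated, in reverse order. A minimal standalone sketch of
the same pattern, with illustrative names (WQ_MEM_RECLAIM matches the
patch and guarantees a rescuer thread, so queued work can still make
progress under memory pressure):

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_completion_wq;
static struct workqueue_struct *example_submission_wq;

static int __init example_init(void)
{
	example_completion_wq = alloc_workqueue("example_completion",
						WQ_MEM_RECLAIM, 0);
	if (!example_completion_wq)
		return -ENOMEM;

	example_submission_wq = alloc_workqueue("example_submission",
						WQ_MEM_RECLAIM, 0);
	if (!example_submission_wq)
		goto out_free_completion_wq;

	return 0;

out_free_completion_wq:
	destroy_workqueue(example_completion_wq);
	return -ENOMEM;
}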
@@ -153,6 +161,7 @@ out:
 
 void release_se_kmem_caches(void)
 {
+       destroy_workqueue(target_submission_wq);
        destroy_workqueue(target_completion_wq);
        kmem_cache_destroy(se_sess_cache);
        kmem_cache_destroy(se_ua_cache);
@@ -848,7 +857,8 @@ static bool target_cmd_interrupted(struct se_cmd *cmd)
 /* May be called from interrupt context so must not sleep. */
 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 {
-       int success;
+       struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
+       int success, cpu;
        unsigned long flags;
 
        if (target_cmd_interrupted(cmd))
@@ -875,7 +885,13 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 
        INIT_WORK(&cmd->work, success ? target_complete_ok_work :
                  target_complete_failure_work);
-       queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+
+       if (wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
+               cpu = cmd->cpuid;
+       else
+               cpu = wwn->cmd_compl_affinity;
+
+       queue_work_on(cpu, target_completion_wq, &cmd->work);
 }
 EXPORT_SYMBOL(target_complete_cmd);
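
With this change the completion CPU comes from the fabric's se_wwn
instead of being hardwired to the submitting CPU. The diff names only
the SE_COMPL_AFFINITY_CPUID case; below is a sketch of the selection
rule, assuming the sentinel is a negative value outside the valid CPU
ID range (the concrete value is an assumption, not shown in this diff):

#include <linux/workqueue.h>

/* Assumed sentinel: "complete on the CPU the command was submitted on". */
#define EXAMPLE_COMPL_AFFINITY_CPUID	-1

/*
 * Any other stored value is used as-is: either a fixed CPU id or
 * WORK_CPU_UNBOUND, both of which queue_work_on() accepts directly.
 */
static int example_compl_cpu(int affinity, int submit_cpu)
{
	return affinity == EXAMPLE_COMPL_AFFINITY_CPUID ? submit_cpu : affinity;
}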
 
@@ -1382,7 +1398,6 @@ void __target_init_cmd(
 {
        INIT_LIST_HEAD(&cmd->se_delayed_node);
        INIT_LIST_HEAD(&cmd->se_qf_node);
-       INIT_LIST_HEAD(&cmd->se_cmd_list);
        INIT_LIST_HEAD(&cmd->state_list);
        init_completion(&cmd->t_transport_stop_comp);
        cmd->free_compl = NULL;
@@ -1429,7 +1444,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
 }
 
 sense_reason_t
-target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
+target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
 {
        sense_reason_t ret;
 
@@ -1450,8 +1465,7 @@ target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb)
         * setup the pointer from __t_task_cdb to t_task_cdb.
         */
        if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
-               cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
-                                               GFP_KERNEL);
+               cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
                if (!cmd->t_task_cdb) {
                        pr_err("Unable to allocate cmd->t_task_cdb"
                                " %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
@@ -1640,21 +1654,24 @@ EXPORT_SYMBOL_GPL(target_init_cmd);
  * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
  * @sgl_prot: struct scatterlist memory protection information
  * @sgl_prot_count: scatterlist count for protection information
+ * @gfp: allocation flags, used if the CDB must be copied to a separate buffer
  *
  * Returns:
  *     - less than zero to signal failure.
  *     - zero on success.
+ *
 * If failure is returned, LIO will use the caller's queue_status callback to
 * complete the cmd.
  */
 int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
                       struct scatterlist *sgl, u32 sgl_count,
                       struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
-                      struct scatterlist *sgl_prot, u32 sgl_prot_count)
+                      struct scatterlist *sgl_prot, u32 sgl_prot_count,
+                      gfp_t gfp)
 {
        sense_reason_t rc;
 
-       rc = target_cmd_init_cdb(se_cmd, cdb);
+       rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
        if (rc)
                goto send_cc_direct;
 
@@ -1790,13 +1807,87 @@ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
        if (rc)
                return;
 
-       if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0))
+       if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
+                              GFP_KERNEL))
                return;
 
        target_submit(se_cmd);
 }
 EXPORT_SYMBOL(target_submit_cmd);
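
With gfp plumbed through, target_submit_cmd() above keeps GFP_KERNEL
(it may sleep copying an oversized CDB), while fabrics that submit from
atomic context can now pass GFP_ATOMIC instead. A hypothetical
IRQ-context caller with data already mapped into a scatterlist (the
function below is illustrative, not part of the patch):

#include <linux/scatterlist.h>
#include <target/target_core_fabric.h>

static void example_fabric_submit(struct se_cmd *se_cmd, unsigned char *cdb,
				  struct scatterlist *sgl, u32 sgl_count)
{
	/* Atomic allocation: this hypothetical fabric runs in IRQ context. */
	if (target_submit_prep(se_cmd, cdb, sgl, sgl_count, NULL, 0, NULL, 0,
			       GFP_ATOMIC))
		return;	/* on failure, LIO completes via queue_status */

	target_submit(se_cmd);
}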
 
+static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
+{
+       struct se_dev_plug *se_plug;
+
+       if (!se_dev->transport->plug_device)
+               return NULL;
+
+       se_plug = se_dev->transport->plug_device(se_dev);
+       if (!se_plug)
+               return NULL;
+
+       se_plug->se_dev = se_dev;
+       /*
+        * We have a ref to the lun at this point, but the cmds could
+        * complete before we unplug, so grab a ref to the se_device so we
+        * can call back into the backend.
+        */
+       config_group_get(&se_dev->dev_group);
+       return se_plug;
+}
+
+static void target_unplug_device(struct se_dev_plug *se_plug)
+{
+       struct se_device *se_dev = se_plug->se_dev;
+
+       se_dev->transport->unplug_device(se_plug);
+       config_group_put(&se_dev->dev_group);
+}
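
The plug/unplug pair mirrors block layer plugging: the core opens a
batch window before draining a queue of commands and closes it once at
the end, so a backend can ring one doorbell for the whole batch. The
hooks are optional (a NULL plug_device disables batching), and the
config_group reference taken above keeps the se_device alive until
unplug even if every batched command completes first. A hypothetical
backend wiring, with illustrative names:

#include <linux/slab.h>
#include <target/target_core_base.h>

struct example_dev_plug {
	struct se_dev_plug se_plug;
	unsigned int queued;	/* commands batched since the plug */
};

static struct se_dev_plug *example_plug_device(struct se_device *se_dev)
{
	/* Called from the submission workqueue, so GFP_KERNEL is fine. */
	struct example_dev_plug *dp = kzalloc(sizeof(*dp), GFP_KERNEL);

	return dp ? &dp->se_plug : NULL;	/* NULL: core skips unplug */
}

static void example_unplug_device(struct se_dev_plug *se_plug)
{
	struct example_dev_plug *dp =
		container_of(se_plug, struct example_dev_plug, se_plug);

	/* a single hardware kick for the whole batch would go here */
	kfree(dp);
}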
+
+void target_queued_submit_work(struct work_struct *work)
+{
+       struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
+       struct se_cmd *se_cmd, *next_cmd;
+       struct se_dev_plug *se_plug = NULL;
+       struct se_device *se_dev = NULL;
+       struct llist_node *cmd_list;
+
+       cmd_list = llist_del_all(&sq->cmd_list);
+       if (!cmd_list)
+               /* Previous call took what we were queued to submit */
+               return;
+
+       cmd_list = llist_reverse_order(cmd_list);
+       llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
+               if (!se_dev) {
+                       se_dev = se_cmd->se_dev;
+                       se_plug = target_plug_device(se_dev);
+               }
+
+               target_submit(se_cmd);
+       }
+
+       if (se_plug)
+               target_unplug_device(se_plug);
+}
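
This worker is also why INIT_LIST_HEAD(&cmd->se_cmd_list) was dropped
earlier in the diff: se_cmd_list is now an llist_node used with the
kernel's lock-free llist. Producers push with llist_add(), the consumer
detaches everything with one atomic llist_del_all(), and because pushes
build the list LIFO, llist_reverse_order() restores submission order
before the walk. The same pattern in isolation:

#include <linux/llist.h>

struct example_item {
	struct llist_node node;
	int payload;
};

static LLIST_HEAD(example_list);

/* Producer: safe from any context, no lock taken. */
static void example_produce(struct example_item *item)
{
	llist_add(&item->node, &example_list);
}

/* Consumer: one atomic detach, then reverse to recover FIFO order. */
static void example_consume(void)
{
	struct example_item *item, *next;
	struct llist_node *head = llist_del_all(&example_list);

	if (!head)
		return;	/* a previous drain already took the entries */

	head = llist_reverse_order(head);
	llist_for_each_entry_safe(item, next, head, node) {
		/* process item->payload in submission order */
	}
}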
+
+/**
+ * target_queue_submission - queue the cmd to run on the LIO workqueue
+ * @se_cmd: command descriptor to submit
+ */
+void target_queue_submission(struct se_cmd *se_cmd)
+{
+       struct se_device *se_dev = se_cmd->se_dev;
+       int cpu = se_cmd->cpuid;
+       struct se_cmd_queue *sq;
+
+       sq = &se_dev->queues[cpu].sq;
+       llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
+       queue_work_on(cpu, target_submission_wq, &sq->work);
+}
+EXPORT_SYMBOL_GPL(target_queue_submission);
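
Commands are bucketed by the CPU they were queued from, and
queue_work_on() runs the drain on that same CPU, preserving cache
locality without a per-queue lock. A hypothetical fabric that must not
call target_submit() from its current context could defer like this
(illustrative function):

#include <target/target_core_fabric.h>

static void example_fabric_queue(struct se_cmd *se_cmd, unsigned char *cdb)
{
	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
			       GFP_ATOMIC))
		return;

	target_queue_submission(se_cmd);
}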
+
 static void target_complete_tmr_failure(struct work_struct *work)
 {
        struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);