scsi: target: Fix ordered tag handling
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 14c6f2b..4a2e749 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1511,10 +1511,10 @@ target_cmd_parse_cdb(struct se_cmd *cmd)
 
        ret = dev->transport->parse_cdb(cmd);
        if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
-               pr_warn_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
-                                   cmd->se_tfo->fabric_name,
-                                   cmd->se_sess->se_node_acl->initiatorname,
-                                   cmd->t_task_cdb[0]);
+               pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
+                                    cmd->se_tfo->fabric_name,
+                                    cmd->se_sess->se_node_acl->initiatorname,
+                                    cmd->t_task_cdb[0]);
        if (ret)
                return ret;
 
@@ -2173,32 +2173,39 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
         */
        switch (cmd->sam_task_attr) {
        case TCM_HEAD_TAG:
+               atomic_inc_mb(&dev->non_ordered);
                pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
                         cmd->t_task_cdb[0]);
                return false;
        case TCM_ORDERED_TAG:
-               atomic_inc_mb(&dev->dev_ordered_sync);
+               atomic_inc_mb(&dev->delayed_cmd_count);
 
                pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
                         cmd->t_task_cdb[0]);
-
-               /*
-                * Execute an ORDERED command if no other older commands
-                * exist that need to be completed first.
-                */
-               if (!atomic_read(&dev->simple_cmds))
-                       return false;
                break;
        default:
                /*
                 * For SIMPLE and UNTAGGED Task Attribute commands
                 */
-               atomic_inc_mb(&dev->simple_cmds);
+               atomic_inc_mb(&dev->non_ordered);
+
+               if (atomic_read(&dev->delayed_cmd_count) == 0)
+                       return false;
                break;
        }
 
-       if (atomic_read(&dev->dev_ordered_sync) == 0)
-               return false;
+       if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
+               atomic_inc_mb(&dev->delayed_cmd_count);
+               /*
+                * We will account for this when we dequeue from the delayed
+                * list.
+                */
+               atomic_dec_mb(&dev->non_ordered);
+       }
+
+       spin_lock_irq(&cmd->t_state_lock);
+       cmd->transport_state &= ~CMD_T_SENT;
+       spin_unlock_irq(&cmd->t_state_lock);
 
        spin_lock(&dev->delayed_cmd_lock);
        list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
@@ -2206,6 +2213,12 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 
        pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn",
                cmd->t_task_cdb[0], cmd->sam_task_attr);
+       /*
+        * We may have had no non-ordered cmds when this function started, or
+        * we could have raced with the last simple/head cmd completing, so
+        * kick the delayed handler here.
+        */
+       schedule_work(&dev->delayed_cmd_work);
        return true;
 }
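
The counter scheme above replaces the old simple_cmds/dev_ordered_sync pair:
non_ordered counts SIMPLE/HEAD commands in flight, and delayed_cmd_count counts
commands parked on delayed_cmd_list. A minimal userspace model of the queueing
decision, with the locking elided and dev_model/must_delay as hypothetical
stand-ins (a sketch, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-ins for the two se_device counters. */
    struct dev_model {
            atomic_int non_ordered;       /* SIMPLE/HEAD cmds in flight      */
            atomic_int delayed_cmd_count; /* cmds parked on delayed_cmd_list */
    };

    enum attr { SIMPLE, HEAD, ORDERED };

    /* Mirrors target_handle_task_attr(): true means "park on the list". */
    static bool must_delay(struct dev_model *d, enum attr a)
    {
            switch (a) {
            case HEAD:      /* HEAD_OF_QUEUE always runs immediately */
                    atomic_fetch_add(&d->non_ordered, 1);
                    return false;
            case ORDERED:   /* ORDERED always goes on the list */
                    atomic_fetch_add(&d->delayed_cmd_count, 1);
                    return true;
            default:        /* SIMPLE runs unless an ORDERED is pending */
                    atomic_fetch_add(&d->non_ordered, 1);
                    if (atomic_load(&d->delayed_cmd_count) == 0)
                            return false;
                    /*
                     * Park behind the pending ORDERED; the non_ordered
                     * credit is restored when the cmd is dequeued.
                     */
                    atomic_fetch_add(&d->delayed_cmd_count, 1);
                    atomic_fetch_sub(&d->non_ordered, 1);
                    return true;
            }
    }

Note the unconditional schedule_work() at the end of the hunk: between the
delayed_cmd_count check and the list insertion, the last in-flight non-ordered
command may have completed and found nothing to run, so the queueing side must
kick the worker itself.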
 
@@ -2228,12 +2241,8 @@ void target_execute_cmd(struct se_cmd *cmd)
        if (target_write_prot_action(cmd))
                return;
 
-       if (target_handle_task_attr(cmd)) {
-               spin_lock_irq(&cmd->t_state_lock);
-               cmd->transport_state &= ~CMD_T_SENT;
-               spin_unlock_irq(&cmd->t_state_lock);
+       if (target_handle_task_attr(cmd))
                return;
-       }
 
        __target_execute_cmd(cmd, true);
 }
@@ -2243,29 +2252,48 @@ EXPORT_SYMBOL(target_execute_cmd);
  * Process all commands up to the last received ORDERED task attribute which
  * requires another blocking boundary
  */
-static void target_restart_delayed_cmds(struct se_device *dev)
+void target_do_delayed_work(struct work_struct *work)
 {
-       for (;;) {
+       struct se_device *dev = container_of(work, struct se_device,
+                                            delayed_cmd_work);
+
+       spin_lock(&dev->delayed_cmd_lock);
+       while (!dev->ordered_sync_in_progress) {
                struct se_cmd *cmd;
 
-               spin_lock(&dev->delayed_cmd_lock);
-               if (list_empty(&dev->delayed_cmd_list)) {
-                       spin_unlock(&dev->delayed_cmd_lock);
+               if (list_empty(&dev->delayed_cmd_list))
                        break;
-               }
 
                cmd = list_entry(dev->delayed_cmd_list.next,
                                 struct se_cmd, se_delayed_node);
+
+               if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
+                       /*
+                        * Check if we started with:
+                        * [ordered] [simple] [ordered]
+                        * and we are now at the last ordered cmd, so we
+                        * have to wait for the earlier simple cmd.
+                        */
+                       if (atomic_read(&dev->non_ordered) > 0)
+                               break;
+
+                       dev->ordered_sync_in_progress = true;
+               }
+
                list_del(&cmd->se_delayed_node);
+               atomic_dec_mb(&dev->delayed_cmd_count);
                spin_unlock(&dev->delayed_cmd_lock);
 
+               if (cmd->sam_task_attr != TCM_ORDERED_TAG)
+                       atomic_inc_mb(&dev->non_ordered);
+
                cmd->transport_state |= CMD_T_SENT;
 
                __target_execute_cmd(cmd, true);
 
-               if (cmd->sam_task_attr == TCM_ORDERED_TAG)
-                       break;
+               spin_lock(&dev->delayed_cmd_lock);
        }
+       spin_unlock(&dev->delayed_cmd_lock);
 }
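
The [ordered] [simple] [ordered] case in the comment above is the heart of the
fix. A toy single-pass trace of the drain loop's two stopping conditions, again
with hypothetical names and no locking (a sketch, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    enum attr { SIMPLE, ORDERED };

    int main(void)
    {
            /* Arrival order from the comment: [ordered] [simple] [ordered]. */
            enum attr delayed[] = { ORDERED, SIMPLE, ORDERED };
            int head = 0, count = 3;
            int non_ordered = 0;    /* no SIMPLE/HEAD cmds in flight yet */
            bool ordered_sync_in_progress = false;

            while (!ordered_sync_in_progress && head < count) {
                    if (delayed[head] == ORDERED) {
                            if (non_ordered > 0)
                                    break;  /* wait for the earlier SIMPLE */
                            ordered_sync_in_progress = true;
                    } else {
                            non_ordered++;
                    }
                    printf("dequeue cmd %d (%s)\n", head,
                           delayed[head] == ORDERED ? "ordered" : "simple");
                    head++;
            }
            /*
             * Only cmd 0 is dequeued on this pass. When it completes,
             * transport_complete_task_attr() clears the flag and re-kicks
             * the work, which then runs the SIMPLE cmd; the final ORDERED
             * waits again until non_ordered drops back to zero.
             */
            return 0;
    }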
 
 /*
@@ -2283,14 +2311,17 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
                goto restart;
 
        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
-               atomic_dec_mb(&dev->simple_cmds);
+               atomic_dec_mb(&dev->non_ordered);
                dev->dev_cur_ordered_id++;
        } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
+               atomic_dec_mb(&dev->non_ordered);
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
                         dev->dev_cur_ordered_id);
        } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
-               atomic_dec_mb(&dev->dev_ordered_sync);
+               spin_lock(&dev->delayed_cmd_lock);
+               dev->ordered_sync_in_progress = false;
+               spin_unlock(&dev->delayed_cmd_lock);
 
                dev->dev_cur_ordered_id++;
                pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
@@ -2299,7 +2330,8 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
        cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
 
 restart:
-       target_restart_delayed_cmds(dev);
+       if (atomic_read(&dev->delayed_cmd_count) > 0)
+               schedule_work(&dev->delayed_cmd_work);
 }
 
 static void transport_complete_qf(struct se_cmd *cmd)
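
The hunks above rely on se_device members and a work item that are not declared
in this file; the companion header change (not shown here) has to supply them,
presumably in include/target/target_core_base.h, with simple_cmds and
dev_ordered_sync going away. A sketch of what that amounts to:

    /* New bookkeeping on struct se_device (sketch, not the full struct): */
    struct se_device {
            /* ... */
            atomic_t                non_ordered;
            bool                    ordered_sync_in_progress;
            atomic_t                delayed_cmd_count;
            /* ... */
            struct work_struct      delayed_cmd_work;
            /* ... */
    };

    /* Plus a one-time hookup when the device is set up, e.g. in
     * target_configure_device(): */
    INIT_WORK(&dev->delayed_cmd_work, target_do_delayed_work);

Deferring the drain to a workqueue also means a completing command never
executes follow-on commands directly from its own completion context;
transport_complete_task_attr() now only schedules the work.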