scsi: lpfc: Refactor cpu affinity assignment paths
author     Justin Tee <justin.tee@broadcom.com>
           Wed, 12 Jul 2023 18:05:19 +0000 (11:05 -0700)
committer  Martin K. Petersen <martin.petersen@oracle.com>
           Sun, 23 Jul 2023 20:17:07 +0000 (16:17 -0400)
During initialization, much of the same logic is repeated across the MSI-X
vector CPU affinity assignment paths.

Create an lpfc_next_present_cpu() helper routine and apply it to refactor
those paths.

Signed-off-by: Justin Tee <justin.tee@broadcom.com>
Link: https://lore.kernel.org/r/20230712180522.112722-10-justintee8345@gmail.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_nvmet.c
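
For reference, the wrap-around search that the new lpfc_next_present_cpu()
helper encapsulates can be modelled outside the kernel. The sketch below is
illustrative only: the fixed cpu_present[] array, NR_CPU_IDS, and the
next_present/first_present/model_next_present_cpu names are stand-ins for the
kernel's cpu_present_mask, nr_cpu_ids, cpumask_next(), and cpumask_first();
it is not driver code.

#include <stdio.h>

/*
 * Illustrative stand-ins for the kernel's nr_cpu_ids and cpu_present_mask;
 * the real helper calls cpumask_next() and cpumask_first() on
 * cpu_present_mask instead of scanning an array.
 */
#define NR_CPU_IDS 8
static const int cpu_present[NR_CPU_IDS] = { 1, 1, 0, 1, 1, 0, 1, 1 };

/* Model of cpumask_next(): first present CPU strictly after n. */
static unsigned int next_present(int n)
{
        for (int cpu = n + 1; cpu < NR_CPU_IDS; cpu++)
                if (cpu_present[cpu])
                        return cpu;
        return NR_CPU_IDS;              /* none left: "cpu >= nr_cpu_ids" */
}

/* Model of cpumask_first(): first present CPU overall. */
static unsigned int first_present(void)
{
        return next_present(-1);
}

/* Mirrors lpfc_next_present_cpu(): search forward, wrap to the start. */
static unsigned int model_next_present_cpu(int n)
{
        unsigned int cpu = next_present(n);

        if (cpu >= NR_CPU_IDS)
                cpu = first_present();

        return cpu;
}

int main(void)
{
        /* Probe a few positions, including the wrap past the last CPU. */
        printf("%u %u %u\n",
               model_next_present_cpu(0),  /* 1: next present after CPU 0  */
               model_next_present_cpu(4),  /* 6: CPU 5 is absent, skip it  */
               model_next_present_cpu(7)); /* 0: past the end, wrap around */
        return 0;
}

Probing past the last present CPU falls back to the first present CPU, which
is exactly the open-coded pattern the patch removes from each call site below.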

diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index e8d7eee..bc1c5f6 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -1709,6 +1709,25 @@ lpfc_next_online_cpu(const struct cpumask *mask, unsigned int start)
 
        return cpu_it;
 }
+/**
+ * lpfc_next_present_cpu - Finds next present CPU after n
+ * @n: the cpu prior to search
+ *
+ * Note: If no next present cpu, then fallback to first present cpu.
+ *
+ **/
+static inline unsigned int lpfc_next_present_cpu(int n)
+{
+       unsigned int cpu;
+
+       cpu = cpumask_next(n, cpu_present_mask);
+
+       if (cpu >= nr_cpu_ids)
+               cpu = cpumask_first(cpu_present_mask);
+
+       return cpu;
+}
+
 /**
  * lpfc_sli4_mod_hba_eq_delay - update EQ delay
  * @phba: Pointer to HBA context object.
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c878fb9..9e59c05 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -12512,10 +12512,7 @@ lpfc_cpu_affinity_check(struct lpfc_hba *phba, int vectors)
                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY) &&
                                    (new_cpup->phys_id == cpup->phys_id))
                                        goto found_same;
-                               new_cpu = cpumask_next(
-                                       new_cpu, cpu_present_mask);
-                               if (new_cpu >= nr_cpu_ids)
-                                       new_cpu = first_cpu;
+                               new_cpu = lpfc_next_present_cpu(new_cpu);
                        }
                        /* At this point, we leave the CPU as unassigned */
                        continue;
@@ -12527,9 +12524,7 @@ found_same:
                         * chance of having multiple unassigned CPU entries
                         * selecting the same IRQ.
                         */
-                       start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (start_cpu >= nr_cpu_ids)
-                               start_cpu = first_cpu;
+                       start_cpu = lpfc_next_present_cpu(new_cpu);
 
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "3337 Set Affinity: CPU %d "
@@ -12562,10 +12557,7 @@ found_same:
                                if (!(new_cpup->flag & LPFC_CPU_MAP_UNASSIGN) &&
                                    (new_cpup->eq != LPFC_VECTOR_MAP_EMPTY))
                                        goto found_any;
-                               new_cpu = cpumask_next(
-                                       new_cpu, cpu_present_mask);
-                               if (new_cpu >= nr_cpu_ids)
-                                       new_cpu = first_cpu;
+                               new_cpu = lpfc_next_present_cpu(new_cpu);
                        }
                        /* We should never leave an entry unassigned */
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -12581,9 +12573,7 @@ found_any:
                         * chance of having multiple unassigned CPU entries
                         * selecting the same IRQ.
                         */
-                       start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (start_cpu >= nr_cpu_ids)
-                               start_cpu = first_cpu;
+                       start_cpu = lpfc_next_present_cpu(new_cpu);
 
                        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
                                        "3338 Set Affinity: CPU %d "
@@ -12654,9 +12644,7 @@ found_any:
                            new_cpup->core_id == cpup->core_id) {
                                goto found_hdwq;
                        }
-                       new_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (new_cpu >= nr_cpu_ids)
-                               new_cpu = first_cpu;
+                       new_cpu = lpfc_next_present_cpu(new_cpu);
                }
 
                /* If we can't match both phys_id and core_id,
@@ -12668,10 +12656,7 @@ found_any:
                        if (new_cpup->hdwq != LPFC_VECTOR_MAP_EMPTY &&
                            new_cpup->phys_id == cpup->phys_id)
                                goto found_hdwq;
-
-                       new_cpu = cpumask_next(new_cpu, cpu_present_mask);
-                       if (new_cpu >= nr_cpu_ids)
-                               new_cpu = first_cpu;
+                       new_cpu = lpfc_next_present_cpu(new_cpu);
                }
 
                /* Otherwise just round robin on cfg_hdw_queue */
@@ -12680,9 +12665,7 @@ found_any:
                goto logit;
  found_hdwq:
                /* We found an available entry, copy the IRQ info */
-               start_cpu = cpumask_next(new_cpu, cpu_present_mask);
-               if (start_cpu >= nr_cpu_ids)
-                       start_cpu = first_cpu;
+               start_cpu = lpfc_next_present_cpu(new_cpu);
                cpup->hdwq = new_cpup->hdwq;
  logit:
                lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
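
The replaced call sites in lpfc_cpu_affinity_check() above all share the same
shape: start just past the previously matched CPU, probe each present CPU at
most once, and wrap around at the end of the mask. Below is a small,
self-contained sketch of that bounded wrapping search; the cpu_present[]
array, the num_present bound, the has_eq() predicate, and the function names
are illustrative assumptions, not the driver's actual data structures.

#include <stdbool.h>
#include <stdio.h>

#define NR_CPU_IDS 8
static const int cpu_present[NR_CPU_IDS] = { 1, 1, 0, 1, 1, 0, 1, 1 };

/* Stand-in for lpfc_next_present_cpu(): next present CPU, wrapping. */
static unsigned int next_present_cpu(int n)
{
        for (int pass = 0; pass < 2; pass++) {
                for (int cpu = n + 1; cpu < NR_CPU_IDS; cpu++)
                        if (cpu_present[cpu])
                                return cpu;
                n = -1;         /* nothing after n: wrap and scan from 0 */
        }
        return 0;
}

/*
 * Bounded wrap-around search shaped like the loops above: starting at
 * start_cpu, visit each present CPU at most once and stop at the first
 * one the (illustrative) predicate accepts.  Returns -1 if none matches,
 * mirroring the "leave the CPU as unassigned" fallthrough.
 */
static int find_matching_cpu(unsigned int start_cpu, unsigned int num_present,
                             bool (*match)(unsigned int cpu))
{
        unsigned int cpu = start_cpu;

        for (unsigned int i = 0; i < num_present; i++) {
                if (match(cpu))
                        return (int)cpu;
                cpu = next_present_cpu(cpu);
        }
        return -1;
}

/* Example predicate: pretend only CPUs 6 and 7 already carry an EQ. */
static bool has_eq(unsigned int cpu)
{
        return cpu == 6 || cpu == 7;
}

int main(void)
{
        /* 6 present CPUs (0, 1, 3, 4, 6, 7); start searching after CPU 3. */
        int cpu = find_matching_cpu(next_present_cpu(3), 6, has_eq);

        printf("matched CPU: %d\n", cpu);       /* prints: matched CPU: 6 */
        return 0;
}

Bounding the loop to num_present iterations guarantees termination even when
no CPU satisfies the predicate, which is why the helper can wrap
unconditionally.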
diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c
index dff4584..425328d 100644
--- a/drivers/scsi/lpfc/lpfc_nvmet.c
+++ b/drivers/scsi/lpfc/lpfc_nvmet.c
@@ -1620,10 +1620,7 @@ lpfc_nvmet_setup_io_context(struct lpfc_hba *phba)
                        cpu = cpumask_first(cpu_present_mask);
                        continue;
                }
-               cpu = cpumask_next(cpu, cpu_present_mask);
-               if (cpu == nr_cpu_ids)
-                       cpu = cpumask_first(cpu_present_mask);
-
+               cpu = lpfc_next_present_cpu(cpu);
        }
 
        for_each_present_cpu(i) {