locking/qspinlock_stat: Track the no MCS node available case
author    Waiman Long <longman@redhat.com>
          Tue, 29 Jan 2019 21:53:46 +0000 (22:53 +0100)
committer Ingo Molnar <mingo@kernel.org>
          Mon, 4 Feb 2019 08:03:30 +0000 (09:03 +0100)
Track the number of slowpath locking operations that are done without
any MCS node available, and rename lock_index[123] to lock_use_node[234]
to make the counter names more descriptive.

Using these stat counters is one way to find out if a code path is
being exercised.
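
For instance, with CONFIG_QUEUED_LOCK_STAT=y the counters are exposed as
debugfs files. A minimal user-space sketch (not part of this patch) that
reads the new lock_no_node counter, assuming debugfs is mounted at the
usual /sys/kernel/debug and the qlockstat directory name used by
qspinlock_stat.h:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long count;
		FILE *f = fopen("/sys/kernel/debug/qlockstat/lock_no_node", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		if (fscanf(f, "%llu", &count) == 1)
			printf("lock_no_node = %llu\n", count);
		fclose(f);
		return 0;
	}

A non-zero value shows that the no-MCS-node fallback path has run since
the last counter reset.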

Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: James Morse <james.morse@arm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: SRINIVAS <srinivas.eeda@oracle.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Zhenzhong Duan <zhenzhong.duan@oracle.com>
Link: https://lkml.kernel.org/r/1548798828-16156-3-git-send-email-longman@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/locking/qspinlock.c
kernel/locking/qspinlock_stat.h

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 0875053..21ee51b 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -422,6 +422,7 @@ pv_queue:
         * simple enough.
         */
        if (unlikely(idx >= MAX_NODES)) {
+               qstat_inc(qstat_lock_no_node, true);
                while (!queued_spin_trylock(lock))
                        cpu_relax();
                goto release;
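
The fallback in the hunk above exists because each CPU keeps only
MAX_NODES (4) MCS nodes, one per nesting level (task, soft IRQ, hard
IRQ, NMI), so a lock taken at a deeper nesting finds no free node. A
stand-alone sketch modeling that allocation, not the kernel code itself,
just illustrating when the new counter fires:

	#include <stdio.h>

	#define MAX_NODES 4	/* task, soft IRQ, hard IRQ, NMI */

	struct mcs_node { int locked; };

	static struct mcs_node nodes[MAX_NODES];	/* per-CPU pool in the kernel */
	static int count;				/* nodes currently in use */

	static struct mcs_node *grab_node(void)
	{
		int idx = count++;

		if (idx >= MAX_NODES)
			return NULL;	/* no node: fall back to the trylock loop */
		return &nodes[idx];
	}

	int main(void)
	{
		for (int level = 0; level < 6; level++)
			printf("nesting level %d: %s\n", level,
			       grab_node() ? "got a node" : "lock_no_node++");
		return 0;
	}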
@@ -432,7 +433,7 @@ pv_queue:
        /*
         * Keep counts of non-zero index values:
         */
-       qstat_inc(qstat_lock_idx1 + idx - 1, idx);
+       qstat_inc(qstat_lock_use_node2 + idx - 1, idx);
 
        /*
         * Ensure that we increment the head node->count before initialising
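
The qstat_inc(qstat_lock_use_node2 + idx - 1, idx) form relies on the
three lock_use_node[234] enum values below staying consecutive, since
the non-zero index is used as an offset. A small sketch of that mapping,
mirroring the enum rather than reproducing the kernel code:

	#include <stdio.h>

	enum { qstat_lock_use_node2, qstat_lock_use_node3, qstat_lock_use_node4 };

	int main(void)
	{
		static const char * const names[] = {
			"lock_use_node2", "lock_use_node3", "lock_use_node4",
		};

		for (int idx = 1; idx <= 3; idx++)	/* idx 0 (node 1) is not counted */
			printf("idx %d -> %s\n", idx,
			       names[qstat_lock_use_node2 + idx - 1]);
		return 0;
	}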
diff --git a/kernel/locking/qspinlock_stat.h b/kernel/locking/qspinlock_stat.h
index 42d3d8d..d73f853 100644
--- a/kernel/locking/qspinlock_stat.h
+++ b/kernel/locking/qspinlock_stat.h
  *   pv_wait_node      - # of vCPU wait's at a non-head queue node
  *   lock_pending      - # of locking operations via pending code
  *   lock_slowpath     - # of locking operations via MCS lock queue
+ *   lock_use_node2    - # of locking operations that use 2nd per-CPU node
+ *   lock_use_node3    - # of locking operations that use 3rd per-CPU node
+ *   lock_use_node4    - # of locking operations that use 4th per-CPU node
+ *   lock_no_node      - # of locking operations without using per-CPU node
+ *
+ * Subtracting lock_use_node[234] from lock_slowpath will give you
+ * lock_use_node1.
  *
  * Writing to the "reset_counters" file will reset all the above counter
  * values.
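
That subtraction can be done directly on the exported counter values; a
sketch with hypothetical sample readings:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical sample values read from the debugfs files */
		unsigned long long lock_slowpath  = 120000;
		unsigned long long lock_use_node2 = 300;
		unsigned long long lock_use_node3 = 5;
		unsigned long long lock_use_node4 = 0;

		printf("lock_use_node1 = %llu\n",
		       lock_slowpath - (lock_use_node2 + lock_use_node3 +
					lock_use_node4));	/* 119695 */
		return 0;
	}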
@@ -55,9 +62,10 @@ enum qlock_stats {
        qstat_pv_wait_node,
        qstat_lock_pending,
        qstat_lock_slowpath,
-       qstat_lock_idx1,
-       qstat_lock_idx2,
-       qstat_lock_idx3,
+       qstat_lock_use_node2,
+       qstat_lock_use_node3,
+       qstat_lock_use_node4,
+       qstat_lock_no_node,
        qstat_num,      /* Total number of statistical counters */
        qstat_reset_cnts = qstat_num,
 };
@@ -85,9 +93,10 @@ static const char * const qstat_names[qstat_num + 1] = {
        [qstat_pv_wait_node]       = "pv_wait_node",
        [qstat_lock_pending]       = "lock_pending",
        [qstat_lock_slowpath]      = "lock_slowpath",
-       [qstat_lock_idx1]          = "lock_index1",
-       [qstat_lock_idx2]          = "lock_index2",
-       [qstat_lock_idx3]          = "lock_index3",
+       [qstat_lock_use_node2]     = "lock_use_node2",
+       [qstat_lock_use_node3]     = "lock_use_node3",
+       [qstat_lock_use_node4]     = "lock_use_node4",
+       [qstat_lock_no_node]       = "lock_no_node",
        [qstat_reset_cnts]         = "reset_counters",
 };
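
Per the header comment above, writing to the reset_counters file clears
all the counters; a sketch using the same assumed debugfs path as the
earlier example:

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/kernel/debug/qlockstat/reset_counters", "w");

		if (!f) {
			perror("fopen");
			return 1;
		}
		fputs("1\n", f);	/* the write itself triggers the reset */
		fclose(f);
		return 0;
	}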