flex_proportions: Allow N events instead of 1
author	Matthew Wilcox (Oracle) <willy@infradead.org>
Sat, 20 Mar 2021 20:34:54 +0000 (16:34 -0400)
committer	Matthew Wilcox (Oracle) <willy@infradead.org>
Mon, 18 Oct 2021 11:49:39 +0000 (07:49 -0400)
When batching events (such as writing back N pages in a single I/O), it
is better to do one flex_proportion operation instead of N.  There is
only one caller of __fprop_inc_percpu_max(), and it's the one we're
going to change in the next patch, so rename it instead of adding a
compatibility wrapper.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jan Kara <jack@suse.cz>
include/linux/flex_proportions.h
lib/flex_proportions.c
mm/page-writeback.c
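
To make the motivation concrete, here is a minimal, hypothetical sketch of a
batching wrapper built on the new __fprop_add_percpu() declared below.  The
names fprop_add_percpu() and nr_pages are illustrative only, not part of this
patch; the irq-safe pattern simply mirrors the existing fprop_inc_percpu()
helper shown in the first hunk:

/*
 * Hypothetical batching wrapper (sketch only): a caller that has just
 * written back nr_pages pages does one proportion update instead of
 * nr_pages separate ones.
 */
static inline void fprop_add_percpu(struct fprop_global *p,
		struct fprop_local_percpu *pl, long nr)
{
	unsigned long flags;

	local_irq_save(flags);
	__fprop_add_percpu(p, pl, nr);
	local_irq_restore(flags);
}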

diff --git a/include/linux/flex_proportions.h b/include/linux/flex_proportions.h
index c12df59..3e378b1 100644
@@ -83,9 +83,10 @@ struct fprop_local_percpu {
 
 int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp);
 void fprop_local_destroy_percpu(struct fprop_local_percpu *pl);
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl);
-void __fprop_inc_percpu_max(struct fprop_global *p, struct fprop_local_percpu *pl,
-                           int max_frac);
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+               long nr);
+void __fprop_add_percpu_max(struct fprop_global *p,
+               struct fprop_local_percpu *pl, int max_frac, long nr);
 void fprop_fraction_percpu(struct fprop_global *p,
        struct fprop_local_percpu *pl, unsigned long *numerator,
        unsigned long *denominator);
@@ -96,7 +97,7 @@ void fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
        unsigned long flags;
 
        local_irq_save(flags);
-       __fprop_inc_percpu(p, pl);
+       __fprop_add_percpu(p, pl, 1);
        local_irq_restore(flags);
 }
 
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index 4515439..53e7eb1 100644
@@ -217,11 +217,12 @@ static void fprop_reflect_period_percpu(struct fprop_global *p,
 }
 
 /* Event of type pl happened */
-void __fprop_inc_percpu(struct fprop_global *p, struct fprop_local_percpu *pl)
+void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
+               long nr)
 {
        fprop_reflect_period_percpu(p, pl);
-       percpu_counter_add_batch(&pl->events, 1, PROP_BATCH);
-       percpu_counter_add(&p->events, 1);
+       percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
+       percpu_counter_add(&p->events, nr);
 }
 
 void fprop_fraction_percpu(struct fprop_global *p,
@@ -253,20 +254,29 @@ void fprop_fraction_percpu(struct fprop_global *p,
 }
 
 /*
- * Like __fprop_inc_percpu() except that event is counted only if the given
+ * Like __fprop_add_percpu() except that event is counted only if the given
  * type has fraction smaller than @max_frac/FPROP_FRAC_BASE
  */
-void __fprop_inc_percpu_max(struct fprop_global *p,
-                           struct fprop_local_percpu *pl, int max_frac)
+void __fprop_add_percpu_max(struct fprop_global *p,
+               struct fprop_local_percpu *pl, int max_frac, long nr)
 {
        if (unlikely(max_frac < FPROP_FRAC_BASE)) {
                unsigned long numerator, denominator;
+               s64 tmp;
 
                fprop_fraction_percpu(p, pl, &numerator, &denominator);
-               if (numerator >
-                   (((u64)denominator) * max_frac) >> FPROP_FRAC_SHIFT)
+               /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
+               tmp = (u64)denominator * max_frac -
+                                       ((u64)numerator << FPROP_FRAC_SHIFT);
+               if (tmp < 0) {
+                       /* Maximum fraction already exceeded? */
                        return;
+               } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
+                       /* Add just enough for the fraction to saturate */
+                       nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
+                                       FPROP_FRAC_BASE - max_frac);
+               }
        }
 
-       __fprop_inc_percpu(p, pl);
+       __fprop_add_percpu(p, pl, nr);
 }
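
A quick worked example of the clamp above (a sketch, assuming FPROP_FRAC_SHIFT
is 10 and hence FPROP_FRAC_BASE is 1024): with numerator = 300,
denominator = 1000 and max_frac = 512 (a 50% cap),

	tmp = 1000 * 512 - (300 << 10) = 512000 - 307200 = 204800

Adding nr events raises both counters, so keeping (300 + nr) / (1000 + nr) at
or below 512/1024 requires nr * (1024 - 512) <= tmp, i.e. nr <= 400.  A batch
of, say, 1000 events is therefore clamped to 400, which brings the fraction
exactly to the cap: 700/1400 = 1/2.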
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1d8f2ee..cb7387d 100644
@@ -566,8 +566,8 @@ static void wb_domain_writeout_inc(struct wb_domain *dom,
                                   struct fprop_local_percpu *completions,
                                   unsigned int max_prop_frac)
 {
-       __fprop_inc_percpu_max(&dom->completions, completions,
-                              max_prop_frac);
+       __fprop_add_percpu_max(&dom->completions, completions,
+                              max_prop_frac, 1);
        /* First event after period switching was turned off? */
        if (unlikely(!dom->period_time)) {
                /*
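
The commit message notes that this sole caller of the _max variant is the one
converted to pass a real batch size in the next patch; a rough, hypothetical
sketch of that direction (the name wb_domain_writeout_add and the long nr
argument are assumptions here, not taken from this patch):

static void wb_domain_writeout_add(struct wb_domain *dom,
				   struct fprop_local_percpu *completions,
				   unsigned int max_prop_frac, long nr)
{
	__fprop_add_percpu_max(&dom->completions, completions,
			       max_prop_frac, nr);
	/* period restart handling as in wb_domain_writeout_inc() above */
}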