block/blk-throttle.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
14 #include "blk.h"
15 #include "blk-cgroup-rwstat.h"
16 #include "blk-stat.h"
17 #include "blk-throttle.h"
18
19 /* Max dispatch from a group in 1 round */
20 #define THROTL_GRP_QUANTUM 8
21
22 /* Total max dispatch from all groups in one round */
23 #define THROTL_QUANTUM 32
24
25 /* Throttling is performed over a time slice; after that, the slice is renewed */
26 #define DFL_THROTL_SLICE_HD (HZ / 10)
27 #define DFL_THROTL_SLICE_SSD (HZ / 50)
28 #define MAX_THROTL_SLICE (HZ)
29 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
30 #define MIN_THROTL_BPS (320 * 1024)
31 #define MIN_THROTL_IOPS (10)
32 #define DFL_LATENCY_TARGET (-1L)
33 #define DFL_IDLE_THRESHOLD (0)
34 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
35 #define LATENCY_FILTERED_SSD (0)
36 /*
37  * For HD, very small latency comes from sequential IO. Such IO doesn't help
38  * determine whether a group's IO is being impacted by others, hence we ignore it.
39  */
40 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
41
42 /* A workqueue to queue throttle related work */
43 static struct workqueue_struct *kthrotld_workqueue;
44
45 enum tg_state_flags {
46         THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
47         THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
48 };
49
50 #define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
51
52 /* We measure latency for request size from <= 4k to >= 1M */
53 #define LATENCY_BUCKET_SIZE 9
54
55 struct latency_bucket {
56         unsigned long total_latency; /* ns / 1024 */
57         int samples;
58 };
59
60 struct avg_latency_bucket {
61         unsigned long latency; /* ns / 1024 */
62         bool valid;
63 };
64
65 struct throtl_data
66 {
67         /* service tree for active throtl groups */
68         struct throtl_service_queue service_queue;
69
70         struct request_queue *queue;
71
72         /* Total number of queued bios on READ and WRITE lists */
73         unsigned int nr_queued[2];
74
75         unsigned int throtl_slice;
76
77         /* Work for dispatching throttled bios */
78         struct work_struct dispatch_work;
79         unsigned int limit_index;
80         bool limit_valid[LIMIT_CNT];
81
82         unsigned long low_upgrade_time;
83         unsigned long low_downgrade_time;
84
85         unsigned int scale;
86
87         struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
88         struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
89         struct latency_bucket __percpu *latency_buckets[2];
90         unsigned long last_calculate_time;
91         unsigned long filtered_latency;
92
93         bool track_bio_latency;
94 };
95
96 static void throtl_pending_timer_fn(struct timer_list *t);
97
98 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
99 {
100         return pd_to_blkg(&tg->pd);
101 }
102
103 /**
104  * sq_to_tg - return the throtl_grp the specified service queue belongs to
105  * @sq: the throtl_service_queue of interest
106  *
107  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
108  * embedded in throtl_data, %NULL is returned.
109  */
110 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
111 {
112         if (sq && sq->parent_sq)
113                 return container_of(sq, struct throtl_grp, service_queue);
114         else
115                 return NULL;
116 }
117
118 /**
119  * sq_to_td - return throtl_data the specified service queue belongs to
120  * @sq: the throtl_service_queue of interest
121  *
122  * A service_queue can be embedded in either a throtl_grp or throtl_data.
123  * Determine the associated throtl_data accordingly and return it.
124  */
125 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
126 {
127         struct throtl_grp *tg = sq_to_tg(sq);
128
129         if (tg)
130                 return tg->td;
131         else
132                 return container_of(sq, struct throtl_data, service_queue);
133 }
134
135 /*
136  * A cgroup's LIMIT_MAX limit is scaled if a low limit is set. This scaling
137  * makes the IO dispatch smoother.
138  * Scale up: scale up linearly according to the time elapsed since the
139  *           upgrade. For every throtl_slice, the limit scales up by 1/2 of
140  *           the .low limit until it hits the .max limit.
141  * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit
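 *
 * Illustrative example (numbers are assumptions, not from the code): with
 * .low = 100 MB/s and scale = 4, i.e. four throtl_slice periods since the
 * upgrade, the adjusted limit below is 100 + 50 * 4 = 300 MB/s; the callers
 * still cap the result at the configured .max limit.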
142  */
143 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
144 {
145         /* arbitrary cap to keep the scale from growing too large */
146         if (td->scale < 4096 && time_after_eq(jiffies,
147             td->low_upgrade_time + td->scale * td->throtl_slice))
148                 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
149
150         return low + (low >> 1) * td->scale;
151 }
152
153 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
154 {
155         struct blkcg_gq *blkg = tg_to_blkg(tg);
156         struct throtl_data *td;
157         uint64_t ret;
158
159         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
160                 return U64_MAX;
161
162         td = tg->td;
163         ret = tg->bps[rw][td->limit_index];
164         if (ret == 0 && td->limit_index == LIMIT_LOW) {
165                 /* intermediate node or iops isn't 0 */
166                 if (!list_empty(&blkg->blkcg->css.children) ||
167                     tg->iops[rw][td->limit_index])
168                         return U64_MAX;
169                 else
170                         return MIN_THROTL_BPS;
171         }
172
173         if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
174             tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
175                 uint64_t adjusted;
176
177                 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
178                 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
179         }
180         return ret;
181 }
182
183 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
184 {
185         struct blkcg_gq *blkg = tg_to_blkg(tg);
186         struct throtl_data *td;
187         unsigned int ret;
188
189         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
190                 return UINT_MAX;
191
192         td = tg->td;
193         ret = tg->iops[rw][td->limit_index];
194         if (ret == 0 && td->limit_index == LIMIT_LOW) {
195                 /* intermediate node or bps isn't 0 */
196                 if (!list_empty(&blkg->blkcg->css.children) ||
197                     tg->bps[rw][td->limit_index])
198                         return UINT_MAX;
199                 else
200                         return MIN_THROTL_IOPS;
201         }
202
203         if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
204             tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
205                 uint64_t adjusted;
206
207                 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
208                 if (adjusted > UINT_MAX)
209                         adjusted = UINT_MAX;
210                 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
211         }
212         return ret;
213 }
214
215 #define request_bucket_index(sectors) \
216         clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
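
/*
 * Illustrative mapping derived from the macro above: a 4k request is 8
 * sectors, so order_base_2(8) - 3 = 0 (the first bucket); a 1M request is
 * 2048 sectors, so order_base_2(2048) - 3 = 8, the last bucket
 * (LATENCY_BUCKET_SIZE - 1).
 */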
217
218 /**
219  * throtl_log - log debug message via blktrace
220  * @sq: the service_queue being reported
221  * @fmt: printf format string
222  * @args: printf args
223  *
224  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
225  * throtl_grp; otherwise, just "throtl".
226  */
227 #define throtl_log(sq, fmt, args...)    do {                            \
228         struct throtl_grp *__tg = sq_to_tg((sq));                       \
229         struct throtl_data *__td = sq_to_td((sq));                      \
230                                                                         \
231         (void)__td;                                                     \
232         if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
233                 break;                                                  \
234         if ((__tg)) {                                                   \
235                 blk_add_cgroup_trace_msg(__td->queue,                   \
236                         tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
237         } else {                                                        \
238                 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
239         }                                                               \
240 } while (0)
241
242 static inline unsigned int throtl_bio_data_size(struct bio *bio)
243 {
244         /* a discard is accounted as a single 512-byte sector */
245         if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
246                 return 512;
247         return bio->bi_iter.bi_size;
248 }
249
250 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
251 {
252         INIT_LIST_HEAD(&qn->node);
253         bio_list_init(&qn->bios);
254         qn->tg = tg;
255 }
256
257 /**
258  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
259  * @bio: bio being added
260  * @qn: qnode to add bio to
261  * @queued: the service_queue->queued[] list @qn belongs to
262  *
263  * Add @bio to @qn and put @qn on @queued if it's not already there.
264  * @qn->tg's reference count is bumped when @qn is activated.  See the
265  * comment on top of throtl_qnode definition for details.
266  */
267 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
268                                  struct list_head *queued)
269 {
270         bio_list_add(&qn->bios, bio);
271         if (list_empty(&qn->node)) {
272                 list_add_tail(&qn->node, queued);
273                 blkg_get(tg_to_blkg(qn->tg));
274         }
275 }
276
277 /**
278  * throtl_peek_queued - peek the first bio on a qnode list
279  * @queued: the qnode list to peek
280  */
281 static struct bio *throtl_peek_queued(struct list_head *queued)
282 {
283         struct throtl_qnode *qn;
284         struct bio *bio;
285
286         if (list_empty(queued))
287                 return NULL;
288
289         qn = list_first_entry(queued, struct throtl_qnode, node);
290         bio = bio_list_peek(&qn->bios);
291         WARN_ON_ONCE(!bio);
292         return bio;
293 }
294
295 /**
296  * throtl_pop_queued - pop the first bio from a qnode list
297  * @queued: the qnode list to pop a bio from
298  * @tg_to_put: optional out argument for throtl_grp to put
299  *
300  * Pop the first bio from the qnode list @queued.  After popping, the first
301  * qnode is removed from @queued if empty or moved to the end of @queued so
302  * that the popping order is round-robin.
303  *
304  * When the first qnode is removed, its associated throtl_grp should be put
305  * too.  If @tg_to_put is NULL, this function automatically puts it;
306  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
307  * responsible for putting it.
308  */
309 static struct bio *throtl_pop_queued(struct list_head *queued,
310                                      struct throtl_grp **tg_to_put)
311 {
312         struct throtl_qnode *qn;
313         struct bio *bio;
314
315         if (list_empty(queued))
316                 return NULL;
317
318         qn = list_first_entry(queued, struct throtl_qnode, node);
319         bio = bio_list_pop(&qn->bios);
320         WARN_ON_ONCE(!bio);
321
322         if (bio_list_empty(&qn->bios)) {
323                 list_del_init(&qn->node);
324                 if (tg_to_put)
325                         *tg_to_put = qn->tg;
326                 else
327                         blkg_put(tg_to_blkg(qn->tg));
328         } else {
329                 list_move_tail(&qn->node, queued);
330         }
331
332         return bio;
333 }
334
335 /* init a service_queue, assumes the caller zeroed it */
336 static void throtl_service_queue_init(struct throtl_service_queue *sq)
337 {
338         INIT_LIST_HEAD(&sq->queued[0]);
339         INIT_LIST_HEAD(&sq->queued[1]);
340         sq->pending_tree = RB_ROOT_CACHED;
341         timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
342 }
343
344 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
345                                                 struct request_queue *q,
346                                                 struct blkcg *blkcg)
347 {
348         struct throtl_grp *tg;
349         int rw;
350
351         tg = kzalloc_node(sizeof(*tg), gfp, q->node);
352         if (!tg)
353                 return NULL;
354
355         if (blkg_rwstat_init(&tg->stat_bytes, gfp))
356                 goto err_free_tg;
357
358         if (blkg_rwstat_init(&tg->stat_ios, gfp))
359                 goto err_exit_stat_bytes;
360
361         throtl_service_queue_init(&tg->service_queue);
362
363         for (rw = READ; rw <= WRITE; rw++) {
364                 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
365                 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
366         }
367
368         RB_CLEAR_NODE(&tg->rb_node);
369         tg->bps[READ][LIMIT_MAX] = U64_MAX;
370         tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
371         tg->iops[READ][LIMIT_MAX] = UINT_MAX;
372         tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
373         tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
374         tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
375         tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
376         tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
377         /* LIMIT_LOW will have default value 0 */
378
379         tg->latency_target = DFL_LATENCY_TARGET;
380         tg->latency_target_conf = DFL_LATENCY_TARGET;
381         tg->idletime_threshold = DFL_IDLE_THRESHOLD;
382         tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
383
384         return &tg->pd;
385
386 err_exit_stat_bytes:
387         blkg_rwstat_exit(&tg->stat_bytes);
388 err_free_tg:
389         kfree(tg);
390         return NULL;
391 }
392
393 static void throtl_pd_init(struct blkg_policy_data *pd)
394 {
395         struct throtl_grp *tg = pd_to_tg(pd);
396         struct blkcg_gq *blkg = tg_to_blkg(tg);
397         struct throtl_data *td = blkg->q->td;
398         struct throtl_service_queue *sq = &tg->service_queue;
399
400         /*
401          * If on the default hierarchy, we switch to properly hierarchical
402          * behavior where limits on a given throtl_grp are applied to the
403          * whole subtree rather than just the group itself.  e.g. if a 16M
404          * read_bps limit is set on the root group, the whole system can't
405          * exceed 16M for the device.
406          *
407          * If not on the default hierarchy, the broken flat hierarchy
408          * behavior is retained where all throtl_grps are treated as if
409          * they're all separate root groups right below throtl_data.
410          * Limits of a group don't interact with limits of other groups
411          * regardless of the position of the group in the hierarchy.
412          */
413         sq->parent_sq = &td->service_queue;
414         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
415                 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
416         tg->td = td;
417 }
418
419 /*
420  * Set has_rules[] if @tg or any of its parents have limits configured.
421  * This doesn't require walking up to the top of the hierarchy as the
422  * parent's has_rules[] is guaranteed to be correct.
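 *
 * For example, if an ancestor group has a read bps limit configured, every
 * group below it ends up with has_rules[READ] set even when the group itself
 * has no limit of its own, so its bios still take the throttling path.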
423  */
424 static void tg_update_has_rules(struct throtl_grp *tg)
425 {
426         struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
427         struct throtl_data *td = tg->td;
428         int rw;
429
430         for (rw = READ; rw <= WRITE; rw++)
431                 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
432                         (td->limit_valid[td->limit_index] &&
433                          (tg_bps_limit(tg, rw) != U64_MAX ||
434                           tg_iops_limit(tg, rw) != UINT_MAX));
435 }
436
437 static void throtl_pd_online(struct blkg_policy_data *pd)
438 {
439         struct throtl_grp *tg = pd_to_tg(pd);
440         /*
441          * We don't want new groups to escape the limits of their ancestors.
442          * Update has_rules[] after a new group is brought online.
443          */
444         tg_update_has_rules(tg);
445 }
446
447 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
448 static void blk_throtl_update_limit_valid(struct throtl_data *td)
449 {
450         struct cgroup_subsys_state *pos_css;
451         struct blkcg_gq *blkg;
452         bool low_valid = false;
453
454         rcu_read_lock();
455         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
456                 struct throtl_grp *tg = blkg_to_tg(blkg);
457
458                 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
459                     tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
460                         low_valid = true;
461                         break;
462                 }
463         }
464         rcu_read_unlock();
465
466         td->limit_valid[LIMIT_LOW] = low_valid;
467 }
468 #else
469 static inline void blk_throtl_update_limit_valid(struct throtl_data *td)
470 {
471 }
472 #endif
473
474 static void throtl_upgrade_state(struct throtl_data *td);
475 static void throtl_pd_offline(struct blkg_policy_data *pd)
476 {
477         struct throtl_grp *tg = pd_to_tg(pd);
478
479         tg->bps[READ][LIMIT_LOW] = 0;
480         tg->bps[WRITE][LIMIT_LOW] = 0;
481         tg->iops[READ][LIMIT_LOW] = 0;
482         tg->iops[WRITE][LIMIT_LOW] = 0;
483
484         blk_throtl_update_limit_valid(tg->td);
485
486         if (!tg->td->limit_valid[tg->td->limit_index])
487                 throtl_upgrade_state(tg->td);
488 }
489
490 static void throtl_pd_free(struct blkg_policy_data *pd)
491 {
492         struct throtl_grp *tg = pd_to_tg(pd);
493
494         del_timer_sync(&tg->service_queue.pending_timer);
495         blkg_rwstat_exit(&tg->stat_bytes);
496         blkg_rwstat_exit(&tg->stat_ios);
497         kfree(tg);
498 }
499
500 static struct throtl_grp *
501 throtl_rb_first(struct throtl_service_queue *parent_sq)
502 {
503         struct rb_node *n;
504
505         n = rb_first_cached(&parent_sq->pending_tree);
506         WARN_ON_ONCE(!n);
507         if (!n)
508                 return NULL;
509         return rb_entry_tg(n);
510 }
511
512 static void throtl_rb_erase(struct rb_node *n,
513                             struct throtl_service_queue *parent_sq)
514 {
515         rb_erase_cached(n, &parent_sq->pending_tree);
516         RB_CLEAR_NODE(n);
517         --parent_sq->nr_pending;
518 }
519
520 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
521 {
522         struct throtl_grp *tg;
523
524         tg = throtl_rb_first(parent_sq);
525         if (!tg)
526                 return;
527
528         parent_sq->first_pending_disptime = tg->disptime;
529 }
530
531 static void tg_service_queue_add(struct throtl_grp *tg)
532 {
533         struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
534         struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
535         struct rb_node *parent = NULL;
536         struct throtl_grp *__tg;
537         unsigned long key = tg->disptime;
538         bool leftmost = true;
539
540         while (*node != NULL) {
541                 parent = *node;
542                 __tg = rb_entry_tg(parent);
543
544                 if (time_before(key, __tg->disptime))
545                         node = &parent->rb_left;
546                 else {
547                         node = &parent->rb_right;
548                         leftmost = false;
549                 }
550         }
551
552         rb_link_node(&tg->rb_node, parent, node);
553         rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
554                                leftmost);
555 }
556
557 static void throtl_enqueue_tg(struct throtl_grp *tg)
558 {
559         if (!(tg->flags & THROTL_TG_PENDING)) {
560                 tg_service_queue_add(tg);
561                 tg->flags |= THROTL_TG_PENDING;
562                 tg->service_queue.parent_sq->nr_pending++;
563         }
564 }
565
566 static void throtl_dequeue_tg(struct throtl_grp *tg)
567 {
568         if (tg->flags & THROTL_TG_PENDING) {
569                 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
570                 tg->flags &= ~THROTL_TG_PENDING;
571         }
572 }
573
574 /* Call with queue lock held */
575 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
576                                           unsigned long expires)
577 {
578         unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
579
580         /*
581          * Since we are adjusting the throttle limit dynamically, the sleep
582          * time calculated according to the previous limit might be stale. The
583          * cgroup's sleep time may be very long while no other cgroup has IO
584          * running to trigger a re-evaluation of the limits, so cap the sleep
585          * to make sure the cgroup doesn't miss the limit change.
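          *
          * For example (illustrative): with the HD default throtl_slice of
          * HZ / 10, the timer is never pushed out more than 8 * HZ / 10,
          * i.e. roughly 800ms, from now.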
586          */
587         if (time_after(expires, max_expire))
588                 expires = max_expire;
589         mod_timer(&sq->pending_timer, expires);
590         throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
591                    expires - jiffies, jiffies);
592 }
593
594 /**
595  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
596  * @sq: the service_queue to schedule dispatch for
597  * @force: force scheduling
598  *
599  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
600  * dispatch time of the first pending child.  Returns %true if either timer
601  * is armed or there's no pending child left.  %false if the current
602  * dispatch window is still open and the caller should continue
603  * dispatching.
604  *
605  * If @force is %true, the dispatch timer is always scheduled and this
606  * function is guaranteed to return %true.  This is to be used when the
607  * caller can't dispatch itself and needs to invoke pending_timer
608  * unconditionally.  Note that forced scheduling is likely to induce short
609  * delay before dispatch starts even if @sq->first_pending_disptime is not
610  * in the future and thus shouldn't be used in hot paths.
611  */
612 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
613                                           bool force)
614 {
615         /* any pending children left? */
616         if (!sq->nr_pending)
617                 return true;
618
619         update_min_dispatch_time(sq);
620
621         /* is the next dispatch time in the future? */
622         if (force || time_after(sq->first_pending_disptime, jiffies)) {
623                 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
624                 return true;
625         }
626
627         /* tell the caller to continue dispatching */
628         return false;
629 }
630
631 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
632                 bool rw, unsigned long start)
633 {
634         tg->bytes_disp[rw] = 0;
635         tg->io_disp[rw] = 0;
636
637         atomic_set(&tg->io_split_cnt[rw], 0);
638
639         /*
640          * Previous slice has expired. We must have trimmed it after last
641          * bio dispatch. That means since start of last slice, we never used
642          * that bandwidth. Do try to make use of that bandwidth while giving
643          * credit.
644          */
645         if (time_after_eq(start, tg->slice_start[rw]))
646                 tg->slice_start[rw] = start;
647
648         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
649         throtl_log(&tg->service_queue,
650                    "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
651                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
652                    tg->slice_end[rw], jiffies);
653 }
654
655 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
656 {
657         tg->bytes_disp[rw] = 0;
658         tg->io_disp[rw] = 0;
659         tg->slice_start[rw] = jiffies;
660         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
661
662         atomic_set(&tg->io_split_cnt[rw], 0);
663
664         throtl_log(&tg->service_queue,
665                    "[%c] new slice start=%lu end=%lu jiffies=%lu",
666                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
667                    tg->slice_end[rw], jiffies);
668 }
669
670 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
671                                         unsigned long jiffy_end)
672 {
673         tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
674 }
675
676 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
677                                        unsigned long jiffy_end)
678 {
679         throtl_set_slice_end(tg, rw, jiffy_end);
680         throtl_log(&tg->service_queue,
681                    "[%c] extend slice start=%lu end=%lu jiffies=%lu",
682                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
683                    tg->slice_end[rw], jiffies);
684 }
685
686 /* Determine if previously allocated or extended slice is complete or not */
687 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
688 {
689         if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
690                 return false;
691
692         return true;
693 }
694
695 /* Trim the used slices and adjust slice start accordingly */
696 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
697 {
698         unsigned long nr_slices, time_elapsed, io_trim;
699         u64 bytes_trim, tmp;
700
701         BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
702
703         /*
704          * If bps are unlimited (-1), then the time slice doesn't get
705          * renewed. Don't try to trim the slice if it has already expired; a
706          * new slice will start when appropriate.
707          */
708         if (throtl_slice_used(tg, rw))
709                 return;
710
711         /*
712          * A bio has been dispatched, so also adjust slice_end. It might happen
713          * that the cgroup limit was initially very low, resulting in a high
714          * slice_end, but the limit was later bumped up and the bio was
715          * dispatched sooner; then we need to reduce slice_end. A bogus high
716          * slice_end is bad because it does not allow a new slice to start.
717          */
718
719         throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
720
721         time_elapsed = jiffies - tg->slice_start[rw];
722
723         nr_slices = time_elapsed / tg->td->throtl_slice;
724
725         if (!nr_slices)
726                 return;
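
        /*
         * Illustrative numbers: with a 1 MiB/s bps limit, a throtl_slice of
         * HZ / 10 and nr_slices == 2, bytes_trim below works out to
         * 1048576 * 2 / 10, i.e. about 209715 bytes of already-dispatched
         * budget covered by the two elapsed slices being forgiven.
         */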
727         tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
728         do_div(tmp, HZ);
729         bytes_trim = tmp;
730
731         io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
732                 HZ;
733
734         if (!bytes_trim && !io_trim)
735                 return;
736
737         if (tg->bytes_disp[rw] >= bytes_trim)
738                 tg->bytes_disp[rw] -= bytes_trim;
739         else
740                 tg->bytes_disp[rw] = 0;
741
742         if (tg->io_disp[rw] >= io_trim)
743                 tg->io_disp[rw] -= io_trim;
744         else
745                 tg->io_disp[rw] = 0;
746
747         tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
748
749         throtl_log(&tg->service_queue,
750                    "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
751                    rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
752                    tg->slice_start[rw], tg->slice_end[rw], jiffies);
753 }
754
755 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
756                                   u32 iops_limit, unsigned long *wait)
757 {
758         bool rw = bio_data_dir(bio);
759         unsigned int io_allowed;
760         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
761         u64 tmp;
762
763         if (iops_limit == UINT_MAX) {
764                 if (wait)
765                         *wait = 0;
766                 return true;
767         }
768
769         jiffy_elapsed = jiffies - tg->slice_start[rw];
770
771         /* Round up to the next throttle slice, wait time must be nonzero */
772         jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
773
774         /*
775          * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
776          * 1, in which case the elapsed time is at most about 1 second, since we
777          * will allow a dispatch after 1 second and after that the slice should
778          * have been trimmed.
779          */
780
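        /*
         * Illustrative numbers: with iops_limit == 100 and jiffy_elapsed_rnd
         * rounded up to a single throtl_slice of HZ / 10, io_allowed comes out
         * to 100 * (HZ / 10) / HZ = 10 IOs for this slice.
         */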
781         tmp = (u64)iops_limit * jiffy_elapsed_rnd;
782         do_div(tmp, HZ);
783
784         if (tmp > UINT_MAX)
785                 io_allowed = UINT_MAX;
786         else
787                 io_allowed = tmp;
788
789         if (tg->io_disp[rw] + 1 <= io_allowed) {
790                 if (wait)
791                         *wait = 0;
792                 return true;
793         }
794
795         /* Calc approx time to dispatch */
796         jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
797
798         if (wait)
799                 *wait = jiffy_wait;
800         return false;
801 }
802
803 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
804                                  u64 bps_limit, unsigned long *wait)
805 {
806         bool rw = bio_data_dir(bio);
807         u64 bytes_allowed, extra_bytes, tmp;
808         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
809         unsigned int bio_size = throtl_bio_data_size(bio);
810
811         if (bps_limit == U64_MAX) {
812                 if (wait)
813                         *wait = 0;
814                 return true;
815         }
816
817         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
818
819         /* Slice has just started. Consider one slice interval */
820         if (!jiffy_elapsed)
821                 jiffy_elapsed_rnd = tg->td->throtl_slice;
822
823         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
824
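        /*
         * Illustrative numbers: with bps_limit == 1048576 (1 MiB/s) and
         * jiffy_elapsed_rnd rounded up to a single throtl_slice of HZ / 10,
         * bytes_allowed comes out to about 1048576 / 10 = 104857 bytes.
         */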
825         tmp = bps_limit * jiffy_elapsed_rnd;
826         do_div(tmp, HZ);
827         bytes_allowed = tmp;
828
829         if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
830                 if (wait)
831                         *wait = 0;
832                 return true;
833         }
834
835         /* Calc approx time to dispatch */
836         extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
837         jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
838
839         if (!jiffy_wait)
840                 jiffy_wait = 1;
841
842         /*
843          * This wait time does not take into account the rounding up we did.
844          * Add that time as well.
845          */
846         jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
847         if (wait)
848                 *wait = jiffy_wait;
849         return false;
850 }
851
852 /*
853  * Returns whether one can dispatch a bio or not. Also returns the approximate
854  * number of jiffies to wait before this bio is within the IO rate and can be dispatched
855  */
856 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
857                             unsigned long *wait)
858 {
859         bool rw = bio_data_dir(bio);
860         unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
861         u64 bps_limit = tg_bps_limit(tg, rw);
862         u32 iops_limit = tg_iops_limit(tg, rw);
863
864         /*
865          * Currently the whole state machine of the group depends on the first
866          * bio queued in the group's bio list. So one should not be calling
867          * this function with a different bio if there are other bios
868          * queued.
869          */
870         BUG_ON(tg->service_queue.nr_queued[rw] &&
871                bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
872
873         /* If tg->bps = -1, then BW is unlimited */
874         if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
875                 if (wait)
876                         *wait = 0;
877                 return true;
878         }
879
880         /*
881          * If the previous slice expired, start a new one, otherwise renew/extend
882          * the existing slice to make sure it is at least throtl_slice long from
883          * now. A new slice is started only for an empty throttle group. If there
884          * is a queued bio, there should already be an active slice and it should
885          * be extended instead.
886          */
887         if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
888                 throtl_start_new_slice(tg, rw);
889         else {
890                 if (time_before(tg->slice_end[rw],
891                     jiffies + tg->td->throtl_slice))
892                         throtl_extend_slice(tg, rw,
893                                 jiffies + tg->td->throtl_slice);
894         }
895
896         if (iops_limit != UINT_MAX)
897                 tg->io_disp[rw] += atomic_xchg(&tg->io_split_cnt[rw], 0);
898
899         if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
900             tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
901                 if (wait)
902                         *wait = 0;
903                 return true;
904         }
905
906         max_wait = max(bps_wait, iops_wait);
907
908         if (wait)
909                 *wait = max_wait;
910
911         if (time_before(tg->slice_end[rw], jiffies + max_wait))
912                 throtl_extend_slice(tg, rw, jiffies + max_wait);
913
914         return false;
915 }
916
917 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
918 {
919         bool rw = bio_data_dir(bio);
920         unsigned int bio_size = throtl_bio_data_size(bio);
921
922         /* Charge the bio to the group */
923         tg->bytes_disp[rw] += bio_size;
924         tg->io_disp[rw]++;
925         tg->last_bytes_disp[rw] += bio_size;
926         tg->last_io_disp[rw]++;
927
928         /*
929          * BIO_THROTTLED is used to prevent the same bio from being throttled
930          * more than once, as a throttled bio will go through blk-throtl a
931          * second time when it eventually gets issued.  Set it when a bio
932          * is being charged to a tg.
933          */
934         if (!bio_flagged(bio, BIO_THROTTLED))
935                 bio_set_flag(bio, BIO_THROTTLED);
936 }
937
938 /**
939  * throtl_add_bio_tg - add a bio to the specified throtl_grp
940  * @bio: bio to add
941  * @qn: qnode to use
942  * @tg: the target throtl_grp
943  *
944  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
945  * tg->qnode_on_self[] is used.
946  */
947 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
948                               struct throtl_grp *tg)
949 {
950         struct throtl_service_queue *sq = &tg->service_queue;
951         bool rw = bio_data_dir(bio);
952
953         if (!qn)
954                 qn = &tg->qnode_on_self[rw];
955
956         /*
957          * If @tg doesn't currently have any bios queued in the same
958          * direction, queueing @bio can change when @tg should be
959          * dispatched.  Mark that @tg was empty.  This is automatically
960          * cleared on the next tg_update_disptime().
961          */
962         if (!sq->nr_queued[rw])
963                 tg->flags |= THROTL_TG_WAS_EMPTY;
964
965         throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
966
967         sq->nr_queued[rw]++;
968         throtl_enqueue_tg(tg);
969 }
970
971 static void tg_update_disptime(struct throtl_grp *tg)
972 {
973         struct throtl_service_queue *sq = &tg->service_queue;
974         unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
975         struct bio *bio;
976
977         bio = throtl_peek_queued(&sq->queued[READ]);
978         if (bio)
979                 tg_may_dispatch(tg, bio, &read_wait);
980
981         bio = throtl_peek_queued(&sq->queued[WRITE]);
982         if (bio)
983                 tg_may_dispatch(tg, bio, &write_wait);
984
985         min_wait = min(read_wait, write_wait);
986         disptime = jiffies + min_wait;
987
988         /* Update dispatch time */
989         throtl_dequeue_tg(tg);
990         tg->disptime = disptime;
991         throtl_enqueue_tg(tg);
992
993         /* see throtl_add_bio_tg() */
994         tg->flags &= ~THROTL_TG_WAS_EMPTY;
995 }
996
997 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
998                                         struct throtl_grp *parent_tg, bool rw)
999 {
1000         if (throtl_slice_used(parent_tg, rw)) {
1001                 throtl_start_new_slice_with_credit(parent_tg, rw,
1002                                 child_tg->slice_start[rw]);
1003         }
1004
1005 }
1006
1007 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1008 {
1009         struct throtl_service_queue *sq = &tg->service_queue;
1010         struct throtl_service_queue *parent_sq = sq->parent_sq;
1011         struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1012         struct throtl_grp *tg_to_put = NULL;
1013         struct bio *bio;
1014
1015         /*
1016          * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1017          * from @tg may put its reference and @parent_sq might end up
1018          * getting released prematurely.  Remember the tg to put and put it
1019          * after @bio is transferred to @parent_sq.
1020          */
1021         bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1022         sq->nr_queued[rw]--;
1023
1024         throtl_charge_bio(tg, bio);
1025
1026         /*
1027          * If our parent is another tg, we just need to transfer @bio to
1028          * the parent using throtl_add_bio_tg().  If our parent is
1029          * @td->service_queue, @bio is ready to be issued.  Put it on its
1030          * bio_lists[] and decrease total number queued.  The caller is
1031          * responsible for issuing these bios.
1032          */
1033         if (parent_tg) {
1034                 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1035                 start_parent_slice_with_credit(tg, parent_tg, rw);
1036         } else {
1037                 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1038                                      &parent_sq->queued[rw]);
1039                 BUG_ON(tg->td->nr_queued[rw] <= 0);
1040                 tg->td->nr_queued[rw]--;
1041         }
1042
1043         throtl_trim_slice(tg, rw);
1044
1045         if (tg_to_put)
1046                 blkg_put(tg_to_blkg(tg_to_put));
1047 }
1048
1049 static int throtl_dispatch_tg(struct throtl_grp *tg)
1050 {
1051         struct throtl_service_queue *sq = &tg->service_queue;
1052         unsigned int nr_reads = 0, nr_writes = 0;
1053         unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
1054         unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
1055         struct bio *bio;
1056
1057         /* Try to dispatch 75% READS and 25% WRITES */
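        /* With THROTL_GRP_QUANTUM == 8 that is at most 6 reads and 2 writes per round. */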
1058
1059         while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1060                tg_may_dispatch(tg, bio, NULL)) {
1061
1062                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1063                 nr_reads++;
1064
1065                 if (nr_reads >= max_nr_reads)
1066                         break;
1067         }
1068
1069         while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1070                tg_may_dispatch(tg, bio, NULL)) {
1071
1072                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1073                 nr_writes++;
1074
1075                 if (nr_writes >= max_nr_writes)
1076                         break;
1077         }
1078
1079         return nr_reads + nr_writes;
1080 }
1081
1082 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1083 {
1084         unsigned int nr_disp = 0;
1085
1086         while (1) {
1087                 struct throtl_grp *tg;
1088                 struct throtl_service_queue *sq;
1089
1090                 if (!parent_sq->nr_pending)
1091                         break;
1092
1093                 tg = throtl_rb_first(parent_sq);
1094                 if (!tg)
1095                         break;
1096
1097                 if (time_before(jiffies, tg->disptime))
1098                         break;
1099
1100                 throtl_dequeue_tg(tg);
1101
1102                 nr_disp += throtl_dispatch_tg(tg);
1103
1104                 sq = &tg->service_queue;
1105                 if (sq->nr_queued[0] || sq->nr_queued[1])
1106                         tg_update_disptime(tg);
1107
1108                 if (nr_disp >= THROTL_QUANTUM)
1109                         break;
1110         }
1111
1112         return nr_disp;
1113 }
1114
1115 static bool throtl_can_upgrade(struct throtl_data *td,
1116         struct throtl_grp *this_tg);
1117 /**
1118  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1119  * @t: the pending_timer member of the throtl_service_queue being serviced
1120  *
1121  * This timer is armed when a child throtl_grp with active bios becomes
1122  * pending and queued on the service_queue's pending_tree and expires when
1123  * the first child throtl_grp should be dispatched.  This function
1124  * dispatches bios from the children throtl_grps to the parent
1125  * service_queue.
1126  *
1127  * If the parent's parent is another throtl_grp, dispatching is propagated
1128  * by either arming its pending_timer or repeating dispatch directly.  If
1129  * the top-level service_tree is reached, throtl_data->dispatch_work is
1130  * kicked so that the ready bios are issued.
1131  */
1132 static void throtl_pending_timer_fn(struct timer_list *t)
1133 {
1134         struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1135         struct throtl_grp *tg = sq_to_tg(sq);
1136         struct throtl_data *td = sq_to_td(sq);
1137         struct request_queue *q = td->queue;
1138         struct throtl_service_queue *parent_sq;
1139         bool dispatched;
1140         int ret;
1141
1142         spin_lock_irq(&q->queue_lock);
1143         if (throtl_can_upgrade(td, NULL))
1144                 throtl_upgrade_state(td);
1145
1146 again:
1147         parent_sq = sq->parent_sq;
1148         dispatched = false;
1149
1150         while (true) {
1151                 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1152                            sq->nr_queued[READ] + sq->nr_queued[WRITE],
1153                            sq->nr_queued[READ], sq->nr_queued[WRITE]);
1154
1155                 ret = throtl_select_dispatch(sq);
1156                 if (ret) {
1157                         throtl_log(sq, "bios disp=%u", ret);
1158                         dispatched = true;
1159                 }
1160
1161                 if (throtl_schedule_next_dispatch(sq, false))
1162                         break;
1163
1164                 /* this dispatch window is still open, relax and repeat */
1165                 spin_unlock_irq(&q->queue_lock);
1166                 cpu_relax();
1167                 spin_lock_irq(&q->queue_lock);
1168         }
1169
1170         if (!dispatched)
1171                 goto out_unlock;
1172
1173         if (parent_sq) {
1174                 /* @parent_sq is another throtl_grp, propagate dispatch */
1175                 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1176                         tg_update_disptime(tg);
1177                         if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1178                                 /* window is already open, repeat dispatching */
1179                                 sq = parent_sq;
1180                                 tg = sq_to_tg(sq);
1181                                 goto again;
1182                         }
1183                 }
1184         } else {
1185                 /* reached the top-level, queue issuing */
1186                 queue_work(kthrotld_workqueue, &td->dispatch_work);
1187         }
1188 out_unlock:
1189         spin_unlock_irq(&q->queue_lock);
1190 }
1191
1192 /**
1193  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1194  * @work: work item being executed
1195  *
1196  * This function is queued for execution when bios reach the bio_lists[]
1197  * of throtl_data->service_queue.  Those bios are ready and issued by this
1198  * function.
1199  */
1200 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1201 {
1202         struct throtl_data *td = container_of(work, struct throtl_data,
1203                                               dispatch_work);
1204         struct throtl_service_queue *td_sq = &td->service_queue;
1205         struct request_queue *q = td->queue;
1206         struct bio_list bio_list_on_stack;
1207         struct bio *bio;
1208         struct blk_plug plug;
1209         int rw;
1210
1211         bio_list_init(&bio_list_on_stack);
1212
1213         spin_lock_irq(&q->queue_lock);
1214         for (rw = READ; rw <= WRITE; rw++)
1215                 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1216                         bio_list_add(&bio_list_on_stack, bio);
1217         spin_unlock_irq(&q->queue_lock);
1218
1219         if (!bio_list_empty(&bio_list_on_stack)) {
1220                 blk_start_plug(&plug);
1221                 while ((bio = bio_list_pop(&bio_list_on_stack)))
1222                         submit_bio_noacct(bio);
1223                 blk_finish_plug(&plug);
1224         }
1225 }
1226
1227 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1228                               int off)
1229 {
1230         struct throtl_grp *tg = pd_to_tg(pd);
1231         u64 v = *(u64 *)((void *)tg + off);
1232
1233         if (v == U64_MAX)
1234                 return 0;
1235         return __blkg_prfill_u64(sf, pd, v);
1236 }
1237
1238 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1239                                int off)
1240 {
1241         struct throtl_grp *tg = pd_to_tg(pd);
1242         unsigned int v = *(unsigned int *)((void *)tg + off);
1243
1244         if (v == UINT_MAX)
1245                 return 0;
1246         return __blkg_prfill_u64(sf, pd, v);
1247 }
1248
1249 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1250 {
1251         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1252                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1253         return 0;
1254 }
1255
1256 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1257 {
1258         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1259                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1260         return 0;
1261 }
1262
1263 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1264 {
1265         struct throtl_service_queue *sq = &tg->service_queue;
1266         struct cgroup_subsys_state *pos_css;
1267         struct blkcg_gq *blkg;
1268
1269         throtl_log(&tg->service_queue,
1270                    "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1271                    tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1272                    tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1273
1274         /*
1275          * Update has_rules[] flags for the updated tg's subtree.  A tg is
1276          * considered to have rules if either the tg itself or any of its
1277          * ancestors has rules.  This identifies groups without any
1278          * restrictions in the whole hierarchy and allows them to bypass
1279          * blk-throttle.
1280          */
1281         blkg_for_each_descendant_pre(blkg, pos_css,
1282                         global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1283                 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1284                 struct throtl_grp *parent_tg;
1285
1286                 tg_update_has_rules(this_tg);
1287                 /* ignore root/second level */
1288                 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1289                     !blkg->parent->parent)
1290                         continue;
1291                 parent_tg = blkg_to_tg(blkg->parent);
1292                 /*
1293                  * make sure no child has a higher idle time threshold or a
1294                  * lower latency target than its parent
1295                  */
1296                 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1297                                 parent_tg->idletime_threshold);
1298                 this_tg->latency_target = max(this_tg->latency_target,
1299                                 parent_tg->latency_target);
1300         }
1301
1302         /*
1303          * We're already holding queue_lock and know @tg is valid.  Let's
1304          * apply the new config directly.
1305          *
1306          * Restart the slices for both READ and WRITE. It might happen
1307          * that a group's limits are dropped suddenly and we don't want to
1308          * account recently dispatched IO against the new, lower rate.
1309          */
1310         throtl_start_new_slice(tg, READ);
1311         throtl_start_new_slice(tg, WRITE);
1312
1313         if (tg->flags & THROTL_TG_PENDING) {
1314                 tg_update_disptime(tg);
1315                 throtl_schedule_next_dispatch(sq->parent_sq, true);
1316         }
1317 }
1318
1319 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1320                            char *buf, size_t nbytes, loff_t off, bool is_u64)
1321 {
1322         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1323         struct blkg_conf_ctx ctx;
1324         struct throtl_grp *tg;
1325         int ret;
1326         u64 v;
1327
1328         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1329         if (ret)
1330                 return ret;
1331
1332         ret = -EINVAL;
1333         if (sscanf(ctx.body, "%llu", &v) != 1)
1334                 goto out_finish;
1335         if (!v)
1336                 v = U64_MAX;
1337
1338         tg = blkg_to_tg(ctx.blkg);
1339
1340         if (is_u64)
1341                 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1342         else
1343                 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1344
1345         tg_conf_updated(tg, false);
1346         ret = 0;
1347 out_finish:
1348         blkg_conf_finish(&ctx);
1349         return ret ?: nbytes;
1350 }
1351
1352 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1353                                char *buf, size_t nbytes, loff_t off)
1354 {
1355         return tg_set_conf(of, buf, nbytes, off, true);
1356 }
1357
1358 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1359                                 char *buf, size_t nbytes, loff_t off)
1360 {
1361         return tg_set_conf(of, buf, nbytes, off, false);
1362 }
1363
1364 static int tg_print_rwstat(struct seq_file *sf, void *v)
1365 {
1366         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1367                           blkg_prfill_rwstat, &blkcg_policy_throtl,
1368                           seq_cft(sf)->private, true);
1369         return 0;
1370 }
1371
1372 static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1373                                       struct blkg_policy_data *pd, int off)
1374 {
1375         struct blkg_rwstat_sample sum;
1376
1377         blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1378                                   &sum);
1379         return __blkg_prfill_rwstat(sf, pd, &sum);
1380 }
1381
1382 static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1383 {
1384         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1385                           tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1386                           seq_cft(sf)->private, true);
1387         return 0;
1388 }
1389
1390 static struct cftype throtl_legacy_files[] = {
1391         {
1392                 .name = "throttle.read_bps_device",
1393                 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1394                 .seq_show = tg_print_conf_u64,
1395                 .write = tg_set_conf_u64,
1396         },
1397         {
1398                 .name = "throttle.write_bps_device",
1399                 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1400                 .seq_show = tg_print_conf_u64,
1401                 .write = tg_set_conf_u64,
1402         },
1403         {
1404                 .name = "throttle.read_iops_device",
1405                 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1406                 .seq_show = tg_print_conf_uint,
1407                 .write = tg_set_conf_uint,
1408         },
1409         {
1410                 .name = "throttle.write_iops_device",
1411                 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1412                 .seq_show = tg_print_conf_uint,
1413                 .write = tg_set_conf_uint,
1414         },
1415         {
1416                 .name = "throttle.io_service_bytes",
1417                 .private = offsetof(struct throtl_grp, stat_bytes),
1418                 .seq_show = tg_print_rwstat,
1419         },
1420         {
1421                 .name = "throttle.io_service_bytes_recursive",
1422                 .private = offsetof(struct throtl_grp, stat_bytes),
1423                 .seq_show = tg_print_rwstat_recursive,
1424         },
1425         {
1426                 .name = "throttle.io_serviced",
1427                 .private = offsetof(struct throtl_grp, stat_ios),
1428                 .seq_show = tg_print_rwstat,
1429         },
1430         {
1431                 .name = "throttle.io_serviced_recursive",
1432                 .private = offsetof(struct throtl_grp, stat_ios),
1433                 .seq_show = tg_print_rwstat_recursive,
1434         },
1435         { }     /* terminate */
1436 };
1437
1438 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1439                          int off)
1440 {
1441         struct throtl_grp *tg = pd_to_tg(pd);
1442         const char *dname = blkg_dev_name(pd->blkg);
1443         char bufs[4][21] = { "max", "max", "max", "max" };
1444         u64 bps_dft;
1445         unsigned int iops_dft;
1446         char idle_time[26] = "";
1447         char latency_time[26] = "";
1448
1449         if (!dname)
1450                 return 0;
1451
1452         if (off == LIMIT_LOW) {
1453                 bps_dft = 0;
1454                 iops_dft = 0;
1455         } else {
1456                 bps_dft = U64_MAX;
1457                 iops_dft = UINT_MAX;
1458         }
1459
1460         if (tg->bps_conf[READ][off] == bps_dft &&
1461             tg->bps_conf[WRITE][off] == bps_dft &&
1462             tg->iops_conf[READ][off] == iops_dft &&
1463             tg->iops_conf[WRITE][off] == iops_dft &&
1464             (off != LIMIT_LOW ||
1465              (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1466               tg->latency_target_conf == DFL_LATENCY_TARGET)))
1467                 return 0;
1468
1469         if (tg->bps_conf[READ][off] != U64_MAX)
1470                 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1471                         tg->bps_conf[READ][off]);
1472         if (tg->bps_conf[WRITE][off] != U64_MAX)
1473                 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1474                         tg->bps_conf[WRITE][off]);
1475         if (tg->iops_conf[READ][off] != UINT_MAX)
1476                 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1477                         tg->iops_conf[READ][off]);
1478         if (tg->iops_conf[WRITE][off] != UINT_MAX)
1479                 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1480                         tg->iops_conf[WRITE][off]);
1481         if (off == LIMIT_LOW) {
1482                 if (tg->idletime_threshold_conf == ULONG_MAX)
1483                         strcpy(idle_time, " idle=max");
1484                 else
1485                         snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1486                                 tg->idletime_threshold_conf);
1487
1488                 if (tg->latency_target_conf == ULONG_MAX)
1489                         strcpy(latency_time, " latency=max");
1490                 else
1491                         snprintf(latency_time, sizeof(latency_time),
1492                                 " latency=%lu", tg->latency_target_conf);
1493         }
1494
1495         seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1496                    dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1497                    latency_time);
1498         return 0;
1499 }
1500
1501 static int tg_print_limit(struct seq_file *sf, void *v)
1502 {
1503         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1504                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1505         return 0;
1506 }
1507
1508 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1509                           char *buf, size_t nbytes, loff_t off)
1510 {
1511         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1512         struct blkg_conf_ctx ctx;
1513         struct throtl_grp *tg;
1514         u64 v[4];
1515         unsigned long idle_time;
1516         unsigned long latency_time;
1517         int ret;
1518         int index = of_cft(of)->private;
1519
1520         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1521         if (ret)
1522                 return ret;
1523
1524         tg = blkg_to_tg(ctx.blkg);
1525
1526         v[0] = tg->bps_conf[READ][index];
1527         v[1] = tg->bps_conf[WRITE][index];
1528         v[2] = tg->iops_conf[READ][index];
1529         v[3] = tg->iops_conf[WRITE][index];
1530
1531         idle_time = tg->idletime_threshold_conf;
1532         latency_time = tg->latency_target_conf;
1533         while (true) {
1534                 char tok[27];   /* wiops=18446744073709551616 */
1535                 char *p;
1536                 u64 val = U64_MAX;
1537                 int len;
1538
1539                 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1540                         break;
1541                 if (tok[0] == '\0')
1542                         break;
1543                 ctx.body += len;
1544
1545                 ret = -EINVAL;
1546                 p = tok;
1547                 strsep(&p, "=");
1548                 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1549                         goto out_finish;
1550
1551                 ret = -ERANGE;
1552                 if (!val)
1553                         goto out_finish;
1554
1555                 ret = -EINVAL;
1556                 if (!strcmp(tok, "rbps") && val > 1)
1557                         v[0] = val;
1558                 else if (!strcmp(tok, "wbps") && val > 1)
1559                         v[1] = val;
1560                 else if (!strcmp(tok, "riops") && val > 1)
1561                         v[2] = min_t(u64, val, UINT_MAX);
1562                 else if (!strcmp(tok, "wiops") && val > 1)
1563                         v[3] = min_t(u64, val, UINT_MAX);
1564                 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1565                         idle_time = val;
1566                 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1567                         latency_time = val;
1568                 else
1569                         goto out_finish;
1570         }
1571
1572         tg->bps_conf[READ][index] = v[0];
1573         tg->bps_conf[WRITE][index] = v[1];
1574         tg->iops_conf[READ][index] = v[2];
1575         tg->iops_conf[WRITE][index] = v[3];
1576
1577         if (index == LIMIT_MAX) {
1578                 tg->bps[READ][index] = v[0];
1579                 tg->bps[WRITE][index] = v[1];
1580                 tg->iops[READ][index] = v[2];
1581                 tg->iops[WRITE][index] = v[3];
1582         }
1583         tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1584                 tg->bps_conf[READ][LIMIT_MAX]);
1585         tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1586                 tg->bps_conf[WRITE][LIMIT_MAX]);
1587         tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1588                 tg->iops_conf[READ][LIMIT_MAX]);
1589         tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1590                 tg->iops_conf[WRITE][LIMIT_MAX]);
1591         tg->idletime_threshold_conf = idle_time;
1592         tg->latency_target_conf = latency_time;
1593
1594         /* force the user to configure all settings for the low limit */
1595         if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1596               tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1597             tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1598             tg->latency_target_conf == DFL_LATENCY_TARGET) {
1599                 tg->bps[READ][LIMIT_LOW] = 0;
1600                 tg->bps[WRITE][LIMIT_LOW] = 0;
1601                 tg->iops[READ][LIMIT_LOW] = 0;
1602                 tg->iops[WRITE][LIMIT_LOW] = 0;
1603                 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1604                 tg->latency_target = DFL_LATENCY_TARGET;
1605         } else if (index == LIMIT_LOW) {
1606                 tg->idletime_threshold = tg->idletime_threshold_conf;
1607                 tg->latency_target = tg->latency_target_conf;
1608         }
1609
1610         blk_throtl_update_limit_valid(tg->td);
1611         if (tg->td->limit_valid[LIMIT_LOW]) {
1612                 if (index == LIMIT_LOW)
1613                         tg->td->limit_index = LIMIT_LOW;
1614         } else
1615                 tg->td->limit_index = LIMIT_MAX;
1616         tg_conf_updated(tg, index == LIMIT_LOW &&
1617                 tg->td->limit_valid[LIMIT_LOW]);
1618         ret = 0;
1619 out_finish:
1620         blkg_conf_finish(&ctx);
1621         return ret ?: nbytes;
1622 }
1623
1624 static struct cftype throtl_files[] = {
1625 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1626         {
1627                 .name = "low",
1628                 .flags = CFTYPE_NOT_ON_ROOT,
1629                 .seq_show = tg_print_limit,
1630                 .write = tg_set_limit,
1631                 .private = LIMIT_LOW,
1632         },
1633 #endif
1634         {
1635                 .name = "max",
1636                 .flags = CFTYPE_NOT_ON_ROOT,
1637                 .seq_show = tg_print_limit,
1638                 .write = tg_set_limit,
1639                 .private = LIMIT_MAX,
1640         },
1641         { }     /* terminate */
1642 };
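/*
 * Illustrative usage sketch (device numbers and cgroup paths below are
 * hypothetical): the "low" and "max" files above accept the space-separated
 * key=value tokens parsed by tg_set_limit().  On cgroup v2 they appear as
 * io.low (only with CONFIG_BLK_DEV_THROTTLING_LOW) and io.max:
 *
 *   echo "8:16 rbps=10485760 wiops=120" > /sys/fs/cgroup/grp1/io.max
 *   echo "8:16 rbps=1048576 idle=200 latency=50" > /sys/fs/cgroup/grp1/io.low
 *   echo "8:16 rbps=max" > /sys/fs/cgroup/grp1/io.max    # back to unlimited
 */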
1643
1644 static void throtl_shutdown_wq(struct request_queue *q)
1645 {
1646         struct throtl_data *td = q->td;
1647
1648         cancel_work_sync(&td->dispatch_work);
1649 }
1650
1651 struct blkcg_policy blkcg_policy_throtl = {
1652         .dfl_cftypes            = throtl_files,
1653         .legacy_cftypes         = throtl_legacy_files,
1654
1655         .pd_alloc_fn            = throtl_pd_alloc,
1656         .pd_init_fn             = throtl_pd_init,
1657         .pd_online_fn           = throtl_pd_online,
1658         .pd_offline_fn          = throtl_pd_offline,
1659         .pd_free_fn             = throtl_pd_free,
1660 };
1661
1662 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1663 {
1664         unsigned long rtime = jiffies, wtime = jiffies;
1665
1666         if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1667                 rtime = tg->last_low_overflow_time[READ];
1668         if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1669                 wtime = tg->last_low_overflow_time[WRITE];
1670         return min(rtime, wtime);
1671 }
1672
1673 /* tg should not be an intermediate node */
1674 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1675 {
1676         struct throtl_service_queue *parent_sq;
1677         struct throtl_grp *parent = tg;
1678         unsigned long ret = __tg_last_low_overflow_time(tg);
1679
1680         while (true) {
1681                 parent_sq = parent->service_queue.parent_sq;
1682                 parent = sq_to_tg(parent_sq);
1683                 if (!parent)
1684                         break;
1685
1686                 /*
1687                  * A parent without a low limit is always considered to have
1688                  * reached it, so its overflow time is useless for its children
1689                  */
1690                 if (!parent->bps[READ][LIMIT_LOW] &&
1691                     !parent->iops[READ][LIMIT_LOW] &&
1692                     !parent->bps[WRITE][LIMIT_LOW] &&
1693                     !parent->iops[WRITE][LIMIT_LOW])
1694                         continue;
1695                 if (time_after(__tg_last_low_overflow_time(parent), ret))
1696                         ret = __tg_last_low_overflow_time(parent);
1697         }
1698         return ret;
1699 }
1700
1701 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1702 {
1703         /*
1704          * cgroup is idle if:
1705          * - its current idle period is too long: longer than a fixed cap (in case
1706          *   the user configured too big a threshold) or 4 times the idletime threshold
1707          * - its average think time exceeds the idletime threshold
1708          * - its IO latency is largely below the latency target
1709          */
1710         unsigned long time;
1711         bool ret;
1712
1713         time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1714         ret = tg->latency_target == DFL_LATENCY_TARGET ||
1715               tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1716               (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1717               tg->avg_idletime > tg->idletime_threshold ||
1718               (tg->latency_target && tg->bio_cnt &&
1719                 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1720         throtl_log(&tg->service_queue,
1721                 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1722                 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1723                 tg->bio_cnt, ret, tg->td->scale);
1724         return ret;
1725 }
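/*
 * Worked example for throtl_tg_is_idle() (made-up numbers): with
 * idletime_threshold = 1000 and avg_idletime = 1500 (both in the ~usec units
 * of ktime_get_ns() >> 10), the group counts as idle because its average
 * think time exceeds the threshold.  Independently, a group with a
 * latency_target counts as idle while fewer than 20% of its tracked bios are
 * "bad", i.e. bad_bio_cnt * 5 < bio_cnt.
 */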
1726
1727 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1728 {
1729         struct throtl_service_queue *sq = &tg->service_queue;
1730         bool read_limit, write_limit;
1731
1732         /*
1733          * if the cgroup reaches its low limit (a low limit of 0 is always
1734          * considered reached), it's ok to upgrade to the next limit
1735          */
1736         read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1737         write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1738         if (!read_limit && !write_limit)
1739                 return true;
1740         if (read_limit && sq->nr_queued[READ] &&
1741             (!write_limit || sq->nr_queued[WRITE]))
1742                 return true;
1743         if (write_limit && sq->nr_queued[WRITE] &&
1744             (!read_limit || sq->nr_queued[READ]))
1745                 return true;
1746
1747         if (time_after_eq(jiffies,
1748                 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1749             throtl_tg_is_idle(tg))
1750                 return true;
1751         return false;
1752 }
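/*
 * Example of the queueing test above (hypothetical state): a group with only
 * a read low limit may upgrade as soon as reads are queued, regardless of its
 * write queue; a group with low limits in both directions must have bios
 * queued for both reads and writes.
 */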
1753
1754 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1755 {
1756         while (true) {
1757                 if (throtl_tg_can_upgrade(tg))
1758                         return true;
1759                 tg = sq_to_tg(tg->service_queue.parent_sq);
1760                 if (!tg || !tg_to_blkg(tg)->parent)
1761                         return false;
1762         }
1763         return false;
1764 }
1765
1766 static bool throtl_can_upgrade(struct throtl_data *td,
1767         struct throtl_grp *this_tg)
1768 {
1769         struct cgroup_subsys_state *pos_css;
1770         struct blkcg_gq *blkg;
1771
1772         if (td->limit_index != LIMIT_LOW)
1773                 return false;
1774
1775         if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1776                 return false;
1777
1778         rcu_read_lock();
1779         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1780                 struct throtl_grp *tg = blkg_to_tg(blkg);
1781
1782                 if (tg == this_tg)
1783                         continue;
1784                 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1785                         continue;
1786                 if (!throtl_hierarchy_can_upgrade(tg)) {
1787                         rcu_read_unlock();
1788                         return false;
1789                 }
1790         }
1791         rcu_read_unlock();
1792         return true;
1793 }
1794
1795 static void throtl_upgrade_check(struct throtl_grp *tg)
1796 {
1797         unsigned long now = jiffies;
1798
1799         if (tg->td->limit_index != LIMIT_LOW)
1800                 return;
1801
1802         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1803                 return;
1804
1805         tg->last_check_time = now;
1806
1807         if (!time_after_eq(now,
1808              __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1809                 return;
1810
1811         if (throtl_can_upgrade(tg->td, NULL))
1812                 throtl_upgrade_state(tg->td);
1813 }
1814
1815 static void throtl_upgrade_state(struct throtl_data *td)
1816 {
1817         struct cgroup_subsys_state *pos_css;
1818         struct blkcg_gq *blkg;
1819
1820         throtl_log(&td->service_queue, "upgrade to max");
1821         td->limit_index = LIMIT_MAX;
1822         td->low_upgrade_time = jiffies;
1823         td->scale = 0;
1824         rcu_read_lock();
1825         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1826                 struct throtl_grp *tg = blkg_to_tg(blkg);
1827                 struct throtl_service_queue *sq = &tg->service_queue;
1828
1829                 tg->disptime = jiffies - 1;
1830                 throtl_select_dispatch(sq);
1831                 throtl_schedule_next_dispatch(sq, true);
1832         }
1833         rcu_read_unlock();
1834         throtl_select_dispatch(&td->service_queue);
1835         throtl_schedule_next_dispatch(&td->service_queue, true);
1836         queue_work(kthrotld_workqueue, &td->dispatch_work);
1837 }
1838
1839 static void throtl_downgrade_state(struct throtl_data *td)
1840 {
1841         td->scale /= 2;
1842
1843         throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1844         if (td->scale) {
1845                 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1846                 return;
1847         }
1848
1849         td->limit_index = LIMIT_LOW;
1850         td->low_downgrade_time = jiffies;
1851 }
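/*
 * Numeric sketch for throtl_downgrade_state() (made-up values): with
 * td->scale == 4 and the 20ms SSD default slice, one call halves scale to 2
 * and pulls low_upgrade_time back by 2 slices (40ms) while staying at
 * LIMIT_MAX; only when repeated downgrades shrink scale to 0 does
 * limit_index actually drop to LIMIT_LOW.
 */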
1852
1853 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1854 {
1855         struct throtl_data *td = tg->td;
1856         unsigned long now = jiffies;
1857
1858         /*
1859          * If the cgroup is below its low limit, consider a downgrade and
1860          * throttle other cgroups
1861          */
1862         if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1863             time_after_eq(now, tg_last_low_overflow_time(tg) +
1864                                         td->throtl_slice) &&
1865             (!throtl_tg_is_idle(tg) ||
1866              !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1867                 return true;
1868         return false;
1869 }
1870
1871 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1872 {
1873         while (true) {
1874                 if (!throtl_tg_can_downgrade(tg))
1875                         return false;
1876                 tg = sq_to_tg(tg->service_queue.parent_sq);
1877                 if (!tg || !tg_to_blkg(tg)->parent)
1878                         break;
1879         }
1880         return true;
1881 }
1882
1883 static void throtl_downgrade_check(struct throtl_grp *tg)
1884 {
1885         uint64_t bps;
1886         unsigned int iops;
1887         unsigned long elapsed_time;
1888         unsigned long now = jiffies;
1889
1890         if (tg->td->limit_index != LIMIT_MAX ||
1891             !tg->td->limit_valid[LIMIT_LOW])
1892                 return;
1893         if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1894                 return;
1895         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1896                 return;
1897
1898         elapsed_time = now - tg->last_check_time;
1899         tg->last_check_time = now;
1900
1901         if (time_before(now, tg_last_low_overflow_time(tg) +
1902                         tg->td->throtl_slice))
1903                 return;
1904
1905         if (tg->bps[READ][LIMIT_LOW]) {
1906                 bps = tg->last_bytes_disp[READ] * HZ;
1907                 do_div(bps, elapsed_time);
1908                 if (bps >= tg->bps[READ][LIMIT_LOW])
1909                         tg->last_low_overflow_time[READ] = now;
1910         }
1911
1912         if (tg->bps[WRITE][LIMIT_LOW]) {
1913                 bps = tg->last_bytes_disp[WRITE] * HZ;
1914                 do_div(bps, elapsed_time);
1915                 if (bps >= tg->bps[WRITE][LIMIT_LOW])
1916                         tg->last_low_overflow_time[WRITE] = now;
1917         }
1918
1919         if (tg->iops[READ][LIMIT_LOW]) {
1920                 tg->last_io_disp[READ] += atomic_xchg(&tg->last_io_split_cnt[READ], 0);
1921                 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
1922                 if (iops >= tg->iops[READ][LIMIT_LOW])
1923                         tg->last_low_overflow_time[READ] = now;
1924         }
1925
1926         if (tg->iops[WRITE][LIMIT_LOW]) {
1927                 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0);
1928                 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
1929                 if (iops >= tg->iops[WRITE][LIMIT_LOW])
1930                         tg->last_low_overflow_time[WRITE] = now;
1931         }
1932
1933         /*
1934          * If the cgroup is below its low limit, consider a downgrade and
1935          * throttle other cgroups
1936          */
1937         if (throtl_hierarchy_can_downgrade(tg))
1938                 throtl_downgrade_state(tg->td);
1939
1940         tg->last_bytes_disp[READ] = 0;
1941         tg->last_bytes_disp[WRITE] = 0;
1942         tg->last_io_disp[READ] = 0;
1943         tg->last_io_disp[WRITE] = 0;
1944 }
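/*
 * Worked example for the rate estimate above (illustrative values): with
 * HZ = 250, last_bytes_disp[READ] = 1048576 bytes over elapsed_time =
 * 25 jiffies (100ms), bps = 1048576 * 250 / 25 = 10485760 B/s (10 MiB/s).
 * If the read low limit is at or below that, last_low_overflow_time[READ]
 * is refreshed and the downgrade is deferred.
 */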
1945
1946 static void blk_throtl_update_idletime(struct throtl_grp *tg)
1947 {
1948         unsigned long now;
1949         unsigned long last_finish_time = tg->last_finish_time;
1950
1951         if (last_finish_time == 0)
1952                 return;
1953
1954         now = ktime_get_ns() >> 10;
1955         if (now <= last_finish_time ||
1956             last_finish_time == tg->checked_last_finish_time)
1957                 return;
1958
1959         tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
1960         tg->checked_last_finish_time = last_finish_time;
1961 }
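/*
 * The update above is a 7/8-weighted moving average,
 *
 *   avg_idletime = (avg_idletime * 7 + idle_gap) >> 3;
 *
 * e.g. an average of 800 and a new gap of 1600 (ns >> 10 units) give
 * (800 * 7 + 1600) >> 3 = 900, so a single long gap only nudges the average.
 */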
1962
1963 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1964 static void throtl_update_latency_buckets(struct throtl_data *td)
1965 {
1966         struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
1967         int i, cpu, rw;
1968         unsigned long last_latency[2] = { 0 };
1969         unsigned long latency[2];
1970
1971         if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
1972                 return;
1973         if (time_before(jiffies, td->last_calculate_time + HZ))
1974                 return;
1975         td->last_calculate_time = jiffies;
1976
1977         memset(avg_latency, 0, sizeof(avg_latency));
1978         for (rw = READ; rw <= WRITE; rw++) {
1979                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
1980                         struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
1981
1982                         for_each_possible_cpu(cpu) {
1983                                 struct latency_bucket *bucket;
1984
1985                                 /* this isn't race free, but ok in practice */
1986                                 bucket = per_cpu_ptr(td->latency_buckets[rw],
1987                                         cpu);
1988                                 tmp->total_latency += bucket[i].total_latency;
1989                                 tmp->samples += bucket[i].samples;
1990                                 bucket[i].total_latency = 0;
1991                                 bucket[i].samples = 0;
1992                         }
1993
1994                         if (tmp->samples >= 32) {
1995                                 int samples = tmp->samples;
1996
1997                                 latency[rw] = tmp->total_latency;
1998
1999                                 tmp->total_latency = 0;
2000                                 tmp->samples = 0;
2001                                 latency[rw] /= samples;
2002                                 if (latency[rw] == 0)
2003                                         continue;
2004                                 avg_latency[rw][i].latency = latency[rw];
2005                         }
2006                 }
2007         }
2008
2009         for (rw = READ; rw <= WRITE; rw++) {
2010                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2011                         if (!avg_latency[rw][i].latency) {
2012                                 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2013                                         td->avg_buckets[rw][i].latency =
2014                                                 last_latency[rw];
2015                                 continue;
2016                         }
2017
2018                         if (!td->avg_buckets[rw][i].valid)
2019                                 latency[rw] = avg_latency[rw][i].latency;
2020                         else
2021                                 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2022                                         avg_latency[rw][i].latency) >> 3;
2023
2024                         td->avg_buckets[rw][i].latency = max(latency[rw],
2025                                 last_latency[rw]);
2026                         td->avg_buckets[rw][i].valid = true;
2027                         last_latency[rw] = td->avg_buckets[rw][i].latency;
2028                 }
2029         }
2030
2031         for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2032                 throtl_log(&td->service_queue,
2033                         "Latency bucket %d: read latency=%ld, read valid=%d, "
2034                         "write latency=%ld, write valid=%d", i,
2035                         td->avg_buckets[READ][i].latency,
2036                         td->avg_buckets[READ][i].valid,
2037                         td->avg_buckets[WRITE][i].latency,
2038                         td->avg_buckets[WRITE][i].valid);
2039 }
2040 #else
2041 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2042 {
2043 }
2044 #endif
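/*
 * Sketch of the bucket averaging (illustrative numbers): once a bucket
 * collects at least 32 samples, its per-interval mean feeds an EWMA with 7/8
 * weight on the old value, e.g. a stored average of 2000 and a new mean of
 * 4000 (ns / 1024) become (2000 * 7 + 4000) >> 3 = 2250.  The max() against
 * last_latency[rw] then keeps the averages non-decreasing from small to
 * large request-size buckets.
 */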
2045
2046 void blk_throtl_charge_bio_split(struct bio *bio)
2047 {
2048         struct blkcg_gq *blkg = bio->bi_blkg;
2049         struct throtl_grp *parent = blkg_to_tg(blkg);
2050         struct throtl_service_queue *parent_sq;
2051         bool rw = bio_data_dir(bio);
2052
2053         do {
2054                 if (!parent->has_rules[rw])
2055                         break;
2056
2057                 atomic_inc(&parent->io_split_cnt[rw]);
2058                 atomic_inc(&parent->last_io_split_cnt[rw]);
2059
2060                 parent_sq = parent->service_queue.parent_sq;
2061                 parent = sq_to_tg(parent_sq);
2062         } while (parent);
2063 }
2064
2065 bool __blk_throtl_bio(struct bio *bio)
2066 {
2067         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2068         struct blkcg_gq *blkg = bio->bi_blkg;
2069         struct throtl_qnode *qn = NULL;
2070         struct throtl_grp *tg = blkg_to_tg(blkg);
2071         struct throtl_service_queue *sq;
2072         bool rw = bio_data_dir(bio);
2073         bool throttled = false;
2074         struct throtl_data *td = tg->td;
2075
2076         rcu_read_lock();
2077
2078         if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
2079                 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2080                                 bio->bi_iter.bi_size);
2081                 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2082         }
2083
2084         spin_lock_irq(&q->queue_lock);
2085
2086         throtl_update_latency_buckets(td);
2087
2088         blk_throtl_update_idletime(tg);
2089
2090         sq = &tg->service_queue;
2091
2092 again:
2093         while (true) {
2094                 if (tg->last_low_overflow_time[rw] == 0)
2095                         tg->last_low_overflow_time[rw] = jiffies;
2096                 throtl_downgrade_check(tg);
2097                 throtl_upgrade_check(tg);
2098                 /* throtl is FIFO - if bios are already queued, so must this one */
2099                 if (sq->nr_queued[rw])
2100                         break;
2101
2102                 /* if above limits, break to queue */
2103                 if (!tg_may_dispatch(tg, bio, NULL)) {
2104                         tg->last_low_overflow_time[rw] = jiffies;
2105                         if (throtl_can_upgrade(td, tg)) {
2106                                 throtl_upgrade_state(td);
2107                                 goto again;
2108                         }
2109                         break;
2110                 }
2111
2112                 /* within limits, let's charge and dispatch directly */
2113                 throtl_charge_bio(tg, bio);
2114
2115                 /*
2116                  * We need to trim the slice even when bios are not being
2117                  * queued, otherwise a bio might go unqueued for a long time
2118                  * while the slice keeps extending and trim is never called
2119                  * for a long time.  If the limits are then reduced suddenly,
2120                  * all the IO dispatched so far is accounted at the new low
2121                  * rate and newly queued IO gets a really long dispatch
2122                  * time.
2123                  *
2124                  * So keep on trimming the slice even if no bio is queued.
2125                  */
2126                 throtl_trim_slice(tg, rw);
2127
2128                 /*
2129                  * @bio passed through this layer without being throttled.
2130                  * Climb up the ladder.  If we're already at the top, it
2131                  * can be executed directly.
2132                  */
2133                 qn = &tg->qnode_on_parent[rw];
2134                 sq = sq->parent_sq;
2135                 tg = sq_to_tg(sq);
2136                 if (!tg)
2137                         goto out_unlock;
2138         }
2139
2140         /* out-of-limit, queue to @tg */
2141         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2142                    rw == READ ? 'R' : 'W',
2143                    tg->bytes_disp[rw], bio->bi_iter.bi_size,
2144                    tg_bps_limit(tg, rw),
2145                    tg->io_disp[rw], tg_iops_limit(tg, rw),
2146                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
2147
2148         tg->last_low_overflow_time[rw] = jiffies;
2149
2150         td->nr_queued[rw]++;
2151         throtl_add_bio_tg(bio, qn, tg);
2152         throttled = true;
2153
2154         /*
2155          * Update @tg's dispatch time and force schedule dispatch if @tg
2156          * was empty before @bio.  The forced scheduling isn't likely to
2157          * cause undue delay as @bio is likely to be dispatched directly if
2158          * its @tg's disptime is not in the future.
2159          */
2160         if (tg->flags & THROTL_TG_WAS_EMPTY) {
2161                 tg_update_disptime(tg);
2162                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2163         }
2164
2165 out_unlock:
2166         spin_unlock_irq(&q->queue_lock);
2167         bio_set_flag(bio, BIO_THROTTLED);
2168
2169 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2170         if (throttled || !td->track_bio_latency)
2171                 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2172 #endif
2173         rcu_read_unlock();
2174         return throttled;
2175 }
2176
2177 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2178 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2179         int op, unsigned long time)
2180 {
2181         struct latency_bucket *latency;
2182         int index;
2183
2184         if (!td || td->limit_index != LIMIT_LOW ||
2185             !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2186             !blk_queue_nonrot(td->queue))
2187                 return;
2188
2189         index = request_bucket_index(size);
2190
2191         latency = get_cpu_ptr(td->latency_buckets[op]);
2192         latency[index].total_latency += time;
2193         latency[index].samples++;
2194         put_cpu_ptr(td->latency_buckets[op]);
2195 }
2196
2197 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2198 {
2199         struct request_queue *q = rq->q;
2200         struct throtl_data *td = q->td;
2201
2202         throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2203                              time_ns >> 10);
2204 }
2205
2206 void blk_throtl_bio_endio(struct bio *bio)
2207 {
2208         struct blkcg_gq *blkg;
2209         struct throtl_grp *tg;
2210         u64 finish_time_ns;
2211         unsigned long finish_time;
2212         unsigned long start_time;
2213         unsigned long lat;
2214         int rw = bio_data_dir(bio);
2215
2216         blkg = bio->bi_blkg;
2217         if (!blkg)
2218                 return;
2219         tg = blkg_to_tg(blkg);
2220         if (!tg->td->limit_valid[LIMIT_LOW])
2221                 return;
2222
2223         finish_time_ns = ktime_get_ns();
2224         tg->last_finish_time = finish_time_ns >> 10;
2225
2226         start_time = bio_issue_time(&bio->bi_issue) >> 10;
2227         finish_time = __bio_issue_time(finish_time_ns) >> 10;
2228         if (!start_time || finish_time <= start_time)
2229                 return;
2230
2231         lat = finish_time - start_time;
2232         /* this is only for bio based drivers */
2233         if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2234                 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2235                                      bio_op(bio), lat);
2236
2237         if (tg->latency_target && lat >= tg->td->filtered_latency) {
2238                 int bucket;
2239                 unsigned int threshold;
2240
2241                 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2242                 threshold = tg->td->avg_buckets[rw][bucket].latency +
2243                         tg->latency_target;
2244                 if (lat > threshold)
2245                         tg->bad_bio_cnt++;
2246                 /*
2247                  * Not race free, so the counts can be slightly wrong, which at
2248                  * worst means the cgroup gets throttled
2249                  */
2250                 tg->bio_cnt++;
2251         }
2252
2253         if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2254                 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2255                 tg->bio_cnt /= 2;
2256                 tg->bad_bio_cnt /= 2;
2257         }
2258 }
2259 #endif
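/*
 * Example for the latency bookkeeping above (hypothetical values): with a
 * per-bucket average of 2000 and a latency_target of 1000 (~usec units), the
 * threshold is 3000, so a bio completing in 3500 bumps bad_bio_cnt; every
 * bio whose latency clears td->filtered_latency bumps bio_cnt.  The latency
 * clause in throtl_tg_is_idle() then stops counting the group as idle once
 * 20% or more of those bios were bad.
 */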
2260
2261 int blk_throtl_init(struct request_queue *q)
2262 {
2263         struct throtl_data *td;
2264         int ret;
2265
2266         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2267         if (!td)
2268                 return -ENOMEM;
2269         td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2270                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2271         if (!td->latency_buckets[READ]) {
2272                 kfree(td);
2273                 return -ENOMEM;
2274         }
2275         td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2276                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2277         if (!td->latency_buckets[WRITE]) {
2278                 free_percpu(td->latency_buckets[READ]);
2279                 kfree(td);
2280                 return -ENOMEM;
2281         }
2282
2283         INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2284         throtl_service_queue_init(&td->service_queue);
2285
2286         q->td = td;
2287         td->queue = q;
2288
2289         td->limit_valid[LIMIT_MAX] = true;
2290         td->limit_index = LIMIT_MAX;
2291         td->low_upgrade_time = jiffies;
2292         td->low_downgrade_time = jiffies;
2293
2294         /* activate policy */
2295         ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2296         if (ret) {
2297                 free_percpu(td->latency_buckets[READ]);
2298                 free_percpu(td->latency_buckets[WRITE]);
2299                 kfree(td);
2300         }
2301         return ret;
2302 }
2303
2304 void blk_throtl_exit(struct request_queue *q)
2305 {
2306         BUG_ON(!q->td);
2307         del_timer_sync(&q->td->service_queue.pending_timer);
2308         throtl_shutdown_wq(q);
2309         blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2310         free_percpu(q->td->latency_buckets[READ]);
2311         free_percpu(q->td->latency_buckets[WRITE]);
2312         kfree(q->td);
2313 }
2314
2315 void blk_throtl_register_queue(struct request_queue *q)
2316 {
2317         struct throtl_data *td;
2318         int i;
2319
2320         td = q->td;
2321         BUG_ON(!td);
2322
2323         if (blk_queue_nonrot(q)) {
2324                 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2325                 td->filtered_latency = LATENCY_FILTERED_SSD;
2326         } else {
2327                 td->throtl_slice = DFL_THROTL_SLICE_HD;
2328                 td->filtered_latency = LATENCY_FILTERED_HD;
2329                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2330                         td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2331                         td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2332                 }
2333         }
2334 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2335         /* if no low limit, use previous default */
2336         td->throtl_slice = DFL_THROTL_SLICE_HD;
2337 #endif
2338
2339         td->track_bio_latency = !queue_is_mq(q);
2340         if (!td->track_bio_latency)
2341                 blk_stat_enable_accounting(q);
2342 }
2343
2344 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2345 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2346 {
2347         if (!q->td)
2348                 return -EINVAL;
2349         return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2350 }
2351
2352 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2353         const char *page, size_t count)
2354 {
2355         unsigned long v;
2356         unsigned long t;
2357
2358         if (!q->td)
2359                 return -EINVAL;
2360         if (kstrtoul(page, 10, &v))
2361                 return -EINVAL;
2362         t = msecs_to_jiffies(v);
2363         if (t == 0 || t > MAX_THROTL_SLICE)
2364                 return -EINVAL;
2365         q->td->throtl_slice = t;
2366         return count;
2367 }
2368 #endif
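/*
 * Usage sketch (the device path is an example only): the slice is exposed in
 * milliseconds through the queue's throttle_sample_time attribute, e.g.
 *
 *   echo 50 > /sys/block/sda/queue/throttle_sample_time
 *
 * which msecs_to_jiffies() turns into the new td->throtl_slice, rejecting 0
 * and anything above MAX_THROTL_SLICE (one second).
 */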
2369
2370 static int __init throtl_init(void)
2371 {
2372         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2373         if (!kthrotld_workqueue)
2374                 panic("Failed to create kthrotld\n");
2375
2376         return blkcg_policy_register(&blkcg_policy_throtl);
2377 }
2378
2379 module_init(throtl_init);