block/blk-throttle.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
14 #include "blk.h"
15
16 /* Max dispatch from a group in 1 round */
17 static int throtl_grp_quantum = 8;
18
19 /* Total max dispatch from all groups in one round */
20 static int throtl_quantum = 32;
21
22 /* Throttling is performed over a slice and after that slice is renewed */
23 #define DFL_THROTL_SLICE_HD (HZ / 10)
24 #define DFL_THROTL_SLICE_SSD (HZ / 50)
25 #define MAX_THROTL_SLICE (HZ)
26 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
27 #define MIN_THROTL_BPS (320 * 1024)
28 #define MIN_THROTL_IOPS (10)
29 #define DFL_LATENCY_TARGET (-1L)
30 #define DFL_IDLE_THRESHOLD (0)
31 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
32 #define LATENCY_FILTERED_SSD (0)
33 /*
34  * For HD, very small latencies come from sequential IO. Such IO doesn't help
35  * determine whether a group's IO is impacted by others, hence we ignore it.
36  */
37 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
38
39 #define SKIP_LATENCY (((u64)1) << BLK_STAT_RES_SHIFT)
40
41 static struct blkcg_policy blkcg_policy_throtl;
42
43 /* A workqueue to queue throttle related work */
44 static struct workqueue_struct *kthrotld_workqueue;
45
46 /*
47  * To implement hierarchical throttling, throtl_grps form a tree and bios
48  * are dispatched upwards level by level until they reach the top and get
49  * issued.  When dispatching bios from the children and local group at each
50  * level, if the bios are dispatched into a single bio_list, there's a risk
51  * that a local or child group which can queue many bios at once fills up
52  * the list, starving others.
53  *
54  * To avoid such starvation, dispatched bios are queued separately
55  * according to where they came from.  When they are again dispatched to
56  * the parent, they're popped in round-robin order so that no single source
57  * hogs the dispatch window.
58  *
59  * throtl_qnode is used to keep the queued bios separated by their sources.
60  * Bios are queued to throtl_qnode which in turn is queued to
61  * throtl_service_queue and then dispatched in round-robin order.
62  *
63  * It's also used to track the reference counts on blkg's.  A qnode always
64  * belongs to a throtl_grp and gets queued on itself or the parent, so
65  * incrementing the reference of the associated throtl_grp when a qnode is
66  * queued and decrementing when dequeued is enough to keep the whole blkg
67  * tree pinned while bios are in flight.
68  */
69 struct throtl_qnode {
70         struct list_head        node;           /* service_queue->queued[] */
71         struct bio_list         bios;           /* queued bios */
72         struct throtl_grp       *tg;            /* tg this qnode belongs to */
73 };
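
/*
 * A worked illustration of the round-robin dispatch described above (the
 * counts are assumed, purely illustrative): if child group A has 100 bios
 * queued on its qnode at the parent and child group B has one,
 * throtl_pop_queued() pops a bio from A's qnode and rotates that qnode to
 * the tail, so B's bio is popped next.  B's single bio is thus issued after
 * at most one of A's bios instead of after all 100.
 */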
74
75 struct throtl_service_queue {
76         struct throtl_service_queue *parent_sq; /* the parent service_queue */
77
78         /*
79          * Bios queued directly to this service_queue or dispatched from
80          * children throtl_grp's.
81          */
82         struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
83         unsigned int            nr_queued[2];   /* number of queued bios */
84
85         /*
86          * RB tree of active children throtl_grp's, which are sorted by
87          * their ->disptime.
88          */
89         struct rb_root          pending_tree;   /* RB tree of active tgs */
90         struct rb_node          *first_pending; /* first node in the tree */
91         unsigned int            nr_pending;     /* # queued in the tree */
92         unsigned long           first_pending_disptime; /* disptime of the first tg */
93         struct timer_list       pending_timer;  /* fires on first_pending_disptime */
94 };
95
96 enum tg_state_flags {
97         THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
98         THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
99 };
100
101 #define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
102
103 enum {
104         LIMIT_LOW,
105         LIMIT_MAX,
106         LIMIT_CNT,
107 };
108
109 struct throtl_grp {
110         /* must be the first member */
111         struct blkg_policy_data pd;
112
113         /* active throtl group service_queue member */
114         struct rb_node rb_node;
115
116         /* throtl_data this group belongs to */
117         struct throtl_data *td;
118
119         /* this group's service queue */
120         struct throtl_service_queue service_queue;
121
122         /*
123          * qnode_on_self is used when bios are directly queued to this
124          * throtl_grp so that local bios compete fairly with bios
125          * dispatched from children.  qnode_on_parent is used when bios are
126          * dispatched from this throtl_grp into its parent and will compete
127          * with the sibling qnode_on_parents and the parent's
128          * qnode_on_self.
129          */
130         struct throtl_qnode qnode_on_self[2];
131         struct throtl_qnode qnode_on_parent[2];
132
133         /*
134          * Dispatch time in jiffies. This is the estimated time when the group
135          * will unthrottle and be ready to dispatch more bios. It is used as the
136          * key to sort active groups in the service tree.
137          */
138         unsigned long disptime;
139
140         unsigned int flags;
141
142         /* are there any throtl rules between this group and td? */
143         bool has_rules[2];
144
145         /* internally used bytes per second rate limits */
146         uint64_t bps[2][LIMIT_CNT];
147         /* user configured bps limits */
148         uint64_t bps_conf[2][LIMIT_CNT];
149
150         /* internally used IOPS limits */
151         unsigned int iops[2][LIMIT_CNT];
152         /* user configured IOPS limits */
153         unsigned int iops_conf[2][LIMIT_CNT];
154
155         /* Number of bytes dispatched in current slice */
156         uint64_t bytes_disp[2];
157         /* Number of bios dispatched in current slice */
158         unsigned int io_disp[2];
159
160         unsigned long last_low_overflow_time[2];
161
162         uint64_t last_bytes_disp[2];
163         unsigned int last_io_disp[2];
164
165         unsigned long last_check_time;
166
167         unsigned long latency_target; /* us */
168         unsigned long latency_target_conf; /* us */
169         /* When did we start a new slice */
170         unsigned long slice_start[2];
171         unsigned long slice_end[2];
172
173         unsigned long last_finish_time; /* ns / 1024 */
174         unsigned long checked_last_finish_time; /* ns / 1024 */
175         unsigned long avg_idletime; /* ns / 1024 */
176         unsigned long idletime_threshold; /* us */
177         unsigned long idletime_threshold_conf; /* us */
178
179         unsigned int bio_cnt; /* total bios */
180         unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
181         unsigned long bio_cnt_reset_time;
182 };
183
184 /* We measure latency for request size from <= 4k to >= 1M */
185 #define LATENCY_BUCKET_SIZE 9
186
187 struct latency_bucket {
188         unsigned long total_latency; /* ns / 1024 */
189         int samples;
190 };
191
192 struct avg_latency_bucket {
193         unsigned long latency; /* ns / 1024 */
194         bool valid;
195 };
196
197 struct throtl_data
198 {
199         /* service tree for active throtl groups */
200         struct throtl_service_queue service_queue;
201
202         struct request_queue *queue;
203
204         /* Total number of queued bios on READ and WRITE lists */
205         unsigned int nr_queued[2];
206
207         unsigned int throtl_slice;
208
209         /* Work for dispatching throttled bios */
210         struct work_struct dispatch_work;
211         unsigned int limit_index;
212         bool limit_valid[LIMIT_CNT];
213
214         unsigned long low_upgrade_time;
215         unsigned long low_downgrade_time;
216
217         unsigned int scale;
218
219         struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
220         struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
221         struct latency_bucket __percpu *latency_buckets[2];
222         unsigned long last_calculate_time;
223         unsigned long filtered_latency;
224
225         bool track_bio_latency;
226 };
227
228 static void throtl_pending_timer_fn(struct timer_list *t);
229
230 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
231 {
232         return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
233 }
234
235 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
236 {
237         return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
238 }
239
240 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
241 {
242         return pd_to_blkg(&tg->pd);
243 }
244
245 /**
246  * sq_to_tg - return the throtl_grp the specified service queue belongs to
247  * @sq: the throtl_service_queue of interest
248  *
249  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
250  * embedded in throtl_data, %NULL is returned.
251  */
252 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
253 {
254         if (sq && sq->parent_sq)
255                 return container_of(sq, struct throtl_grp, service_queue);
256         else
257                 return NULL;
258 }
259
260 /**
261  * sq_to_td - return throtl_data the specified service queue belongs to
262  * @sq: the throtl_service_queue of interest
263  *
264  * A service_queue can be embedded in either a throtl_grp or throtl_data.
265  * Determine the associated throtl_data accordingly and return it.
266  */
267 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
268 {
269         struct throtl_grp *tg = sq_to_tg(sq);
270
271         if (tg)
272                 return tg->td;
273         else
274                 return container_of(sq, struct throtl_data, service_queue);
275 }
276
277 /*
278  * cgroup's limit in LIMIT_MAX is scaled if low limit is set. This scaling is
279  * done to make the IO dispatch smoother.
280  * Scale up: linearly scale up according to elapsed time since upgrade. For
281  *           every throtl_slice, the limit scales up by 1/2 of the .low limit
282  *           until the limit hits the .max limit
283  * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
284  */
285 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
286 {
287         /* arbitrary value to avoid too big scale */
288         if (td->scale < 4096 && time_after_eq(jiffies,
289             td->low_upgrade_time + td->scale * td->throtl_slice))
290                 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
291
292         return low + (low >> 1) * td->scale;
293 }
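
/*
 * A worked example of the scaling above, with assumed numbers (illustrative
 * only): given a .low limit of 10 MB/s and a throtl_slice of 100ms, three
 * slices after the upgrade td->scale becomes 3, so the adjusted limit is
 * 10 + (10 >> 1) * 3 = 25 MB/s.  Callers such as tg_bps_limit() then cap the
 * result at the configured .max limit.
 */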
294
295 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
296 {
297         struct blkcg_gq *blkg = tg_to_blkg(tg);
298         struct throtl_data *td;
299         uint64_t ret;
300
301         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
302                 return U64_MAX;
303
304         td = tg->td;
305         ret = tg->bps[rw][td->limit_index];
306         if (ret == 0 && td->limit_index == LIMIT_LOW) {
307                 /* intermediate node or iops isn't 0 */
308                 if (!list_empty(&blkg->blkcg->css.children) ||
309                     tg->iops[rw][td->limit_index])
310                         return U64_MAX;
311                 else
312                         return MIN_THROTL_BPS;
313         }
314
315         if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
316             tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
317                 uint64_t adjusted;
318
319                 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
320                 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
321         }
322         return ret;
323 }
324
325 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
326 {
327         struct blkcg_gq *blkg = tg_to_blkg(tg);
328         struct throtl_data *td;
329         unsigned int ret;
330
331         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
332                 return UINT_MAX;
333
334         td = tg->td;
335         ret = tg->iops[rw][td->limit_index];
336         if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
337                 /* intermediate node or bps isn't 0 */
338                 if (!list_empty(&blkg->blkcg->css.children) ||
339                     tg->bps[rw][td->limit_index])
340                         return UINT_MAX;
341                 else
342                         return MIN_THROTL_IOPS;
343         }
344
345         if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
346             tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
347                 uint64_t adjusted;
348
349                 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
350                 if (adjusted > UINT_MAX)
351                         adjusted = UINT_MAX;
352                 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
353         }
354         return ret;
355 }
356
357 #define request_bucket_index(sectors) \
358         clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
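
/*
 * How the buckets map out (derived from the macro above): a 4KB request is 8
 * sectors, so order_base_2(8) - 3 = 0 and it lands in bucket 0; a 64KB
 * request (128 sectors) lands in bucket 4; a 1MB request (2048 sectors)
 * gives 11 - 3 = 8, the last bucket.  Smaller and larger requests are
 * clamped to buckets 0 and 8, matching the "<= 4k to >= 1M" range noted
 * above LATENCY_BUCKET_SIZE.
 */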
359
360 /**
361  * throtl_log - log debug message via blktrace
362  * @sq: the service_queue being reported
363  * @fmt: printf format string
364  * @args: printf args
365  *
366  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
367  * throtl_grp; otherwise, just "throtl".
368  */
369 #define throtl_log(sq, fmt, args...)    do {                            \
370         struct throtl_grp *__tg = sq_to_tg((sq));                       \
371         struct throtl_data *__td = sq_to_td((sq));                      \
372                                                                         \
373         (void)__td;                                                     \
374         if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
375                 break;                                                  \
376         if ((__tg)) {                                                   \
377                 blk_add_cgroup_trace_msg(__td->queue,                   \
378                         tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
379         } else {                                                        \
380                 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
381         }                                                               \
382 } while (0)
383
384 static inline unsigned int throtl_bio_data_size(struct bio *bio)
385 {
386         /* assume it's one sector */
387         if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
388                 return 512;
389         return bio->bi_iter.bi_size;
390 }
391
392 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
393 {
394         INIT_LIST_HEAD(&qn->node);
395         bio_list_init(&qn->bios);
396         qn->tg = tg;
397 }
398
399 /**
400  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
401  * @bio: bio being added
402  * @qn: qnode to add bio to
403  * @queued: the service_queue->queued[] list @qn belongs to
404  *
405  * Add @bio to @qn and put @qn on @queued if it's not already on.
406  * @qn->tg's reference count is bumped when @qn is activated.  See the
407  * comment on top of throtl_qnode definition for details.
408  */
409 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
410                                  struct list_head *queued)
411 {
412         bio_list_add(&qn->bios, bio);
413         if (list_empty(&qn->node)) {
414                 list_add_tail(&qn->node, queued);
415                 blkg_get(tg_to_blkg(qn->tg));
416         }
417 }
418
419 /**
420  * throtl_peek_queued - peek the first bio on a qnode list
421  * @queued: the qnode list to peek
422  */
423 static struct bio *throtl_peek_queued(struct list_head *queued)
424 {
425         struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
426         struct bio *bio;
427
428         if (list_empty(queued))
429                 return NULL;
430
431         bio = bio_list_peek(&qn->bios);
432         WARN_ON_ONCE(!bio);
433         return bio;
434 }
435
436 /**
437  * throtl_pop_queued - pop the first bio from a qnode list
438  * @queued: the qnode list to pop a bio from
439  * @tg_to_put: optional out argument for throtl_grp to put
440  *
441  * Pop the first bio from the qnode list @queued.  After popping, the first
442  * qnode is removed from @queued if empty or moved to the end of @queued so
443  * that the popping order is round-robin.
444  *
445  * When the first qnode is removed, its associated throtl_grp should be put
446  * too.  If @tg_to_put is NULL, this function automatically puts it;
447  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
448  * responsible for putting it.
449  */
450 static struct bio *throtl_pop_queued(struct list_head *queued,
451                                      struct throtl_grp **tg_to_put)
452 {
453         struct throtl_qnode *qn = list_first_entry(queued, struct throtl_qnode, node);
454         struct bio *bio;
455
456         if (list_empty(queued))
457                 return NULL;
458
459         bio = bio_list_pop(&qn->bios);
460         WARN_ON_ONCE(!bio);
461
462         if (bio_list_empty(&qn->bios)) {
463                 list_del_init(&qn->node);
464                 if (tg_to_put)
465                         *tg_to_put = qn->tg;
466                 else
467                         blkg_put(tg_to_blkg(qn->tg));
468         } else {
469                 list_move_tail(&qn->node, queued);
470         }
471
472         return bio;
473 }
474
475 /* init a service_queue, assumes the caller zeroed it */
476 static void throtl_service_queue_init(struct throtl_service_queue *sq)
477 {
478         INIT_LIST_HEAD(&sq->queued[0]);
479         INIT_LIST_HEAD(&sq->queued[1]);
480         sq->pending_tree = RB_ROOT;
481         timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
482 }
483
484 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp, int node)
485 {
486         struct throtl_grp *tg;
487         int rw;
488
489         tg = kzalloc_node(sizeof(*tg), gfp, node);
490         if (!tg)
491                 return NULL;
492
493         throtl_service_queue_init(&tg->service_queue);
494
495         for (rw = READ; rw <= WRITE; rw++) {
496                 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
497                 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
498         }
499
500         RB_CLEAR_NODE(&tg->rb_node);
501         tg->bps[READ][LIMIT_MAX] = U64_MAX;
502         tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
503         tg->iops[READ][LIMIT_MAX] = UINT_MAX;
504         tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
505         tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
506         tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
507         tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
508         tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
509         /* LIMIT_LOW will have default value 0 */
510
511         tg->latency_target = DFL_LATENCY_TARGET;
512         tg->latency_target_conf = DFL_LATENCY_TARGET;
513         tg->idletime_threshold = DFL_IDLE_THRESHOLD;
514         tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
515
516         return &tg->pd;
517 }
518
519 static void throtl_pd_init(struct blkg_policy_data *pd)
520 {
521         struct throtl_grp *tg = pd_to_tg(pd);
522         struct blkcg_gq *blkg = tg_to_blkg(tg);
523         struct throtl_data *td = blkg->q->td;
524         struct throtl_service_queue *sq = &tg->service_queue;
525
526         /*
527          * If on the default hierarchy, we switch to properly hierarchical
528          * behavior where limits on a given throtl_grp are applied to the
529          * whole subtree rather than just the group itself.  e.g. If 16M
530          * read_bps limit is set on the root group, the whole system can't
531          * exceed 16M for the device.
532          *
533          * If not on the default hierarchy, the broken flat hierarchy
534          * behavior is retained where all throtl_grps are treated as if
535          * they're all separate root groups right below throtl_data.
536          * Limits of a group don't interact with limits of other groups
537          * regardless of the position of the group in the hierarchy.
538          */
539         sq->parent_sq = &td->service_queue;
540         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
541                 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
542         tg->td = td;
543 }
544
545 /*
546  * Set has_rules[] if @tg or any of its parents have limits configured.
547  * This doesn't require walking up to the top of the hierarchy as the
548  * parent's has_rules[] is guaranteed to be correct.
549  */
550 static void tg_update_has_rules(struct throtl_grp *tg)
551 {
552         struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
553         struct throtl_data *td = tg->td;
554         int rw;
555
556         for (rw = READ; rw <= WRITE; rw++)
557                 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
558                         (td->limit_valid[td->limit_index] &&
559                          (tg_bps_limit(tg, rw) != U64_MAX ||
560                           tg_iops_limit(tg, rw) != UINT_MAX));
561 }
562
563 static void throtl_pd_online(struct blkg_policy_data *pd)
564 {
565         struct throtl_grp *tg = pd_to_tg(pd);
566         /*
567          * We don't want new groups to escape the limits of their ancestors.
568          * Update has_rules[] after a new group is brought online.
569          */
570         tg_update_has_rules(tg);
571 }
572
573 static void blk_throtl_update_limit_valid(struct throtl_data *td)
574 {
575         struct cgroup_subsys_state *pos_css;
576         struct blkcg_gq *blkg;
577         bool low_valid = false;
578
579         rcu_read_lock();
580         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
581                 struct throtl_grp *tg = blkg_to_tg(blkg);
582
583                 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
584                     tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
585                         low_valid = true;
586         }
587         rcu_read_unlock();
588
589         td->limit_valid[LIMIT_LOW] = low_valid;
590 }
591
592 static void throtl_upgrade_state(struct throtl_data *td);
593 static void throtl_pd_offline(struct blkg_policy_data *pd)
594 {
595         struct throtl_grp *tg = pd_to_tg(pd);
596
597         tg->bps[READ][LIMIT_LOW] = 0;
598         tg->bps[WRITE][LIMIT_LOW] = 0;
599         tg->iops[READ][LIMIT_LOW] = 0;
600         tg->iops[WRITE][LIMIT_LOW] = 0;
601
602         blk_throtl_update_limit_valid(tg->td);
603
604         if (!tg->td->limit_valid[tg->td->limit_index])
605                 throtl_upgrade_state(tg->td);
606 }
607
608 static void throtl_pd_free(struct blkg_policy_data *pd)
609 {
610         struct throtl_grp *tg = pd_to_tg(pd);
611
612         del_timer_sync(&tg->service_queue.pending_timer);
613         kfree(tg);
614 }
615
616 static struct throtl_grp *
617 throtl_rb_first(struct throtl_service_queue *parent_sq)
618 {
619         /* Service tree is empty */
620         if (!parent_sq->nr_pending)
621                 return NULL;
622
623         if (!parent_sq->first_pending)
624                 parent_sq->first_pending = rb_first(&parent_sq->pending_tree);
625
626         if (parent_sq->first_pending)
627                 return rb_entry_tg(parent_sq->first_pending);
628
629         return NULL;
630 }
631
632 static void rb_erase_init(struct rb_node *n, struct rb_root *root)
633 {
634         rb_erase(n, root);
635         RB_CLEAR_NODE(n);
636 }
637
638 static void throtl_rb_erase(struct rb_node *n,
639                             struct throtl_service_queue *parent_sq)
640 {
641         if (parent_sq->first_pending == n)
642                 parent_sq->first_pending = NULL;
643         rb_erase_init(n, &parent_sq->pending_tree);
644         --parent_sq->nr_pending;
645 }
646
647 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
648 {
649         struct throtl_grp *tg;
650
651         tg = throtl_rb_first(parent_sq);
652         if (!tg)
653                 return;
654
655         parent_sq->first_pending_disptime = tg->disptime;
656 }
657
658 static void tg_service_queue_add(struct throtl_grp *tg)
659 {
660         struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
661         struct rb_node **node = &parent_sq->pending_tree.rb_node;
662         struct rb_node *parent = NULL;
663         struct throtl_grp *__tg;
664         unsigned long key = tg->disptime;
665         int left = 1;
666
667         while (*node != NULL) {
668                 parent = *node;
669                 __tg = rb_entry_tg(parent);
670
671                 if (time_before(key, __tg->disptime))
672                         node = &parent->rb_left;
673                 else {
674                         node = &parent->rb_right;
675                         left = 0;
676                 }
677         }
678
679         if (left)
680                 parent_sq->first_pending = &tg->rb_node;
681
682         rb_link_node(&tg->rb_node, parent, node);
683         rb_insert_color(&tg->rb_node, &parent_sq->pending_tree);
684 }
685
686 static void __throtl_enqueue_tg(struct throtl_grp *tg)
687 {
688         tg_service_queue_add(tg);
689         tg->flags |= THROTL_TG_PENDING;
690         tg->service_queue.parent_sq->nr_pending++;
691 }
692
693 static void throtl_enqueue_tg(struct throtl_grp *tg)
694 {
695         if (!(tg->flags & THROTL_TG_PENDING))
696                 __throtl_enqueue_tg(tg);
697 }
698
699 static void __throtl_dequeue_tg(struct throtl_grp *tg)
700 {
701         throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
702         tg->flags &= ~THROTL_TG_PENDING;
703 }
704
705 static void throtl_dequeue_tg(struct throtl_grp *tg)
706 {
707         if (tg->flags & THROTL_TG_PENDING)
708                 __throtl_dequeue_tg(tg);
709 }
710
711 /* Call with queue lock held */
712 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
713                                           unsigned long expires)
714 {
715         unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
716
717         /*
718          * Since we are adjusting the throttle limit dynamically, the sleep
719          * time calculated according to the previous limit might be invalid. It's
720          * possible the cgroup's sleep time is very long and no other cgroup
721          * has IO running to notify it of the limit change. Make sure the cgroup
722          * doesn't sleep too long and miss the notification.
723          */
724         if (time_after(expires, max_expire))
725                 expires = max_expire;
726         mod_timer(&sq->pending_timer, expires);
727         throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
728                    expires - jiffies, jiffies);
729 }
730
731 /**
732  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
733  * @sq: the service_queue to schedule dispatch for
734  * @force: force scheduling
735  *
736  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
737  * dispatch time of the first pending child.  Returns %true if either timer
738  * is armed or there's no pending child left.  %false if the current
739  * dispatch window is still open and the caller should continue
740  * dispatching.
741  *
742  * If @force is %true, the dispatch timer is always scheduled and this
743  * function is guaranteed to return %true.  This is to be used when the
744  * caller can't dispatch itself and needs to invoke pending_timer
745  * unconditionally.  Note that forced scheduling is likely to induce a short
746  * delay before dispatch starts even if @sq->first_pending_disptime is not
747  * in the future and thus shouldn't be used in hot paths.
748  */
749 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
750                                           bool force)
751 {
752         /* any pending children left? */
753         if (!sq->nr_pending)
754                 return true;
755
756         update_min_dispatch_time(sq);
757
758         /* is the next dispatch time in the future? */
759         if (force || time_after(sq->first_pending_disptime, jiffies)) {
760                 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
761                 return true;
762         }
763
764         /* tell the caller to continue dispatching */
765         return false;
766 }
767
768 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
769                 bool rw, unsigned long start)
770 {
771         tg->bytes_disp[rw] = 0;
772         tg->io_disp[rw] = 0;
773
774         /*
775          * Previous slice has expired. We must have trimmed it after last
776          * bio dispatch. That means since start of last slice, we never used
777          * that bandwidth. Do try to make use of that bandwidth while giving
778          * credit.
779          */
780         if (time_after_eq(start, tg->slice_start[rw]))
781                 tg->slice_start[rw] = start;
782
783         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
784         throtl_log(&tg->service_queue,
785                    "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
786                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
787                    tg->slice_end[rw], jiffies);
788 }
789
790 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
791 {
792         tg->bytes_disp[rw] = 0;
793         tg->io_disp[rw] = 0;
794         tg->slice_start[rw] = jiffies;
795         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
796         throtl_log(&tg->service_queue,
797                    "[%c] new slice start=%lu end=%lu jiffies=%lu",
798                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
799                    tg->slice_end[rw], jiffies);
800 }
801
802 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
803                                         unsigned long jiffy_end)
804 {
805         tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
806 }
807
808 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
809                                        unsigned long jiffy_end)
810 {
811         tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
812         throtl_log(&tg->service_queue,
813                    "[%c] extend slice start=%lu end=%lu jiffies=%lu",
814                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
815                    tg->slice_end[rw], jiffies);
816 }
817
818 /* Determine if previously allocated or extended slice is complete or not */
819 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
820 {
821         if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
822                 return false;
823
824         return true;
825 }
826
827 /* Trim the used slices and adjust slice start accordingly */
828 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
829 {
830         unsigned long nr_slices, time_elapsed, io_trim;
831         u64 bytes_trim, tmp;
832
833         BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
834
835         /*
836          * If bps are unlimited (-1), then the time slice doesn't get
837          * renewed. Don't try to trim the slice if it has already expired. A new
838          * slice will start when appropriate.
839          */
840         if (throtl_slice_used(tg, rw))
841                 return;
842
843         /*
844          * A bio has been dispatched. Also adjust slice_end. It might happen
845          * that the cgroup's limit was initially very low, resulting in a high
846          * slice_end, but the limit was later bumped up and the bio was dispatched
847          * sooner; then we need to reduce slice_end. A high bogus slice_end
848          * is bad because it does not allow a new slice to start.
849          */
850
851         throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
852
853         time_elapsed = jiffies - tg->slice_start[rw];
854
855         nr_slices = time_elapsed / tg->td->throtl_slice;
856
857         if (!nr_slices)
858                 return;
859         tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
860         do_div(tmp, HZ);
861         bytes_trim = tmp;
862
863         io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
864                 HZ;
865
866         if (!bytes_trim && !io_trim)
867                 return;
868
869         if (tg->bytes_disp[rw] >= bytes_trim)
870                 tg->bytes_disp[rw] -= bytes_trim;
871         else
872                 tg->bytes_disp[rw] = 0;
873
874         if (tg->io_disp[rw] >= io_trim)
875                 tg->io_disp[rw] -= io_trim;
876         else
877                 tg->io_disp[rw] = 0;
878
879         tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
880
881         throtl_log(&tg->service_queue,
882                    "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
883                    rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
884                    tg->slice_start[rw], tg->slice_end[rw], jiffies);
885 }
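
/*
 * A worked trim example with assumed numbers (illustrative only): with
 * HZ=1000, a 100-jiffy throtl_slice and a 1,000,000 byte/s bps limit,
 * suppose 250 jiffies have elapsed since slice_start and bytes_disp is
 * 300,000.  Then nr_slices = 2, bytes_trim = 1,000,000 * 100 * 2 / 1000 =
 * 200,000, so bytes_disp drops to 100,000 and slice_start advances by 200
 * jiffies, leaving only the most recent partial slice accounted for.
 */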
886
887 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
888                                   unsigned long *wait)
889 {
890         bool rw = bio_data_dir(bio);
891         unsigned int io_allowed;
892         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
893         u64 tmp;
894
895         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
896
897         /* Slice has just started. Consider one slice interval */
898         if (!jiffy_elapsed)
899                 jiffy_elapsed_rnd = tg->td->throtl_slice;
900
901         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
902
903         /*
904          * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
905          * 1, so at most jiffy_elapsed should be equivalent to 1 second, as we
906          * will allow dispatch after 1 second and by then the slice should
907          * have been trimmed.
908          */
909
910         tmp = (u64)tg_iops_limit(tg, rw) * jiffy_elapsed_rnd;
911         do_div(tmp, HZ);
912
913         if (tmp > UINT_MAX)
914                 io_allowed = UINT_MAX;
915         else
916                 io_allowed = tmp;
917
918         if (tg->io_disp[rw] + 1 <= io_allowed) {
919                 if (wait)
920                         *wait = 0;
921                 return true;
922         }
923
924         /* Calc approx time to dispatch */
925         jiffy_wait = ((tg->io_disp[rw] + 1) * HZ) / tg_iops_limit(tg, rw) + 1;
926
927         if (jiffy_wait > jiffy_elapsed)
928                 jiffy_wait = jiffy_wait - jiffy_elapsed;
929         else
930                 jiffy_wait = 1;
931
932         if (wait)
933                 *wait = jiffy_wait;
934         return false;
935 }
936
937 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
938                                  unsigned long *wait)
939 {
940         bool rw = bio_data_dir(bio);
941         u64 bytes_allowed, extra_bytes, tmp;
942         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
943         unsigned int bio_size = throtl_bio_data_size(bio);
944
945         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
946
947         /* Slice has just started. Consider one slice interval */
948         if (!jiffy_elapsed)
949                 jiffy_elapsed_rnd = tg->td->throtl_slice;
950
951         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
952
953         tmp = tg_bps_limit(tg, rw) * jiffy_elapsed_rnd;
954         do_div(tmp, HZ);
955         bytes_allowed = tmp;
956
957         if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
958                 if (wait)
959                         *wait = 0;
960                 return true;
961         }
962
963         /* Calc approx time to dispatch */
964         extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
965         jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
966
967         if (!jiffy_wait)
968                 jiffy_wait = 1;
969
970         /*
971          * This wait time is without taking into consideration the rounding
972          * up we did. Add that time also.
973          */
974         jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
975         if (wait)
976                 *wait = jiffy_wait;
977         return false;
978 }
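
/*
 * A worked example of the wait calculation above, with assumed numbers
 * (illustrative only): with HZ=1000, a 100-jiffy throtl_slice and a
 * 1,000,000 byte/s limit, suppose 50 jiffies have elapsed in the slice,
 * bytes_disp is 90,000 and the bio is 20,000 bytes.  jiffy_elapsed_rnd
 * rounds up to 100, so bytes_allowed is 100,000; the bio exceeds that by
 * 10,000 bytes, giving jiffy_wait = 10,000 * 1000 / 1,000,000 = 10, plus
 * the 50-jiffy round-up slack, i.e. *wait = 60 jiffies.
 */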
979
980 /*
981  * Returns whether one can dispatch a bio or not. Also returns approx number
982  * of jiffies to wait before this bio is within the IO rate limit and can be dispatched
983  */
984 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
985                             unsigned long *wait)
986 {
987         bool rw = bio_data_dir(bio);
988         unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
989
990         /*
991          * Currently the whole state machine of the group depends on the first
992          * bio queued in the group's bio list. So one should not be calling
993          * this function with a different bio if there are other bios
994          * queued.
995          */
996         BUG_ON(tg->service_queue.nr_queued[rw] &&
997                bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
998
999         /* If tg->bps = -1, then BW is unlimited */
1000         if (tg_bps_limit(tg, rw) == U64_MAX &&
1001             tg_iops_limit(tg, rw) == UINT_MAX) {
1002                 if (wait)
1003                         *wait = 0;
1004                 return true;
1005         }
1006
1007         /*
1008          * If previous slice expired, start a new one otherwise renew/extend
1009          * existing slice to make sure it is at least throtl_slice interval
1010          * long since now. New slice is started only for empty throttle group.
1011          * If there is queued bio, that means there should be an active
1012          * slice and it should be extended instead.
1013          */
1014         if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1015                 throtl_start_new_slice(tg, rw);
1016         else {
1017                 if (time_before(tg->slice_end[rw],
1018                     jiffies + tg->td->throtl_slice))
1019                         throtl_extend_slice(tg, rw,
1020                                 jiffies + tg->td->throtl_slice);
1021         }
1022
1023         if (tg_with_in_bps_limit(tg, bio, &bps_wait) &&
1024             tg_with_in_iops_limit(tg, bio, &iops_wait)) {
1025                 if (wait)
1026                         *wait = 0;
1027                 return true;
1028         }
1029
1030         max_wait = max(bps_wait, iops_wait);
1031
1032         if (wait)
1033                 *wait = max_wait;
1034
1035         if (time_before(tg->slice_end[rw], jiffies + max_wait))
1036                 throtl_extend_slice(tg, rw, jiffies + max_wait);
1037
1038         return false;
1039 }
1040
1041 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1042 {
1043         bool rw = bio_data_dir(bio);
1044         unsigned int bio_size = throtl_bio_data_size(bio);
1045
1046         /* Charge the bio to the group */
1047         tg->bytes_disp[rw] += bio_size;
1048         tg->io_disp[rw]++;
1049         tg->last_bytes_disp[rw] += bio_size;
1050         tg->last_io_disp[rw]++;
1051
1052         /*
1053          * BIO_THROTTLED is used to prevent the same bio from being throttled
1054          * more than once as a throttled bio will go through blk-throtl the
1055          * second time when it eventually gets issued.  Set it when a bio
1056          * is being charged to a tg.
1057          */
1058         if (!bio_flagged(bio, BIO_THROTTLED))
1059                 bio_set_flag(bio, BIO_THROTTLED);
1060 }
1061
1062 /**
1063  * throtl_add_bio_tg - add a bio to the specified throtl_grp
1064  * @bio: bio to add
1065  * @qn: qnode to use
1066  * @tg: the target throtl_grp
1067  *
1068  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1069  * tg->qnode_on_self[] is used.
1070  */
1071 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1072                               struct throtl_grp *tg)
1073 {
1074         struct throtl_service_queue *sq = &tg->service_queue;
1075         bool rw = bio_data_dir(bio);
1076
1077         if (!qn)
1078                 qn = &tg->qnode_on_self[rw];
1079
1080         /*
1081          * If @tg doesn't currently have any bios queued in the same
1082          * direction, queueing @bio can change when @tg should be
1083          * dispatched.  Mark that @tg was empty.  This is automatically
1084          * cleared on the next tg_update_disptime().
1085          */
1086         if (!sq->nr_queued[rw])
1087                 tg->flags |= THROTL_TG_WAS_EMPTY;
1088
1089         throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1090
1091         sq->nr_queued[rw]++;
1092         throtl_enqueue_tg(tg);
1093 }
1094
1095 static void tg_update_disptime(struct throtl_grp *tg)
1096 {
1097         struct throtl_service_queue *sq = &tg->service_queue;
1098         unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1099         struct bio *bio;
1100
1101         bio = throtl_peek_queued(&sq->queued[READ]);
1102         if (bio)
1103                 tg_may_dispatch(tg, bio, &read_wait);
1104
1105         bio = throtl_peek_queued(&sq->queued[WRITE]);
1106         if (bio)
1107                 tg_may_dispatch(tg, bio, &write_wait);
1108
1109         min_wait = min(read_wait, write_wait);
1110         disptime = jiffies + min_wait;
1111
1112         /* Update dispatch time */
1113         throtl_dequeue_tg(tg);
1114         tg->disptime = disptime;
1115         throtl_enqueue_tg(tg);
1116
1117         /* see throtl_add_bio_tg() */
1118         tg->flags &= ~THROTL_TG_WAS_EMPTY;
1119 }
1120
1121 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1122                                         struct throtl_grp *parent_tg, bool rw)
1123 {
1124         if (throtl_slice_used(parent_tg, rw)) {
1125                 throtl_start_new_slice_with_credit(parent_tg, rw,
1126                                 child_tg->slice_start[rw]);
1127         }
1128
1129 }
1130
1131 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1132 {
1133         struct throtl_service_queue *sq = &tg->service_queue;
1134         struct throtl_service_queue *parent_sq = sq->parent_sq;
1135         struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1136         struct throtl_grp *tg_to_put = NULL;
1137         struct bio *bio;
1138
1139         /*
1140          * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1141          * from @tg may put its reference and @parent_sq might end up
1142          * getting released prematurely.  Remember the tg to put and put it
1143          * after @bio is transferred to @parent_sq.
1144          */
1145         bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1146         sq->nr_queued[rw]--;
1147
1148         throtl_charge_bio(tg, bio);
1149
1150         /*
1151          * If our parent is another tg, we just need to transfer @bio to
1152          * the parent using throtl_add_bio_tg().  If our parent is
1153          * @td->service_queue, @bio is ready to be issued.  Put it on its
1154          * bio_lists[] and decrease total number queued.  The caller is
1155          * responsible for issuing these bios.
1156          */
1157         if (parent_tg) {
1158                 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1159                 start_parent_slice_with_credit(tg, parent_tg, rw);
1160         } else {
1161                 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1162                                      &parent_sq->queued[rw]);
1163                 BUG_ON(tg->td->nr_queued[rw] <= 0);
1164                 tg->td->nr_queued[rw]--;
1165         }
1166
1167         throtl_trim_slice(tg, rw);
1168
1169         if (tg_to_put)
1170                 blkg_put(tg_to_blkg(tg_to_put));
1171 }
1172
1173 static int throtl_dispatch_tg(struct throtl_grp *tg)
1174 {
1175         struct throtl_service_queue *sq = &tg->service_queue;
1176         unsigned int nr_reads = 0, nr_writes = 0;
1177         unsigned int max_nr_reads = throtl_grp_quantum * 3 / 4;
1178         unsigned int max_nr_writes = throtl_grp_quantum - max_nr_reads;
1179         struct bio *bio;
1180
1181         /* Try to dispatch 75% READS and 25% WRITES */
1182
1183         while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1184                tg_may_dispatch(tg, bio, NULL)) {
1185
1186                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1187                 nr_reads++;
1188
1189                 if (nr_reads >= max_nr_reads)
1190                         break;
1191         }
1192
1193         while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1194                tg_may_dispatch(tg, bio, NULL)) {
1195
1196                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1197                 nr_writes++;
1198
1199                 if (nr_writes >= max_nr_writes)
1200                         break;
1201         }
1202
1203         return nr_reads + nr_writes;
1204 }
1205
1206 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1207 {
1208         unsigned int nr_disp = 0;
1209
1210         while (1) {
1211                 struct throtl_grp *tg = throtl_rb_first(parent_sq);
1212                 struct throtl_service_queue *sq;
1213
1214                 if (!tg)
1215                         break;
1216                 sq = &tg->service_queue;
1217                 if (time_before(jiffies, tg->disptime))
1218                         break;
1219
1220                 throtl_dequeue_tg(tg);
1221
1222                 nr_disp += throtl_dispatch_tg(tg);
1223
1224                 if (sq->nr_queued[0] || sq->nr_queued[1])
1225                         tg_update_disptime(tg);
1226
1227                 if (nr_disp >= throtl_quantum)
1228                         break;
1229         }
1230
1231         return nr_disp;
1232 }
1233
1234 static bool throtl_can_upgrade(struct throtl_data *td,
1235         struct throtl_grp *this_tg);
1236 /**
1237  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1238  * @arg: the throtl_service_queue being serviced
1239  *
1240  * This timer is armed when a child throtl_grp with active bios becomes
1241  * pending and queued on the service_queue's pending_tree and expires when
1242  * the first child throtl_grp should be dispatched.  This function
1243  * dispatches bios from the children throtl_grps to the parent
1244  * service_queue.
1245  *
1246  * If the parent's parent is another throtl_grp, dispatching is propagated
1247  * by either arming its pending_timer or repeating dispatch directly.  If
1248  * the top-level service_tree is reached, throtl_data->dispatch_work is
1249  * kicked so that the ready bios are issued.
1250  */
1251 static void throtl_pending_timer_fn(struct timer_list *t)
1252 {
1253         struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1254         struct throtl_grp *tg = sq_to_tg(sq);
1255         struct throtl_data *td = sq_to_td(sq);
1256         struct request_queue *q = td->queue;
1257         struct throtl_service_queue *parent_sq;
1258         bool dispatched;
1259         int ret;
1260
1261         spin_lock_irq(q->queue_lock);
1262         if (throtl_can_upgrade(td, NULL))
1263                 throtl_upgrade_state(td);
1264
1265 again:
1266         parent_sq = sq->parent_sq;
1267         dispatched = false;
1268
1269         while (true) {
1270                 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1271                            sq->nr_queued[READ] + sq->nr_queued[WRITE],
1272                            sq->nr_queued[READ], sq->nr_queued[WRITE]);
1273
1274                 ret = throtl_select_dispatch(sq);
1275                 if (ret) {
1276                         throtl_log(sq, "bios disp=%u", ret);
1277                         dispatched = true;
1278                 }
1279
1280                 if (throtl_schedule_next_dispatch(sq, false))
1281                         break;
1282
1283                 /* this dispatch window is still open, relax and repeat */
1284                 spin_unlock_irq(q->queue_lock);
1285                 cpu_relax();
1286                 spin_lock_irq(q->queue_lock);
1287         }
1288
1289         if (!dispatched)
1290                 goto out_unlock;
1291
1292         if (parent_sq) {
1293                 /* @parent_sq is another throtl_grp, propagate dispatch */
1294                 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1295                         tg_update_disptime(tg);
1296                         if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1297                                 /* window is already open, repeat dispatching */
1298                                 sq = parent_sq;
1299                                 tg = sq_to_tg(sq);
1300                                 goto again;
1301                         }
1302                 }
1303         } else {
1304                 /* reached the top-level, queue issuing */
1305                 queue_work(kthrotld_workqueue, &td->dispatch_work);
1306         }
1307 out_unlock:
1308         spin_unlock_irq(q->queue_lock);
1309 }
1310
1311 /**
1312  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1313  * @work: work item being executed
1314  *
1315  * This function is queued for execution when bios reach the bio_lists[]
1316  * of throtl_data->service_queue.  Those bios are ready and issued by this
1317  * function.
1318  */
1319 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1320 {
1321         struct throtl_data *td = container_of(work, struct throtl_data,
1322                                               dispatch_work);
1323         struct throtl_service_queue *td_sq = &td->service_queue;
1324         struct request_queue *q = td->queue;
1325         struct bio_list bio_list_on_stack;
1326         struct bio *bio;
1327         struct blk_plug plug;
1328         int rw;
1329
1330         bio_list_init(&bio_list_on_stack);
1331
1332         spin_lock_irq(q->queue_lock);
1333         for (rw = READ; rw <= WRITE; rw++)
1334                 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1335                         bio_list_add(&bio_list_on_stack, bio);
1336         spin_unlock_irq(q->queue_lock);
1337
1338         if (!bio_list_empty(&bio_list_on_stack)) {
1339                 blk_start_plug(&plug);
1340                 while ((bio = bio_list_pop(&bio_list_on_stack)))
1341                         generic_make_request(bio);
1342                 blk_finish_plug(&plug);
1343         }
1344 }
1345
1346 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1347                               int off)
1348 {
1349         struct throtl_grp *tg = pd_to_tg(pd);
1350         u64 v = *(u64 *)((void *)tg + off);
1351
1352         if (v == U64_MAX)
1353                 return 0;
1354         return __blkg_prfill_u64(sf, pd, v);
1355 }
1356
1357 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1358                                int off)
1359 {
1360         struct throtl_grp *tg = pd_to_tg(pd);
1361         unsigned int v = *(unsigned int *)((void *)tg + off);
1362
1363         if (v == UINT_MAX)
1364                 return 0;
1365         return __blkg_prfill_u64(sf, pd, v);
1366 }
1367
1368 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1369 {
1370         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1371                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1372         return 0;
1373 }
1374
1375 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1376 {
1377         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1378                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1379         return 0;
1380 }
1381
1382 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1383 {
1384         struct throtl_service_queue *sq = &tg->service_queue;
1385         struct cgroup_subsys_state *pos_css;
1386         struct blkcg_gq *blkg;
1387
1388         throtl_log(&tg->service_queue,
1389                    "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1390                    tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1391                    tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1392
1393         /*
1394          * Update has_rules[] flags for the updated tg's subtree.  A tg is
1395          * considered to have rules if either the tg itself or any of its
1396          * ancestors has rules.  This identifies groups without any
1397          * restrictions in the whole hierarchy and allows them to bypass
1398          * blk-throttle.
1399          */
1400         blkg_for_each_descendant_pre(blkg, pos_css,
1401                         global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1402                 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1403                 struct throtl_grp *parent_tg;
1404
1405                 tg_update_has_rules(this_tg);
1406                 /* ignore root/second level */
1407                 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1408                     !blkg->parent->parent)
1409                         continue;
1410                 parent_tg = blkg_to_tg(blkg->parent);
1411                 /*
1412                  * make sure all children have a lower idle time threshold and
1413                  * a higher latency target
1414                  */
1415                 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1416                                 parent_tg->idletime_threshold);
1417                 this_tg->latency_target = max(this_tg->latency_target,
1418                                 parent_tg->latency_target);
1419         }
1420
1421         /*
1422          * We're already holding queue_lock and know @tg is valid.  Let's
1423          * apply the new config directly.
1424          *
1425          * Restart the slices for both READ and WRITE. It might happen
1426          * that a group's limits are dropped suddenly and we don't want to
1427          * account recently dispatched IO against the new low rate.
1428          */
1429         throtl_start_new_slice(tg, 0);
1430         throtl_start_new_slice(tg, 1);
1431
1432         if (tg->flags & THROTL_TG_PENDING) {
1433                 tg_update_disptime(tg);
1434                 throtl_schedule_next_dispatch(sq->parent_sq, true);
1435         }
1436 }
1437
1438 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1439                            char *buf, size_t nbytes, loff_t off, bool is_u64)
1440 {
1441         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1442         struct blkg_conf_ctx ctx;
1443         struct throtl_grp *tg;
1444         int ret;
1445         u64 v;
1446
1447         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1448         if (ret)
1449                 return ret;
1450
1451         ret = -EINVAL;
1452         if (sscanf(ctx.body, "%llu", &v) != 1)
1453                 goto out_finish;
1454         if (!v)
1455                 v = U64_MAX;
1456
1457         tg = blkg_to_tg(ctx.blkg);
1458
1459         if (is_u64)
1460                 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1461         else
1462                 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1463
1464         tg_conf_updated(tg, false);
1465         ret = 0;
1466 out_finish:
1467         blkg_conf_finish(&ctx);
1468         return ret ?: nbytes;
1469 }
1470
1471 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1472                                char *buf, size_t nbytes, loff_t off)
1473 {
1474         return tg_set_conf(of, buf, nbytes, off, true);
1475 }
1476
1477 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1478                                 char *buf, size_t nbytes, loff_t off)
1479 {
1480         return tg_set_conf(of, buf, nbytes, off, false);
1481 }
1482
1483 static struct cftype throtl_legacy_files[] = {
1484         {
1485                 .name = "throttle.read_bps_device",
1486                 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1487                 .seq_show = tg_print_conf_u64,
1488                 .write = tg_set_conf_u64,
1489         },
1490         {
1491                 .name = "throttle.write_bps_device",
1492                 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1493                 .seq_show = tg_print_conf_u64,
1494                 .write = tg_set_conf_u64,
1495         },
1496         {
1497                 .name = "throttle.read_iops_device",
1498                 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1499                 .seq_show = tg_print_conf_uint,
1500                 .write = tg_set_conf_uint,
1501         },
1502         {
1503                 .name = "throttle.write_iops_device",
1504                 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1505                 .seq_show = tg_print_conf_uint,
1506                 .write = tg_set_conf_uint,
1507         },
1508         {
1509                 .name = "throttle.io_service_bytes",
1510                 .private = (unsigned long)&blkcg_policy_throtl,
1511                 .seq_show = blkg_print_stat_bytes,
1512         },
1513         {
1514                 .name = "throttle.io_service_bytes_recursive",
1515                 .private = (unsigned long)&blkcg_policy_throtl,
1516                 .seq_show = blkg_print_stat_bytes_recursive,
1517         },
1518         {
1519                 .name = "throttle.io_serviced",
1520                 .private = (unsigned long)&blkcg_policy_throtl,
1521                 .seq_show = blkg_print_stat_ios,
1522         },
1523         {
1524                 .name = "throttle.io_serviced_recursive",
1525                 .private = (unsigned long)&blkcg_policy_throtl,
1526                 .seq_show = blkg_print_stat_ios_recursive,
1527         },
1528         { }     /* terminate */
1529 };
1530
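/*
 * Format one device's limits for the io.low/io.max interface.  Values still
 * at U64_MAX/UINT_MAX print as "max", and a device whose every field is at
 * its default is omitted from the output entirely.
 */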
1531 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1532                          int off)
1533 {
1534         struct throtl_grp *tg = pd_to_tg(pd);
1535         const char *dname = blkg_dev_name(pd->blkg);
1536         char bufs[4][21] = { "max", "max", "max", "max" };
1537         u64 bps_dft;
1538         unsigned int iops_dft;
1539         char idle_time[26] = "";
1540         char latency_time[26] = "";
1541
1542         if (!dname)
1543                 return 0;
1544
1545         if (off == LIMIT_LOW) {
1546                 bps_dft = 0;
1547                 iops_dft = 0;
1548         } else {
1549                 bps_dft = U64_MAX;
1550                 iops_dft = UINT_MAX;
1551         }
1552
1553         if (tg->bps_conf[READ][off] == bps_dft &&
1554             tg->bps_conf[WRITE][off] == bps_dft &&
1555             tg->iops_conf[READ][off] == iops_dft &&
1556             tg->iops_conf[WRITE][off] == iops_dft &&
1557             (off != LIMIT_LOW ||
1558              (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1559               tg->latency_target_conf == DFL_LATENCY_TARGET)))
1560                 return 0;
1561
1562         if (tg->bps_conf[READ][off] != U64_MAX)
1563                 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1564                         tg->bps_conf[READ][off]);
1565         if (tg->bps_conf[WRITE][off] != U64_MAX)
1566                 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1567                         tg->bps_conf[WRITE][off]);
1568         if (tg->iops_conf[READ][off] != UINT_MAX)
1569                 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1570                         tg->iops_conf[READ][off]);
1571         if (tg->iops_conf[WRITE][off] != UINT_MAX)
1572                 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1573                         tg->iops_conf[WRITE][off]);
1574         if (off == LIMIT_LOW) {
1575                 if (tg->idletime_threshold_conf == ULONG_MAX)
1576                         strcpy(idle_time, " idle=max");
1577                 else
1578                         snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1579                                 tg->idletime_threshold_conf);
1580
1581                 if (tg->latency_target_conf == ULONG_MAX)
1582                         strcpy(latency_time, " latency=max");
1583                 else
1584                         snprintf(latency_time, sizeof(latency_time),
1585                                 " latency=%lu", tg->latency_target_conf);
1586         }
1587
1588         seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1589                    dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1590                    latency_time);
1591         return 0;
1592 }
1593
1594 static int tg_print_limit(struct seq_file *sf, void *v)
1595 {
1596         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1597                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1598         return 0;
1599 }
1600
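/*
 * Parse space separated "rbps= wbps= riops= wiops=" tokens, plus "idle="
 * and "latency=" for io.low.  Tokens that aren't mentioned keep their
 * current value, "max" removes a limit and a value of 0 is rejected.
 */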
1601 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1602                           char *buf, size_t nbytes, loff_t off)
1603 {
1604         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1605         struct blkg_conf_ctx ctx;
1606         struct throtl_grp *tg;
1607         u64 v[4];
1608         unsigned long idle_time;
1609         unsigned long latency_time;
1610         int ret;
1611         int index = of_cft(of)->private;
1612
1613         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1614         if (ret)
1615                 return ret;
1616
1617         tg = blkg_to_tg(ctx.blkg);
1618
1619         v[0] = tg->bps_conf[READ][index];
1620         v[1] = tg->bps_conf[WRITE][index];
1621         v[2] = tg->iops_conf[READ][index];
1622         v[3] = tg->iops_conf[WRITE][index];
1623
1624         idle_time = tg->idletime_threshold_conf;
1625         latency_time = tg->latency_target_conf;
1626         while (true) {
1627                 char tok[27];   /* wiops=18446744073709551616 */
1628                 char *p;
1629                 u64 val = U64_MAX;
1630                 int len;
1631
1632                 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1633                         break;
1634                 if (tok[0] == '\0')
1635                         break;
1636                 ctx.body += len;
1637
1638                 ret = -EINVAL;
1639                 p = tok;
1640                 strsep(&p, "=");
1641                 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1642                         goto out_finish;
1643
1644                 ret = -ERANGE;
1645                 if (!val)
1646                         goto out_finish;
1647
1648                 ret = -EINVAL;
1649                 if (!strcmp(tok, "rbps"))
1650                         v[0] = val;
1651                 else if (!strcmp(tok, "wbps"))
1652                         v[1] = val;
1653                 else if (!strcmp(tok, "riops"))
1654                         v[2] = min_t(u64, val, UINT_MAX);
1655                 else if (!strcmp(tok, "wiops"))
1656                         v[3] = min_t(u64, val, UINT_MAX);
1657                 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1658                         idle_time = val;
1659                 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1660                         latency_time = val;
1661                 else
1662                         goto out_finish;
1663         }
1664
1665         tg->bps_conf[READ][index] = v[0];
1666         tg->bps_conf[WRITE][index] = v[1];
1667         tg->iops_conf[READ][index] = v[2];
1668         tg->iops_conf[WRITE][index] = v[3];
1669
1670         if (index == LIMIT_MAX) {
1671                 tg->bps[READ][index] = v[0];
1672                 tg->bps[WRITE][index] = v[1];
1673                 tg->iops[READ][index] = v[2];
1674                 tg->iops[WRITE][index] = v[3];
1675         }
1676         tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1677                 tg->bps_conf[READ][LIMIT_MAX]);
1678         tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1679                 tg->bps_conf[WRITE][LIMIT_MAX]);
1680         tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1681                 tg->iops_conf[READ][LIMIT_MAX]);
1682         tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1683                 tg->iops_conf[WRITE][LIMIT_MAX]);
1684         tg->idletime_threshold_conf = idle_time;
1685         tg->latency_target_conf = latency_time;
1686
1687         /* force the user to configure all settings for the low limit */
1688         if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1689               tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1690             tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1691             tg->latency_target_conf == DFL_LATENCY_TARGET) {
1692                 tg->bps[READ][LIMIT_LOW] = 0;
1693                 tg->bps[WRITE][LIMIT_LOW] = 0;
1694                 tg->iops[READ][LIMIT_LOW] = 0;
1695                 tg->iops[WRITE][LIMIT_LOW] = 0;
1696                 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1697                 tg->latency_target = DFL_LATENCY_TARGET;
1698         } else if (index == LIMIT_LOW) {
1699                 tg->idletime_threshold = tg->idletime_threshold_conf;
1700                 tg->latency_target = tg->latency_target_conf;
1701         }
1702
1703         blk_throtl_update_limit_valid(tg->td);
1704         if (tg->td->limit_valid[LIMIT_LOW]) {
1705                 if (index == LIMIT_LOW)
1706                         tg->td->limit_index = LIMIT_LOW;
1707         } else
1708                 tg->td->limit_index = LIMIT_MAX;
1709         tg_conf_updated(tg, index == LIMIT_LOW &&
1710                 tg->td->limit_valid[LIMIT_LOW]);
1711         ret = 0;
1712 out_finish:
1713         blkg_conf_finish(&ctx);
1714         return ret ?: nbytes;
1715 }
1716
1717 static struct cftype throtl_files[] = {
1718 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1719         {
1720                 .name = "low",
1721                 .flags = CFTYPE_NOT_ON_ROOT,
1722                 .seq_show = tg_print_limit,
1723                 .write = tg_set_limit,
1724                 .private = LIMIT_LOW,
1725         },
1726 #endif
1727         {
1728                 .name = "max",
1729                 .flags = CFTYPE_NOT_ON_ROOT,
1730                 .seq_show = tg_print_limit,
1731                 .write = tg_set_limit,
1732                 .private = LIMIT_MAX,
1733         },
1734         { }     /* terminate */
1735 };
1736
1737 static void throtl_shutdown_wq(struct request_queue *q)
1738 {
1739         struct throtl_data *td = q->td;
1740
1741         cancel_work_sync(&td->dispatch_work);
1742 }
1743
1744 static struct blkcg_policy blkcg_policy_throtl = {
1745         .dfl_cftypes            = throtl_files,
1746         .legacy_cftypes         = throtl_legacy_files,
1747
1748         .pd_alloc_fn            = throtl_pd_alloc,
1749         .pd_init_fn             = throtl_pd_init,
1750         .pd_online_fn           = throtl_pd_online,
1751         .pd_offline_fn          = throtl_pd_offline,
1752         .pd_free_fn             = throtl_pd_free,
1753 };
1754
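/*
 * Return the last time @tg exceeded one of its low limits.  A direction
 * with no low limit configured is treated as overflowing right now.
 */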
1755 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1756 {
1757         unsigned long rtime = jiffies, wtime = jiffies;
1758
1759         if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1760                 rtime = tg->last_low_overflow_time[READ];
1761         if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1762                 wtime = tg->last_low_overflow_time[WRITE];
1763         return min(rtime, wtime);
1764 }
1765
1766 /* tg should not be an intermediate node */
1767 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1768 {
1769         struct throtl_service_queue *parent_sq;
1770         struct throtl_grp *parent = tg;
1771         unsigned long ret = __tg_last_low_overflow_time(tg);
1772
1773         while (true) {
1774                 parent_sq = parent->service_queue.parent_sq;
1775                 parent = sq_to_tg(parent_sq);
1776                 if (!parent)
1777                         break;
1778
1779                 /*
1780                  * If the parent doesn't have a low limit, it always reaches the
1781                  * low limit. Its overflow time is useless for its children
1782                  */
1783                 if (!parent->bps[READ][LIMIT_LOW] &&
1784                     !parent->iops[READ][LIMIT_LOW] &&
1785                     !parent->bps[WRITE][LIMIT_LOW] &&
1786                     !parent->iops[WRITE][LIMIT_LOW])
1787                         continue;
1788                 if (time_after(__tg_last_low_overflow_time(parent), ret))
1789                         ret = __tg_last_low_overflow_time(parent);
1790         }
1791         return ret;
1792 }
1793
1794 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1795 {
1796         /*
1797          * A cgroup is considered idle if:
1798          * - the current idle period is too long: longer than a fixed cap (in
1799          *   case the user configures too big a threshold) or 4x the threshold
1800          * - the average think time is greater than the threshold
1801          * - the IO latency is largely below the target
1802          */
1803         unsigned long time;
1804         bool ret;
1805
1806         time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1807         ret = tg->latency_target == DFL_LATENCY_TARGET ||
1808               tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1809               (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1810               tg->avg_idletime > tg->idletime_threshold ||
1811               (tg->latency_target && tg->bio_cnt &&
1812                 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1813         throtl_log(&tg->service_queue,
1814                 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1815                 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1816                 tg->bio_cnt, ret, tg->td->scale);
1817         return ret;
1818 }
1819
1820 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1821 {
1822         struct throtl_service_queue *sq = &tg->service_queue;
1823         bool read_limit, write_limit;
1824
1825         /*
1826          * If the cgroup reaches its low limit (a low limit of 0 is always
1827          * considered reached), it's ok to upgrade to the next limit
1828          */
1829         read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1830         write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1831         if (!read_limit && !write_limit)
1832                 return true;
1833         if (read_limit && sq->nr_queued[READ] &&
1834             (!write_limit || sq->nr_queued[WRITE]))
1835                 return true;
1836         if (write_limit && sq->nr_queued[WRITE] &&
1837             (!read_limit || sq->nr_queued[READ]))
1838                 return true;
1839
1840         if (time_after_eq(jiffies,
1841                 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1842             throtl_tg_is_idle(tg))
1843                 return true;
1844         return false;
1845 }
1846
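/* A group may upgrade if it or any of its ancestors may upgrade. */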
1847 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1848 {
1849         while (true) {
1850                 if (throtl_tg_can_upgrade(tg))
1851                         return true;
1852                 tg = sq_to_tg(tg->service_queue.parent_sq);
1853                 if (!tg || !tg_to_blkg(tg)->parent)
1854                         return false;
1855         }
1856         return false;
1857 }
1858
1859 static bool throtl_can_upgrade(struct throtl_data *td,
1860         struct throtl_grp *this_tg)
1861 {
1862         struct cgroup_subsys_state *pos_css;
1863         struct blkcg_gq *blkg;
1864
1865         if (td->limit_index != LIMIT_LOW)
1866                 return false;
1867
1868         if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1869                 return false;
1870
1871         rcu_read_lock();
1872         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1873                 struct throtl_grp *tg = blkg_to_tg(blkg);
1874
1875                 if (tg == this_tg)
1876                         continue;
1877                 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1878                         continue;
1879                 if (!throtl_hierarchy_can_upgrade(tg)) {
1880                         rcu_read_unlock();
1881                         return false;
1882                 }
1883         }
1884         rcu_read_unlock();
1885         return true;
1886 }
1887
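/*
 * Called for each bio while running under the low limit.  At most once per
 * throtl_slice, re-evaluate whether the whole hierarchy qualifies for an
 * upgrade to LIMIT_MAX and switch if it does.
 */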
1888 static void throtl_upgrade_check(struct throtl_grp *tg)
1889 {
1890         unsigned long now = jiffies;
1891
1892         if (tg->td->limit_index != LIMIT_LOW)
1893                 return;
1894
1895         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1896                 return;
1897
1898         tg->last_check_time = now;
1899
1900         if (!time_after_eq(now,
1901              __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1902                 return;
1903
1904         if (throtl_can_upgrade(tg->td, NULL))
1905                 throtl_upgrade_state(tg->td);
1906 }
1907
1908 static void throtl_upgrade_state(struct throtl_data *td)
1909 {
1910         struct cgroup_subsys_state *pos_css;
1911         struct blkcg_gq *blkg;
1912
1913         throtl_log(&td->service_queue, "upgrade to max");
1914         td->limit_index = LIMIT_MAX;
1915         td->low_upgrade_time = jiffies;
1916         td->scale = 0;
1917         rcu_read_lock();
1918         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1919                 struct throtl_grp *tg = blkg_to_tg(blkg);
1920                 struct throtl_service_queue *sq = &tg->service_queue;
1921
1922                 tg->disptime = jiffies - 1;
1923                 throtl_select_dispatch(sq);
1924                 throtl_schedule_next_dispatch(sq, true);
1925         }
1926         rcu_read_unlock();
1927         throtl_select_dispatch(&td->service_queue);
1928         throtl_schedule_next_dispatch(&td->service_queue, true);
1929         queue_work(kthrotld_workqueue, &td->dispatch_work);
1930 }
1931
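/*
 * Downgrade gradually: halve td->scale and, while it is still non-zero,
 * only pull low_upgrade_time back instead of switching limits.  The actual
 * switch to the new limit happens once scale has dropped to zero.
 */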
1932 static void throtl_downgrade_state(struct throtl_data *td, int new)
1933 {
1934         td->scale /= 2;
1935
1936         throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1937         if (td->scale) {
1938                 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1939                 return;
1940         }
1941
1942         td->limit_index = new;
1943         td->low_downgrade_time = jiffies;
1944 }
1945
1946 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1947 {
1948         struct throtl_data *td = tg->td;
1949         unsigned long now = jiffies;
1950
1951         /*
1952          * If the cgroup is below its low limit, consider downgrading and
1953          * throttling other cgroups
1954          */
1955         if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1956             time_after_eq(now, tg_last_low_overflow_time(tg) +
1957                                         td->throtl_slice) &&
1958             (!throtl_tg_is_idle(tg) ||
1959              !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1960                 return true;
1961         return false;
1962 }
1963
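/* Downgrade only if this group and all of its ancestors can downgrade. */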
1964 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
1965 {
1966         while (true) {
1967                 if (!throtl_tg_can_downgrade(tg))
1968                         return false;
1969                 tg = sq_to_tg(tg->service_queue.parent_sq);
1970                 if (!tg || !tg_to_blkg(tg)->parent)
1971                         break;
1972         }
1973         return true;
1974 }
1975
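/*
 * Called for each bio while running at LIMIT_MAX.  At most once per
 * throtl_slice, estimate the rate seen since the last check, record when a
 * low limit was exceeded and downgrade to LIMIT_LOW if the whole hierarchy
 * has stayed below its low limits for long enough.
 */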
1976 static void throtl_downgrade_check(struct throtl_grp *tg)
1977 {
1978         uint64_t bps;
1979         unsigned int iops;
1980         unsigned long elapsed_time;
1981         unsigned long now = jiffies;
1982
1983         if (tg->td->limit_index != LIMIT_MAX ||
1984             !tg->td->limit_valid[LIMIT_LOW])
1985                 return;
1986         if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1987                 return;
1988         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1989                 return;
1990
1991         elapsed_time = now - tg->last_check_time;
1992         tg->last_check_time = now;
1993
1994         if (time_before(now, tg_last_low_overflow_time(tg) +
1995                         tg->td->throtl_slice))
1996                 return;
1997
1998         if (tg->bps[READ][LIMIT_LOW]) {
1999                 bps = tg->last_bytes_disp[READ] * HZ;
2000                 do_div(bps, elapsed_time);
2001                 if (bps >= tg->bps[READ][LIMIT_LOW])
2002                         tg->last_low_overflow_time[READ] = now;
2003         }
2004
2005         if (tg->bps[WRITE][LIMIT_LOW]) {
2006                 bps = tg->last_bytes_disp[WRITE] * HZ;
2007                 do_div(bps, elapsed_time);
2008                 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2009                         tg->last_low_overflow_time[WRITE] = now;
2010         }
2011
2012         if (tg->iops[READ][LIMIT_LOW]) {
2013                 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2014                 if (iops >= tg->iops[READ][LIMIT_LOW])
2015                         tg->last_low_overflow_time[READ] = now;
2016         }
2017
2018         if (tg->iops[WRITE][LIMIT_LOW]) {
2019                 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2020                 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2021                         tg->last_low_overflow_time[WRITE] = now;
2022         }
2023
2024         /*
2025          * If the cgroup is below its low limit, consider downgrading and
2026          * throttling other cgroups
2027          */
2028         if (throtl_hierarchy_can_downgrade(tg))
2029                 throtl_downgrade_state(tg->td, LIMIT_LOW);
2030
2031         tg->last_bytes_disp[READ] = 0;
2032         tg->last_bytes_disp[WRITE] = 0;
2033         tg->last_io_disp[READ] = 0;
2034         tg->last_io_disp[WRITE] = 0;
2035 }
2036
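/*
 * Track the think time between a group's last IO completion and the next
 * submission as an exponentially weighted moving average (7/8 old, 1/8
 * new), in ktime_get_ns() >> 10 (roughly microsecond) units.
 */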
2037 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2038 {
2039         unsigned long now = ktime_get_ns() >> 10;
2040         unsigned long last_finish_time = tg->last_finish_time;
2041
2042         if (now <= last_finish_time || last_finish_time == 0 ||
2043             last_finish_time == tg->checked_last_finish_time)
2044                 return;
2045
2046         tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2047         tg->checked_last_finish_time = last_finish_time;
2048 }
2049
2050 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
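/*
 * Once per second, fold the per-cpu latency samples into per-size-bucket
 * averages (EWMA, 7/8 old and 1/8 new).  Buckets with fewer than 32 new
 * samples are raised to at least the previous bucket's latency so the
 * thresholds stay monotonic in request size.  Only used on non-rotational
 * devices.
 */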
2051 static void throtl_update_latency_buckets(struct throtl_data *td)
2052 {
2053         struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2054         int i, cpu, rw;
2055         unsigned long last_latency[2] = { 0 };
2056         unsigned long latency[2];
2057
2058         if (!blk_queue_nonrot(td->queue))
2059                 return;
2060         if (time_before(jiffies, td->last_calculate_time + HZ))
2061                 return;
2062         td->last_calculate_time = jiffies;
2063
2064         memset(avg_latency, 0, sizeof(avg_latency));
2065         for (rw = READ; rw <= WRITE; rw++) {
2066                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2067                         struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2068
2069                         for_each_possible_cpu(cpu) {
2070                                 struct latency_bucket *bucket;
2071
2072                                 /* this isn't race free, but ok in practice */
2073                                 bucket = per_cpu_ptr(td->latency_buckets[rw],
2074                                         cpu);
2075                                 tmp->total_latency += bucket[i].total_latency;
2076                                 tmp->samples += bucket[i].samples;
2077                                 bucket[i].total_latency = 0;
2078                                 bucket[i].samples = 0;
2079                         }
2080
2081                         if (tmp->samples >= 32) {
2082                                 int samples = tmp->samples;
2083
2084                                 latency[rw] = tmp->total_latency;
2085
2086                                 tmp->total_latency = 0;
2087                                 tmp->samples = 0;
2088                                 latency[rw] /= samples;
2089                                 if (latency[rw] == 0)
2090                                         continue;
2091                                 avg_latency[rw][i].latency = latency[rw];
2092                         }
2093                 }
2094         }
2095
2096         for (rw = READ; rw <= WRITE; rw++) {
2097                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2098                         if (!avg_latency[rw][i].latency) {
2099                                 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2100                                         td->avg_buckets[rw][i].latency =
2101                                                 last_latency[rw];
2102                                 continue;
2103                         }
2104
2105                         if (!td->avg_buckets[rw][i].valid)
2106                                 latency[rw] = avg_latency[rw][i].latency;
2107                         else
2108                                 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2109                                         avg_latency[rw][i].latency) >> 3;
2110
2111                         td->avg_buckets[rw][i].latency = max(latency[rw],
2112                                 last_latency[rw]);
2113                         td->avg_buckets[rw][i].valid = true;
2114                         last_latency[rw] = td->avg_buckets[rw][i].latency;
2115                 }
2116         }
2117
2118         for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2119                 throtl_log(&td->service_queue,
2120                         "Latency bucket %d: read latency=%ld, read valid=%d, "
2121                         "write latency=%ld, write valid=%d", i,
2122                         td->avg_buckets[READ][i].latency,
2123                         td->avg_buckets[READ][i].valid,
2124                         td->avg_buckets[WRITE][i].latency,
2125                         td->avg_buckets[WRITE][i].valid);
2126 }
2127 #else
2128 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2129 {
2130 }
2131 #endif
2132
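/*
 * Associate the bio with its throtl_grp and stamp its issue time/size so
 * the completion path can attribute its latency to the right group.
 */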
2133 static void blk_throtl_assoc_bio(struct throtl_grp *tg, struct bio *bio)
2134 {
2135 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2136         if (bio->bi_css) {
2137                 if (bio->bi_cg_private)
2138                         blkg_put(tg_to_blkg(bio->bi_cg_private));
2139                 bio->bi_cg_private = tg;
2140                 blkg_get(tg_to_blkg(tg));
2141         }
2142         blk_stat_set_issue(&bio->bi_issue_stat, bio_sectors(bio));
2143 #endif
2144 }
2145
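/*
 * Main throttling entry point, called for every bio submitted to @q.
 * Returns true if the bio was queued by the throttling machinery for later
 * dispatch and false if the caller may issue it immediately.
 */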
2146 bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
2147                     struct bio *bio)
2148 {
2149         struct throtl_qnode *qn = NULL;
2150         struct throtl_grp *tg = blkg_to_tg(blkg ?: q->root_blkg);
2151         struct throtl_service_queue *sq;
2152         bool rw = bio_data_dir(bio);
2153         bool throttled = false;
2154         struct throtl_data *td = tg->td;
2155
2156         WARN_ON_ONCE(!rcu_read_lock_held());
2157
2158         /* see throtl_charge_bio() */
2159         if (bio_flagged(bio, BIO_THROTTLED) || !tg->has_rules[rw])
2160                 goto out;
2161
2162         spin_lock_irq(q->queue_lock);
2163
2164         throtl_update_latency_buckets(td);
2165
2166         if (unlikely(blk_queue_bypass(q)))
2167                 goto out_unlock;
2168
2169         blk_throtl_assoc_bio(tg, bio);
2170         blk_throtl_update_idletime(tg);
2171
2172         sq = &tg->service_queue;
2173
2174 again:
2175         while (true) {
2176                 if (tg->last_low_overflow_time[rw] == 0)
2177                         tg->last_low_overflow_time[rw] = jiffies;
2178                 throtl_downgrade_check(tg);
2179                 throtl_upgrade_check(tg);
2180                 /* throtl is FIFO - if bios are already queued, queue this one too */
2181                 if (sq->nr_queued[rw])
2182                         break;
2183
2184                 /* if above limits, break to queue */
2185                 if (!tg_may_dispatch(tg, bio, NULL)) {
2186                         tg->last_low_overflow_time[rw] = jiffies;
2187                         if (throtl_can_upgrade(td, tg)) {
2188                                 throtl_upgrade_state(td);
2189                                 goto again;
2190                         }
2191                         break;
2192                 }
2193
2194                 /* within limits, let's charge and dispatch directly */
2195                 throtl_charge_bio(tg, bio);
2196
2197                 /*
2198                  * We need to trim slice even when bios are not being queued
2199                  * otherwise it might happen that a bio is not queued for
2200                  * a long time and slice keeps on extending and trim is not
2201                  * called for a long time. Now if limits are reduced suddenly
2202                  * we take into account all the IO dispatched so far at the new
2203                  * low rate and newly queued IO gets a really long dispatch
2204                  * time.
2205                  *
2206                  * So keep on trimming slice even if bio is not queued.
2207                  */
2208                 throtl_trim_slice(tg, rw);
2209
2210                 /*
2211                  * @bio passed through this layer without being throttled.
2212                  * Climb up the ladder.  If we're already at the top, it
2213                  * can be executed directly.
2214                  */
2215                 qn = &tg->qnode_on_parent[rw];
2216                 sq = sq->parent_sq;
2217                 tg = sq_to_tg(sq);
2218                 if (!tg)
2219                         goto out_unlock;
2220         }
2221
2222         /* out-of-limit, queue to @tg */
2223         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2224                    rw == READ ? 'R' : 'W',
2225                    tg->bytes_disp[rw], bio->bi_iter.bi_size,
2226                    tg_bps_limit(tg, rw),
2227                    tg->io_disp[rw], tg_iops_limit(tg, rw),
2228                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
2229
2230         tg->last_low_overflow_time[rw] = jiffies;
2231
2232         td->nr_queued[rw]++;
2233         throtl_add_bio_tg(bio, qn, tg);
2234         throttled = true;
2235
2236         /*
2237          * Update @tg's dispatch time and force schedule dispatch if @tg
2238          * was empty before @bio.  The forced scheduling isn't likely to
2239          * cause undue delay as @bio is likely to be dispatched directly if
2240          * its @tg's disptime is not in the future.
2241          */
2242         if (tg->flags & THROTL_TG_WAS_EMPTY) {
2243                 tg_update_disptime(tg);
2244                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2245         }
2246
2247 out_unlock:
2248         spin_unlock_irq(q->queue_lock);
2249 out:
2250         bio_set_flag(bio, BIO_THROTTLED);
2251
2252 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2253         if (throttled || !td->track_bio_latency)
2254                 bio->bi_issue_stat.stat |= SKIP_LATENCY;
2255 #endif
2256         return throttled;
2257 }
2258
2259 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
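/*
 * Add one latency sample to the per-cpu bucket matching the request size.
 * Samples are only collected while running under the low limit and only on
 * non-rotational devices.
 */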
2260 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2261         int op, unsigned long time)
2262 {
2263         struct latency_bucket *latency;
2264         int index;
2265
2266         if (!td || td->limit_index != LIMIT_LOW ||
2267             !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2268             !blk_queue_nonrot(td->queue))
2269                 return;
2270
2271         index = request_bucket_index(size);
2272
2273         latency = get_cpu_ptr(td->latency_buckets[op]);
2274         latency[index].total_latency += time;
2275         latency[index].samples++;
2276         put_cpu_ptr(td->latency_buckets[op]);
2277 }
2278
2279 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2280 {
2281         struct request_queue *q = rq->q;
2282         struct throtl_data *td = q->td;
2283
2284         throtl_track_latency(td, blk_stat_size(&rq->issue_stat),
2285                 req_op(rq), time_ns >> 10);
2286 }
2287
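/*
 * Completion hook: record the group's last finish time for idle tracking,
 * feed the bio's latency into the per-bucket stats (bio based drivers only)
 * and count bios whose latency exceeded the group's target.
 */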
2288 void blk_throtl_bio_endio(struct bio *bio)
2289 {
2290         struct throtl_grp *tg;
2291         u64 finish_time_ns;
2292         unsigned long finish_time;
2293         unsigned long start_time;
2294         unsigned long lat;
2295         int rw = bio_data_dir(bio);
2296
2297         tg = bio->bi_cg_private;
2298         if (!tg)
2299                 return;
2300         bio->bi_cg_private = NULL;
2301
2302         finish_time_ns = ktime_get_ns();
2303         tg->last_finish_time = finish_time_ns >> 10;
2304
2305         start_time = blk_stat_time(&bio->bi_issue_stat) >> 10;
2306         finish_time = __blk_stat_time(finish_time_ns) >> 10;
2307         if (!start_time || finish_time <= start_time) {
2308                 blkg_put(tg_to_blkg(tg));
2309                 return;
2310         }
2311
2312         lat = finish_time - start_time;
2313         /* this is only for bio based drivers */
2314         if (!(bio->bi_issue_stat.stat & SKIP_LATENCY))
2315                 throtl_track_latency(tg->td, blk_stat_size(&bio->bi_issue_stat),
2316                         bio_op(bio), lat);
2317
2318         if (tg->latency_target && lat >= tg->td->filtered_latency) {
2319                 int bucket;
2320                 unsigned int threshold;
2321
2322                 bucket = request_bucket_index(
2323                         blk_stat_size(&bio->bi_issue_stat));
2324                 threshold = tg->td->avg_buckets[rw][bucket].latency +
2325                         tg->latency_target;
2326                 if (lat > threshold)
2327                         tg->bad_bio_cnt++;
2328                 /*
2329                  * Not race free, so the count could be slightly wrong, which means
2330                  * cgroups may be throttled based on an inaccurate count
2331                  */
2332                 tg->bio_cnt++;
2333         }
2334
2335         if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2336                 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2337                 tg->bio_cnt /= 2;
2338                 tg->bad_bio_cnt /= 2;
2339         }
2340
2341         blkg_put(tg_to_blkg(tg));
2342 }
2343 #endif
2344
2345 /*
2346  * Dispatch all bios from all children tg's queued on @parent_sq.  On
2347  * return, @parent_sq is guaranteed to not have any active children tg's
2348  * and all bios from previously active tg's are on @parent_sq->bio_lists[].
2349  */
2350 static void tg_drain_bios(struct throtl_service_queue *parent_sq)
2351 {
2352         struct throtl_grp *tg;
2353
2354         while ((tg = throtl_rb_first(parent_sq))) {
2355                 struct throtl_service_queue *sq = &tg->service_queue;
2356                 struct bio *bio;
2357
2358                 throtl_dequeue_tg(tg);
2359
2360                 while ((bio = throtl_peek_queued(&sq->queued[READ])))
2361                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
2362                 while ((bio = throtl_peek_queued(&sq->queued[WRITE])))
2363                         tg_dispatch_one_bio(tg, bio_data_dir(bio));
2364         }
2365 }
2366
2367 /**
2368  * blk_throtl_drain - drain throttled bios
2369  * @q: request_queue to drain throttled bios for
2370  *
2371  * Dispatch all currently throttled bios on @q through ->make_request_fn().
2372  */
2373 void blk_throtl_drain(struct request_queue *q)
2374         __releases(q->queue_lock) __acquires(q->queue_lock)
2375 {
2376         struct throtl_data *td = q->td;
2377         struct blkcg_gq *blkg;
2378         struct cgroup_subsys_state *pos_css;
2379         struct bio *bio;
2380         int rw;
2381
2382         queue_lockdep_assert_held(q);
2383         rcu_read_lock();
2384
2385         /*
2386          * Drain each tg while doing post-order walk on the blkg tree, so
2387          * that all bios are propagated to td->service_queue.  It'd be
2388          * better to walk service_queue tree directly but blkg walk is
2389          * easier.
2390          */
2391         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
2392                 tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
2393
2394         /* finally, transfer bios from top-level tg's into the td */
2395         tg_drain_bios(&td->service_queue);
2396
2397         rcu_read_unlock();
2398         spin_unlock_irq(q->queue_lock);
2399
2400         /* all bios now should be in td->service_queue, issue them */
2401         for (rw = READ; rw <= WRITE; rw++)
2402                 while ((bio = throtl_pop_queued(&td->service_queue.queued[rw],
2403                                                 NULL)))
2404                         generic_make_request(bio);
2405
2406         spin_lock_irq(q->queue_lock);
2407 }
2408
2409 int blk_throtl_init(struct request_queue *q)
2410 {
2411         struct throtl_data *td;
2412         int ret;
2413
2414         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2415         if (!td)
2416                 return -ENOMEM;
2417         td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2418                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2419         if (!td->latency_buckets[READ]) {
2420                 kfree(td);
2421                 return -ENOMEM;
2422         }
2423         td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2424                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2425         if (!td->latency_buckets[WRITE]) {
2426                 free_percpu(td->latency_buckets[READ]);
2427                 kfree(td);
2428                 return -ENOMEM;
2429         }
2430
2431         INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2432         throtl_service_queue_init(&td->service_queue);
2433
2434         q->td = td;
2435         td->queue = q;
2436
2437         td->limit_valid[LIMIT_MAX] = true;
2438         td->limit_index = LIMIT_MAX;
2439         td->low_upgrade_time = jiffies;
2440         td->low_downgrade_time = jiffies;
2441
2442         /* activate policy */
2443         ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2444         if (ret) {
2445                 free_percpu(td->latency_buckets[READ]);
2446                 free_percpu(td->latency_buckets[WRITE]);
2447                 kfree(td);
2448         }
2449         return ret;
2450 }
2451
2452 void blk_throtl_exit(struct request_queue *q)
2453 {
2454         BUG_ON(!q->td);
2455         throtl_shutdown_wq(q);
2456         blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2457         free_percpu(q->td->latency_buckets[READ]);
2458         free_percpu(q->td->latency_buckets[WRITE]);
2459         kfree(q->td);
2460 }
2461
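/*
 * Pick per-device defaults once the queue type is known: a shorter slice
 * and no latency filtering for non-rotational devices, a longer slice,
 * 1ms filtering and a 4ms baseline latency for rotational ones.  Per-bio
 * latency tracking is only used for bio based drivers; request based
 * queues rely on blk-stat accounting instead.
 */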
2462 void blk_throtl_register_queue(struct request_queue *q)
2463 {
2464         struct throtl_data *td;
2465         int i;
2466
2467         td = q->td;
2468         BUG_ON(!td);
2469
2470         if (blk_queue_nonrot(q)) {
2471                 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2472                 td->filtered_latency = LATENCY_FILTERED_SSD;
2473         } else {
2474                 td->throtl_slice = DFL_THROTL_SLICE_HD;
2475                 td->filtered_latency = LATENCY_FILTERED_HD;
2476                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2477                         td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2478                         td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2479                 }
2480         }
2481 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2482         /* if no low limit, use previous default */
2483         td->throtl_slice = DFL_THROTL_SLICE_HD;
2484 #endif
2485
2486         td->track_bio_latency = !queue_is_rq_based(q);
2487         if (!td->track_bio_latency)
2488                 blk_stat_enable_accounting(q);
2489 }
2490
2491 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2492 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2493 {
2494         if (!q->td)
2495                 return -EINVAL;
2496         return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2497 }
2498
2499 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2500         const char *page, size_t count)
2501 {
2502         unsigned long v;
2503         unsigned long t;
2504
2505         if (!q->td)
2506                 return -EINVAL;
2507         if (kstrtoul(page, 10, &v))
2508                 return -EINVAL;
2509         t = msecs_to_jiffies(v);
2510         if (t == 0 || t > MAX_THROTL_SLICE)
2511                 return -EINVAL;
2512         q->td->throtl_slice = t;
2513         return count;
2514 }
2515 #endif
2516
2517 static int __init throtl_init(void)
2518 {
2519         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2520         if (!kthrotld_workqueue)
2521                 panic("Failed to create kthrotld\n");
2522
2523         return blkcg_policy_register(&blkcg_policy_throtl);
2524 }
2525
2526 module_init(throtl_init);