1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Interface for controlling IO bandwidth on a request queue
4  *
5  * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com>
6  */
7
8 #include <linux/module.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/bio.h>
12 #include <linux/blktrace_api.h>
13 #include <linux/blk-cgroup.h>
14 #include "blk.h"
15 #include "blk-cgroup-rwstat.h"
16
17 /* Max dispatch from a group in 1 round */
18 #define THROTL_GRP_QUANTUM 8
19
20 /* Total max dispatch from all groups in one round */
21 #define THROTL_QUANTUM 32
22
23 /* Throttling is performed over a slice and after that slice is renewed */
24 #define DFL_THROTL_SLICE_HD (HZ / 10)
25 #define DFL_THROTL_SLICE_SSD (HZ / 50)
26 #define MAX_THROTL_SLICE (HZ)
27 #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */
28 #define MIN_THROTL_BPS (320 * 1024)
29 #define MIN_THROTL_IOPS (10)
30 #define DFL_LATENCY_TARGET (-1L)
31 #define DFL_IDLE_THRESHOLD (0)
32 #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */
33 #define LATENCY_FILTERED_SSD (0)
34 /*
35  * For HD, very small latencies come from sequential IO. Such IO doesn't help
36  * determine whether a cgroup's IO is impacted by others, hence we ignore it.
37  */
38 #define LATENCY_FILTERED_HD (1000L) /* 1ms */
39
40 static struct blkcg_policy blkcg_policy_throtl;
41
42 /* A workqueue to queue throttle related work */
43 static struct workqueue_struct *kthrotld_workqueue;
44
45 /*
46  * To implement hierarchical throttling, throtl_grps form a tree and bios
47  * are dispatched upwards level by level until they reach the top and get
48  * issued.  When dispatching bios from the children and local group at each
49  * level, if the bios are dispatched into a single bio_list, there's a risk
50  * that a local or child group which can queue many bios at once fills up
51  * the list, starving the others.
52  *
53  * To avoid such starvation, dispatched bios are queued separately
54  * according to where they came from.  When they are again dispatched to
55  * the parent, they're popped in round-robin order so that no single source
56  * hogs the dispatch window.
57  *
58  * throtl_qnode is used to keep the queued bios separated by their sources.
59  * Bios are queued to throtl_qnode which in turn is queued to
60  * throtl_service_queue and then dispatched in round-robin order.
61  *
62  * It's also used to track the reference counts on blkg's.  A qnode always
63  * belongs to a throtl_grp and gets queued on itself or the parent, so
64  * incrementing the reference of the associated throtl_grp when a qnode is
65  * queued and decrementing when dequeued is enough to keep the whole blkg
66  * tree pinned while bios are in flight.
67  */
68 struct throtl_qnode {
69         struct list_head        node;           /* service_queue->queued[] */
70         struct bio_list         bios;           /* queued bios */
71         struct throtl_grp       *tg;            /* tg this qnode belongs to */
72 };
73
74 struct throtl_service_queue {
75         struct throtl_service_queue *parent_sq; /* the parent service_queue */
76
77         /*
78          * Bios queued directly to this service_queue or dispatched from
79          * children throtl_grp's.
80          */
81         struct list_head        queued[2];      /* throtl_qnode [READ/WRITE] */
82         unsigned int            nr_queued[2];   /* number of queued bios */
83
84         /*
85          * RB tree of active children throtl_grp's, which are sorted by
86          * their ->disptime.
87          */
88         struct rb_root_cached   pending_tree;   /* RB tree of active tgs */
89         unsigned int            nr_pending;     /* # queued in the tree */
90         unsigned long           first_pending_disptime; /* disptime of the first tg */
91         struct timer_list       pending_timer;  /* fires on first_pending_disptime */
92 };
93
94 enum tg_state_flags {
95         THROTL_TG_PENDING       = 1 << 0,       /* on parent's pending tree */
96         THROTL_TG_WAS_EMPTY     = 1 << 1,       /* bio_lists[] became non-empty */
97 };
98
99 #define rb_entry_tg(node)       rb_entry((node), struct throtl_grp, rb_node)
100
101 enum {
102         LIMIT_LOW,
103         LIMIT_MAX,
104         LIMIT_CNT,
105 };
106
107 struct throtl_grp {
108         /* must be the first member */
109         struct blkg_policy_data pd;
110
111         /* active throtl group service_queue member */
112         struct rb_node rb_node;
113
114         /* throtl_data this group belongs to */
115         struct throtl_data *td;
116
117         /* this group's service queue */
118         struct throtl_service_queue service_queue;
119
120         /*
121          * qnode_on_self is used when bios are directly queued to this
122          * throtl_grp so that local bios compete fairly with bios
123          * dispatched from children.  qnode_on_parent is used when bios are
124          * dispatched from this throtl_grp into its parent and will compete
125          * with the sibling qnode_on_parents and the parent's
126          * qnode_on_self.
127          */
128         struct throtl_qnode qnode_on_self[2];
129         struct throtl_qnode qnode_on_parent[2];
130
131         /*
132          * Dispatch time in jiffies. This is the estimated time when the group
133          * will unthrottle and be ready to dispatch more bios. It is used as
134          * the key to sort active groups in the service tree.
135          */
136         unsigned long disptime;
137
138         unsigned int flags;
139
140         /* are there any throtl rules between this group and td? */
141         bool has_rules[2];
142
143         /* internally used bytes per second rate limits */
144         uint64_t bps[2][LIMIT_CNT];
145         /* user configured bps limits */
146         uint64_t bps_conf[2][LIMIT_CNT];
147
148         /* internally used IOPS limits */
149         unsigned int iops[2][LIMIT_CNT];
150         /* user configured IOPS limits */
151         unsigned int iops_conf[2][LIMIT_CNT];
152
153         /* Number of bytes dispatched in current slice */
154         uint64_t bytes_disp[2];
155         /* Number of bio's dispatched in current slice */
156         unsigned int io_disp[2];
157
158         unsigned long last_low_overflow_time[2];
159
160         uint64_t last_bytes_disp[2];
161         unsigned int last_io_disp[2];
162
163         unsigned long last_check_time;
164
165         unsigned long latency_target; /* us */
166         unsigned long latency_target_conf; /* us */
167         /* When did we start a new slice */
168         unsigned long slice_start[2];
169         unsigned long slice_end[2];
170
171         unsigned long last_finish_time; /* ns / 1024 */
172         unsigned long checked_last_finish_time; /* ns / 1024 */
173         unsigned long avg_idletime; /* ns / 1024 */
174         unsigned long idletime_threshold; /* us */
175         unsigned long idletime_threshold_conf; /* us */
176
177         unsigned int bio_cnt; /* total bios */
178         unsigned int bad_bio_cnt; /* bios exceeding latency threshold */
179         unsigned long bio_cnt_reset_time;
180
181         struct blkg_rwstat stat_bytes;
182         struct blkg_rwstat stat_ios;
183 };
184
185 /* We measure latency for request size from <= 4k to >= 1M */
186 #define LATENCY_BUCKET_SIZE 9
187
188 struct latency_bucket {
189         unsigned long total_latency; /* ns / 1024 */
190         int samples;
191 };
192
193 struct avg_latency_bucket {
194         unsigned long latency; /* ns / 1024 */
195         bool valid;
196 };
197
198 struct throtl_data
199 {
200         /* service tree for active throtl groups */
201         struct throtl_service_queue service_queue;
202
203         struct request_queue *queue;
204
205         /* Total Number of queued bios on READ and WRITE lists */
206         unsigned int nr_queued[2];
207
208         unsigned int throtl_slice;
209
210         /* Work for dispatching throttled bios */
211         struct work_struct dispatch_work;
212         unsigned int limit_index;
213         bool limit_valid[LIMIT_CNT];
214
215         unsigned long low_upgrade_time;
216         unsigned long low_downgrade_time;
217
218         unsigned int scale;
219
220         struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE];
221         struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
222         struct latency_bucket __percpu *latency_buckets[2];
223         unsigned long last_calculate_time;
224         unsigned long filtered_latency;
225
226         bool track_bio_latency;
227 };
228
229 static void throtl_pending_timer_fn(struct timer_list *t);
230
231 static inline struct throtl_grp *pd_to_tg(struct blkg_policy_data *pd)
232 {
233         return pd ? container_of(pd, struct throtl_grp, pd) : NULL;
234 }
235
236 static inline struct throtl_grp *blkg_to_tg(struct blkcg_gq *blkg)
237 {
238         return pd_to_tg(blkg_to_pd(blkg, &blkcg_policy_throtl));
239 }
240
241 static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg)
242 {
243         return pd_to_blkg(&tg->pd);
244 }
245
246 /**
247  * sq_to_tg - return the throtl_grp the specified service queue belongs to
248  * @sq: the throtl_service_queue of interest
249  *
250  * Return the throtl_grp @sq belongs to.  If @sq is the top-level one
251  * embedded in throtl_data, %NULL is returned.
252  */
253 static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq)
254 {
255         if (sq && sq->parent_sq)
256                 return container_of(sq, struct throtl_grp, service_queue);
257         else
258                 return NULL;
259 }
260
261 /**
262  * sq_to_td - return throtl_data the specified service queue belongs to
263  * @sq: the throtl_service_queue of interest
264  *
265  * A service_queue can be embedded in either a throtl_grp or throtl_data.
266  * Determine the associated throtl_data accordingly and return it.
267  */
268 static struct throtl_data *sq_to_td(struct throtl_service_queue *sq)
269 {
270         struct throtl_grp *tg = sq_to_tg(sq);
271
272         if (tg)
273                 return tg->td;
274         else
275                 return container_of(sq, struct throtl_data, service_queue);
276 }
277
278 /*
279  * A cgroup's limit in LIMIT_MAX is scaled if a low limit is set. This scaling
280  * makes the IO dispatch smoother.
281  * Scale up: linearly scale up according to elapsed time since upgrade. For
282  *           every throtl_slice, the limit scales up by 1/2 of the .low limit
283  *           till the limit hits the .max limit.
284  * Scale down: exponentially scale down if a cgroup doesn't hit its .low limit
285  */
286 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td)
287 {
288         /* arbitrary value to avoid too big scale */
289         if (td->scale < 4096 && time_after_eq(jiffies,
290             td->low_upgrade_time + td->scale * td->throtl_slice))
291                 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice;
292
293         return low + (low >> 1) * td->scale;
294 }
295
296 static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw)
297 {
298         struct blkcg_gq *blkg = tg_to_blkg(tg);
299         struct throtl_data *td;
300         uint64_t ret;
301
302         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
303                 return U64_MAX;
304
305         td = tg->td;
306         ret = tg->bps[rw][td->limit_index];
307         if (ret == 0 && td->limit_index == LIMIT_LOW) {
308                 /* intermediate node or iops isn't 0 */
309                 if (!list_empty(&blkg->blkcg->css.children) ||
310                     tg->iops[rw][td->limit_index])
311                         return U64_MAX;
312                 else
313                         return MIN_THROTL_BPS;
314         }
315
316         if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] &&
317             tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) {
318                 uint64_t adjusted;
319
320                 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td);
321                 ret = min(tg->bps[rw][LIMIT_MAX], adjusted);
322         }
323         return ret;
324 }
325
326 static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
327 {
328         struct blkcg_gq *blkg = tg_to_blkg(tg);
329         struct throtl_data *td;
330         unsigned int ret;
331
332         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent)
333                 return UINT_MAX;
334
335         td = tg->td;
336         ret = tg->iops[rw][td->limit_index];
337         if (ret == 0 && tg->td->limit_index == LIMIT_LOW) {
338                 /* intermediate node or bps isn't 0 */
339                 if (!list_empty(&blkg->blkcg->css.children) ||
340                     tg->bps[rw][td->limit_index])
341                         return UINT_MAX;
342                 else
343                         return MIN_THROTL_IOPS;
344         }
345
346         if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] &&
347             tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) {
348                 uint64_t adjusted;
349
350                 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td);
351                 if (adjusted > UINT_MAX)
352                         adjusted = UINT_MAX;
353                 ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted);
354         }
355         return ret;
356 }
357
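/* map a request size in sectors to a latency bucket: <= 4k -> bucket 0, >= 1M -> bucket 8 */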
358 #define request_bucket_index(sectors) \
359         clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1)
360
361 /**
362  * throtl_log - log debug message via blktrace
363  * @sq: the service_queue being reported
364  * @fmt: printf format string
365  * @args: printf args
366  *
367  * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a
368  * throtl_grp; otherwise, just "throtl".
369  */
370 #define throtl_log(sq, fmt, args...)    do {                            \
371         struct throtl_grp *__tg = sq_to_tg((sq));                       \
372         struct throtl_data *__td = sq_to_td((sq));                      \
373                                                                         \
374         (void)__td;                                                     \
375         if (likely(!blk_trace_note_message_enabled(__td->queue)))       \
376                 break;                                                  \
377         if ((__tg)) {                                                   \
378                 blk_add_cgroup_trace_msg(__td->queue,                   \
379                         tg_to_blkg(__tg)->blkcg, "throtl " fmt, ##args);\
380         } else {                                                        \
381                 blk_add_trace_msg(__td->queue, "throtl " fmt, ##args);  \
382         }                                                               \
383 } while (0)
384
385 static inline unsigned int throtl_bio_data_size(struct bio *bio)
386 {
387         /* assume it's one sector */
388         if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
389                 return 512;
390         return bio->bi_iter.bi_size;
391 }
392
393 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
394 {
395         INIT_LIST_HEAD(&qn->node);
396         bio_list_init(&qn->bios);
397         qn->tg = tg;
398 }
399
400 /**
401  * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it
402  * @bio: bio being added
403  * @qn: qnode to add bio to
404  * @queued: the service_queue->queued[] list @qn belongs to
405  *
406  * Add @bio to @qn and put @qn on @queued if it's not already on the list.
407  * @qn->tg's reference count is bumped when @qn is activated.  See the
408  * comment on top of throtl_qnode definition for details.
409  */
410 static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn,
411                                  struct list_head *queued)
412 {
413         bio_list_add(&qn->bios, bio);
414         if (list_empty(&qn->node)) {
415                 list_add_tail(&qn->node, queued);
416                 blkg_get(tg_to_blkg(qn->tg));
417         }
418 }
419
420 /**
421  * throtl_peek_queued - peek the first bio on a qnode list
422  * @queued: the qnode list to peek
423  */
424 static struct bio *throtl_peek_queued(struct list_head *queued)
425 {
426         struct throtl_qnode *qn;
427         struct bio *bio;
428
429         if (list_empty(queued))
430                 return NULL;
431
432         qn = list_first_entry(queued, struct throtl_qnode, node);
433         bio = bio_list_peek(&qn->bios);
434         WARN_ON_ONCE(!bio);
435         return bio;
436 }
437
438 /**
439  * throtl_pop_queued - pop the first bio from a qnode list
440  * @queued: the qnode list to pop a bio from
441  * @tg_to_put: optional out argument for throtl_grp to put
442  *
443  * Pop the first bio from the qnode list @queued.  After popping, the first
444  * qnode is removed from @queued if empty or moved to the end of @queued so
445  * that the popping order is round-robin.
446  *
447  * When the first qnode is removed, its associated throtl_grp should be put
448  * too.  If @tg_to_put is NULL, this function automatically puts it;
449  * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is
450  * responsible for putting it.
451  */
452 static struct bio *throtl_pop_queued(struct list_head *queued,
453                                      struct throtl_grp **tg_to_put)
454 {
455         struct throtl_qnode *qn;
456         struct bio *bio;
457
458         if (list_empty(queued))
459                 return NULL;
460
461         qn = list_first_entry(queued, struct throtl_qnode, node);
462         bio = bio_list_pop(&qn->bios);
463         WARN_ON_ONCE(!bio);
464
465         if (bio_list_empty(&qn->bios)) {
466                 list_del_init(&qn->node);
467                 if (tg_to_put)
468                         *tg_to_put = qn->tg;
469                 else
470                         blkg_put(tg_to_blkg(qn->tg));
471         } else {
472                 list_move_tail(&qn->node, queued);
473         }
474
475         return bio;
476 }
477
478 /* init a service_queue, assumes the caller zeroed it */
479 static void throtl_service_queue_init(struct throtl_service_queue *sq)
480 {
481         INIT_LIST_HEAD(&sq->queued[0]);
482         INIT_LIST_HEAD(&sq->queued[1]);
483         sq->pending_tree = RB_ROOT_CACHED;
484         timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0);
485 }
486
487 static struct blkg_policy_data *throtl_pd_alloc(gfp_t gfp,
488                                                 struct request_queue *q,
489                                                 struct blkcg *blkcg)
490 {
491         struct throtl_grp *tg;
492         int rw;
493
494         tg = kzalloc_node(sizeof(*tg), gfp, q->node);
495         if (!tg)
496                 return NULL;
497
498         if (blkg_rwstat_init(&tg->stat_bytes, gfp))
499                 goto err_free_tg;
500
501         if (blkg_rwstat_init(&tg->stat_ios, gfp))
502                 goto err_exit_stat_bytes;
503
504         throtl_service_queue_init(&tg->service_queue);
505
506         for (rw = READ; rw <= WRITE; rw++) {
507                 throtl_qnode_init(&tg->qnode_on_self[rw], tg);
508                 throtl_qnode_init(&tg->qnode_on_parent[rw], tg);
509         }
510
511         RB_CLEAR_NODE(&tg->rb_node);
512         tg->bps[READ][LIMIT_MAX] = U64_MAX;
513         tg->bps[WRITE][LIMIT_MAX] = U64_MAX;
514         tg->iops[READ][LIMIT_MAX] = UINT_MAX;
515         tg->iops[WRITE][LIMIT_MAX] = UINT_MAX;
516         tg->bps_conf[READ][LIMIT_MAX] = U64_MAX;
517         tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX;
518         tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX;
519         tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX;
520         /* LIMIT_LOW will have default value 0 */
521
522         tg->latency_target = DFL_LATENCY_TARGET;
523         tg->latency_target_conf = DFL_LATENCY_TARGET;
524         tg->idletime_threshold = DFL_IDLE_THRESHOLD;
525         tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD;
526
527         return &tg->pd;
528
529 err_exit_stat_bytes:
530         blkg_rwstat_exit(&tg->stat_bytes);
531 err_free_tg:
532         kfree(tg);
533         return NULL;
534 }
535
536 static void throtl_pd_init(struct blkg_policy_data *pd)
537 {
538         struct throtl_grp *tg = pd_to_tg(pd);
539         struct blkcg_gq *blkg = tg_to_blkg(tg);
540         struct throtl_data *td = blkg->q->td;
541         struct throtl_service_queue *sq = &tg->service_queue;
542
543         /*
544          * If on the default hierarchy, we switch to properly hierarchical
545          * behavior where limits on a given throtl_grp are applied to the
546          * whole subtree rather than just the group itself.  e.g. If 16M
547          * read_bps limit is set on the root group, the whole system can't
548          * exceed 16M for the device.
549          *
550          * If not on the default hierarchy, the broken flat hierarchy
551          * behavior is retained where all throtl_grps are treated as if
552          * they're all separate root groups right below throtl_data.
553          * Limits of a group don't interact with limits of other groups
554          * regardless of the position of the group in the hierarchy.
555          */
556         sq->parent_sq = &td->service_queue;
557         if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent)
558                 sq->parent_sq = &blkg_to_tg(blkg->parent)->service_queue;
559         tg->td = td;
560 }
561
562 /*
563  * Set has_rules[] if @tg or any of its parents have limits configured.
564  * This doesn't require walking up to the top of the hierarchy as the
565  * parent's has_rules[] is guaranteed to be correct.
566  */
567 static void tg_update_has_rules(struct throtl_grp *tg)
568 {
569         struct throtl_grp *parent_tg = sq_to_tg(tg->service_queue.parent_sq);
570         struct throtl_data *td = tg->td;
571         int rw;
572
573         for (rw = READ; rw <= WRITE; rw++)
574                 tg->has_rules[rw] = (parent_tg && parent_tg->has_rules[rw]) ||
575                         (td->limit_valid[td->limit_index] &&
576                          (tg_bps_limit(tg, rw) != U64_MAX ||
577                           tg_iops_limit(tg, rw) != UINT_MAX));
578 }
579
580 static void throtl_pd_online(struct blkg_policy_data *pd)
581 {
582         struct throtl_grp *tg = pd_to_tg(pd);
583         /*
584          * We don't want new groups to escape the limits of their ancestors.
585          * Update has_rules[] after a new group is brought online.
586          */
587         tg_update_has_rules(tg);
588 }
589
590 static void blk_throtl_update_limit_valid(struct throtl_data *td)
591 {
592         struct cgroup_subsys_state *pos_css;
593         struct blkcg_gq *blkg;
594         bool low_valid = false;
595
596         rcu_read_lock();
597         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
598                 struct throtl_grp *tg = blkg_to_tg(blkg);
599
600                 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] ||
601                     tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) {
602                         low_valid = true;
603                         break;
604                 }
605         }
606         rcu_read_unlock();
607
608         td->limit_valid[LIMIT_LOW] = low_valid;
609 }
610
611 static void throtl_upgrade_state(struct throtl_data *td);
612 static void throtl_pd_offline(struct blkg_policy_data *pd)
613 {
614         struct throtl_grp *tg = pd_to_tg(pd);
615
616         tg->bps[READ][LIMIT_LOW] = 0;
617         tg->bps[WRITE][LIMIT_LOW] = 0;
618         tg->iops[READ][LIMIT_LOW] = 0;
619         tg->iops[WRITE][LIMIT_LOW] = 0;
620
621         blk_throtl_update_limit_valid(tg->td);
622
623         if (!tg->td->limit_valid[tg->td->limit_index])
624                 throtl_upgrade_state(tg->td);
625 }
626
627 static void throtl_pd_free(struct blkg_policy_data *pd)
628 {
629         struct throtl_grp *tg = pd_to_tg(pd);
630
631         del_timer_sync(&tg->service_queue.pending_timer);
632         blkg_rwstat_exit(&tg->stat_bytes);
633         blkg_rwstat_exit(&tg->stat_ios);
634         kfree(tg);
635 }
636
637 static struct throtl_grp *
638 throtl_rb_first(struct throtl_service_queue *parent_sq)
639 {
640         struct rb_node *n;
641
642         n = rb_first_cached(&parent_sq->pending_tree);
643         WARN_ON_ONCE(!n);
644         if (!n)
645                 return NULL;
646         return rb_entry_tg(n);
647 }
648
649 static void throtl_rb_erase(struct rb_node *n,
650                             struct throtl_service_queue *parent_sq)
651 {
652         rb_erase_cached(n, &parent_sq->pending_tree);
653         RB_CLEAR_NODE(n);
654         --parent_sq->nr_pending;
655 }
656
657 static void update_min_dispatch_time(struct throtl_service_queue *parent_sq)
658 {
659         struct throtl_grp *tg;
660
661         tg = throtl_rb_first(parent_sq);
662         if (!tg)
663                 return;
664
665         parent_sq->first_pending_disptime = tg->disptime;
666 }
667
668 static void tg_service_queue_add(struct throtl_grp *tg)
669 {
670         struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq;
671         struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node;
672         struct rb_node *parent = NULL;
673         struct throtl_grp *__tg;
674         unsigned long key = tg->disptime;
675         bool leftmost = true;
676
677         while (*node != NULL) {
678                 parent = *node;
679                 __tg = rb_entry_tg(parent);
680
681                 if (time_before(key, __tg->disptime))
682                         node = &parent->rb_left;
683                 else {
684                         node = &parent->rb_right;
685                         leftmost = false;
686                 }
687         }
688
689         rb_link_node(&tg->rb_node, parent, node);
690         rb_insert_color_cached(&tg->rb_node, &parent_sq->pending_tree,
691                                leftmost);
692 }
693
694 static void throtl_enqueue_tg(struct throtl_grp *tg)
695 {
696         if (!(tg->flags & THROTL_TG_PENDING)) {
697                 tg_service_queue_add(tg);
698                 tg->flags |= THROTL_TG_PENDING;
699                 tg->service_queue.parent_sq->nr_pending++;
700         }
701 }
702
703 static void throtl_dequeue_tg(struct throtl_grp *tg)
704 {
705         if (tg->flags & THROTL_TG_PENDING) {
706                 throtl_rb_erase(&tg->rb_node, tg->service_queue.parent_sq);
707                 tg->flags &= ~THROTL_TG_PENDING;
708         }
709 }
710
711 /* Call with queue lock held */
712 static void throtl_schedule_pending_timer(struct throtl_service_queue *sq,
713                                           unsigned long expires)
714 {
715         unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice;
716
717         /*
718          * Since we are adjusting the throttle limit dynamically, the sleep
719          * time calculated according to the previous limit might be invalid. It's
720          * possible the cgroup sleep time is very long and no other cgroups have
721          * IO running, so the limit change wouldn't be noticed. Make sure the
722          * cgroup doesn't sleep too long to avoid missing that notification.
723          */
724         if (time_after(expires, max_expire))
725                 expires = max_expire;
726         mod_timer(&sq->pending_timer, expires);
727         throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu",
728                    expires - jiffies, jiffies);
729 }
730
731 /**
732  * throtl_schedule_next_dispatch - schedule the next dispatch cycle
733  * @sq: the service_queue to schedule dispatch for
734  * @force: force scheduling
735  *
736  * Arm @sq->pending_timer so that the next dispatch cycle starts on the
737  * dispatch time of the first pending child.  Returns %true if either timer
738  * is armed or there's no pending child left.  %false if the current
739  * dispatch window is still open and the caller should continue
740  * dispatching.
741  *
742  * If @force is %true, the dispatch timer is always scheduled and this
743  * function is guaranteed to return %true.  This is to be used when the
744  * caller can't dispatch itself and needs to invoke pending_timer
745  * unconditionally.  Note that forced scheduling is likely to induce short
746  * delay before dispatch starts even if @sq->first_pending_disptime is not
747  * in the future and thus shouldn't be used in hot paths.
748  */
749 static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq,
750                                           bool force)
751 {
752         /* any pending children left? */
753         if (!sq->nr_pending)
754                 return true;
755
756         update_min_dispatch_time(sq);
757
758         /* is the next dispatch time in the future? */
759         if (force || time_after(sq->first_pending_disptime, jiffies)) {
760                 throtl_schedule_pending_timer(sq, sq->first_pending_disptime);
761                 return true;
762         }
763
764         /* tell the caller to continue dispatching */
765         return false;
766 }
767
768 static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg,
769                 bool rw, unsigned long start)
770 {
771         tg->bytes_disp[rw] = 0;
772         tg->io_disp[rw] = 0;
773
774         /*
775          * Previous slice has expired. We must have trimmed it after last
776          * bio dispatch. That means since start of last slice, we never used
777          * that bandwidth. Do try to make use of that bandwidth while giving
778          * credit.
779          */
780         if (time_after_eq(start, tg->slice_start[rw]))
781                 tg->slice_start[rw] = start;
782
783         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
784         throtl_log(&tg->service_queue,
785                    "[%c] new slice with credit start=%lu end=%lu jiffies=%lu",
786                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
787                    tg->slice_end[rw], jiffies);
788 }
789
790 static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw)
791 {
792         tg->bytes_disp[rw] = 0;
793         tg->io_disp[rw] = 0;
794         tg->slice_start[rw] = jiffies;
795         tg->slice_end[rw] = jiffies + tg->td->throtl_slice;
796         throtl_log(&tg->service_queue,
797                    "[%c] new slice start=%lu end=%lu jiffies=%lu",
798                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
799                    tg->slice_end[rw], jiffies);
800 }
801
802 static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw,
803                                         unsigned long jiffy_end)
804 {
805         tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice);
806 }
807
808 static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw,
809                                        unsigned long jiffy_end)
810 {
811         throtl_set_slice_end(tg, rw, jiffy_end);
812         throtl_log(&tg->service_queue,
813                    "[%c] extend slice start=%lu end=%lu jiffies=%lu",
814                    rw == READ ? 'R' : 'W', tg->slice_start[rw],
815                    tg->slice_end[rw], jiffies);
816 }
817
818 /* Determine if previously allocated or extended slice is complete or not */
819 static bool throtl_slice_used(struct throtl_grp *tg, bool rw)
820 {
821         if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw]))
822                 return false;
823
824         return true;
825 }
826
827 /* Trim the used slices and adjust slice start accordingly */
828 static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw)
829 {
830         unsigned long nr_slices, time_elapsed, io_trim;
831         u64 bytes_trim, tmp;
832
833         BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw]));
834
835         /*
836          * If bps are unlimited (-1), then time slices don't get
837          * renewed. Don't try to trim the slice if the slice is used. A new
838          * slice will start when appropriate.
839          */
840         if (throtl_slice_used(tg, rw))
841                 return;
842
843         /*
844          * A bio has been dispatched. Also adjust slice_end. It might happen
845          * that the cgroup limit was initially very low, resulting in a high
846          * slice_end, but the limit was later bumped up and the bio dispatched
847          * sooner; then we need to reduce slice_end. A bogus high slice_end
848          * is bad because it does not allow a new slice to start.
849          */
850
851         throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice);
852
853         time_elapsed = jiffies - tg->slice_start[rw];
854
855         nr_slices = time_elapsed / tg->td->throtl_slice;
856
857         if (!nr_slices)
858                 return;
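        /* bytes/ios the group was allowed to dispatch during the elapsed full slices being trimmed */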
859         tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices;
860         do_div(tmp, HZ);
861         bytes_trim = tmp;
862
863         io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) /
864                 HZ;
865
866         if (!bytes_trim && !io_trim)
867                 return;
868
869         if (tg->bytes_disp[rw] >= bytes_trim)
870                 tg->bytes_disp[rw] -= bytes_trim;
871         else
872                 tg->bytes_disp[rw] = 0;
873
874         if (tg->io_disp[rw] >= io_trim)
875                 tg->io_disp[rw] -= io_trim;
876         else
877                 tg->io_disp[rw] = 0;
878
879         tg->slice_start[rw] += nr_slices * tg->td->throtl_slice;
880
881         throtl_log(&tg->service_queue,
882                    "[%c] trim slice nr=%lu bytes=%llu io=%lu start=%lu end=%lu jiffies=%lu",
883                    rw == READ ? 'R' : 'W', nr_slices, bytes_trim, io_trim,
884                    tg->slice_start[rw], tg->slice_end[rw], jiffies);
885 }
886
887 static bool tg_with_in_iops_limit(struct throtl_grp *tg, struct bio *bio,
888                                   u32 iops_limit, unsigned long *wait)
889 {
890         bool rw = bio_data_dir(bio);
891         unsigned int io_allowed;
892         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
893         u64 tmp;
894
895         if (iops_limit == UINT_MAX) {
896                 if (wait)
897                         *wait = 0;
898                 return true;
899         }
900
901         jiffy_elapsed = jiffies - tg->slice_start[rw];
902
903         /* Round up to the next throttle slice, wait time must be nonzero */
904         jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice);
905
906         /*
907          * jiffy_elapsed_rnd should not be a big value: the minimum iops can be
908          * 1, so at most the elapsed jiffies should be equivalent to 1 second, as
909          * we will allow dispatch after 1 second and after that the slice should
910          * have been trimmed.
911          */
912
913         tmp = (u64)iops_limit * jiffy_elapsed_rnd;
914         do_div(tmp, HZ);
915
916         if (tmp > UINT_MAX)
917                 io_allowed = UINT_MAX;
918         else
919                 io_allowed = tmp;
920
921         if (tg->io_disp[rw] + 1 <= io_allowed) {
922                 if (wait)
923                         *wait = 0;
924                 return true;
925         }
926
927         /* Calc approx time to dispatch */
928         jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed;
929
930         if (wait)
931                 *wait = jiffy_wait;
932         return false;
933 }
934
935 static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
936                                  u64 bps_limit, unsigned long *wait)
937 {
938         bool rw = bio_data_dir(bio);
939         u64 bytes_allowed, extra_bytes, tmp;
940         unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
941         unsigned int bio_size = throtl_bio_data_size(bio);
942
943         if (bps_limit == U64_MAX) {
944                 if (wait)
945                         *wait = 0;
946                 return true;
947         }
948
949         jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
950
951         /* Slice has just started. Consider one slice interval */
952         if (!jiffy_elapsed)
953                 jiffy_elapsed_rnd = tg->td->throtl_slice;
954
955         jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice);
956
957         tmp = bps_limit * jiffy_elapsed_rnd;
958         do_div(tmp, HZ);
959         bytes_allowed = tmp;
960
961         if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
962                 if (wait)
963                         *wait = 0;
964                 return true;
965         }
966
967         /* Calc approx time to dispatch */
968         extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
969         jiffy_wait = div64_u64(extra_bytes * HZ, bps_limit);
970
971         if (!jiffy_wait)
972                 jiffy_wait = 1;
973
974         /*
975          * This wait time doesn't take into account the rounding up we did
976          * above. Add that time as well.
977          */
978         jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed);
979         if (wait)
980                 *wait = jiffy_wait;
981         return false;
982 }
983
984 /*
985  * Returns whether one can dispatch a bio or not. Also returns the approximate
986  * number of jiffies to wait before this bio is within the IO rate and can be dispatched.
987  */
988 static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
989                             unsigned long *wait)
990 {
991         bool rw = bio_data_dir(bio);
992         unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0;
993         u64 bps_limit = tg_bps_limit(tg, rw);
994         u32 iops_limit = tg_iops_limit(tg, rw);
995
996         /*
997          * Currently the whole state machine of the group depends on the first bio
998          * queued in the group's bio list. So one should not be calling
999          * this function with a different bio if there are other bios
1000          * queued.
1001          */
1002         BUG_ON(tg->service_queue.nr_queued[rw] &&
1003                bio != throtl_peek_queued(&tg->service_queue.queued[rw]));
1004
1005         /* If both bps and iops are unlimited, the bio can be dispatched right away */
1006         if (bps_limit == U64_MAX && iops_limit == UINT_MAX) {
1007                 if (wait)
1008                         *wait = 0;
1009                 return true;
1010         }
1011
1012         /*
1013          * If the previous slice expired, start a new one; otherwise renew/extend
1014          * the existing slice to make sure it is at least throtl_slice interval
1015          * long since now. A new slice is started only for an empty throttle group.
1016          * If there are queued bios, that means there should be an active
1017          * slice and it should be extended instead.
1018          */
1019         if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
1020                 throtl_start_new_slice(tg, rw);
1021         else {
1022                 if (time_before(tg->slice_end[rw],
1023                     jiffies + tg->td->throtl_slice))
1024                         throtl_extend_slice(tg, rw,
1025                                 jiffies + tg->td->throtl_slice);
1026         }
1027
1028         if (tg_with_in_bps_limit(tg, bio, bps_limit, &bps_wait) &&
1029             tg_with_in_iops_limit(tg, bio, iops_limit, &iops_wait)) {
1030                 if (wait)
1031                         *wait = 0;
1032                 return true;
1033         }
1034
1035         max_wait = max(bps_wait, iops_wait);
1036
1037         if (wait)
1038                 *wait = max_wait;
1039
1040         if (time_before(tg->slice_end[rw], jiffies + max_wait))
1041                 throtl_extend_slice(tg, rw, jiffies + max_wait);
1042
1043         return false;
1044 }
1045
1046 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1047 {
1048         bool rw = bio_data_dir(bio);
1049         unsigned int bio_size = throtl_bio_data_size(bio);
1050
1051         /* Charge the bio to the group */
1052         tg->bytes_disp[rw] += bio_size;
1053         tg->io_disp[rw]++;
1054         tg->last_bytes_disp[rw] += bio_size;
1055         tg->last_io_disp[rw]++;
1056
1057         /*
1058          * BIO_THROTTLED is used to prevent the same bio from being throttled
1059          * more than once as a throttled bio will go through blk-throtl the
1060          * second time when it eventually gets issued.  Set it when a bio
1061          * is being charged to a tg.
1062          */
1063         if (!bio_flagged(bio, BIO_THROTTLED))
1064                 bio_set_flag(bio, BIO_THROTTLED);
1065 }
1066
1067 /**
1068  * throtl_add_bio_tg - add a bio to the specified throtl_grp
1069  * @bio: bio to add
1070  * @qn: qnode to use
1071  * @tg: the target throtl_grp
1072  *
1073  * Add @bio to @tg's service_queue using @qn.  If @qn is not specified,
1074  * tg->qnode_on_self[] is used.
1075  */
1076 static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn,
1077                               struct throtl_grp *tg)
1078 {
1079         struct throtl_service_queue *sq = &tg->service_queue;
1080         bool rw = bio_data_dir(bio);
1081
1082         if (!qn)
1083                 qn = &tg->qnode_on_self[rw];
1084
1085         /*
1086          * If @tg doesn't currently have any bios queued in the same
1087          * direction, queueing @bio can change when @tg should be
1088          * dispatched.  Mark that @tg was empty.  This is automatically
1089          * cleared on the next tg_update_disptime().
1090          */
1091         if (!sq->nr_queued[rw])
1092                 tg->flags |= THROTL_TG_WAS_EMPTY;
1093
1094         throtl_qnode_add_bio(bio, qn, &sq->queued[rw]);
1095
1096         sq->nr_queued[rw]++;
1097         throtl_enqueue_tg(tg);
1098 }
1099
1100 static void tg_update_disptime(struct throtl_grp *tg)
1101 {
1102         struct throtl_service_queue *sq = &tg->service_queue;
1103         unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
1104         struct bio *bio;
1105
1106         bio = throtl_peek_queued(&sq->queued[READ]);
1107         if (bio)
1108                 tg_may_dispatch(tg, bio, &read_wait);
1109
1110         bio = throtl_peek_queued(&sq->queued[WRITE]);
1111         if (bio)
1112                 tg_may_dispatch(tg, bio, &write_wait);
1113
1114         min_wait = min(read_wait, write_wait);
1115         disptime = jiffies + min_wait;
1116
1117         /* Update dispatch time */
1118         throtl_dequeue_tg(tg);
1119         tg->disptime = disptime;
1120         throtl_enqueue_tg(tg);
1121
1122         /* see throtl_add_bio_tg() */
1123         tg->flags &= ~THROTL_TG_WAS_EMPTY;
1124 }
1125
1126 static void start_parent_slice_with_credit(struct throtl_grp *child_tg,
1127                                         struct throtl_grp *parent_tg, bool rw)
1128 {
1129         if (throtl_slice_used(parent_tg, rw)) {
1130                 throtl_start_new_slice_with_credit(parent_tg, rw,
1131                                 child_tg->slice_start[rw]);
1132         }
1133
1134 }
1135
1136 static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw)
1137 {
1138         struct throtl_service_queue *sq = &tg->service_queue;
1139         struct throtl_service_queue *parent_sq = sq->parent_sq;
1140         struct throtl_grp *parent_tg = sq_to_tg(parent_sq);
1141         struct throtl_grp *tg_to_put = NULL;
1142         struct bio *bio;
1143
1144         /*
1145          * @bio is being transferred from @tg to @parent_sq.  Popping a bio
1146          * from @tg may put its reference and @parent_sq might end up
1147          * getting released prematurely.  Remember the tg to put and put it
1148          * after @bio is transferred to @parent_sq.
1149          */
1150         bio = throtl_pop_queued(&sq->queued[rw], &tg_to_put);
1151         sq->nr_queued[rw]--;
1152
1153         throtl_charge_bio(tg, bio);
1154
1155         /*
1156          * If our parent is another tg, we just need to transfer @bio to
1157          * the parent using throtl_add_bio_tg().  If our parent is
1158          * @td->service_queue, @bio is ready to be issued.  Put it on its
1159          * bio_lists[] and decrease total number queued.  The caller is
1160          * responsible for issuing these bios.
1161          */
1162         if (parent_tg) {
1163                 throtl_add_bio_tg(bio, &tg->qnode_on_parent[rw], parent_tg);
1164                 start_parent_slice_with_credit(tg, parent_tg, rw);
1165         } else {
1166                 throtl_qnode_add_bio(bio, &tg->qnode_on_parent[rw],
1167                                      &parent_sq->queued[rw]);
1168                 BUG_ON(tg->td->nr_queued[rw] <= 0);
1169                 tg->td->nr_queued[rw]--;
1170         }
1171
1172         throtl_trim_slice(tg, rw);
1173
1174         if (tg_to_put)
1175                 blkg_put(tg_to_blkg(tg_to_put));
1176 }
1177
1178 static int throtl_dispatch_tg(struct throtl_grp *tg)
1179 {
1180         struct throtl_service_queue *sq = &tg->service_queue;
1181         unsigned int nr_reads = 0, nr_writes = 0;
1182         unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4;
1183         unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads;
1184         struct bio *bio;
1185
1186         /* Try to dispatch 75% READS and 25% WRITES */
1187
1188         while ((bio = throtl_peek_queued(&sq->queued[READ])) &&
1189                tg_may_dispatch(tg, bio, NULL)) {
1190
1191                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1192                 nr_reads++;
1193
1194                 if (nr_reads >= max_nr_reads)
1195                         break;
1196         }
1197
1198         while ((bio = throtl_peek_queued(&sq->queued[WRITE])) &&
1199                tg_may_dispatch(tg, bio, NULL)) {
1200
1201                 tg_dispatch_one_bio(tg, bio_data_dir(bio));
1202                 nr_writes++;
1203
1204                 if (nr_writes >= max_nr_writes)
1205                         break;
1206         }
1207
1208         return nr_reads + nr_writes;
1209 }
1210
1211 static int throtl_select_dispatch(struct throtl_service_queue *parent_sq)
1212 {
1213         unsigned int nr_disp = 0;
1214
1215         while (1) {
1216                 struct throtl_grp *tg;
1217                 struct throtl_service_queue *sq;
1218
1219                 if (!parent_sq->nr_pending)
1220                         break;
1221
1222                 tg = throtl_rb_first(parent_sq);
1223                 if (!tg)
1224                         break;
1225
1226                 if (time_before(jiffies, tg->disptime))
1227                         break;
1228
1229                 throtl_dequeue_tg(tg);
1230
1231                 nr_disp += throtl_dispatch_tg(tg);
1232
1233                 sq = &tg->service_queue;
1234                 if (sq->nr_queued[0] || sq->nr_queued[1])
1235                         tg_update_disptime(tg);
1236
1237                 if (nr_disp >= THROTL_QUANTUM)
1238                         break;
1239         }
1240
1241         return nr_disp;
1242 }
1243
1244 static bool throtl_can_upgrade(struct throtl_data *td,
1245         struct throtl_grp *this_tg);
1246 /**
1247  * throtl_pending_timer_fn - timer function for service_queue->pending_timer
1248  * @t: the pending_timer member of the throtl_service_queue being serviced
1249  *
1250  * This timer is armed when a child throtl_grp with active bios becomes
1251  * pending and queued on the service_queue's pending_tree and expires when
1252  * the first child throtl_grp should be dispatched.  This function
1253  * dispatches bios from the children throtl_grps to the parent
1254  * service_queue.
1255  *
1256  * If the parent's parent is another throtl_grp, dispatching is propagated
1257  * by either arming its pending_timer or repeating dispatch directly.  If
1258  * the top-level service_tree is reached, throtl_data->dispatch_work is
1259  * kicked so that the ready bios are issued.
1260  */
1261 static void throtl_pending_timer_fn(struct timer_list *t)
1262 {
1263         struct throtl_service_queue *sq = from_timer(sq, t, pending_timer);
1264         struct throtl_grp *tg = sq_to_tg(sq);
1265         struct throtl_data *td = sq_to_td(sq);
1266         struct request_queue *q = td->queue;
1267         struct throtl_service_queue *parent_sq;
1268         bool dispatched;
1269         int ret;
1270
1271         spin_lock_irq(&q->queue_lock);
1272         if (throtl_can_upgrade(td, NULL))
1273                 throtl_upgrade_state(td);
1274
1275 again:
1276         parent_sq = sq->parent_sq;
1277         dispatched = false;
1278
1279         while (true) {
1280                 throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u",
1281                            sq->nr_queued[READ] + sq->nr_queued[WRITE],
1282                            sq->nr_queued[READ], sq->nr_queued[WRITE]);
1283
1284                 ret = throtl_select_dispatch(sq);
1285                 if (ret) {
1286                         throtl_log(sq, "bios disp=%u", ret);
1287                         dispatched = true;
1288                 }
1289
1290                 if (throtl_schedule_next_dispatch(sq, false))
1291                         break;
1292
1293                 /* this dispatch window is still open, relax and repeat */
1294                 spin_unlock_irq(&q->queue_lock);
1295                 cpu_relax();
1296                 spin_lock_irq(&q->queue_lock);
1297         }
1298
1299         if (!dispatched)
1300                 goto out_unlock;
1301
1302         if (parent_sq) {
1303                 /* @parent_sq is another throtl_grp, propagate dispatch */
1304                 if (tg->flags & THROTL_TG_WAS_EMPTY) {
1305                         tg_update_disptime(tg);
1306                         if (!throtl_schedule_next_dispatch(parent_sq, false)) {
1307                                 /* window is already open, repeat dispatching */
1308                                 sq = parent_sq;
1309                                 tg = sq_to_tg(sq);
1310                                 goto again;
1311                         }
1312                 }
1313         } else {
1314                 /* reached the top-level, queue issuing */
1315                 queue_work(kthrotld_workqueue, &td->dispatch_work);
1316         }
1317 out_unlock:
1318         spin_unlock_irq(&q->queue_lock);
1319 }
1320
1321 /**
1322  * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work
1323  * @work: work item being executed
1324  *
1325  * This function is queued for execution when bios reach the bio_lists[]
1326  * of throtl_data->service_queue.  Those bios are ready and issued by this
1327  * function.
1328  */
1329 static void blk_throtl_dispatch_work_fn(struct work_struct *work)
1330 {
1331         struct throtl_data *td = container_of(work, struct throtl_data,
1332                                               dispatch_work);
1333         struct throtl_service_queue *td_sq = &td->service_queue;
1334         struct request_queue *q = td->queue;
1335         struct bio_list bio_list_on_stack;
1336         struct bio *bio;
1337         struct blk_plug plug;
1338         int rw;
1339
1340         bio_list_init(&bio_list_on_stack);
1341
1342         spin_lock_irq(&q->queue_lock);
1343         for (rw = READ; rw <= WRITE; rw++)
1344                 while ((bio = throtl_pop_queued(&td_sq->queued[rw], NULL)))
1345                         bio_list_add(&bio_list_on_stack, bio);
1346         spin_unlock_irq(&q->queue_lock);
1347
1348         if (!bio_list_empty(&bio_list_on_stack)) {
1349                 blk_start_plug(&plug);
1350                 while ((bio = bio_list_pop(&bio_list_on_stack)))
1351                         submit_bio_noacct(bio);
1352                 blk_finish_plug(&plug);
1353         }
1354 }
1355
1356 static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd,
1357                               int off)
1358 {
1359         struct throtl_grp *tg = pd_to_tg(pd);
1360         u64 v = *(u64 *)((void *)tg + off);
1361
1362         if (v == U64_MAX)
1363                 return 0;
1364         return __blkg_prfill_u64(sf, pd, v);
1365 }
1366
1367 static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd,
1368                                int off)
1369 {
1370         struct throtl_grp *tg = pd_to_tg(pd);
1371         unsigned int v = *(unsigned int *)((void *)tg + off);
1372
1373         if (v == UINT_MAX)
1374                 return 0;
1375         return __blkg_prfill_u64(sf, pd, v);
1376 }
1377
1378 static int tg_print_conf_u64(struct seq_file *sf, void *v)
1379 {
1380         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_u64,
1381                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1382         return 0;
1383 }
1384
1385 static int tg_print_conf_uint(struct seq_file *sf, void *v)
1386 {
1387         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_conf_uint,
1388                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1389         return 0;
1390 }
1391
1392 static void tg_conf_updated(struct throtl_grp *tg, bool global)
1393 {
1394         struct throtl_service_queue *sq = &tg->service_queue;
1395         struct cgroup_subsys_state *pos_css;
1396         struct blkcg_gq *blkg;
1397
1398         throtl_log(&tg->service_queue,
1399                    "limit change rbps=%llu wbps=%llu riops=%u wiops=%u",
1400                    tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE),
1401                    tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE));
1402
1403         /*
1404          * Update has_rules[] flags for the updated tg's subtree.  A tg is
1405          * considered to have rules if either the tg itself or any of its
1406          * ancestors has rules.  This identifies groups without any
1407          * restrictions in the whole hierarchy and allows them to bypass
1408          * blk-throttle.
1409          */
1410         blkg_for_each_descendant_pre(blkg, pos_css,
1411                         global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) {
1412                 struct throtl_grp *this_tg = blkg_to_tg(blkg);
1413                 struct throtl_grp *parent_tg;
1414
1415                 tg_update_has_rules(this_tg);
1416                 /* ignore root/second level */
1417                 if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent ||
1418                     !blkg->parent->parent)
1419                         continue;
1420                 parent_tg = blkg_to_tg(blkg->parent);
1421                 /*
1422                  * make sure all children have a lower idle time threshold and
1423                  * a higher latency target
1424                  */
1425                 this_tg->idletime_threshold = min(this_tg->idletime_threshold,
1426                                 parent_tg->idletime_threshold);
1427                 this_tg->latency_target = max(this_tg->latency_target,
1428                                 parent_tg->latency_target);
1429         }
1430
1431         /*
1432          * We're already holding queue_lock and know @tg is valid.  Let's
1433          * apply the new config directly.
1434          *
1435          * Restart the slices for both READ and WRITE. It might happen
1436          * that a group's limits are dropped suddenly and we don't want to
1437          * account recently dispatched IO against the new, lower rate.
1438          */
1439         throtl_start_new_slice(tg, READ);
1440         throtl_start_new_slice(tg, WRITE);
1441
1442         if (tg->flags & THROTL_TG_PENDING) {
1443                 tg_update_disptime(tg);
1444                 throtl_schedule_next_dispatch(sq->parent_sq, true);
1445         }
1446 }
1447
1448 static ssize_t tg_set_conf(struct kernfs_open_file *of,
1449                            char *buf, size_t nbytes, loff_t off, bool is_u64)
1450 {
1451         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1452         struct blkg_conf_ctx ctx;
1453         struct throtl_grp *tg;
1454         int ret;
1455         u64 v;
1456
1457         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1458         if (ret)
1459                 return ret;
1460
1461         ret = -EINVAL;
1462         if (sscanf(ctx.body, "%llu", &v) != 1)
1463                 goto out_finish;
1464         if (!v)
1465                 v = U64_MAX;
1466
1467         tg = blkg_to_tg(ctx.blkg);
1468
1469         if (is_u64)
1470                 *(u64 *)((void *)tg + of_cft(of)->private) = v;
1471         else
1472                 *(unsigned int *)((void *)tg + of_cft(of)->private) = v;
1473
1474         tg_conf_updated(tg, false);
1475         ret = 0;
1476 out_finish:
1477         blkg_conf_finish(&ctx);
1478         return ret ?: nbytes;
1479 }
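/*
 * The setter above is generic: of_cft(of)->private carries the offsetof()
 * of the target field inside struct throtl_grp (see throtl_legacy_files[]
 * below).  For example, writing "MAJ:MIN 1048576" to
 * blkio.throttle.read_bps_device (device numbers are illustrative)
 * effectively does
 *
 *	*(u64 *)((void *)tg + offsetof(struct throtl_grp,
 *				       bps[READ][LIMIT_MAX])) = 1048576;
 *
 * and a value of 0 is turned into U64_MAX, i.e. "no limit".
 */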
1480
1481 static ssize_t tg_set_conf_u64(struct kernfs_open_file *of,
1482                                char *buf, size_t nbytes, loff_t off)
1483 {
1484         return tg_set_conf(of, buf, nbytes, off, true);
1485 }
1486
1487 static ssize_t tg_set_conf_uint(struct kernfs_open_file *of,
1488                                 char *buf, size_t nbytes, loff_t off)
1489 {
1490         return tg_set_conf(of, buf, nbytes, off, false);
1491 }
1492
1493 static int tg_print_rwstat(struct seq_file *sf, void *v)
1494 {
1495         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1496                           blkg_prfill_rwstat, &blkcg_policy_throtl,
1497                           seq_cft(sf)->private, true);
1498         return 0;
1499 }
1500
1501 static u64 tg_prfill_rwstat_recursive(struct seq_file *sf,
1502                                       struct blkg_policy_data *pd, int off)
1503 {
1504         struct blkg_rwstat_sample sum;
1505
1506         blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_throtl, off,
1507                                   &sum);
1508         return __blkg_prfill_rwstat(sf, pd, &sum);
1509 }
1510
1511 static int tg_print_rwstat_recursive(struct seq_file *sf, void *v)
1512 {
1513         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)),
1514                           tg_prfill_rwstat_recursive, &blkcg_policy_throtl,
1515                           seq_cft(sf)->private, true);
1516         return 0;
1517 }
1518
1519 static struct cftype throtl_legacy_files[] = {
1520         {
1521                 .name = "throttle.read_bps_device",
1522                 .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]),
1523                 .seq_show = tg_print_conf_u64,
1524                 .write = tg_set_conf_u64,
1525         },
1526         {
1527                 .name = "throttle.write_bps_device",
1528                 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1529                 .seq_show = tg_print_conf_u64,
1530                 .write = tg_set_conf_u64,
1531         },
1532         {
1533                 .name = "throttle.read_iops_device",
1534                 .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]),
1535                 .seq_show = tg_print_conf_uint,
1536                 .write = tg_set_conf_uint,
1537         },
1538         {
1539                 .name = "throttle.write_iops_device",
1540                 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1541                 .seq_show = tg_print_conf_uint,
1542                 .write = tg_set_conf_uint,
1543         },
1544         {
1545                 .name = "throttle.io_service_bytes",
1546                 .private = offsetof(struct throtl_grp, stat_bytes),
1547                 .seq_show = tg_print_rwstat,
1548         },
1549         {
1550                 .name = "throttle.io_service_bytes_recursive",
1551                 .private = offsetof(struct throtl_grp, stat_bytes),
1552                 .seq_show = tg_print_rwstat_recursive,
1553         },
1554         {
1555                 .name = "throttle.io_serviced",
1556                 .private = offsetof(struct throtl_grp, stat_ios),
1557                 .seq_show = tg_print_rwstat,
1558         },
1559         {
1560                 .name = "throttle.io_serviced_recursive",
1561                 .private = offsetof(struct throtl_grp, stat_ios),
1562                 .seq_show = tg_print_rwstat_recursive,
1563         },
1564         { }     /* terminate */
1565 };
1566
1567 static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd,
1568                          int off)
1569 {
1570         struct throtl_grp *tg = pd_to_tg(pd);
1571         const char *dname = blkg_dev_name(pd->blkg);
1572         char bufs[4][21] = { "max", "max", "max", "max" };
1573         u64 bps_dft;
1574         unsigned int iops_dft;
1575         char idle_time[26] = "";
1576         char latency_time[26] = "";
1577
1578         if (!dname)
1579                 return 0;
1580
1581         if (off == LIMIT_LOW) {
1582                 bps_dft = 0;
1583                 iops_dft = 0;
1584         } else {
1585                 bps_dft = U64_MAX;
1586                 iops_dft = UINT_MAX;
1587         }
1588
1589         if (tg->bps_conf[READ][off] == bps_dft &&
1590             tg->bps_conf[WRITE][off] == bps_dft &&
1591             tg->iops_conf[READ][off] == iops_dft &&
1592             tg->iops_conf[WRITE][off] == iops_dft &&
1593             (off != LIMIT_LOW ||
1594              (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD &&
1595               tg->latency_target_conf == DFL_LATENCY_TARGET)))
1596                 return 0;
1597
1598         if (tg->bps_conf[READ][off] != U64_MAX)
1599                 snprintf(bufs[0], sizeof(bufs[0]), "%llu",
1600                         tg->bps_conf[READ][off]);
1601         if (tg->bps_conf[WRITE][off] != U64_MAX)
1602                 snprintf(bufs[1], sizeof(bufs[1]), "%llu",
1603                         tg->bps_conf[WRITE][off]);
1604         if (tg->iops_conf[READ][off] != UINT_MAX)
1605                 snprintf(bufs[2], sizeof(bufs[2]), "%u",
1606                         tg->iops_conf[READ][off]);
1607         if (tg->iops_conf[WRITE][off] != UINT_MAX)
1608                 snprintf(bufs[3], sizeof(bufs[3]), "%u",
1609                         tg->iops_conf[WRITE][off]);
1610         if (off == LIMIT_LOW) {
1611                 if (tg->idletime_threshold_conf == ULONG_MAX)
1612                         strcpy(idle_time, " idle=max");
1613                 else
1614                         snprintf(idle_time, sizeof(idle_time), " idle=%lu",
1615                                 tg->idletime_threshold_conf);
1616
1617                 if (tg->latency_target_conf == ULONG_MAX)
1618                         strcpy(latency_time, " latency=max");
1619                 else
1620                         snprintf(latency_time, sizeof(latency_time),
1621                                 " latency=%lu", tg->latency_target_conf);
1622         }
1623
1624         seq_printf(sf, "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n",
1625                    dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time,
1626                    latency_time);
1627         return 0;
1628 }
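/*
 * An example line produced by the above (device numbers are illustrative):
 *
 *	8:16 rbps=2097152 wbps=max riops=max wiops=max
 *
 * For the io.low file, " idle=..." and " latency=..." are appended as well
 * once the low limit has been configured.
 */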
1629
1630 static int tg_print_limit(struct seq_file *sf, void *v)
1631 {
1632         blkcg_print_blkgs(sf, css_to_blkcg(seq_css(sf)), tg_prfill_limit,
1633                           &blkcg_policy_throtl, seq_cft(sf)->private, false);
1634         return 0;
1635 }
1636
1637 static ssize_t tg_set_limit(struct kernfs_open_file *of,
1638                           char *buf, size_t nbytes, loff_t off)
1639 {
1640         struct blkcg *blkcg = css_to_blkcg(of_css(of));
1641         struct blkg_conf_ctx ctx;
1642         struct throtl_grp *tg;
1643         u64 v[4];
1644         unsigned long idle_time;
1645         unsigned long latency_time;
1646         int ret;
1647         int index = of_cft(of)->private;
1648
1649         ret = blkg_conf_prep(blkcg, &blkcg_policy_throtl, buf, &ctx);
1650         if (ret)
1651                 return ret;
1652
1653         tg = blkg_to_tg(ctx.blkg);
1654
1655         v[0] = tg->bps_conf[READ][index];
1656         v[1] = tg->bps_conf[WRITE][index];
1657         v[2] = tg->iops_conf[READ][index];
1658         v[3] = tg->iops_conf[WRITE][index];
1659
1660         idle_time = tg->idletime_threshold_conf;
1661         latency_time = tg->latency_target_conf;
1662         while (true) {
1663                 char tok[27];   /* wiops=18446744073709551615 */
1664                 char *p;
1665                 u64 val = U64_MAX;
1666                 int len;
1667
1668                 if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1669                         break;
1670                 if (tok[0] == '\0')
1671                         break;
1672                 ctx.body += len;
1673
1674                 ret = -EINVAL;
1675                 p = tok;
1676                 strsep(&p, "=");
1677                 if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1678                         goto out_finish;
1679
1680                 ret = -ERANGE;
1681                 if (!val)
1682                         goto out_finish;
1683
1684                 ret = -EINVAL;
1685                 if (!strcmp(tok, "rbps") && val > 1)
1686                         v[0] = val;
1687                 else if (!strcmp(tok, "wbps") && val > 1)
1688                         v[1] = val;
1689                 else if (!strcmp(tok, "riops") && val > 1)
1690                         v[2] = min_t(u64, val, UINT_MAX);
1691                 else if (!strcmp(tok, "wiops") && val > 1)
1692                         v[3] = min_t(u64, val, UINT_MAX);
1693                 else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
1694                         idle_time = val;
1695                 else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1696                         latency_time = val;
1697                 else
1698                         goto out_finish;
1699         }
1700
1701         tg->bps_conf[READ][index] = v[0];
1702         tg->bps_conf[WRITE][index] = v[1];
1703         tg->iops_conf[READ][index] = v[2];
1704         tg->iops_conf[WRITE][index] = v[3];
1705
1706         if (index == LIMIT_MAX) {
1707                 tg->bps[READ][index] = v[0];
1708                 tg->bps[WRITE][index] = v[1];
1709                 tg->iops[READ][index] = v[2];
1710                 tg->iops[WRITE][index] = v[3];
1711         }
1712         tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW],
1713                 tg->bps_conf[READ][LIMIT_MAX]);
1714         tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW],
1715                 tg->bps_conf[WRITE][LIMIT_MAX]);
1716         tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW],
1717                 tg->iops_conf[READ][LIMIT_MAX]);
1718         tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW],
1719                 tg->iops_conf[WRITE][LIMIT_MAX]);
1720         tg->idletime_threshold_conf = idle_time;
1721         tg->latency_target_conf = latency_time;
1722
1723         /* force the user to configure all settings for the low limit */
1724         if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] ||
1725               tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) ||
1726             tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD ||
1727             tg->latency_target_conf == DFL_LATENCY_TARGET) {
1728                 tg->bps[READ][LIMIT_LOW] = 0;
1729                 tg->bps[WRITE][LIMIT_LOW] = 0;
1730                 tg->iops[READ][LIMIT_LOW] = 0;
1731                 tg->iops[WRITE][LIMIT_LOW] = 0;
1732                 tg->idletime_threshold = DFL_IDLE_THRESHOLD;
1733                 tg->latency_target = DFL_LATENCY_TARGET;
1734         } else if (index == LIMIT_LOW) {
1735                 tg->idletime_threshold = tg->idletime_threshold_conf;
1736                 tg->latency_target = tg->latency_target_conf;
1737         }
1738
1739         blk_throtl_update_limit_valid(tg->td);
1740         if (tg->td->limit_valid[LIMIT_LOW]) {
1741                 if (index == LIMIT_LOW)
1742                         tg->td->limit_index = LIMIT_LOW;
1743         } else
1744                 tg->td->limit_index = LIMIT_MAX;
1745         tg_conf_updated(tg, index == LIMIT_LOW &&
1746                 tg->td->limit_valid[LIMIT_LOW]);
1747         ret = 0;
1748 out_finish:
1749         blkg_conf_finish(&ctx);
1750         return ret ?: nbytes;
1751 }
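/*
 * The writer above accepts space-separated "key=value" tokens, with "max"
 * meaning "no limit".  An illustrative io.low configuration (device
 * numbers are made up) could look like:
 *
 *	echo "8:16 rbps=2097152 wbps=max idle=200 latency=10" > io.low
 *
 * Unless at least one of the four low limits is set and both idle and
 * latency are configured to non-default values, every low limit is reset
 * to 0, i.e. the low limit is disabled for this group.
 */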
1752
1753 static struct cftype throtl_files[] = {
1754 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
1755         {
1756                 .name = "low",
1757                 .flags = CFTYPE_NOT_ON_ROOT,
1758                 .seq_show = tg_print_limit,
1759                 .write = tg_set_limit,
1760                 .private = LIMIT_LOW,
1761         },
1762 #endif
1763         {
1764                 .name = "max",
1765                 .flags = CFTYPE_NOT_ON_ROOT,
1766                 .seq_show = tg_print_limit,
1767                 .write = tg_set_limit,
1768                 .private = LIMIT_MAX,
1769         },
1770         { }     /* terminate */
1771 };
1772
1773 static void throtl_shutdown_wq(struct request_queue *q)
1774 {
1775         struct throtl_data *td = q->td;
1776
1777         cancel_work_sync(&td->dispatch_work);
1778 }
1779
1780 static struct blkcg_policy blkcg_policy_throtl = {
1781         .dfl_cftypes            = throtl_files,
1782         .legacy_cftypes         = throtl_legacy_files,
1783
1784         .pd_alloc_fn            = throtl_pd_alloc,
1785         .pd_init_fn             = throtl_pd_init,
1786         .pd_online_fn           = throtl_pd_online,
1787         .pd_offline_fn          = throtl_pd_offline,
1788         .pd_free_fn             = throtl_pd_free,
1789 };
1790
1791 static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg)
1792 {
1793         unsigned long rtime = jiffies, wtime = jiffies;
1794
1795         if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW])
1796                 rtime = tg->last_low_overflow_time[READ];
1797         if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW])
1798                 wtime = tg->last_low_overflow_time[WRITE];
1799         return min(rtime, wtime);
1800 }
1801
1802 /* tg should not be an intermediate node */
1803 static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg)
1804 {
1805         struct throtl_service_queue *parent_sq;
1806         struct throtl_grp *parent = tg;
1807         unsigned long ret = __tg_last_low_overflow_time(tg);
1808
1809         while (true) {
1810                 parent_sq = parent->service_queue.parent_sq;
1811                 parent = sq_to_tg(parent_sq);
1812                 if (!parent)
1813                         break;
1814
1815                 /*
1816                  * If the parent has no low limit, it always reaches its
1817                  * low limit, so its overflow time is useless for the children.
1818                  */
1819                 if (!parent->bps[READ][LIMIT_LOW] &&
1820                     !parent->iops[READ][LIMIT_LOW] &&
1821                     !parent->bps[WRITE][LIMIT_LOW] &&
1822                     !parent->iops[WRITE][LIMIT_LOW])
1823                         continue;
1824                 if (time_after(__tg_last_low_overflow_time(parent), ret))
1825                         ret = __tg_last_low_overflow_time(parent);
1826         }
1827         return ret;
1828 }
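/*
 * In other words: walk from @tg towards the root, skip ancestors without
 * any low limit, and return the most recent overflow time seen on the way.
 * __tg_last_low_overflow_time() itself picks the older of the READ/WRITE
 * timestamps among the directions that actually have a low limit.
 */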
1829
1830 static bool throtl_tg_is_idle(struct throtl_grp *tg)
1831 {
1832         /*
1833          * A cgroup is considered idle if:
1834          * - its current idle period is too long: longer than a fixed cap
1835          *   (in case the user configured too big a threshold) or 4x the threshold
1836          * - its average think time is above the idletime threshold
1837          * - its IO latency is mostly below the latency target
1838          */
1839         unsigned long time;
1840         bool ret;
1841
1842         time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold);
1843         ret = tg->latency_target == DFL_LATENCY_TARGET ||
1844               tg->idletime_threshold == DFL_IDLE_THRESHOLD ||
1845               (ktime_get_ns() >> 10) - tg->last_finish_time > time ||
1846               tg->avg_idletime > tg->idletime_threshold ||
1847               (tg->latency_target && tg->bio_cnt &&
1848                 tg->bad_bio_cnt * 5 < tg->bio_cnt);
1849         throtl_log(&tg->service_queue,
1850                 "avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
1851                 tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
1852                 tg->bio_cnt, ret, tg->td->scale);
1853         return ret;
1854 }
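/*
 * The "tg->bad_bio_cnt * 5 < tg->bio_cnt" check above is a 20% rule: with
 * e.g. 100 sampled bios the group still counts as idle, latency-wise, as
 * long as fewer than 20 of them exceeded the latency threshold.  Idle
 * times are kept in ~usecs (ktime_get_ns() >> 10).
 */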
1855
1856 static bool throtl_tg_can_upgrade(struct throtl_grp *tg)
1857 {
1858         struct throtl_service_queue *sq = &tg->service_queue;
1859         bool read_limit, write_limit;
1860
1861         /*
1862          * If the cgroup reaches its low limit (a low limit of 0 is always
1863          * considered reached), it's OK to upgrade to the next limit.
1864          */
1865         read_limit = tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW];
1866         write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW];
1867         if (!read_limit && !write_limit)
1868                 return true;
1869         if (read_limit && sq->nr_queued[READ] &&
1870             (!write_limit || sq->nr_queued[WRITE]))
1871                 return true;
1872         if (write_limit && sq->nr_queued[WRITE] &&
1873             (!read_limit || sq->nr_queued[READ]))
1874                 return true;
1875
1876         if (time_after_eq(jiffies,
1877                 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) &&
1878             throtl_tg_is_idle(tg))
1879                 return true;
1880         return false;
1881 }
1882
1883 static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg)
1884 {
1885         while (true) {
1886                 if (throtl_tg_can_upgrade(tg))
1887                         return true;
1888                 tg = sq_to_tg(tg->service_queue.parent_sq);
1889                 if (!tg || !tg_to_blkg(tg)->parent)
1890                         return false;
1891         }
1892         return false;
1893 }
1894
1895 static bool throtl_can_upgrade(struct throtl_data *td,
1896         struct throtl_grp *this_tg)
1897 {
1898         struct cgroup_subsys_state *pos_css;
1899         struct blkcg_gq *blkg;
1900
1901         if (td->limit_index != LIMIT_LOW)
1902                 return false;
1903
1904         if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice))
1905                 return false;
1906
1907         rcu_read_lock();
1908         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1909                 struct throtl_grp *tg = blkg_to_tg(blkg);
1910
1911                 if (tg == this_tg)
1912                         continue;
1913                 if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1914                         continue;
1915                 if (!throtl_hierarchy_can_upgrade(tg)) {
1916                         rcu_read_unlock();
1917                         return false;
1918                 }
1919         }
1920         rcu_read_unlock();
1921         return true;
1922 }
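/*
 * Upgrading is a global decision: every leaf group (a blkcg without
 * children) in the hierarchy has to agree via
 * throtl_hierarchy_can_upgrade(); @this_tg, when given, is the group that
 * triggered the check and is exempted.
 */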
1923
1924 static void throtl_upgrade_check(struct throtl_grp *tg)
1925 {
1926         unsigned long now = jiffies;
1927
1928         if (tg->td->limit_index != LIMIT_LOW)
1929                 return;
1930
1931         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
1932                 return;
1933
1934         tg->last_check_time = now;
1935
1936         if (!time_after_eq(now,
1937              __tg_last_low_overflow_time(tg) + tg->td->throtl_slice))
1938                 return;
1939
1940         if (throtl_can_upgrade(tg->td, NULL))
1941                 throtl_upgrade_state(tg->td);
1942 }
1943
1944 static void throtl_upgrade_state(struct throtl_data *td)
1945 {
1946         struct cgroup_subsys_state *pos_css;
1947         struct blkcg_gq *blkg;
1948
1949         throtl_log(&td->service_queue, "upgrade to max");
1950         td->limit_index = LIMIT_MAX;
1951         td->low_upgrade_time = jiffies;
1952         td->scale = 0;
1953         rcu_read_lock();
1954         blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) {
1955                 struct throtl_grp *tg = blkg_to_tg(blkg);
1956                 struct throtl_service_queue *sq = &tg->service_queue;
1957
1958                 tg->disptime = jiffies - 1;
1959                 throtl_select_dispatch(sq);
1960                 throtl_schedule_next_dispatch(sq, true);
1961         }
1962         rcu_read_unlock();
1963         throtl_select_dispatch(&td->service_queue);
1964         throtl_schedule_next_dispatch(&td->service_queue, true);
1965         queue_work(kthrotld_workqueue, &td->dispatch_work);
1966 }
1967
1968 static void throtl_downgrade_state(struct throtl_data *td)
1969 {
1970         td->scale /= 2;
1971
1972         throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1973         if (td->scale) {
1974                 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice;
1975                 return;
1976         }
1977
1978         td->limit_index = LIMIT_LOW;
1979         td->low_downgrade_time = jiffies;
1980 }
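/*
 * Each downgrade request halves td->scale.  While scale is still non-zero
 * we stay at LIMIT_MAX and only backdate low_upgrade_time by
 * scale * throtl_slice; only once scale reaches zero do we actually fall
 * back to LIMIT_LOW.
 */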
1981
1982 static bool throtl_tg_can_downgrade(struct throtl_grp *tg)
1983 {
1984         struct throtl_data *td = tg->td;
1985         unsigned long now = jiffies;
1986
1987         /*
1988          * If the cgroup is below its low limit, consider downgrading so that
1989          * other cgroups get throttled.
1990          */
1991         if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) &&
1992             time_after_eq(now, tg_last_low_overflow_time(tg) +
1993                                         td->throtl_slice) &&
1994             (!throtl_tg_is_idle(tg) ||
1995              !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1996                 return true;
1997         return false;
1998 }
1999
2000 static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg)
2001 {
2002         while (true) {
2003                 if (!throtl_tg_can_downgrade(tg))
2004                         return false;
2005                 tg = sq_to_tg(tg->service_queue.parent_sq);
2006                 if (!tg || !tg_to_blkg(tg)->parent)
2007                         break;
2008         }
2009         return true;
2010 }
2011
2012 static void throtl_downgrade_check(struct throtl_grp *tg)
2013 {
2014         uint64_t bps;
2015         unsigned int iops;
2016         unsigned long elapsed_time;
2017         unsigned long now = jiffies;
2018
2019         if (tg->td->limit_index != LIMIT_MAX ||
2020             !tg->td->limit_valid[LIMIT_LOW])
2021                 return;
2022         if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
2023                 return;
2024         if (time_after(tg->last_check_time + tg->td->throtl_slice, now))
2025                 return;
2026
2027         elapsed_time = now - tg->last_check_time;
2028         tg->last_check_time = now;
2029
2030         if (time_before(now, tg_last_low_overflow_time(tg) +
2031                         tg->td->throtl_slice))
2032                 return;
2033
2034         if (tg->bps[READ][LIMIT_LOW]) {
2035                 bps = tg->last_bytes_disp[READ] * HZ;
2036                 do_div(bps, elapsed_time);
2037                 if (bps >= tg->bps[READ][LIMIT_LOW])
2038                         tg->last_low_overflow_time[READ] = now;
2039         }
2040
2041         if (tg->bps[WRITE][LIMIT_LOW]) {
2042                 bps = tg->last_bytes_disp[WRITE] * HZ;
2043                 do_div(bps, elapsed_time);
2044                 if (bps >= tg->bps[WRITE][LIMIT_LOW])
2045                         tg->last_low_overflow_time[WRITE] = now;
2046         }
2047
2048         if (tg->iops[READ][LIMIT_LOW]) {
2049                 iops = tg->last_io_disp[READ] * HZ / elapsed_time;
2050                 if (iops >= tg->iops[READ][LIMIT_LOW])
2051                         tg->last_low_overflow_time[READ] = now;
2052         }
2053
2054         if (tg->iops[WRITE][LIMIT_LOW]) {
2055                 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time;
2056                 if (iops >= tg->iops[WRITE][LIMIT_LOW])
2057                         tg->last_low_overflow_time[WRITE] = now;
2058         }
2059
2060         /*
2061          * If the cgroup is below its low limit, consider downgrading so that
2062          * other cgroups get throttled.
2063          */
2064         if (throtl_hierarchy_can_downgrade(tg))
2065                 throtl_downgrade_state(tg->td);
2066
2067         tg->last_bytes_disp[READ] = 0;
2068         tg->last_bytes_disp[WRITE] = 0;
2069         tg->last_io_disp[READ] = 0;
2070         tg->last_io_disp[WRITE] = 0;
2071 }
2072
2073 static void blk_throtl_update_idletime(struct throtl_grp *tg)
2074 {
2075         unsigned long now;
2076         unsigned long last_finish_time = tg->last_finish_time;
2077
2078         if (last_finish_time == 0)
2079                 return;
2080
2081         now = ktime_get_ns() >> 10;
2082         if (now <= last_finish_time ||
2083             last_finish_time == tg->checked_last_finish_time)
2084                 return;
2085
2086         tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3;
2087         tg->checked_last_finish_time = last_finish_time;
2088 }
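/*
 * avg_idletime is an exponentially weighted moving average with a weight
 * of 1/8 for the newest sample:
 *
 *	avg = (7 * avg + (now - last_finish_time)) / 8
 *
 * with all times in ~usecs (ktime_get_ns() >> 10).
 */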
2089
2090 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2091 static void throtl_update_latency_buckets(struct throtl_data *td)
2092 {
2093         struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE];
2094         int i, cpu, rw;
2095         unsigned long last_latency[2] = { 0 };
2096         unsigned long latency[2];
2097
2098         if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW])
2099                 return;
2100         if (time_before(jiffies, td->last_calculate_time + HZ))
2101                 return;
2102         td->last_calculate_time = jiffies;
2103
2104         memset(avg_latency, 0, sizeof(avg_latency));
2105         for (rw = READ; rw <= WRITE; rw++) {
2106                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2107                         struct latency_bucket *tmp = &td->tmp_buckets[rw][i];
2108
2109                         for_each_possible_cpu(cpu) {
2110                                 struct latency_bucket *bucket;
2111
2112                                 /* this isn't race free, but ok in practice */
2113                                 bucket = per_cpu_ptr(td->latency_buckets[rw],
2114                                         cpu);
2115                                 tmp->total_latency += bucket[i].total_latency;
2116                                 tmp->samples += bucket[i].samples;
2117                                 bucket[i].total_latency = 0;
2118                                 bucket[i].samples = 0;
2119                         }
2120
2121                         if (tmp->samples >= 32) {
2122                                 int samples = tmp->samples;
2123
2124                                 latency[rw] = tmp->total_latency;
2125
2126                                 tmp->total_latency = 0;
2127                                 tmp->samples = 0;
2128                                 latency[rw] /= samples;
2129                                 if (latency[rw] == 0)
2130                                         continue;
2131                                 avg_latency[rw][i].latency = latency[rw];
2132                         }
2133                 }
2134         }
2135
2136         for (rw = READ; rw <= WRITE; rw++) {
2137                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2138                         if (!avg_latency[rw][i].latency) {
2139                                 if (td->avg_buckets[rw][i].latency < last_latency[rw])
2140                                         td->avg_buckets[rw][i].latency =
2141                                                 last_latency[rw];
2142                                 continue;
2143                         }
2144
2145                         if (!td->avg_buckets[rw][i].valid)
2146                                 latency[rw] = avg_latency[rw][i].latency;
2147                         else
2148                                 latency[rw] = (td->avg_buckets[rw][i].latency * 7 +
2149                                         avg_latency[rw][i].latency) >> 3;
2150
2151                         td->avg_buckets[rw][i].latency = max(latency[rw],
2152                                 last_latency[rw]);
2153                         td->avg_buckets[rw][i].valid = true;
2154                         last_latency[rw] = td->avg_buckets[rw][i].latency;
2155                 }
2156         }
2157
2158         for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
2159                 throtl_log(&td->service_queue,
2160                         "Latency bucket %d: read latency=%ld, read valid=%d, "
2161                         "write latency=%ld, write valid=%d", i,
2162                         td->avg_buckets[READ][i].latency,
2163                         td->avg_buckets[READ][i].valid,
2164                         td->avg_buckets[WRITE][i].latency,
2165                         td->avg_buckets[WRITE][i].valid);
2166 }
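/*
 * Summary of the above: at most once per second the per-cpu samples are
 * folded into td->tmp_buckets; a bucket only contributes once it has
 * gathered at least 32 samples.  Each contributing bucket then feeds an
 * EWMA (7/8 old + 1/8 new) in td->avg_buckets, and the averages are kept
 * non-decreasing with request size via last_latency[].
 */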
2167 #else
2168 static inline void throtl_update_latency_buckets(struct throtl_data *td)
2169 {
2170 }
2171 #endif
2172
2173 bool blk_throtl_bio(struct bio *bio)
2174 {
2175         struct request_queue *q = bio->bi_disk->queue;
2176         struct blkcg_gq *blkg = bio->bi_blkg;
2177         struct throtl_qnode *qn = NULL;
2178         struct throtl_grp *tg = blkg_to_tg(blkg);
2179         struct throtl_service_queue *sq;
2180         bool rw = bio_data_dir(bio);
2181         bool throttled = false;
2182         struct throtl_data *td = tg->td;
2183
2184         rcu_read_lock();
2185
2186         /* see throtl_charge_bio() */
2187         if (bio_flagged(bio, BIO_THROTTLED))
2188                 goto out;
2189
2190         if (!cgroup_subsys_on_dfl(io_cgrp_subsys)) {
2191                 blkg_rwstat_add(&tg->stat_bytes, bio->bi_opf,
2192                                 bio->bi_iter.bi_size);
2193                 blkg_rwstat_add(&tg->stat_ios, bio->bi_opf, 1);
2194         }
2195
2196         if (!tg->has_rules[rw])
2197                 goto out;
2198
2199         spin_lock_irq(&q->queue_lock);
2200
2201         throtl_update_latency_buckets(td);
2202
2203         blk_throtl_update_idletime(tg);
2204
2205         sq = &tg->service_queue;
2206
2207 again:
2208         while (true) {
2209                 if (tg->last_low_overflow_time[rw] == 0)
2210                         tg->last_low_overflow_time[rw] = jiffies;
2211                 throtl_downgrade_check(tg);
2212                 throtl_upgrade_check(tg);
2213                 /* throtl is FIFO - if bios are already queued, this one must queue too */
2214                 if (sq->nr_queued[rw])
2215                         break;
2216
2217                 /* if above limits, break to queue */
2218                 if (!tg_may_dispatch(tg, bio, NULL)) {
2219                         tg->last_low_overflow_time[rw] = jiffies;
2220                         if (throtl_can_upgrade(td, tg)) {
2221                                 throtl_upgrade_state(td);
2222                                 goto again;
2223                         }
2224                         break;
2225                 }
2226
2227                 /* within limits, let's charge and dispatch directly */
2228                 throtl_charge_bio(tg, bio);
2229
2230                 /*
2231                  * We need to trim the slice even when bios are not being
2232                  * queued, otherwise it might happen that a bio is not queued
2233                  * for a long time, the slice keeps on extending and trim is
2234                  * not called for a long time. Then, if the limits are reduced
2235                  * suddenly, we would account all the IO dispatched so far at
2236                  * the new low rate and newly queued IO would get a really
2237                  * long dispatch time.
2238                  *
2239                  * So keep on trimming the slice even if no bio is queued.
2240                  */
2241                 throtl_trim_slice(tg, rw);
2242
2243                 /*
2244                  * @bio passed through this layer without being throttled.
2245                  * Climb up the ladder.  If we're already at the top, it
2246                  * can be executed directly.
2247                  */
2248                 qn = &tg->qnode_on_parent[rw];
2249                 sq = sq->parent_sq;
2250                 tg = sq_to_tg(sq);
2251                 if (!tg)
2252                         goto out_unlock;
2253         }
2254
2255         /* out-of-limit, queue to @tg */
2256         throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2257                    rw == READ ? 'R' : 'W',
2258                    tg->bytes_disp[rw], bio->bi_iter.bi_size,
2259                    tg_bps_limit(tg, rw),
2260                    tg->io_disp[rw], tg_iops_limit(tg, rw),
2261                    sq->nr_queued[READ], sq->nr_queued[WRITE]);
2262
2263         tg->last_low_overflow_time[rw] = jiffies;
2264
2265         td->nr_queued[rw]++;
2266         throtl_add_bio_tg(bio, qn, tg);
2267         throttled = true;
2268
2269         /*
2270          * Update @tg's dispatch time and force schedule dispatch if @tg
2271          * was empty before @bio.  The forced scheduling isn't likely to
2272          * cause undue delay as @bio is likely to be dispatched directly if
2273          * its @tg's disptime is not in the future.
2274          */
2275         if (tg->flags & THROTL_TG_WAS_EMPTY) {
2276                 tg_update_disptime(tg);
2277                 throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2278         }
2279
2280 out_unlock:
2281         spin_unlock_irq(&q->queue_lock);
2282 out:
2283         bio_set_flag(bio, BIO_THROTTLED);
2284
2285 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2286         if (throttled || !td->track_bio_latency)
2287                 bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY;
2288 #endif
2289         rcu_read_unlock();
2290         return throttled;
2291 }
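/*
 * Dispatch path summary: starting at the bio's own group, each level is
 * checked with tg_may_dispatch().  A level that is within its limits gets
 * charged, its slice trimmed, and we climb to the parent service queue;
 * reaching the top without being limited means the bio is issued directly
 * (return false).  The first level that is over its limit, or that already
 * has bios queued, queues the bio there and the function returns true.
 */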
2292
2293 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2294 static void throtl_track_latency(struct throtl_data *td, sector_t size,
2295         int op, unsigned long time)
2296 {
2297         struct latency_bucket *latency;
2298         int index;
2299
2300         if (!td || td->limit_index != LIMIT_LOW ||
2301             !(op == REQ_OP_READ || op == REQ_OP_WRITE) ||
2302             !blk_queue_nonrot(td->queue))
2303                 return;
2304
2305         index = request_bucket_index(size);
2306
2307         latency = get_cpu_ptr(td->latency_buckets[op]);
2308         latency[index].total_latency += time;
2309         latency[index].samples++;
2310         put_cpu_ptr(td->latency_buckets[op]);
2311 }
2312
2313 void blk_throtl_stat_add(struct request *rq, u64 time_ns)
2314 {
2315         struct request_queue *q = rq->q;
2316         struct throtl_data *td = q->td;
2317
2318         throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
2319                              time_ns >> 10);
2320 }
2321
2322 void blk_throtl_bio_endio(struct bio *bio)
2323 {
2324         struct blkcg_gq *blkg;
2325         struct throtl_grp *tg;
2326         u64 finish_time_ns;
2327         unsigned long finish_time;
2328         unsigned long start_time;
2329         unsigned long lat;
2330         int rw = bio_data_dir(bio);
2331
2332         blkg = bio->bi_blkg;
2333         if (!blkg)
2334                 return;
2335         tg = blkg_to_tg(blkg);
2336         if (!tg->td->limit_valid[LIMIT_LOW])
2337                 return;
2338
2339         finish_time_ns = ktime_get_ns();
2340         tg->last_finish_time = finish_time_ns >> 10;
2341
2342         start_time = bio_issue_time(&bio->bi_issue) >> 10;
2343         finish_time = __bio_issue_time(finish_time_ns) >> 10;
2344         if (!start_time || finish_time <= start_time)
2345                 return;
2346
2347         lat = finish_time - start_time;
2348         /* this is only for bio-based drivers */
2349         if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
2350                 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
2351                                      bio_op(bio), lat);
2352
2353         if (tg->latency_target && lat >= tg->td->filtered_latency) {
2354                 int bucket;
2355                 unsigned int threshold;
2356
2357                 bucket = request_bucket_index(bio_issue_size(&bio->bi_issue));
2358                 threshold = tg->td->avg_buckets[rw][bucket].latency +
2359                         tg->latency_target;
2360                 if (lat > threshold)
2361                         tg->bad_bio_cnt++;
2362                 /*
2363                  * Not race free; the count may be slightly off, which means
2364                  * cgroups may end up being throttled.
2365                  */
2366                 tg->bio_cnt++;
2367         }
2368
2369         if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) {
2370                 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies;
2371                 tg->bio_cnt /= 2;
2372                 tg->bad_bio_cnt /= 2;
2373         }
2374 }
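/*
 * bio_cnt/bad_bio_cnt form a decaying window: both are halved roughly once
 * per throtl_slice (or as soon as bio_cnt exceeds 1024), so
 * throtl_tg_is_idle() sees the recent fraction of bios whose latency
 * exceeded the per-bucket average plus the group's latency target.
 */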
2375 #endif
2376
2377 int blk_throtl_init(struct request_queue *q)
2378 {
2379         struct throtl_data *td;
2380         int ret;
2381
2382         td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2383         if (!td)
2384                 return -ENOMEM;
2385         td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
2386                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2387         if (!td->latency_buckets[READ]) {
2388                 kfree(td);
2389                 return -ENOMEM;
2390         }
2391         td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
2392                 LATENCY_BUCKET_SIZE, __alignof__(u64));
2393         if (!td->latency_buckets[WRITE]) {
2394                 free_percpu(td->latency_buckets[READ]);
2395                 kfree(td);
2396                 return -ENOMEM;
2397         }
2398
2399         INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn);
2400         throtl_service_queue_init(&td->service_queue);
2401
2402         q->td = td;
2403         td->queue = q;
2404
2405         td->limit_valid[LIMIT_MAX] = true;
2406         td->limit_index = LIMIT_MAX;
2407         td->low_upgrade_time = jiffies;
2408         td->low_downgrade_time = jiffies;
2409
2410         /* activate policy */
2411         ret = blkcg_activate_policy(q, &blkcg_policy_throtl);
2412         if (ret) {
2413                 free_percpu(td->latency_buckets[READ]);
2414                 free_percpu(td->latency_buckets[WRITE]);
2415                 kfree(td);
2416         }
2417         return ret;
2418 }
2419
2420 void blk_throtl_exit(struct request_queue *q)
2421 {
2422         BUG_ON(!q->td);
2423         throtl_shutdown_wq(q);
2424         blkcg_deactivate_policy(q, &blkcg_policy_throtl);
2425         free_percpu(q->td->latency_buckets[READ]);
2426         free_percpu(q->td->latency_buckets[WRITE]);
2427         kfree(q->td);
2428 }
2429
2430 void blk_throtl_register_queue(struct request_queue *q)
2431 {
2432         struct throtl_data *td;
2433         int i;
2434
2435         td = q->td;
2436         BUG_ON(!td);
2437
2438         if (blk_queue_nonrot(q)) {
2439                 td->throtl_slice = DFL_THROTL_SLICE_SSD;
2440                 td->filtered_latency = LATENCY_FILTERED_SSD;
2441         } else {
2442                 td->throtl_slice = DFL_THROTL_SLICE_HD;
2443                 td->filtered_latency = LATENCY_FILTERED_HD;
2444                 for (i = 0; i < LATENCY_BUCKET_SIZE; i++) {
2445                         td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY;
2446                         td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;
2447                 }
2448         }
2449 #ifndef CONFIG_BLK_DEV_THROTTLING_LOW
2450         /* if no low limit, use previous default */
2451         td->throtl_slice = DFL_THROTL_SLICE_HD;
2452 #endif
2453
2454         td->track_bio_latency = !queue_is_mq(q);
2455         if (!td->track_bio_latency)
2456                 blk_stat_enable_accounting(q);
2457 }
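/*
 * Non-rotational (SSD) queues get the shorter default slice and no latency
 * filtering, while rotational disks also get a baseline latency seeded
 * into every bucket.  For blk-mq devices bio-level latency tracking is not
 * used, so request-level stats accounting is enabled instead.
 */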
2458
2459 #ifdef CONFIG_BLK_DEV_THROTTLING_LOW
2460 ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page)
2461 {
2462         if (!q->td)
2463                 return -EINVAL;
2464         return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2465 }
2466
2467 ssize_t blk_throtl_sample_time_store(struct request_queue *q,
2468         const char *page, size_t count)
2469 {
2470         unsigned long v;
2471         unsigned long t;
2472
2473         if (!q->td)
2474                 return -EINVAL;
2475         if (kstrtoul(page, 10, &v))
2476                 return -EINVAL;
2477         t = msecs_to_jiffies(v);
2478         if (t == 0 || t > MAX_THROTL_SLICE)
2479                 return -EINVAL;
2480         q->td->throtl_slice = t;
2481         return count;
2482 }
2483 #endif
2484
2485 static int __init throtl_init(void)
2486 {
2487         kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
2488         if (!kthrotld_workqueue)
2489                 panic("Failed to create kthrotld\n");
2490
2491         return blkcg_policy_register(&blkcg_policy_throtl);
2492 }
2493
2494 module_init(throtl_init);