#include <linux/wait.h>
#include <linux/backing-dev.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/device.h>
static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);
struct backing_dev_info default_backing_dev_info = {
	.name		= "default",
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);
static struct class *bdi_class;

/*
 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
 * locking.
 */
DEFINE_SPINLOCK(bdi_lock);
LIST_HEAD(bdi_list);
LIST_HEAD(bdi_pending_list);

static struct task_struct *sync_supers_tsk;
static struct timer_list sync_supers_timer;

static int bdi_sync_supers(void *);
static void sync_supers_timer_fn(unsigned long);
static void arm_supers_timer(void);

static void bdi_add_default_flusher_task(struct backing_dev_info *bdi);
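/*
 * Example (illustrative sketch of the locking rules above, not code used
 * in this file): writers take bdi_lock around list updates, while readers
 * of bdi_list only need RCU:
 *
 *	struct backing_dev_info *bdi;
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 *		... read-only access to *bdi ...
 *	}
 *	rcu_read_unlock();
 *
 * bdi_pending_list has no RCU protection, so even readers must hold
 * bdi_lock for it.
 */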
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static struct dentry *bdi_debug_root;

static void bdi_debug_init(void)
{
	bdi_debug_root = debugfs_create_dir("bdi", NULL);
}
static int bdi_debug_stats_show(struct seq_file *m, void *v)
{
	struct backing_dev_info *bdi = m->private;
	struct bdi_writeback *wb;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long nr_dirty, nr_io, nr_more_io, nr_wb;
	struct inode *inode;

	/*
	 * inode lock is enough here, the bdi->wb_list is protected by
	 * RCU on the reader side
	 */
	nr_wb = nr_dirty = nr_io = nr_more_io = 0;
	spin_lock(&inode_lock);
	list_for_each_entry(wb, &bdi->wb_list, list) {
		nr_wb++;
		list_for_each_entry(inode, &wb->b_dirty, i_list)
			nr_dirty++;
		list_for_each_entry(inode, &wb->b_io, i_list)
			nr_io++;
		list_for_each_entry(inode, &wb->b_more_io, i_list)
			nr_more_io++;
	}
	spin_unlock(&inode_lock);

	get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi);

#define K(x) ((x) << (PAGE_SHIFT - 10))
	seq_printf(m,
		   "BdiWriteback:     %8lu kB\n"
		   "BdiReclaimable:   %8lu kB\n"
		   "BdiDirtyThresh:   %8lu kB\n"
		   "DirtyThresh:      %8lu kB\n"
		   "BackgroundThresh: %8lu kB\n"
		   "WritebackThreads: %8lu\n"
		   "b_dirty:          %8lu\n"
		   "b_io:             %8lu\n"
		   "b_more_io:        %8lu\n"
		   "bdi_list:         %8u\n"
		   "state:            %8lx\n"
		   "wb_mask:          %8lx\n"
		   "wb_list:          %8u\n"
		   "wb_cnt:           %8u\n",
		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
		   K(bdi_thresh), K(dirty_thresh),
		   K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io,
		   !list_empty(&bdi->bdi_list), bdi->state, bdi->wb_mask,
		   !list_empty(&bdi->wb_list), bdi->wb_cnt);
#undef K

	return 0;
}
static int bdi_debug_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, bdi_debug_stats_show, inode->i_private);
}

static const struct file_operations bdi_debug_stats_fops = {
	.open		= bdi_debug_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
{
	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);
	bdi->debug_stats = debugfs_create_file("stats", 0444, bdi->debug_dir,
					       bdi, &bdi_debug_stats_fops);
}

static void bdi_debug_unregister(struct backing_dev_info *bdi)
{
	debugfs_remove(bdi->debug_stats);
	debugfs_remove(bdi->debug_dir);
}
#else
static inline void bdi_debug_init(void)
{
}
static inline void bdi_debug_register(struct backing_dev_info *bdi,
				      const char *name)
{
}
static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
{
}
#endif
static ssize_t read_ahead_kb_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned long read_ahead_kb;
	ssize_t ret = -EINVAL;

	read_ahead_kb = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		bdi->ra_pages = read_ahead_kb >> (PAGE_SHIFT - 10);
		ret = count;
	}
	return ret;
}
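/*
 * Example (illustrative, the "8:0" device name is hypothetical): the
 * readahead window is tuned from user space through this attribute:
 *
 *	# echo 512 > /sys/class/bdi/8:0/read_ahead_kb
 *
 * stores 512kB worth of pages, i.e. 512 >> (PAGE_SHIFT - 10) = 128
 * pages with 4k pages, in bdi->ra_pages.
 */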
#define K(pages) ((pages) << (PAGE_SHIFT - 10))

#define BDI_SHOW(name, expr)						\
static ssize_t name##_show(struct device *dev,				\
			   struct device_attribute *attr, char *page)	\
{									\
	struct backing_dev_info *bdi = dev_get_drvdata(dev);		\
									\
	return snprintf(page, PAGE_SIZE-1, "%lld\n", (long long)expr);	\
}

BDI_SHOW(read_ahead_kb, K(bdi->ra_pages))
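/*
 * For reference, BDI_SHOW(read_ahead_kb, K(bdi->ra_pages)) expands to
 * roughly:
 *
 *	static ssize_t read_ahead_kb_show(struct device *dev,
 *			struct device_attribute *attr, char *page)
 *	{
 *		struct backing_dev_info *bdi = dev_get_drvdata(dev);
 *
 *		return snprintf(page, PAGE_SIZE-1, "%lld\n",
 *				(long long)K(bdi->ra_pages));
 *	}
 */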
static ssize_t min_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_min_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(min_ratio, bdi->min_ratio)
static ssize_t max_ratio_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct backing_dev_info *bdi = dev_get_drvdata(dev);
	char *end;
	unsigned int ratio;
	ssize_t ret = -EINVAL;

	ratio = simple_strtoul(buf, &end, 10);
	if (*buf && (end[0] == '\0' || (end[0] == '\n' && end[1] == '\0'))) {
		ret = bdi_set_max_ratio(bdi, ratio);
		if (!ret)
			ret = count;
	}
	return ret;
}
BDI_SHOW(max_ratio, bdi->max_ratio)
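/*
 * Example (illustrative, device name hypothetical): both ratios are
 * percentages of the global dirty threshold, validated by
 * bdi_set_{min,max}_ratio():
 *
 *	# echo 10 > /sys/class/bdi/8:0/min_ratio
 *	# echo 50 > /sys/class/bdi/8:0/max_ratio
 *
 * guarantees this device at least 10% of the dirty limit and caps its
 * share at 50%.
 */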
#define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store)

static struct device_attribute bdi_dev_attrs[] = {
	__ATTR_RW(read_ahead_kb),
	__ATTR_RW(min_ratio),
	__ATTR_RW(max_ratio),
	__ATTR_NULL,
};
static __init int bdi_class_init(void)
{
	bdi_class = class_create(THIS_MODULE, "bdi");
	if (IS_ERR(bdi_class))
		return PTR_ERR(bdi_class);

	bdi_class->dev_attrs = bdi_dev_attrs;
	bdi_debug_init();
	return 0;
}
postcore_initcall(bdi_class_init);
static int __init default_bdi_init(void)
{
	int err;

	sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers");
	BUG_ON(IS_ERR(sync_supers_tsk));

	/* setup_timer() initializes the timer, no separate init_timer() needed */
	setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0);
	arm_supers_timer();

	err = bdi_init(&default_backing_dev_info);
	if (!err)
		bdi_register(&default_backing_dev_info, NULL, "default");

	return err;
}
subsys_initcall(default_bdi_init);
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
	memset(wb, 0, sizeof(*wb));

	wb->bdi = bdi;
	wb->last_old_flush = jiffies;
	INIT_LIST_HEAD(&wb->b_dirty);
	INIT_LIST_HEAD(&wb->b_io);
	INIT_LIST_HEAD(&wb->b_more_io);
}
static void bdi_task_init(struct backing_dev_info *bdi,
			  struct bdi_writeback *wb)
{
	struct task_struct *tsk = current;

	spin_lock(&bdi->wb_lock);
	list_add_tail_rcu(&wb->list, &bdi->wb_list);
	spin_unlock(&bdi->wb_lock);

	tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
	set_freezable();

	/*
	 * Our parent may run at a different priority, just set us to normal
	 */
	set_user_nice(tsk, 0);
}
static int bdi_start_fn(void *ptr)
{
	struct bdi_writeback *wb = ptr;
	struct backing_dev_info *bdi = wb->bdi;
	int ret;

	/*
	 * Add us to the active bdi_list
	 */
	spin_lock_bh(&bdi_lock);
	list_add_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi_task_init(bdi, wb);

	/*
	 * Clear pending bit and wakeup anybody waiting to tear us down
	 */
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);

	ret = bdi_writeback_task(wb);

	/*
	 * Remove us from the list
	 */
	spin_lock(&bdi->wb_lock);
	list_del_rcu(&wb->list);
	spin_unlock(&bdi->wb_lock);

	/*
	 * Flush any work that raced with us exiting. No new work
	 * will be added, since this bdi isn't discoverable anymore.
	 */
	if (!list_empty(&bdi->work_list))
		wb_do_writeback(wb, 1);

	wb->task = NULL;
	return ret;
}
int bdi_has_dirty_io(struct backing_dev_info *bdi)
{
	return wb_has_dirty_io(&bdi->wb);
}
static void bdi_flush_io(struct backing_dev_info *bdi)
{
	struct writeback_control wbc = {
		.bdi			= bdi,
		.sync_mode		= WB_SYNC_NONE,
		.older_than_this	= NULL,
		.range_cyclic		= 1,
		.nr_to_write		= 1024,
	};

	writeback_inodes_wbc(&wbc);
}
/*
 * kupdated() used to do this. We cannot do it from the bdi_forker_task()
 * or we risk deadlocking on ->s_umount. The longer term solution would be
 * to implement sync_supers_bdi() or similar and simply do it from the
 * bdi writeback tasks individually.
 */
static int bdi_sync_supers(void *unused)
{
	set_user_nice(current, 0);

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();

		/*
		 * Do this periodically, like kupdated() did before.
		 */
		sync_supers();
	}

	return 0;
}
static void arm_supers_timer(void)
{
	unsigned long next;

	next = msecs_to_jiffies(dirty_writeback_interval * 10) + jiffies;
	mod_timer(&sync_supers_timer, round_jiffies_up(next));
}

static void sync_supers_timer_fn(unsigned long unused)
{
	wake_up_process(sync_supers_tsk);
	arm_supers_timer();
}
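/*
 * Note on units (assuming the usual sysctl semantics):
 * dirty_writeback_interval is kept in centiseconds, so the "* 10" above
 * converts it to milliseconds. The default of 500 thus yields
 * msecs_to_jiffies(5000), i.e. a 5 second period between sync_supers()
 * wakeups.
 */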
static int bdi_forker_task(void *ptr)
{
	struct bdi_writeback *me = ptr;

	bdi_task_init(me->bdi, me);

	for (;;) {
		struct backing_dev_info *bdi, *tmp;
		struct bdi_writeback *wb;

		/*
		 * Temporary measure, we want to make sure we don't see
		 * dirty data on the default backing_dev_info
		 */
		if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
			wb_do_writeback(me, 0);

		spin_lock_bh(&bdi_lock);

		/*
		 * Check if any existing bdi's have dirty data without
		 * a thread registered. If so, set that up.
		 */
		list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) {
			if (bdi->wb.task)
				continue;
			if (list_empty(&bdi->work_list) &&
			    !bdi_has_dirty_io(bdi))
				continue;

			bdi_add_default_flusher_task(bdi);
		}

		set_current_state(TASK_INTERRUPTIBLE);

		if (list_empty(&bdi_pending_list)) {
			unsigned long wait;

			spin_unlock_bh(&bdi_lock);
			wait = msecs_to_jiffies(dirty_writeback_interval * 10);
			schedule_timeout(wait);
			try_to_freeze();
			continue;
		}

		__set_current_state(TASK_RUNNING);

		/*
		 * This is our real job - check for pending entries in
		 * bdi_pending_list, and create the tasks that got added
		 */
		bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
				 bdi_list);
		list_del_init(&bdi->bdi_list);
		spin_unlock_bh(&bdi_lock);

		wb = &bdi->wb;
		wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
					dev_name(bdi->dev));
		/*
		 * If task creation fails, then re-add the bdi to
		 * the pending list and force writeout of the bdi
		 * from this forker thread. That will free some memory
		 * and we can try again.
		 */
		if (IS_ERR(wb->task)) {
			wb->task = NULL;

			/*
			 * Add this 'bdi' to the back, so we get
			 * a chance to flush other bdi's to free
			 * memory.
			 */
			spin_lock_bh(&bdi_lock);
			list_add_tail(&bdi->bdi_list, &bdi_pending_list);
			spin_unlock_bh(&bdi_lock);

			bdi_flush_io(bdi);
		}
	}

	return 0;
}
static void bdi_add_to_pending(struct rcu_head *head)
{
	struct backing_dev_info *bdi;

	bdi = container_of(head, struct backing_dev_info, rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);

	spin_lock(&bdi_lock);
	list_add_tail(&bdi->bdi_list, &bdi_pending_list);
	spin_unlock(&bdi_lock);

	/*
	 * We are now on the pending list, wake up bdi_forker_task()
	 * to finish the job and add us back to the active bdi_list
	 */
	wake_up_process(default_backing_dev_info.wb.task);
}
/*
 * Add the default flusher task that gets created for any bdi
 * that has dirty data pending writeout
 */
static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
{
	if (!bdi_cap_writeback_dirty(bdi))
		return;

	if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) {
		printk(KERN_ERR "bdi %p/%s is not registered!\n",
							bdi, bdi->name);
		return;
	}

	/*
	 * Check with the helper whether to proceed adding a task. Will only
	 * abort if two or more simultaneous calls to
	 * bdi_add_default_flusher_task() occurred; further additions will
	 * block waiting for previous additions to finish.
	 */
	if (!test_and_set_bit(BDI_pending, &bdi->state)) {
		list_del_rcu(&bdi->bdi_list);

		/*
		 * We must wait for the current RCU period to end before
		 * moving to the pending list. So schedule that operation
		 * from an RCU callback.
		 */
		call_rcu(&bdi->rcu_head, bdi_add_to_pending);
	}
}
/*
 * Remove bdi from bdi_list, and ensure that it is no longer visible
 */
static void bdi_remove_from_list(struct backing_dev_info *bdi)
{
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);

	synchronize_rcu();
}
int bdi_register(struct backing_dev_info *bdi, struct device *parent,
		const char *fmt, ...)
{
	va_list args;
	int ret = 0;
	struct device *dev;

	if (bdi->dev)	/* The driver needs to use separate queues per device */
		goto exit;

	va_start(args, fmt);
	dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args);
	va_end(args);
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto exit;
	}

	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	bdi->dev = dev;

	/*
	 * Just start the forker thread for our default backing_dev_info,
	 * and add other bdi's to the list. They will get a thread created
	 * on-demand when they need it.
	 */
	if (bdi_cap_flush_forker(bdi)) {
		struct bdi_writeback *wb = &bdi->wb;

		wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s",
						dev_name(dev));
		if (IS_ERR(wb->task)) {
			wb->task = NULL;
			ret = -ENOMEM;

			bdi_remove_from_list(bdi);
			goto exit;
		}
	}

	bdi_debug_register(bdi, dev_name(dev));
	set_bit(BDI_registered, &bdi->state);
exit:
	return ret;
}
EXPORT_SYMBOL(bdi_register);
int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev)
{
	return bdi_register(bdi, NULL, "%u:%u", MAJOR(dev), MINOR(dev));
}
EXPORT_SYMBOL(bdi_register_dev);
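/*
 * Example (illustrative sketch, not taken from an in-tree driver): a
 * driver that manages its own backing_dev_info registers it once per
 * device, typically with the "major:minor" naming that
 * bdi_register_dev() above wraps:
 *
 *	err = bdi_init(bdi);
 *	if (!err)
 *		err = bdi_register_dev(bdi, devt);	// devt: the device's dev_t
 */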
/*
 * Remove bdi from the global list and shutdown any threads we have running
 */
static void bdi_wb_shutdown(struct backing_dev_info *bdi)
{
	struct bdi_writeback *wb;

	if (!bdi_cap_writeback_dirty(bdi))
		return;

	/*
	 * If setup is pending, wait for that to complete first
	 */
	wait_on_bit(&bdi->state, BDI_pending, bdi_sched_wait,
			TASK_UNINTERRUPTIBLE);

	/*
	 * Make sure nobody finds us on the bdi_list anymore
	 */
	bdi_remove_from_list(bdi);

	/*
	 * Finally, kill the kernel threads. We don't need to be RCU
	 * safe anymore, since the bdi is gone from visibility. Force
	 * unfreeze of the thread before calling kthread_stop(), otherwise
	 * it would never exit if it is currently stuck in the refrigerator.
	 */
	list_for_each_entry(wb, &bdi->wb_list, list) {
		thaw_process(wb->task);
		kthread_stop(wb->task);
	}
}
/*
 * This bdi is going away now, make sure that no super_blocks point to it
 */
static void bdi_prune_sb(struct backing_dev_info *bdi)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_bdi == bdi)
			sb->s_bdi = NULL;
	}
	spin_unlock(&sb_lock);
}
void bdi_unregister(struct backing_dev_info *bdi)
{
	if (bdi->dev) {
		bdi_prune_sb(bdi);

		if (!bdi_cap_flush_forker(bdi))
			bdi_wb_shutdown(bdi);
		bdi_debug_unregister(bdi);
		device_unregister(bdi->dev);
		bdi->dev = NULL;
	}
}
EXPORT_SYMBOL(bdi_unregister);
int bdi_init(struct backing_dev_info *bdi)
{
	int i, err;

	bdi->dev = NULL;

	bdi->min_ratio = 0;
	bdi->max_ratio = 100;
	bdi->max_prop_frac = PROP_FRAC_BASE;
	spin_lock_init(&bdi->wb_lock);
	INIT_RCU_HEAD(&bdi->rcu_head);
	INIT_LIST_HEAD(&bdi->bdi_list);
	INIT_LIST_HEAD(&bdi->wb_list);
	INIT_LIST_HEAD(&bdi->work_list);

	bdi_wb_init(&bdi->wb, bdi);

	/*
	 * Just one thread support for now, hard code mask and count
	 */
	bdi->wb_mask = 1;
	bdi->wb_cnt = 1;

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
		err = percpu_counter_init(&bdi->bdi_stat[i], 0);
		if (err)
			goto err;
	}

	bdi->dirty_exceeded = 0;
	err = prop_local_init_percpu(&bdi->completions);

	if (err) {
err:
		while (i--)
			percpu_counter_destroy(&bdi->bdi_stat[i]);
	}

	return err;
}
EXPORT_SYMBOL(bdi_init);
void bdi_destroy(struct backing_dev_info *bdi)
{
	int i;

	/*
	 * Splice our entries to the default_backing_dev_info, if this
	 * bdi disappears
	 */
	if (bdi_has_dirty_io(bdi)) {
		struct bdi_writeback *dst = &default_backing_dev_info.wb;

		spin_lock(&inode_lock);
		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
		list_splice(&bdi->wb.b_io, &dst->b_io);
		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
		spin_unlock(&inode_lock);
	}

	bdi_unregister(bdi);

	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
		percpu_counter_destroy(&bdi->bdi_stat[i]);

	prop_local_destroy_percpu(&bdi->completions);
}
EXPORT_SYMBOL(bdi_destroy);
/*
 * For use from filesystems to quickly init and register a bdi associated
 * with dirty writeback
 */
int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
			   unsigned int cap)
{
	char tmp[32];
	int err;

	bdi->name = name;
	bdi->capabilities = cap;
	err = bdi_init(bdi);
	if (err)
		return err;

	sprintf(tmp, "%.28s%s", name, "-%d");
	err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
	if (err) {
		bdi_destroy(bdi);
		return err;
	}

	return 0;
}
EXPORT_SYMBOL(bdi_setup_and_register);
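/*
 * Example (illustrative sketch of how a simple filesystem would use the
 * helper above; "sbi", a per-mount private struct, is hypothetical):
 * allocate a bdi per mount and let the "-%d" suffix pick a unique
 * instance number from bdi_seq:
 *
 *	err = bdi_setup_and_register(&sbi->bdi, "myfs", BDI_CAP_MAP_COPY);
 *	if (err)
 *		return err;
 *	sb->s_bdi = &sbi->bdi;
 *
 * On the teardown path, bdi_destroy() undoes both the init and the
 * registration.
 */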
static wait_queue_head_t congestion_wqh[2] = {
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
	};
void clear_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	clear_bit(bit, &bdi->state);
	smp_mb__after_clear_bit();
	if (waitqueue_active(wqh))
		wake_up(wqh);
}
EXPORT_SYMBOL(clear_bdi_congested);
void set_bdi_congested(struct backing_dev_info *bdi, int sync)
{
	enum bdi_state bit;

	bit = sync ? BDI_sync_congested : BDI_async_congested;
	set_bit(bit, &bdi->state);
}
EXPORT_SYMBOL(set_bdi_congested);
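/*
 * Example (illustrative sketch; queue_is_full() is a hypothetical
 * helper): a block driver whose internal queue fills up can flag
 * congestion so writeback backs off, and clear it again on completion:
 *
 *	if (queue_is_full(q))
 *		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
 *	...
 *	clear_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
 */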
/**
 * congestion_wait - wait for a backing_dev to become uncongested
 * @sync: SYNC or ASYNC IO
 * @timeout: timeout in jiffies
 *
 * Waits for up to @timeout jiffies for a backing_dev (any backing_dev) to exit
 * write congestion. If no backing_devs are congested then just wait for the
 * next write to be completed.
 */
long congestion_wait(int sync, long timeout)
{
	long ret;
	DEFINE_WAIT(wait);
	wait_queue_head_t *wqh = &congestion_wqh[sync];

	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
	ret = io_schedule_timeout(timeout);
	finish_wait(wqh, &wait);
	return ret;
}
EXPORT_SYMBOL(congestion_wait);
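/*
 * Example (illustrative): reclaim-like callers use this to back off
 * when everything is congested instead of busy-looping, e.g.
 *
 *	congestion_wait(BLK_RW_ASYNC, HZ/10);
 *
 * which sleeps for at most 100ms, or until some bdi clears its async
 * congestion bit and wakes the queue.
 */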