/*
 * include/linux/writeback.h
 */
#ifndef WRITEBACK_H
#define WRITEBACK_H

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/flex_proportions.h>
#include <linux/backing-dev-defs.h>
DECLARE_PER_CPU(int, dirty_throttle_leaks);
/*
 * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
 *
 *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
 *
 * Further beyond, all dirtier tasks will enter a loop waiting (possibly for a
 * long time) for the dirty pages to drop, unless they have written enough
 * pages.
 *
 * The global dirty threshold is normally equal to the global dirty limit,
 * except when the system suddenly allocates a lot of anonymous memory and
 * knocks down the global dirty threshold quickly, in which case the global
 * dirty limit will follow down slowly to prevent livelocking all dirtier
 * tasks.
 */
#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
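
/*
 * Illustrative sketch (not part of this header; the helper name is made up):
 * the lower bound of the smooth-throttling region described above can be
 * derived from a given threshold like this.
 */
static inline unsigned long example_throttle_region_start(unsigned long thresh)
{
	/* throttling happens in (thresh - thresh/DIRTY_FULL_SCOPE, thresh) */
	return thresh - thresh / DIRTY_FULL_SCOPE;
}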
struct backing_dev_info;
enum writeback_sync_modes {
	WB_SYNC_NONE,	/* Don't wait on anything */
	WB_SYNC_ALL,	/* Wait on every mapping */
};
/*
 * why some writeback work was initiated
 */
enum wb_reason {
	WB_REASON_TRY_TO_FREE_PAGES,
	WB_REASON_LAPTOP_TIMER,
	WB_REASON_FREE_MORE_MEM,
	WB_REASON_FS_FREE_SPACE,
	/*
	 * There is no bdi forker thread any more and works are done
	 * by an emergency worker; however, this reason is visible to
	 * userland via tracepoints and we keep exposing exactly the
	 * same information, so the name no longer matches.
	 */
	WB_REASON_FORKER_THREAD,
};
/*
 * A control structure which tells the writeback code what to do.  These are
 * always on the stack, and hence need no locking.  They are always initialised
 * in a manner such that unspecified fields are set to zero.
 */
struct writeback_control {
	long nr_to_write;		/* Write this many pages, and decrement
					   this for each page written */
	long pages_skipped;		/* Pages which were not written */

	/*
	 * For a_ops->writepages(): if start or end are non-zero then this is
	 * a hint that the filesystem need only write out the pages inside that
	 * byterange.  The byte at `end' is included in the writeout request.
	 */
	loff_t range_start;
	loff_t range_end;

	enum writeback_sync_modes sync_mode;

	unsigned for_kupdate:1;		/* A kupdate writeback */
	unsigned for_background:1;	/* A background writeback */
	unsigned tagged_writepages:1;	/* tag-and-write to avoid livelock */
	unsigned for_reclaim:1;		/* Invoked from the page allocator */
	unsigned range_cyclic:1;	/* range_start is cyclic */
	unsigned for_sync:1;		/* sync(2) WB_SYNC_ALL writeback */
};
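
/*
 * Illustrative sketch (hypothetical helper, not part of this header): callers
 * build a writeback_control on the stack and hand it to do_writepages(),
 * declared further below.  Unspecified fields default to zero as noted above.
 */
static inline int example_flush_mapping(struct address_space *mapping)
{
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_ALL,	/* wait on every page */
		.nr_to_write	= LONG_MAX,	/* no limit on page count */
	};

	return do_writepages(mapping, &wbc);
}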
/*
 * A wb_domain represents a domain that wb's (bdi_writeback's) belong to
 * and are measured against each other in.  There always is one global
 * domain, global_wb_domain, that every wb in the system is a member of.
 * This allows measuring the relative bandwidth of each wb to distribute
 * dirtyable memory accordingly.
 */
struct wb_domain {
	spinlock_t lock;

	/*
	 * Scale the writeback cache size proportionally to the relative
	 * writeout speed.
	 *
	 * We do this by keeping a floating proportion between BDIs, based
	 * on page writeback completions [end_page_writeback()].  Those
	 * devices that write out pages fastest will get the larger share,
	 * while the slower will get a smaller share.
	 *
	 * We use page writeout completions because we are interested in
	 * getting rid of dirty pages.  Having them written out is the
	 * primary goal.
	 *
	 * We introduce a concept of time, a period over which we measure
	 * these events, because demand can/will vary over time.  The length
	 * of this period itself is measured in page writeback completions.
	 */
	struct fprop_global completions;
	struct timer_list period_timer;	/* timer for aging of completions */
	unsigned long period_time;

	/*
	 * The dirtyable memory and dirty threshold could be suddenly
	 * knocked down by a large amount (e.g. on the startup of KVM in a
	 * swapless system).  This may throw the system into deep dirty
	 * exceeded state and throttle heavy/light dirtiers alike.  To
	 * retain good responsiveness, maintain global_dirty_limit for
	 * tracking slowly down to the knocked down dirty threshold.
	 *
	 * Both fields are protected by ->lock.
	 */
	unsigned long dirty_limit_tstamp;
	unsigned long dirty_limit;
};
/**
 * wb_domain_size_changed - memory available to a wb_domain has changed
 * @dom: wb_domain of interest
 *
 * This function should be called when the amount of memory available to
 * @dom has changed.  It resets @dom's dirty limit parameters to prevent
 * the past values which don't match the current configuration from skewing
 * dirty throttling.  Without this, when the memory size of a wb_domain is
 * greatly reduced, the dirty throttling logic may allow too many pages to
 * be dirtied, leading to consecutive unnecessary OOMs, and may get stuck in
 * that state.
 */
static inline void wb_domain_size_changed(struct wb_domain *dom)
{
	spin_lock(&dom->lock);
	dom->dirty_limit_tstamp = jiffies;
	dom->dirty_limit = 0;
	spin_unlock(&dom->lock);
}
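
/*
 * Illustrative sketch (hypothetical wrapper): when the dirtyable memory of
 * the global domain changes, e.g. after memory hotplug, the stale limit is
 * dropped so it can be recomputed.  global_wb_domain is declared further
 * below.
 */
static inline void example_global_domain_resized(void)
{
	/* forget the old dirty_limit; it no longer matches available memory */
	wb_domain_size_changed(&global_wb_domain);
}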
struct bdi_writeback;
void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
			    enum wb_reason reason);
bool try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
bool try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
				   enum wb_reason reason);
void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
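
/*
 * Illustrative sketch (hypothetical caller): a filesystem running low on
 * free space may kick opportunistic writeback of its dirty inodes using the
 * declarations above.
 */
static inline void example_reclaim_space(struct super_block *sb)
{
	/* best effort: may do nothing if writeback is already under way */
	try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
}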
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
{
	wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE);
}
#ifdef CONFIG_CGROUP_WRITEBACK

void __inode_attach_wb(struct inode *inode, struct page *page);
/**
 * inode_attach_wb - associate an inode with its wb
 * @inode: inode of interest
 * @page: page being dirtied (may be NULL)
 *
 * If @inode doesn't have its wb, associate it with the wb matching the
 * memcg of @page or, if @page is NULL, %current.  May be called w/ or w/o
 * @inode->i_lock held.
 */
static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
	if (!inode->i_wb)
		__inode_attach_wb(inode, page);
}
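
/*
 * Illustrative sketch (hypothetical helper): a page-dirtying path attaches
 * the inode to a wb first, so that cgroup writeback can attribute the dirty
 * page to the matching memcg/blkcg.
 */
static inline void example_before_dirtying(struct inode *inode,
					   struct page *page)
{
	inode_attach_wb(inode, page);
	/* ... then mark @page dirty against @inode's mapping ... */
}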
/**
 * inode_detach_wb - disassociate an inode from its wb
 * @inode: inode of interest
 *
 * @inode is being freed.  Detach from its wb.
 */
static inline void inode_detach_wb(struct inode *inode)
#else	/* CONFIG_CGROUP_WRITEBACK */

static inline void inode_attach_wb(struct inode *inode, struct page *page)
{
}

static inline void inode_detach_wb(struct inode *inode)
{
}

#endif	/* CONFIG_CGROUP_WRITEBACK */
/*
 * mm/page-writeback.c
 */
#ifdef CONFIG_BLOCK
void laptop_io_completion(struct backing_dev_info *info);
void laptop_sync_completion(void);
void laptop_mode_sync(struct work_struct *work);
void laptop_mode_timer_fn(unsigned long data);
#else
static inline void laptop_sync_completion(void) { }
#endif
void throttle_vm_writeout(gfp_t gfp_mask);
bool zone_dirty_ok(struct zone *zone);
int wb_domain_init(struct wb_domain *dom, gfp_t gfp);
#ifdef CONFIG_CGROUP_WRITEBACK
void wb_domain_exit(struct wb_domain *dom);
#endif

extern struct wb_domain global_wb_domain;
/* These are exported to sysctl. */
extern int dirty_background_ratio;
extern unsigned long dirty_background_bytes;
extern int vm_dirty_ratio;
extern unsigned long vm_dirty_bytes;
extern unsigned int dirty_writeback_interval;
extern unsigned int dirty_expire_interval;
extern unsigned int dirtytime_expire_interval;
extern int vm_highmem_is_dirtyable;
extern int block_dump;
extern int laptop_mode;
extern int dirty_background_ratio_handler(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos);
extern int dirty_background_bytes_handler(struct ctl_table *table, int write,
					  void __user *buffer, size_t *lenp,
					  loff_t *ppos);
extern int dirty_ratio_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);
extern int dirty_bytes_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp,
			       loff_t *ppos);
int dirtytime_interval_handler(struct ctl_table *table, int write,
			       void __user *buffer, size_t *lenp, loff_t *ppos);
int dirty_writeback_centisecs_handler(struct ctl_table *, int,
				      void __user *, size_t *, loff_t *);
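
/*
 * Illustrative sketch (not the kernel's actual table): the handlers above are
 * meant to be wired into sysctl entries roughly like this, assuming
 * <linux/sysctl.h> is included for the full struct ctl_table definition.
 */
static struct ctl_table example_dirty_table[] = {
	{
		.procname	= "dirty_background_ratio",
		.data		= &dirty_background_ratio,
		.maxlen		= sizeof(dirty_background_ratio),
		.mode		= 0644,
		.proc_handler	= dirty_background_ratio_handler,
	},
	{ }	/* sentinel */
};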
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);

void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time);
void page_writeback_init(void);
void balance_dirty_pages_ratelimited(struct address_space *mapping);
bool wb_over_bg_thresh(struct bdi_writeback *wb);
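
/*
 * Illustrative sketch (hypothetical helper): buffered write paths call
 * balance_dirty_pages_ratelimited() after dirtying pages so that a heavy
 * dirtier is throttled once it exceeds its share of the dirty limit.
 */
static inline void example_after_buffered_write(struct address_space *mapping)
{
	/* may sleep; must not be called from atomic context */
	balance_dirty_pages_ratelimited(mapping);
}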
typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
			   void *data);

int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc);
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end);
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
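
/*
 * Illustrative sketch (hypothetical callback): write_cache_pages() walks the
 * dirty pages of a mapping and calls a writepage_t callback on each one,
 * passing through the opaque @data pointer.  A caller would then do:
 *
 *	write_cache_pages(mapping, wbc, example_writepage, mapping);
 *
 * The full struct address_space definition from fs.h is assumed here.
 */
static int example_writepage(struct page *page, struct writeback_control *wbc,
			     void *data)
{
	struct address_space *mapping = data;

	/* hand the page to the filesystem's own writepage method */
	return mapping->a_ops->writepage(page, wbc);
}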
void writeback_set_ratelimit(void);
void account_page_redirty(struct page *page);
#endif		/* WRITEBACK_H */