ext4: fix the deadlock in mpage_da_map_and_submit()
fs/ext4/inode.c (linux-2.6-microblaze.git)
/*
 *  linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  from
 *
 *  linux/fs/minix/inode.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  64-bit file support on 64-bit platforms by Jakub Jelinek
 *      (jj@sunsite.ms.mff.cuni.cz)
 *
 *  Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/time.h>
#include <linux/jbd2.h>
#include <linux/highuid.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/namei.h>
#include <linux/uio.h>
#include <linux/bio.h>
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>

#include "ext4_jbd2.h"
#include "xattr.h"
#include "acl.h"
#include "truncate.h"

#include <trace/events/ext4.h>

#define MPAGE_DA_EXTENT_TAIL 0x01

static inline int ext4_begin_ordered_truncate(struct inode *inode,
                                              loff_t new_size)
{
        trace_ext4_begin_ordered_truncate(inode, new_size);
        /*
         * If jinode is NULL, then we never opened the file for
         * writing, so there's no need to call
         * jbd2_journal_begin_ordered_truncate() since there are no
         * outstanding writes we need to flush.
         */
        if (!EXT4_I(inode)->jinode)
                return 0;
        return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
                                                   EXT4_I(inode)->jinode,
                                                   new_size);
}

static void ext4_invalidatepage(struct page *page, unsigned long offset);
static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create);
static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
static int __ext4_journalled_writepage(struct page *page, unsigned int len);
static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);

/*
 * Test whether an inode is a fast symlink.
 */
static int ext4_inode_is_fast_symlink(struct inode *inode)
{
        int ea_blocks = EXT4_I(inode)->i_file_acl ?
                (inode->i_sb->s_blocksize >> 9) : 0;
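        /*
         * A fast symlink stores its target directly in i_data, so its
         * i_blocks count is zero apart from an optional external xattr
         * block, which i_blocks counts in 512-byte sectors (hence the
         * ">> 9" above).
         */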

        return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
}

/*
 * Restart the transaction associated with *handle.  This does a commit,
 * so before we call here everything must be consistently dirtied against
 * this transaction.
 */
int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
                                 int nblocks)
{
        int ret;

        /*
         * Drop i_data_sem to avoid deadlock with ext4_map_blocks.  At this
         * moment, get_block can be called only for blocks inside i_size since
         * page cache has been already dropped and writes are blocked by
         * i_mutex. So we can safely drop the i_data_sem here.
         */
        BUG_ON(EXT4_JOURNAL(inode) == NULL);
        jbd_debug(2, "restarting handle %p\n", handle);
        up_write(&EXT4_I(inode)->i_data_sem);
        ret = ext4_journal_restart(handle, nblocks);
        down_write(&EXT4_I(inode)->i_data_sem);
        ext4_discard_preallocations(inode);

        return ret;
}

/*
 * Called at the last iput().  Note that we get here even when i_nlink
 * is non-zero; in that case we only flush and discard the inode's page
 * cache before releasing it.
 */
void ext4_evict_inode(struct inode *inode)
{
        handle_t *handle;
        int err;

        trace_ext4_evict_inode(inode);

        ext4_ioend_wait(inode);

        if (inode->i_nlink) {
                /*
                 * When journalling data, dirty buffers are tracked only in the
                 * journal.  So although mm thinks everything is clean and
                 * ready for reaping, the inode might still have some pages to
                 * write in the running transaction or waiting to be
                 * checkpointed.  Thus calling jbd2_journal_invalidatepage()
                 * (via truncate_inode_pages()) to discard these buffers can
                 * cause data loss.  Also, even if we did not discard these
                 * buffers, we would have no way to find them after the inode
                 * is reaped, and thus the user could see stale data if they
                 * try to read them before the transaction is checkpointed.
                 * So be careful and force everything to disk here...  We use
                 * ei->i_datasync_tid to store the newest transaction
                 * containing the inode's data.
                 *
                 * Note that directories do not have this problem because they
                 * don't use the page cache.
                 */
                if (ext4_should_journal_data(inode) &&
                    (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode))) {
                        journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
                        tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;

                        jbd2_log_start_commit(journal, commit_tid);
                        jbd2_log_wait_commit(journal, commit_tid);
                        filemap_write_and_wait(&inode->i_data);
                }
                truncate_inode_pages(&inode->i_data, 0);
                goto no_delete;
        }

        if (!is_bad_inode(inode))
                dquot_initialize(inode);

        if (ext4_should_order_data(inode))
                ext4_begin_ordered_truncate(inode, 0);
        truncate_inode_pages(&inode->i_data, 0);

        if (is_bad_inode(inode))
                goto no_delete;

        handle = ext4_journal_start(inode, ext4_blocks_for_truncate(inode)+3);
        if (IS_ERR(handle)) {
                ext4_std_error(inode->i_sb, PTR_ERR(handle));
                /*
                 * If we're going to skip the normal cleanup, we still need to
                 * make sure that the in-core orphan linked list is properly
                 * cleaned up.
                 */
                ext4_orphan_del(NULL, inode);
                goto no_delete;
        }

        if (IS_SYNC(inode))
                ext4_handle_sync(handle);
        inode->i_size = 0;
        err = ext4_mark_inode_dirty(handle, inode);
        if (err) {
                ext4_warning(inode->i_sb,
                             "couldn't mark inode dirty (err %d)", err);
                goto stop_handle;
        }
        if (inode->i_blocks)
                ext4_truncate(inode);

        /*
         * ext4_ext_truncate() doesn't reserve any slop when it
         * restarts journal transactions; therefore there may not be
         * enough credits left in the handle to remove the inode from
         * the orphan list and set the dtime field.
         */
        if (!ext4_handle_has_enough_credits(handle, 3)) {
                err = ext4_journal_extend(handle, 3);
                if (err > 0)
                        err = ext4_journal_restart(handle, 3);
                if (err != 0) {
                        ext4_warning(inode->i_sb,
                                     "couldn't extend journal (err %d)", err);
                stop_handle:
                        ext4_journal_stop(handle);
                        ext4_orphan_del(NULL, inode);
                        goto no_delete;
                }
        }

        /*
         * Kill off the orphan record which ext4_truncate created.
         * AKPM: I think this can be inside the above `if'.
         * Note that ext4_orphan_del() has to be able to cope with the
         * deletion of a non-existent orphan - this is because we don't
         * know if ext4_truncate() actually created an orphan record.
         * (Well, we could do this if we need to, but heck - it works)
         */
        ext4_orphan_del(handle, inode);
        EXT4_I(inode)->i_dtime  = get_seconds();

        /*
         * One subtle ordering requirement: if anything has gone wrong
         * (transaction abort, IO errors, whatever), then we can still
         * do these next steps (the fs will already have been marked as
         * having errors), but we can't free the inode if the mark_dirty
         * fails.
         */
        if (ext4_mark_inode_dirty(handle, inode))
                /* If that failed, just do the required in-core inode clear. */
                ext4_clear_inode(inode);
        else
                ext4_free_inode(handle, inode);
        ext4_journal_stop(handle);
        return;
no_delete:
        ext4_clear_inode(inode);        /* We must guarantee clearing of inode... */
}

#ifdef CONFIG_QUOTA
qsize_t *ext4_get_reserved_space(struct inode *inode)
{
        return &EXT4_I(inode)->i_reserved_quota;
}
#endif

/*
 * Calculate the number of metadata blocks we need to reserve in order
 * to allocate a block located at @lblock.
 */
static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
{
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
                return ext4_ext_calc_metadata_amount(inode, lblock);

        return ext4_ind_calc_metadata_amount(inode, lblock);
}

/*
 * Called with i_data_sem down, which is important since we can call
 * ext4_discard_preallocations() from here.
 */
void ext4_da_update_reserve_space(struct inode *inode,
                                        int used, int quota_claim)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);

        spin_lock(&ei->i_block_reservation_lock);
        trace_ext4_da_update_reserve_space(inode, used, quota_claim);
        if (unlikely(used > ei->i_reserved_data_blocks)) {
                ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, used %d "
                         "with only %d reserved data blocks\n",
                         __func__, inode->i_ino, used,
                         ei->i_reserved_data_blocks);
                WARN_ON(1);
                used = ei->i_reserved_data_blocks;
        }

        /* Update per-inode reservations */
        ei->i_reserved_data_blocks -= used;
        ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
        percpu_counter_sub(&sbi->s_dirtyclusters_counter,
                           used + ei->i_allocated_meta_blocks);
        ei->i_allocated_meta_blocks = 0;

        if (ei->i_reserved_data_blocks == 0) {
                /*
                 * We can release all of the reserved metadata blocks
                 * only when we have written all of the delayed
                 * allocation blocks.
                 */
                percpu_counter_sub(&sbi->s_dirtyclusters_counter,
                                   ei->i_reserved_meta_blocks);
                ei->i_reserved_meta_blocks = 0;
                ei->i_da_metadata_calc_len = 0;
        }
        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        /* Update quota subsystem for data blocks */
        if (quota_claim)
                dquot_claim_block(inode, EXT4_C2B(sbi, used));
        else {
                /*
                 * We did fallocate at an offset that was already delayed
                 * allocated, so on delayed-allocated writeback we should
                 * not re-claim the quota for those fallocated blocks.
                 */
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
        }

        /*
         * If we have done all the pending block allocations and if
         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
        if ((ei->i_reserved_data_blocks == 0) &&
            (atomic_read(&inode->i_writecount) == 0))
                ext4_discard_preallocations(inode);
}

static int __check_block_validity(struct inode *inode, const char *func,
                                unsigned int line,
                                struct ext4_map_blocks *map)
{
        if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
                                   map->m_len)) {
                ext4_error_inode(inode, func, line, map->m_pblk,
                                 "lblock %lu mapped to illegal pblock "
                                 "(length %d)", (unsigned long) map->m_lblk,
                                 map->m_len);
                return -EIO;
        }
        return 0;
}

#define check_block_validity(inode, map)        \
        __check_block_validity((inode), __func__, __LINE__, (map))

/*
 * Return the number of contiguous dirty pages in a given inode
 * starting at page frame idx.
 */
static pgoff_t ext4_num_dirty_pages(struct inode *inode, pgoff_t idx,
                                    unsigned int max_pages)
{
        struct address_space *mapping = inode->i_mapping;
        pgoff_t index;
        struct pagevec pvec;
        pgoff_t num = 0;
        int i, nr_pages, done = 0;

        if (max_pages == 0)
                return 0;
        pagevec_init(&pvec, 0);
        while (!done) {
                index = idx;
                nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
                                              PAGECACHE_TAG_DIRTY,
                                              (pgoff_t)PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

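                        /*
                         * The scan stops at the first page that is not
                         * dirty and contiguous, is already under
                         * writeback, or has a buffer that is neither
                         * delayed nor unwritten; only fully delayed or
                         * unwritten pages are counted.
                         */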
                        lock_page(page);
                        if (unlikely(page->mapping != mapping) ||
                            !PageDirty(page) ||
                            PageWriteback(page) ||
                            page->index != idx) {
                                done = 1;
                                unlock_page(page);
                                break;
                        }
                        if (page_has_buffers(page)) {
                                bh = head = page_buffers(page);
                                do {
                                        if (!buffer_delay(bh) &&
                                            !buffer_unwritten(bh))
                                                done = 1;
                                        bh = bh->b_this_page;
                                } while (!done && (bh != head));
                        }
                        unlock_page(page);
                        if (done)
                                break;
                        idx++;
                        num++;
                        if (num >= max_pages) {
                                done = 1;
                                break;
                        }
                }
                pagevec_release(&pvec);
        }
        return num;
}

/*
 * Sets the BH_Da_Mapped bit on the buffer heads corresponding to the given map.
 */
static void set_buffers_da_mapped(struct inode *inode,
                                   struct ext4_map_blocks *map)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        int i, nr_pages;
        pgoff_t index, end;

        index = map->m_lblk >> (PAGE_CACHE_SHIFT - inode->i_blkbits);
        end = (map->m_lblk + map->m_len - 1) >>
                (PAGE_CACHE_SHIFT - inode->i_blkbits);

        pagevec_init(&pvec, 0);
        while (index <= end) {
                nr_pages = pagevec_lookup(&pvec, mapping, index,
                                          min(end - index + 1,
                                              (pgoff_t)PAGEVEC_SIZE));
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        struct page *page = pvec.pages[i];
                        struct buffer_head *bh, *head;

                        if (unlikely(page->mapping != mapping) ||
                            !PageDirty(page))
                                break;

                        if (page_has_buffers(page)) {
                                bh = head = page_buffers(page);
                                do {
                                        set_buffer_da_mapped(bh);
                                        bh = bh->b_this_page;
                                } while (bh != head);
                        }
                        index++;
                }
                pagevec_release(&pvec);
        }
}

/*
 * The ext4_map_blocks() function tries to look up the requested blocks,
 * and returns the mapping if the blocks are already allocated.
 *
 * Otherwise it takes the write lock of i_data_sem, allocates blocks,
 * stores the allocated blocks in the result structure and marks the
 * mapping as mapped.
 *
 * If the file is extent based, it calls ext4_ext_map_blocks();
 * otherwise it calls ext4_ind_map_blocks() to handle indirect-mapped
 * files.
 *
 * On success, it returns the number of blocks mapped or allocated.
 * If create == 0 and the blocks are pre-allocated and uninitialized,
 * the resulting mapping is unmapped.  If create == 1, it will make
 * sure the mapping is mapped.
 *
 * It returns 0 if a plain lookup failed (the blocks have not been
 * allocated); in that case the mapping is left unmapped.
 *
 * It returns the error in case of allocation failure.
 */
int ext4_map_blocks(handle_t *handle, struct inode *inode,
                    struct ext4_map_blocks *map, int flags)
{
        int retval;

        map->m_flags = 0;
        ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
                  "logical block %lu\n", inode->i_ino, flags, map->m_len,
                  (unsigned long) map->m_lblk);
        /*
         * Try to see if we can get the block without requesting a new
         * file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, 0);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                int ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }

        /* If it is only a block(s) lookup */
        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;

        /*
         * Return if the blocks have already been allocated.
         *
         * Note that if blocks have been preallocated,
         * ext4_ext_map_blocks() behaves as if create == 0 was passed
         * and leaves the mapping unmapped.
         */
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
                return retval;

        /*
         * When we call get_blocks without the create flag, the
         * BH_Unwritten flag could have gotten set if the blocks
         * requested were part of an uninitialized extent.  We need to
         * clear this flag now that we are committed to convert all or
         * part of the uninitialized extent to be an initialized
         * extent.  This is because we need to avoid the combination
         * of BH_Unwritten and BH_Mapped flags being simultaneously
         * set on the buffer_head.
         */
        map->m_flags &= ~EXT4_MAP_UNWRITTEN;

        /*
         * Allocating new blocks and/or writing to an uninitialized
         * extent may result in i_data being updated, so we take
         * the write lock of i_data_sem and call get_blocks()
         * with the create == 1 flag.
         */
        down_write((&EXT4_I(inode)->i_data_sem));

        /*
         * If the caller is from the delayed allocation writeout path,
         * we have already reserved fs blocks for the allocation; let
         * the underlying get_block() function know, so that it can
         * avoid double accounting.
         */
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
        /*
         * We need to check the extents flag here again because migrate
         * could have changed the inode type in between.
         */
        if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
                retval = ext4_ext_map_blocks(handle, inode, map, flags);
        } else {
                retval = ext4_ind_map_blocks(handle, inode, map, flags);

                if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
                        /*
                         * We allocated new blocks which will result in
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags.
                         */
                        ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
                }

                /*
                 * Update reserved blocks/metadata blocks after successful
                 * block allocation which had been deferred till now. We don't
                 * support fallocate for non extent files. So we can update
                 * reserve space here.
                 */
                if ((retval > 0) &&
                        (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
                        ext4_da_update_reserve_space(inode, retval, 1);
        }
        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
                ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);

                /* If we have successfully mapped the delayed allocated blocks,
                 * set the BH_Da_Mapped bit on them. It's important to do this
                 * under the protection of i_data_sem.
                 */
                if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
                        set_buffers_da_mapped(inode, map);
        }

        up_write((&EXT4_I(inode)->i_data_sem));
        if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
                int ret = check_block_validity(inode, map);
                if (ret != 0)
                        return ret;
        }
        return retval;
}

/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096

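/*
 * _ext4_get_block() is the common get_block implementation.  If the
 * caller passed allocation flags but no transaction handle is running
 * (the direct I/O write path), it starts its own handle, sized via
 * ext4_chunk_trans_blocks() for at most DIO_MAX_BLOCKS blocks, and
 * stops it again before returning.
 */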
static int _ext4_get_block(struct inode *inode, sector_t iblock,
                           struct buffer_head *bh, int flags)
{
        handle_t *handle = ext4_journal_current_handle();
        struct ext4_map_blocks map;
        int ret = 0, started = 0;
        int dio_credits;

        map.m_lblk = iblock;
        map.m_len = bh->b_size >> inode->i_blkbits;

        if (flags && !handle) {
                /* Direct IO write... */
                if (map.m_len > DIO_MAX_BLOCKS)
                        map.m_len = DIO_MAX_BLOCKS;
                dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
                handle = ext4_journal_start(inode, dio_credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        return ret;
                }
                started = 1;
        }

        ret = ext4_map_blocks(handle, inode, &map, flags);
        if (ret > 0) {
                map_bh(bh, inode->i_sb, map.m_pblk);
                bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
                bh->b_size = inode->i_sb->s_blocksize * map.m_len;
                ret = 0;
        }
        if (started)
                ext4_journal_stop(handle);
        return ret;
}

int ext4_get_block(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh, int create)
{
        return _ext4_get_block(inode, iblock, bh,
                               create ? EXT4_GET_BLOCKS_CREATE : 0);
}

/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int create, int *errp)
{
        struct ext4_map_blocks map;
        struct buffer_head *bh;
        int fatal = 0, err;

        J_ASSERT(handle != NULL || create == 0);

        map.m_lblk = block;
        map.m_len = 1;
        err = ext4_map_blocks(handle, inode, &map,
                              create ? EXT4_GET_BLOCKS_CREATE : 0);

        if (err < 0)
                *errp = err;
        if (err <= 0)
                return NULL;
        *errp = 0;

        bh = sb_getblk(inode->i_sb, map.m_pblk);
        if (!bh) {
                *errp = -EIO;
                return NULL;
        }
        if (map.m_flags & EXT4_MAP_NEW) {
                J_ASSERT(create != 0);
                J_ASSERT(handle != NULL);

                /*
                 * Now that we do not always journal data, we should
                 * keep in mind whether this should always journal the
                 * new buffer as metadata.  For now, regular file
                 * writes use ext4_get_block instead, so it's not a
                 * problem.
                 */
                lock_buffer(bh);
                BUFFER_TRACE(bh, "call get_create_access");
                fatal = ext4_journal_get_create_access(handle, bh);
                if (!fatal && !buffer_uptodate(bh)) {
                        memset(bh->b_data, 0, inode->i_sb->s_blocksize);
                        set_buffer_uptodate(bh);
                }
                unlock_buffer(bh);
                BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
                err = ext4_handle_dirty_metadata(handle, inode, bh);
                if (!fatal)
                        fatal = err;
        } else {
                BUFFER_TRACE(bh, "not a new buffer");
        }
        if (fatal) {
                *errp = fatal;
                brelse(bh);
                bh = NULL;
        }
        return bh;
}

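/*
 * Like ext4_getblk(), but additionally reads the block from disk if it
 * is not already uptodate.  Returns NULL and sets *err on failure.
 */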
struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
                               ext4_lblk_t block, int create, int *err)
{
        struct buffer_head *bh;

        bh = ext4_getblk(handle, inode, block, create, err);
        if (!bh)
                return bh;
        if (buffer_uptodate(bh))
                return bh;
        ll_rw_block(READ_META, 1, &bh);
        wait_on_buffer(bh);
        if (buffer_uptodate(bh))
                return bh;
        put_bh(bh);
        *err = -EIO;
        return NULL;
}

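/*
 * Apply @fn to each buffer of @head that overlaps the byte range
 * [from, to); if @partial is non-NULL, it is set when some buffer
 * outside the range is not uptodate (i.e. the page is only partially
 * valid).  Returns the first error returned by @fn.
 */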
static int walk_page_buffers(handle_t *handle,
                             struct buffer_head *head,
                             unsigned from,
                             unsigned to,
                             int *partial,
                             int (*fn)(handle_t *handle,
                                       struct buffer_head *bh))
{
        struct buffer_head *bh;
        unsigned block_start, block_end;
        unsigned blocksize = head->b_size;
        int err, ret = 0;
        struct buffer_head *next;

        for (bh = head, block_start = 0;
             ret == 0 && (bh != head || !block_start);
             block_start = block_end, bh = next) {
                next = bh->b_this_page;
                block_end = block_start + blocksize;
                if (block_end <= from || block_start >= to) {
                        if (partial && !buffer_uptodate(bh))
                                *partial = 1;
                        continue;
                }
                err = (*fn)(handle, bh);
                if (!ret)
                        ret = err;
        }
        return ret;
}

/*
 * To preserve ordering, it is essential that the hole instantiation and
 * the data write be encapsulated in a single transaction.  We cannot
 * close off a transaction and start a new one between the ext4_get_block()
 * and the commit_write().  So doing the jbd2_journal_start at the start of
 * prepare_write() is the right place.
 *
 * Also, this function can nest inside ext4_writepage() ->
 * block_write_full_page(). In that case, we *know* that ext4_writepage()
 * has generated enough buffer credits to do the whole page.  So we won't
 * block on the journal in that case, which is good, because the caller may
 * be PF_MEMALLOC.
 *
 * By accident, ext4 can be reentered when a transaction is open via
 * quota file writes.  If we were to commit the transaction while thus
 * reentered, there can be a deadlock - we would be holding a quota
 * lock, and the commit would never complete if another thread had a
 * transaction open and was blocking on the quota lock - a ranking
 * violation.
 *
 * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
 * will _not_ run commit under these circumstances because handle->h_ref
 * is elevated.  We'll still have enough credits for the tiny quotafile
 * write.
 */
static int do_journal_get_write_access(handle_t *handle,
                                       struct buffer_head *bh)
{
        int dirty = buffer_dirty(bh);
        int ret;

        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        /*
         * __block_write_begin() could have dirtied some buffers. Clean
         * the dirty bit as jbd2_journal_get_write_access() could complain
         * otherwise about fs integrity issues. Setting of the dirty bit
         * by __block_write_begin() isn't a real problem here as we clear
         * the bit before releasing a page lock and thus writeback cannot
         * ever write the buffer.
         */
        if (dirty)
                clear_buffer_dirty(bh);
        ret = ext4_journal_get_write_access(handle, bh);
        if (!ret && dirty)
                ret = ext4_handle_dirty_metadata(handle, NULL, bh);
        return ret;
}

static int ext4_get_block_write(struct inode *inode, sector_t iblock,
                   struct buffer_head *bh_result, int create);
static int ext4_write_begin(struct file *file, struct address_space *mapping,
                            loff_t pos, unsigned len, unsigned flags,
                            struct page **pagep, void **fsdata)
{
        struct inode *inode = mapping->host;
        int ret, needed_blocks;
        handle_t *handle;
        int retries = 0;
        struct page *page;
        pgoff_t index;
        unsigned from, to;

        trace_ext4_write_begin(inode, pos, len, flags);
        /*
         * Reserve one block more for addition to the orphan list in case
         * we allocate blocks but the write fails for some reason.
         */
        needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
        index = pos >> PAGE_CACHE_SHIFT;
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

retry:
        handle = ext4_journal_start(inode, needed_blocks);
        if (IS_ERR(handle)) {
                ret = PTR_ERR(handle);
                goto out;
        }

        /* We cannot recurse into the filesystem as the transaction is already
         * started */
        flags |= AOP_FLAG_NOFS;

        page = grab_cache_page_write_begin(mapping, index, flags);
        if (!page) {
                ext4_journal_stop(handle);
                ret = -ENOMEM;
                goto out;
        }
        *pagep = page;

        if (ext4_should_dioread_nolock(inode))
                ret = __block_write_begin(page, pos, len, ext4_get_block_write);
        else
                ret = __block_write_begin(page, pos, len, ext4_get_block);

        if (!ret && ext4_should_journal_data(inode)) {
                ret = walk_page_buffers(handle, page_buffers(page),
                                from, to, NULL, do_journal_get_write_access);
        }

        if (ret) {
                unlock_page(page);
                page_cache_release(page);
                /*
                 * __block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
                 * i_size_read because we hold i_mutex.
                 *
                 * Add inode to orphan list in case we crash before
                 * truncate finishes
                 */
                if (pos + len > inode->i_size && ext4_can_truncate(inode))
                        ext4_orphan_add(handle, inode);

                ext4_journal_stop(handle);
                if (pos + len > inode->i_size) {
                        ext4_truncate_failed_write(inode);
                        /*
                         * If truncate failed early the inode might
                         * still be on the orphan list; we need to
                         * make sure the inode is removed from the
                         * orphan list in that case.
                         */
                        if (inode->i_nlink)
                                ext4_orphan_del(NULL, inode);
                }
        }

        if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
                goto retry;
out:
        return ret;
}

/* For write_end() in data=journal mode */
static int write_end_fn(handle_t *handle, struct buffer_head *bh)
{
        if (!buffer_mapped(bh) || buffer_freed(bh))
                return 0;
        set_buffer_uptodate(bh);
        return ext4_handle_dirty_metadata(handle, NULL, bh);
}

static int ext4_generic_write_end(struct file *file,
                                  struct address_space *mapping,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page, void *fsdata)
{
        int i_size_changed = 0;
        struct inode *inode = mapping->host;
        handle_t *handle = ext4_journal_current_handle();

        copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);

        /*
         * No need to use i_size_read() here, the i_size
         * cannot change under us because we hold i_mutex.
         *
         * But it's important to update i_size while still holding page lock:
         * page writeout could otherwise come in and zero beyond i_size.
         */
        if (pos + copied > inode->i_size) {
                i_size_write(inode, pos + copied);
                i_size_changed = 1;
        }

        if (pos + copied >  EXT4_I(inode)->i_disksize) {
                /* We need to mark the inode dirty even if
                 * new_i_size is less than inode->i_size
                 * but greater than i_disksize (hint: delalloc).
                 */
                ext4_update_i_disksize(inode, (pos + copied));
                i_size_changed = 1;
        }
        unlock_page(page);
        page_cache_release(page);

        /*
         * Don't mark the inode dirty under page lock. First, it unnecessarily
         * makes the holding time of page lock longer. Second, it forces lock
         * ordering of page lock and transaction start for journaling
         * filesystems.
         */
        if (i_size_changed)
                ext4_mark_inode_dirty(handle, inode);

        return copied;
}

/*
 * We need to pick up the new inode size which generic_commit_write gave us.
 * `file' can be NULL - eg, when called from page_symlink().
 *
 * ext4 never places buffers on inode->i_mapping->private_list.  Metadata
 * buffers are managed internally.
 */
static int ext4_ordered_write_end(struct file *file,
                                  struct address_space *mapping,
                                  loff_t pos, unsigned len, unsigned copied,
                                  struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;

        trace_ext4_ordered_write_end(inode, pos, len, copied);
        ret = ext4_jbd2_file_inode(handle, inode);

        if (ret == 0) {
                ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
                copied = ret2;
                if (pos + len > inode->i_size && ext4_can_truncate(inode))
                        /* If we have allocated more blocks than we
                         * copied, we will have blocks allocated
                         * outside inode->i_size; truncate them.
                         */
                        ext4_orphan_add(handle, inode);
                if (ret2 < 0)
                        ret = ret2;
        } else {
                unlock_page(page);
                page_cache_release(page);
        }

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        if (pos + len > inode->i_size) {
                ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

static int ext4_writeback_write_end(struct file *file,
                                    struct address_space *mapping,
                                    loff_t pos, unsigned len, unsigned copied,
                                    struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;

        trace_ext4_writeback_write_end(inode, pos, len, copied);
        ret2 = ext4_generic_write_end(file, mapping, pos, len, copied,
                                                        page, fsdata);
        copied = ret2;
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* If we have allocated more blocks than we copied, we
                 * will have blocks allocated outside inode->i_size;
                 * truncate them.
                 */
                ext4_orphan_add(handle, inode);

        if (ret2 < 0)
                ret = ret2;

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;

        if (pos + len > inode->i_size) {
                ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

static int ext4_journalled_write_end(struct file *file,
                                     struct address_space *mapping,
                                     loff_t pos, unsigned len, unsigned copied,
                                     struct page *page, void *fsdata)
{
        handle_t *handle = ext4_journal_current_handle();
        struct inode *inode = mapping->host;
        int ret = 0, ret2;
        int partial = 0;
        unsigned from, to;
        loff_t new_i_size;

        trace_ext4_journalled_write_end(inode, pos, len, copied);
        from = pos & (PAGE_CACHE_SIZE - 1);
        to = from + len;

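        /* In data=journal mode writes always run under a real handle. */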
        BUG_ON(!ext4_handle_valid(handle));

        if (copied < len) {
                if (!PageUptodate(page))
                        copied = 0;
                page_zero_new_buffers(page, from+copied, to);
        }

        ret = walk_page_buffers(handle, page_buffers(page), from,
                                to, &partial, write_end_fn);
        if (!partial)
                SetPageUptodate(page);
        new_i_size = pos + copied;
        if (new_i_size > inode->i_size)
                i_size_write(inode, pos+copied);
        ext4_set_inode_state(inode, EXT4_STATE_JDATA);
        EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
        if (new_i_size > EXT4_I(inode)->i_disksize) {
                ext4_update_i_disksize(inode, new_i_size);
                ret2 = ext4_mark_inode_dirty(handle, inode);
                if (!ret)
                        ret = ret2;
        }

        unlock_page(page);
        page_cache_release(page);
        if (pos + len > inode->i_size && ext4_can_truncate(inode))
                /* If we have allocated more blocks than we copied, we
                 * will have blocks allocated outside inode->i_size;
                 * truncate them.
                 */
                ext4_orphan_add(handle, inode);

        ret2 = ext4_journal_stop(handle);
        if (!ret)
                ret = ret2;
        if (pos + len > inode->i_size) {
                ext4_truncate_failed_write(inode);
                /*
                 * If truncate failed early the inode might still be
                 * on the orphan list; we need to make sure the inode
                 * is removed from the orphan list in that case.
                 */
                if (inode->i_nlink)
                        ext4_orphan_del(NULL, inode);
        }

        return ret ? ret : copied;
}

/*
 * Reserve a single cluster located at lblock
 */
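/*
 * The worst-case metadata need is recomputed on each attempt under
 * i_block_reservation_lock.  The data cluster is charged to quota
 * right away, while the estimated metadata is charged only against
 * the superblock's free-cluster accounting; on failure we back out
 * the quota reservation and, if the allocator suggests it, retry.
 */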
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
        int retries = 0;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);
        unsigned int md_needed;
        int ret;

        /*
         * Recalculate the amount of metadata blocks to reserve in
         * order to allocate nrblocks; the worst case is one extent
         * per block.
         */
repeat:
        spin_lock(&ei->i_block_reservation_lock);
        md_needed = EXT4_NUM_B2C(sbi,
                                 ext4_calc_metadata_amount(inode, lblock));
        trace_ext4_da_reserve_space(inode, md_needed);
        spin_unlock(&ei->i_block_reservation_lock);

        /*
         * We will charge metadata quota at writeout time; this saves
         * us from metadata over-estimation, though we may go over by
         * a small amount in the end.  Here we just reserve for data.
         */
        ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
        if (ret)
                return ret;
        /*
         * We do still charge estimated metadata to the sb though;
         * we cannot afford to run out of free blocks.
         */
        if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
                dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
                if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
                        yield();
                        goto repeat;
                }
                return -ENOSPC;
        }
        spin_lock(&ei->i_block_reservation_lock);
        ei->i_reserved_data_blocks++;
        ei->i_reserved_meta_blocks += md_needed;
        spin_unlock(&ei->i_block_reservation_lock);

        return 0;       /* success */
}

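/*
 * Release @to_free previously reserved data clusters, dropping the
 * now-unneeded metadata reservation once no delayed data blocks
 * remain, and give the quota reservation back.
 */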
static void ext4_da_release_space(struct inode *inode, int to_free)
{
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        struct ext4_inode_info *ei = EXT4_I(inode);

        if (!to_free)
                return;         /* Nothing to release, exit */

        spin_lock(&EXT4_I(inode)->i_block_reservation_lock);

        trace_ext4_da_release_space(inode, to_free);
        if (unlikely(to_free > ei->i_reserved_data_blocks)) {
                /*
                 * If there aren't enough reserved blocks, then the
                 * counter is messed up somewhere.  Since this
                 * function is called from invalidatepage, it's
                 * harmless to return without any action.
                 */
                ext4_msg(inode->i_sb, KERN_NOTICE, "ext4_da_release_space: "
                         "ino %lu, to_free %d with only %d reserved "
                         "data blocks\n", inode->i_ino, to_free,
                         ei->i_reserved_data_blocks);
                WARN_ON(1);
                to_free = ei->i_reserved_data_blocks;
        }
        ei->i_reserved_data_blocks -= to_free;

        if (ei->i_reserved_data_blocks == 0) {
                /*
                 * We can release all of the reserved metadata blocks
                 * only when we have written all of the delayed
                 * allocation blocks.
                 * Note that in case of bigalloc, i_reserved_meta_blocks,
                 * i_reserved_data_blocks, etc. refer to number of clusters.
                 */
                percpu_counter_sub(&sbi->s_dirtyclusters_counter,
                                   ei->i_reserved_meta_blocks);
                ei->i_reserved_meta_blocks = 0;
                ei->i_da_metadata_calc_len = 0;
        }

        /* update fs dirty data blocks counter */
        percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);

        spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);

        dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
}

static void ext4_da_page_release_reservation(struct page *page,
                                             unsigned long offset)
{
        int to_release = 0;
        struct buffer_head *head, *bh;
        unsigned int curr_off = 0;
        struct inode *inode = page->mapping->host;
        struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
        int num_clusters;

        head = page_buffers(page);
        bh = head;
        do {
                unsigned int next_off = curr_off + bh->b_size;

                if ((offset <= curr_off) && (buffer_delay(bh))) {
                        to_release++;
                        clear_buffer_delay(bh);
                        clear_buffer_da_mapped(bh);
                }
                curr_off = next_off;
        } while ((bh = bh->b_this_page) != head);

        /* If we have released all the blocks belonging to a cluster, then we
         * need to release the reserved space for that cluster. */
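        /*
         * With bigalloc, several blocks can share one cluster
         * reservation, so ext4_find_delalloc_cluster() is used below to
         * make sure a cluster's reservation is dropped only when no
         * other delayed block in that cluster still needs it.
         */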
        num_clusters = EXT4_NUM_B2C(sbi, to_release);
        while (num_clusters > 0) {
                ext4_fsblk_t lblk;
                lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
                        ((num_clusters - 1) << sbi->s_cluster_bits);
                if (sbi->s_cluster_ratio == 1 ||
                    !ext4_find_delalloc_cluster(inode, lblk, 1))
                        ext4_da_release_space(inode, 1);

                num_clusters--;
        }
}

/*
 * Delayed allocation stuff
 */

/*
 * mpage_da_submit_io - walks through the extent of pages and tries to
 * write them with the writepage() callback.
 *
 * @mpd->inode: inode
 * @mpd->first_page: first page of the extent
 * @mpd->next_page: page after the last page of the extent
 *
 * By the time mpage_da_submit_io() is called we expect all blocks
 * to be allocated.  This may be wrong if allocation failed.
 *
 * As pages are already locked by write_cache_pages(), we can't use it.
 */
static int mpage_da_submit_io(struct mpage_da_data *mpd,
                              struct ext4_map_blocks *map)
{
        struct pagevec pvec;
        unsigned long index, end;
        int ret = 0, err, nr_pages, i;
        struct inode *inode = mpd->inode;
        struct address_space *mapping = inode->i_mapping;
        loff_t size = i_size_read(inode);
        unsigned int len, block_start;
        struct buffer_head *bh, *page_bufs = NULL;
        int journal_data = ext4_should_journal_data(inode);
        sector_t pblock = 0, cur_logical = 0;
        struct ext4_io_submit io_submit;

        BUG_ON(mpd->next_page <= mpd->first_page);
        memset(&io_submit, 0, sizeof(io_submit));
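        /*
         * Pages written through ext4_bio_write_page() are collected in
         * io_submit and flushed by ext4_io_submit() once the whole
         * extent has been walked.
         */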
        /*
         * We need to start from the first_page to the next_page - 1
         * to make sure we also write the mapped dirty buffer_heads.
         * If we look at mpd->b_blocknr we would only be looking
         * at the currently mapped buffer_heads.
         */
        index = mpd->first_page;
        end = mpd->next_page - 1;

        pagevec_init(&pvec, 0);
        while (index <= end) {
                nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
                if (nr_pages == 0)
                        break;
                for (i = 0; i < nr_pages; i++) {
                        int commit_write = 0, skip_page = 0;
                        struct page *page = pvec.pages[i];

                        index = page->index;
                        if (index > end)
                                break;

                        if (index == size >> PAGE_CACHE_SHIFT)
                                len = size & ~PAGE_CACHE_MASK;
                        else
                                len = PAGE_CACHE_SIZE;
                        if (map) {
                                cur_logical = index << (PAGE_CACHE_SHIFT -
                                                        inode->i_blkbits);
                                pblock = map->m_pblk + (cur_logical -
                                                        map->m_lblk);
                        }
                        index++;

                        BUG_ON(!PageLocked(page));
                        BUG_ON(PageWriteback(page));

                        /*
                         * If the page does not have buffers (for
                         * whatever reason), try to create them using
                         * __block_write_begin.  If this fails,
                         * skip the page and move on.
                         */
                        if (!page_has_buffers(page)) {
                                if (__block_write_begin(page, 0, len,
                                                noalloc_get_block_write)) {
                                skip_page:
                                        unlock_page(page);
                                        continue;
                                }
                                commit_write = 1;
                        }

                        bh = page_bufs = page_buffers(page);
                        block_start = 0;
                        do {
                                if (!bh)
                                        goto skip_page;
                                if (map && (cur_logical >= map->m_lblk) &&
                                    (cur_logical <= (map->m_lblk +
                                                     (map->m_len - 1)))) {
                                        if (buffer_delay(bh)) {
                                                clear_buffer_delay(bh);
                                                bh->b_blocknr = pblock;
                                        }
                                        if (buffer_da_mapped(bh))
                                                clear_buffer_da_mapped(bh);
                                        if (buffer_unwritten(bh) ||
                                            buffer_mapped(bh))
                                                BUG_ON(bh->b_blocknr != pblock);
                                        if (map->m_flags & EXT4_MAP_UNINIT)
                                                set_buffer_uninit(bh);
                                        clear_buffer_unwritten(bh);
                                }

                                /* skip page if block allocation undone */
                                if (buffer_delay(bh) || buffer_unwritten(bh))
                                        skip_page = 1;
                                bh = bh->b_this_page;
                                block_start += bh->b_size;
                                cur_logical++;
                                pblock++;
                        } while (bh != page_bufs);

                        if (skip_page)
                                goto skip_page;

                        if (commit_write)
                                /* mark the buffer_heads as dirty & uptodate */
                                block_commit_write(page, 0, len);

                        clear_page_dirty_for_io(page);
                        /*
                         * Delalloc doesn't support data journalling,
                         * but eventually maybe we'll lift this
                         * restriction.
                         */
                        if (unlikely(journal_data && PageChecked(page)))
                                err = __ext4_journalled_writepage(page, len);
                        else if (test_opt(inode->i_sb, MBLK_IO_SUBMIT))
                                err = ext4_bio_write_page(&io_submit, page,
                                                          len, mpd->wbc);
                        else if (buffer_uninit(page_bufs)) {
                                ext4_set_bh_endio(page_bufs, inode);
                                err = block_write_full_page_endio(page,
                                        noalloc_get_block_write,
                                        mpd->wbc, ext4_end_io_buffer_write);
                        } else
                                err = block_write_full_page(page,
                                        noalloc_get_block_write, mpd->wbc);

                        if (!err)
                                mpd->pages_written++;
1378                         /*
1379                          * In the error case, we have to continue because
1380                          * the remaining pages are still locked.
1381                          */
1382                         if (ret == 0)
1383                                 ret = err;
1384                 }
1385                 pagevec_release(&pvec);
1386         }
1387         ext4_io_submit(&io_submit);
1388         return ret;
1389 }
1390
1391 static void ext4_da_block_invalidatepages(struct mpage_da_data *mpd)
1392 {
1393         int nr_pages, i;
1394         pgoff_t index, end;
1395         struct pagevec pvec;
1396         struct inode *inode = mpd->inode;
1397         struct address_space *mapping = inode->i_mapping;
1398
1399         index = mpd->first_page;
1400         end   = mpd->next_page - 1;
1401         while (index <= end) {
1402                 nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1403                 if (nr_pages == 0)
1404                         break;
1405                 for (i = 0; i < nr_pages; i++) {
1406                         struct page *page = pvec.pages[i];
1407                         if (page->index > end)
1408                                 break;
1409                         BUG_ON(!PageLocked(page));
1410                         BUG_ON(PageWriteback(page));
1411                         block_invalidatepage(page, 0);
1412                         ClearPageUptodate(page);
1413                         unlock_page(page);
1414                 }
1415                 index = pvec.pages[nr_pages - 1]->index + 1;
1416                 pagevec_release(&pvec);
1417         }
1418         return;
1419 }
1420
1421 static void ext4_print_free_blocks(struct inode *inode)
1422 {
1423         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1424         printk(KERN_CRIT "Total free blocks count %lld\n",
1425                EXT4_C2B(EXT4_SB(inode->i_sb),
1426                         ext4_count_free_clusters(inode->i_sb)));
1427         printk(KERN_CRIT "Free/Dirty block details\n");
1428         printk(KERN_CRIT "free_blocks=%lld\n",
1429                (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
1430                 percpu_counter_sum(&sbi->s_freeclusters_counter)));
1431         printk(KERN_CRIT "dirty_blocks=%lld\n",
1432                (long long) EXT4_C2B(EXT4_SB(inode->i_sb),
1433                 percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1434         printk(KERN_CRIT "Block reservation details\n");
1435         printk(KERN_CRIT "i_reserved_data_blocks=%u\n",
1436                EXT4_I(inode)->i_reserved_data_blocks);
1437         printk(KERN_CRIT "i_reserved_meta_blocks=%u\n",
1438                EXT4_I(inode)->i_reserved_meta_blocks);
1439         return;
1440 }
1441
1442 /*
1443  * mpage_da_map_and_submit - go through the given space, map it
1444  *       if necessary, and then submit it for I/O
1445  *
1446  * @mpd - bh describing space
1447  *
1448  * The function skips space we know is already mapped to disk blocks.
1449  *
1450  */
1451 static void mpage_da_map_and_submit(struct mpage_da_data *mpd)
1452 {
1453         int err, blks, get_blocks_flags;
1454         struct ext4_map_blocks map, *mapp = NULL;
1455         sector_t next = mpd->b_blocknr;
1456         unsigned max_blocks = mpd->b_size >> mpd->inode->i_blkbits;
1457         loff_t disksize = EXT4_I(mpd->inode)->i_disksize;
1458         handle_t *handle = NULL;
1459
1460         /*
1461          * If the blocks are mapped already, or we couldn't accumulate
1462          * any blocks, then proceed immediately to the submission stage.
1463          */
1464         if ((mpd->b_size == 0) ||
1465             ((mpd->b_state  & (1 << BH_Mapped)) &&
1466              !(mpd->b_state & (1 << BH_Delay)) &&
1467              !(mpd->b_state & (1 << BH_Unwritten))))
1468                 goto submit_io;
1469
1470         handle = ext4_journal_current_handle();
1471         BUG_ON(!handle);
1472
1473         /*
1474          * Call ext4_map_blocks() to allocate any delayed allocation
1475          * blocks, or to convert an uninitialized extent to be
1476          * initialized (in the case where we have written into
1477          * one or more preallocated blocks).
1478          *
1479          * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE to
1480          * indicate that we are on the delayed allocation path.  This
1481          * affects functions in many different parts of the allocation
1482          * call path.  This flag exists primarily because we don't
1483          * want to change *many* call functions, so ext4_map_blocks()
1484          * will set the EXT4_STATE_DELALLOC_RESERVED flag once the
1485          * inode's allocation semaphore is taken.
1486          *
1487          * If the blocks in question were delalloc blocks, set
1488          * EXT4_GET_BLOCKS_DELALLOC_RESERVE so the delalloc accounting
1489          * variables are updated after the blocks have been allocated.
1490          */
1491         map.m_lblk = next;
1492         map.m_len = max_blocks;
1493         get_blocks_flags = EXT4_GET_BLOCKS_CREATE;
1494         if (ext4_should_dioread_nolock(mpd->inode))
1495                 get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
1496         if (mpd->b_state & (1 << BH_Delay))
1497                 get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
1498
1499         blks = ext4_map_blocks(handle, mpd->inode, &map, get_blocks_flags);
1500         if (blks < 0) {
1501                 struct super_block *sb = mpd->inode->i_sb;
1502
1503                 err = blks;
1504                 /*
1505                  * If get block returns EAGAIN or ENOSPC and there
1506                  * appear to be free blocks, we will just let
1507                  * mpage_da_submit_io() unlock all of the pages.
1508                  */
1509                 if (err == -EAGAIN)
1510                         goto submit_io;
1511
1512                 if (err == -ENOSPC && ext4_count_free_clusters(sb)) {
1513                         mpd->retval = err;
1514                         goto submit_io;
1515                 }
1516
1517                 /*
1518                  * get block failure will cause us to loop in
1519                  * writepages, because a_ops->writepage won't be able
1520                  * to make progress. The page will be redirtied by
1521                  * writepage and writepages will again try to write
1522                  * the same page.
1523                  */
1524                 if (!(EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)) {
1525                         ext4_msg(sb, KERN_CRIT,
1526                                  "delayed block allocation failed for inode %lu "
1527                                  "at logical offset %llu with max blocks %zd "
1528                                  "with error %d", mpd->inode->i_ino,
1529                                  (unsigned long long) next,
1530                                  mpd->b_size >> mpd->inode->i_blkbits, err);
1531                         ext4_msg(sb, KERN_CRIT,
1532                                 "This should not happen!! Data will be lost\n");
1533                         if (err == -ENOSPC)
1534                                 ext4_print_free_blocks(mpd->inode);
1535                 }
1536                 /* invalidate all the pages */
1537                 ext4_da_block_invalidatepages(mpd);
1538
1539                 /* Mark this page range as having been completed */
1540                 mpd->io_done = 1;
1541                 return;
1542         }
1543         BUG_ON(blks == 0);
1544
1545         mapp = &map;
1546         if (map.m_flags & EXT4_MAP_NEW) {
1547                 struct block_device *bdev = mpd->inode->i_sb->s_bdev;
1548                 int i;
1549
1550                 for (i = 0; i < map.m_len; i++)
1551                         unmap_underlying_metadata(bdev, map.m_pblk + i);
1552
1553                 if (ext4_should_order_data(mpd->inode)) {
1554                         err = ext4_jbd2_file_inode(handle, mpd->inode);
1555                         if (err) {
1556                                 /* Only if the journal is aborted */
1557                                 mpd->retval = err;
1558                                 goto submit_io;
1559                         }
1560                 }
1561         }
1562
1563         /*
1564          * Update on-disk size along with block allocation.
1565          */
1566         disksize = ((loff_t) next + blks) << mpd->inode->i_blkbits;
1567         if (disksize > i_size_read(mpd->inode))
1568                 disksize = i_size_read(mpd->inode);
1569         if (disksize > EXT4_I(mpd->inode)->i_disksize) {
1570                 ext4_update_i_disksize(mpd->inode, disksize);
1571                 err = ext4_mark_inode_dirty(handle, mpd->inode);
1572                 if (err)
1573                         ext4_error(mpd->inode->i_sb,
1574                                    "Failed to mark inode %lu dirty",
1575                                    mpd->inode->i_ino);
1576         }
1577
1578 submit_io:
1579         mpage_da_submit_io(mpd, mapp);
1580         mpd->io_done = 1;
1581 }
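/*
 * Illustrative sketch (hypothetical, not part of this file): the test
 * that gates the submit_io shortcut above is plain bit arithmetic on
 * the accumulated b_state.  A minimal userspace rendering, with mock
 * bit numbers standing in for the kernel's enum bh_state_bits:
 */
#if 0
#include <assert.h>

enum { BH_Mapped = 5, BH_Delay = 6, BH_Unwritten = 7 };	/* mock values */

/* Return 1 when allocation can be skipped and the extent can go
 * straight to submission: nothing accumulated, or fully mapped. */
static int can_skip_allocation(unsigned long b_size, unsigned long b_state)
{
	return (b_size == 0) ||
	       ((b_state & (1 << BH_Mapped)) &&
		!(b_state & (1 << BH_Delay)) &&
		!(b_state & (1 << BH_Unwritten)));
}

int main(void)
{
	assert(can_skip_allocation(0, 0));			/* empty extent */
	assert(can_skip_allocation(4096, 1 << BH_Mapped));	/* mapped only */
	assert(!can_skip_allocation(4096, (1 << BH_Mapped) |
					  (1 << BH_Delay)));	/* delalloc */
	return 0;
}
#endif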
1582
1583 #define BH_FLAGS ((1 << BH_Uptodate) | (1 << BH_Mapped) | \
1584                 (1 << BH_Delay) | (1 << BH_Unwritten))
1585
1586 /*
1587  * mpage_add_bh_to_extent - try to add one more block to extent of blocks
1588  *
1589  * @mpd->lbh - extent of blocks
1590  * @logical - logical number of the block in the file
1591  * @bh - bh of the block (used to access block's state)
1592  *
1593  * This function is used to collect contiguous blocks in the same state
1594  */
1595 static void mpage_add_bh_to_extent(struct mpage_da_data *mpd,
1596                                    sector_t logical, size_t b_size,
1597                                    unsigned long b_state)
1598 {
1599         sector_t next;
1600         int nrblocks = mpd->b_size >> mpd->inode->i_blkbits;
1601
1602         /*
1603          * XXX Don't go larger than mballoc is willing to allocate.
1604          * This is a stopgap solution.  We eventually need to fold
1605          * mpage_da_submit_io() into this function and then call
1606          * ext4_map_blocks() multiple times in a loop
1607          */
1608         if (nrblocks >= 8*1024*1024/mpd->inode->i_sb->s_blocksize)
1609                 goto flush_it;
1610
1611         /* check if the reserved journal credits might overflow */
1612         if (!(ext4_test_inode_flag(mpd->inode, EXT4_INODE_EXTENTS))) {
1613                 if (nrblocks >= EXT4_MAX_TRANS_DATA) {
1614                         /*
1615                          * With non-extent format we are limited by the journal
1616                          * credits available.  The total credits needed to insert
1617                          * nrblocks contiguous blocks depend on
1618                          * nrblocks.  So limit nrblocks.
1619                          */
1620                         goto flush_it;
1621                 } else if ((nrblocks + (b_size >> mpd->inode->i_blkbits)) >
1622                                 EXT4_MAX_TRANS_DATA) {
1623                         /*
1624                          * Adding the new buffer_head would make it cross the
1625                          * allowed limit for which we have journal credit
1626                          * reserved. So limit the new bh->b_size
1627                          */
1628                         b_size = (EXT4_MAX_TRANS_DATA - nrblocks) <<
1629                                                 mpd->inode->i_blkbits;
1630                         /* we will do mpage_da_submit_io in the next loop */
1631                 }
1632         }
1633         /*
1634          * First block in the extent
1635          */
1636         if (mpd->b_size == 0) {
1637                 mpd->b_blocknr = logical;
1638                 mpd->b_size = b_size;
1639                 mpd->b_state = b_state & BH_FLAGS;
1640                 return;
1641         }
1642
1643         next = mpd->b_blocknr + nrblocks;
1644         /*
1645          * Can we merge the block to our big extent?
1646          */
1647         if (logical == next && (b_state & BH_FLAGS) == mpd->b_state) {
1648                 mpd->b_size += b_size;
1649                 return;
1650         }
1651
1652 flush_it:
1653         /*
1654          * We couldn't merge the block into our extent, so we
1655          * need to flush the current extent and start a new one
1656          */
1657         mpage_da_map_and_submit(mpd);
1658         return;
1659 }
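/*
 * Illustrative sketch (hypothetical, not part of this file): stripped of
 * the journal-credit limits, mpage_add_bh_to_extent() reduces to a tiny
 * accumulator: start an extent on the first block, merge a later block
 * only if it is logically contiguous and in the same state, otherwise
 * flush.  A userspace rendering with the mpd fields cut down to three
 * scalars:
 */
#if 0
#include <stdio.h>

struct extent { unsigned long start, len, state; };

/* Returns 0 on start/merge, 1 when the caller must flush first
 * (mirrors the flush_it path above). */
static int add_block(struct extent *e, unsigned long logical,
		     unsigned long state)
{
	if (e->len == 0) {			/* first block of the extent */
		e->start = logical;
		e->len = 1;
		e->state = state;
		return 0;
	}
	if (logical == e->start + e->len && state == e->state) {
		e->len++;			/* contiguous, same state */
		return 0;
	}
	return 1;				/* cannot merge: flush */
}

int main(void)
{
	struct extent e = { 0, 0, 0 };

	add_block(&e, 100, 0x1);
	add_block(&e, 101, 0x1);
	printf("extent: start=%lu len=%lu\n", e.start, e.len);	/* 100, 2 */
	printf("needs flush: %d\n", add_block(&e, 200, 0x1));	/* 1 */
	return 0;
}
#endif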
1660
1661 static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1662 {
1663         return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
1664 }
1665
1666 /*
1667  * This function grabs code from the very beginning of
1668  * ext4_map_blocks, but assumes that the caller is in the delayed
1669  * write path. It looks up the requested blocks and sets the
1670  * buffer delay bit under the protection of i_data_sem.
1671  */
1672 static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1673                               struct ext4_map_blocks *map,
1674                               struct buffer_head *bh)
1675 {
1676         int retval;
1677         sector_t invalid_block = ~((sector_t) 0xffff);
1678
1679         if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1680                 invalid_block = ~0;
1681
1682         map->m_flags = 0;
1683         ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1684                   "logical block %lu\n", inode->i_ino, map->m_len,
1685                   (unsigned long) map->m_lblk);
1686         /*
1687          * Try to see if we can get the block without requesting a new
1688          * file system block.
1689          */
1690         down_read((&EXT4_I(inode)->i_data_sem));
1691         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1692                 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1693         else
1694                 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1695
1696         if (retval == 0) {
1697                 /*
1698                  * XXX: __block_prepare_write() unmaps passed block,
1699                  * is it OK?
1700                  */
1701                 /* If the block was allocated from a previously allocated cluster,
1702                  * then we don't need to reserve it again. */
1703                 if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1704                         retval = ext4_da_reserve_space(inode, iblock);
1705                         if (retval)
1706                                 /* not enough space to reserve */
1707                                 goto out_unlock;
1708                 }
1709
1710                 /* Clear the EXT4_MAP_FROM_CLUSTER flag since its purpose is served
1711                  * and it should not appear in bh->b_state.
1712                  */
1713                 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
1714
1715                 map_bh(bh, inode->i_sb, invalid_block);
1716                 set_buffer_new(bh);
1717                 set_buffer_delay(bh);
1718         }
1719
1720 out_unlock:
1721         up_read((&EXT4_I(inode)->i_data_sem));
1722
1723         return retval;
1724 }
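/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * invalid_block sentinel above is ~0xffff, so a delayed buffer whose
 * fake b_blocknr leaks into an I/O path is easy to spot; only if the
 * filesystem could actually contain such a block number do we fall
 * back to ~0.  With a 64-bit sector_t:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t invalid_block = ~((uint64_t) 0xffff);
	uint64_t blocks_count = 1ULL << 32;	/* assumed fs size in blocks */

	if (invalid_block < blocks_count)
		invalid_block = ~0ULL;

	/* prints 0xffffffffffff0000; the fallback only matters when
	 * sector_t is narrower or the fs is implausibly large */
	printf("sentinel: 0x%llx\n", (unsigned long long) invalid_block);
	return 0;
}
#endif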
1725
1726 /*
1727  * This is a special get_block_t callback which is used by
1728  * ext4_da_write_begin().  It will either return a mapped block or
1729  * reserve space for a single block.
1730  *
1731  * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1732  * We also have b_blocknr = -1 and b_bdev initialized properly
1733  *
1734  * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1735  * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1736  * initialized properly.
1737  */
1738 static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1739                                   struct buffer_head *bh, int create)
1740 {
1741         struct ext4_map_blocks map;
1742         int ret = 0;
1743
1744         BUG_ON(create == 0);
1745         BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1746
1747         map.m_lblk = iblock;
1748         map.m_len = 1;
1749
1750         /*
1751          * First, we need to know whether the block is already allocated;
1752          * preallocated blocks are unmapped but should be treated
1753          * the same as allocated blocks.
1754          */
1755         ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1756         if (ret <= 0)
1757                 return ret;
1758
1759         map_bh(bh, inode->i_sb, map.m_pblk);
1760         bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1761
1762         if (buffer_unwritten(bh)) {
1763                 /* A delayed write to unwritten bh should be marked
1764                  * new and mapped.  Mapped ensures that we don't do
1765                  * get_block multiple times when we write to the same
1766                  * offset and new ensures that we do proper zero out
1767                  * for partial write.
1768                  */
1769                 set_buffer_new(bh);
1770                 set_buffer_mapped(bh);
1771         }
1772         return 0;
1773 }
1774
1775 /*
1776  * This function is used as a standard get_block_t callback function
1777  * when there is no desire to allocate any blocks.  It is used as a
1778  * callback function for block_write_begin() and block_write_full_page().
1779  * These functions should only try to map a single block at a time.
1780  *
1781  * Since this function doesn't do block allocations even if the caller
1782  * requests it by passing in create=1, it is critically important that
1783  * any caller checks to make sure that any buffer heads returned
1784  * by this function are either all already mapped or marked for
1785  * delayed allocation before calling block_write_full_page().  Otherwise,
1786  * b_blocknr could be left uninitialized, and the page write functions will
1787  * be taken by surprise.
1788  */
1789 static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
1790                                    struct buffer_head *bh_result, int create)
1791 {
1792         BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1793         return _ext4_get_block(inode, iblock, bh_result, 0);
1794 }
1795
1796 static int bget_one(handle_t *handle, struct buffer_head *bh)
1797 {
1798         get_bh(bh);
1799         return 0;
1800 }
1801
1802 static int bput_one(handle_t *handle, struct buffer_head *bh)
1803 {
1804         put_bh(bh);
1805         return 0;
1806 }
1807
1808 static int __ext4_journalled_writepage(struct page *page,
1809                                        unsigned int len)
1810 {
1811         struct address_space *mapping = page->mapping;
1812         struct inode *inode = mapping->host;
1813         struct buffer_head *page_bufs;
1814         handle_t *handle = NULL;
1815         int ret = 0;
1816         int err;
1817
1818         ClearPageChecked(page);
1819         page_bufs = page_buffers(page);
1820         BUG_ON(!page_bufs);
1821         walk_page_buffers(handle, page_bufs, 0, len, NULL, bget_one);
1822         /* As soon as we unlock the page, it can go away, but we have
1823          * references to buffers so we are safe */
1824         unlock_page(page);
1825
1826         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
1827         if (IS_ERR(handle)) {
1828                 ret = PTR_ERR(handle);
1829                 goto out;
1830         }
1831
1832         BUG_ON(!ext4_handle_valid(handle));
1833
1834         ret = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1835                                 do_journal_get_write_access);
1836
1837         err = walk_page_buffers(handle, page_bufs, 0, len, NULL,
1838                                 write_end_fn);
1839         if (ret == 0)
1840                 ret = err;
1841         EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1842         err = ext4_journal_stop(handle);
1843         if (!ret)
1844                 ret = err;
1845
1846         walk_page_buffers(handle, page_bufs, 0, len, NULL, bput_one);
1847         ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1848 out:
1849         return ret;
1850 }
1851
1852 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode);
1853 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate);
1854
1855 /*
1856  * Note that we don't need to start a transaction unless we're journaling data
1857  * because we should have holes filled from ext4_page_mkwrite(). We don't even
1858  * need to add the inode to the transaction's list in ordered mode because if
1859  * we are writing back data added by write(), the inode is already there and if
1860  * we are writing back data modified via mmap(), no one guarantees in which
1861  * transaction the data will hit the disk. In case we are journaling data, we
1862  * cannot start transaction directly because transaction start ranks above page
1863  * lock so we have to do some magic.
1864  *
1865  * This function can get called via...
1866  *   - ext4_da_writepages after taking page lock (have journal handle)
1867  *   - journal_submit_inode_data_buffers (no journal handle)
1868  *   - shrink_page_list via pdflush (no journal handle)
1869  *   - grab_page_cache when doing write_begin (have journal handle)
1870  *
1871  * We don't do any block allocation in this function. If we have page with
1872  * multiple blocks we need to write those buffer_heads that are mapped. This
1873  * is important for mmap-based writes. So if, with blocksize 1K, we do
1874  * truncate(f, 1024);
1875  * a = mmap(f, 0, 4096);
1876  * a[0] = 'a';
1877  * truncate(f, 4096);
1878  * then the first buffer_head in the page is mapped via the page_mkwrite
1879  * callback, but the other buffer_heads are unmapped yet dirty (dirtied via
1880  * do_wp_page). So writepage should write the first block. If we modify
1881  * the mmap area beyond 1024 we will again get a page_fault and the
1882  * page_mkwrite callback will do the block allocation and mark the
1883  * buffer_heads mapped.
1884  *
1885  * We redirty the page if we have any buffer_heads that are either delayed or
1886  * unwritten in the page.
1887  *
1888  * We can get recursively called as shown below.
1889  *
1890  *      ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1891  *              ext4_writepage()
1892  *
1893  * But since we don't do any block allocation we should not deadlock.
1894  * The page also has the dirty flag cleared, so we don't get a recursive page_lock.
1895  */
1896 static int ext4_writepage(struct page *page,
1897                           struct writeback_control *wbc)
1898 {
1899         int ret = 0, commit_write = 0;
1900         loff_t size;
1901         unsigned int len;
1902         struct buffer_head *page_bufs = NULL;
1903         struct inode *inode = page->mapping->host;
1904
1905         trace_ext4_writepage(page);
1906         size = i_size_read(inode);
1907         if (page->index == size >> PAGE_CACHE_SHIFT)
1908                 len = size & ~PAGE_CACHE_MASK;
1909         else
1910                 len = PAGE_CACHE_SIZE;
1911
1912         /*
1913          * If the page does not have buffers (for whatever reason),
1914          * try to create them using __block_write_begin.  If this
1915          * fails, redirty the page and move on.
1916          */
1917         if (!page_has_buffers(page)) {
1918                 if (__block_write_begin(page, 0, len,
1919                                         noalloc_get_block_write)) {
1920                 redirty_page:
1921                         redirty_page_for_writepage(wbc, page);
1922                         unlock_page(page);
1923                         return 0;
1924                 }
1925                 commit_write = 1;
1926         }
1927         page_bufs = page_buffers(page);
1928         if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
1929                               ext4_bh_delay_or_unwritten)) {
1930                 /*
1931                  * We don't want to do block allocation, so redirty
1932                  * the page and return.  We may reach here when we do
1933                  * a journal commit via journal_submit_inode_data_buffers.
1934                  * We can also reach here via shrink_page_list
1935                  */
1936                 goto redirty_page;
1937         }
1938         if (commit_write)
1939                 /* now mark the buffer_heads as dirty and uptodate */
1940                 block_commit_write(page, 0, len);
1941
1942         if (PageChecked(page) && ext4_should_journal_data(inode))
1943                 /*
1944                  * It's mmapped pagecache.  Add buffers and journal it.  There
1945                  * doesn't seem much point in redirtying the page here.
1946                  */
1947                 return __ext4_journalled_writepage(page, len);
1948
1949         if (buffer_uninit(page_bufs)) {
1950                 ext4_set_bh_endio(page_bufs, inode);
1951                 ret = block_write_full_page_endio(page, noalloc_get_block_write,
1952                                             wbc, ext4_end_io_buffer_write);
1953         } else
1954                 ret = block_write_full_page(page, noalloc_get_block_write,
1955                                             wbc);
1956
1957         return ret;
1958 }
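/*
 * Illustrative sketch (hypothetical, not part of this file): the len
 * computation at the top of ext4_writepage() writes a full page except
 * for the page that contains EOF.  Userspace arithmetic, assuming 4K
 * pages (PAGE_CACHE_SHIFT == 12):
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))

static unsigned long write_len(uint64_t i_size, uint64_t page_index)
{
	if (page_index == i_size >> PAGE_SHIFT)
		return i_size & ~PAGE_MASK;	/* partial last page */
	return PAGE_SIZE;			/* full interior page */
}

int main(void)
{
	/* i_size = 10000: pages 0 and 1 are full; page 2 holds the
	 * final 10000 - 8192 = 1808 bytes */
	printf("%lu\n", write_len(10000, 0));	/* 4096 */
	printf("%lu\n", write_len(10000, 2));	/* 1808 */
	return 0;
}
#endif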
1959
1960 /*
1961  * This is called via ext4_da_writepages() to
1962  * calculate the total number of credits to reserve to fit
1963  * a single extent allocation into a single transaction;
1964  * ext4_da_writepages() will loop calling this before
1965  * the block allocation.
1966  */
1967
1968 static int ext4_da_writepages_trans_blocks(struct inode *inode)
1969 {
1970         int max_blocks = EXT4_I(inode)->i_reserved_data_blocks;
1971
1972         /*
1973          * With non-extent format, the journal credits needed to
1974          * insert nrblocks contiguous blocks depend on the
1975          * number of contiguous blocks. So we will limit the
1976          * number of contiguous blocks to a sane value.
1977          */
1978         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) &&
1979             (max_blocks > EXT4_MAX_TRANS_DATA))
1980                 max_blocks = EXT4_MAX_TRANS_DATA;
1981
1982         return ext4_chunk_trans_blocks(inode, max_blocks);
1983 }
1984
1985 /*
1986  * write_cache_pages_da - walk the list of dirty pages of the given
1987  * address space and accumulate pages that need writing, and call
1988  * mpage_da_map_and_submit to map a single contiguous memory region
1989  * and then write them.
1990  */
1991 static int write_cache_pages_da(struct address_space *mapping,
1992                                 struct writeback_control *wbc,
1993                                 struct mpage_da_data *mpd,
1994                                 pgoff_t *done_index)
1995 {
1996         struct buffer_head      *bh, *head;
1997         struct inode            *inode = mapping->host;
1998         struct pagevec          pvec;
1999         unsigned int            nr_pages;
2000         sector_t                logical;
2001         pgoff_t                 index, end;
2002         long                    nr_to_write = wbc->nr_to_write;
2003         int                     i, tag, ret = 0;
2004
2005         memset(mpd, 0, sizeof(struct mpage_da_data));
2006         mpd->wbc = wbc;
2007         mpd->inode = inode;
2008         pagevec_init(&pvec, 0);
2009         index = wbc->range_start >> PAGE_CACHE_SHIFT;
2010         end = wbc->range_end >> PAGE_CACHE_SHIFT;
2011
2012         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2013                 tag = PAGECACHE_TAG_TOWRITE;
2014         else
2015                 tag = PAGECACHE_TAG_DIRTY;
2016
2017         *done_index = index;
2018         while (index <= end) {
2019                 nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2020                               min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2021                 if (nr_pages == 0)
2022                         return 0;
2023
2024                 for (i = 0; i < nr_pages; i++) {
2025                         struct page *page = pvec.pages[i];
2026
2027                         /*
2028                          * At this point, the page may be truncated or
2029                          * invalidated (changing page->mapping to NULL), or
2030                          * even swizzled back from swapper_space to tmpfs file
2031                          * mapping. However, page->index will not change
2032                          * because we have a reference on the page.
2033                          */
2034                         if (page->index > end)
2035                                 goto out;
2036
2037                         *done_index = page->index + 1;
2038
2039                         /*
2040                          * If we can't merge this page, and we have
2041                          * accumulated a contiguous region, write it out
2042                          */
2043                         if ((mpd->next_page != page->index) &&
2044                             (mpd->next_page != mpd->first_page)) {
2045                                 mpage_da_map_and_submit(mpd);
2046                                 goto ret_extent_tail;
2047                         }
2048
2049                         lock_page(page);
2050
2051                         /*
2052                          * If the page is no longer dirty, or its
2053                          * mapping no longer corresponds to the inode we
2054                          * are writing (which means it has been
2055                          * truncated or invalidated), or the page is
2056                          * already under writeback and we are not
2057                          * doing a data integrity writeback, skip the page
2058                          */
2059                         if (!PageDirty(page) ||
2060                             (PageWriteback(page) &&
2061                              (wbc->sync_mode == WB_SYNC_NONE)) ||
2062                             unlikely(page->mapping != mapping)) {
2063                                 unlock_page(page);
2064                                 continue;
2065                         }
2066
2067                         wait_on_page_writeback(page);
2068                         BUG_ON(PageWriteback(page));
2069
2070                         if (mpd->next_page != page->index)
2071                                 mpd->first_page = page->index;
2072                         mpd->next_page = page->index + 1;
2073                         logical = (sector_t) page->index <<
2074                                 (PAGE_CACHE_SHIFT - inode->i_blkbits);
2075
2076                         if (!page_has_buffers(page)) {
2077                                 mpage_add_bh_to_extent(mpd, logical,
2078                                                        PAGE_CACHE_SIZE,
2079                                                        (1 << BH_Dirty) | (1 << BH_Uptodate));
2080                                 if (mpd->io_done)
2081                                         goto ret_extent_tail;
2082                         } else {
2083                                 /*
2084                                  * Page with regular buffer heads,
2085                                  * just add all dirty ones
2086                                  */
2087                                 head = page_buffers(page);
2088                                 bh = head;
2089                                 do {
2090                                         BUG_ON(buffer_locked(bh));
2091                                         /*
2092                                          * We need to try to allocate
2093                                          * unmapped blocks in the same page.
2094                                          * Otherwise we won't make progress
2095                                          * with the page in ext4_writepage
2096                                          */
2097                                         if (ext4_bh_delay_or_unwritten(NULL, bh)) {
2098                                                 mpage_add_bh_to_extent(mpd, logical,
2099                                                                        bh->b_size,
2100                                                                        bh->b_state);
2101                                                 if (mpd->io_done)
2102                                                         goto ret_extent_tail;
2103                                         } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
2104                                                 /*
2105                                                  * mapped dirty buffer. We need
2106                                                  * to update the b_state
2107                                                  * because we look at b_state
2108                                                  * in mpage_da_map_and_submit().  We
2109                                                  * don't update b_size because
2110                                                  * if we find an unmapped
2111                                                  * buffer_head later we need to
2112                                                  * use the b_state flag of that
2113                                                  * buffer_head.
2114                                                  */
2115                                                 if (mpd->b_size == 0)
2116                                                         mpd->b_state = bh->b_state & BH_FLAGS;
2117                                         }
2118                                         logical++;
2119                                 } while ((bh = bh->b_this_page) != head);
2120                         }
2121
2122                         if (nr_to_write > 0) {
2123                                 nr_to_write--;
2124                                 if (nr_to_write == 0 &&
2125                                     wbc->sync_mode == WB_SYNC_NONE)
2126                                         /*
2127                                          * We stop writing back only if we are
2128                                          * not doing integrity sync. In case of
2129                                          * integrity sync we have to keep going
2130                                          * because someone may be concurrently
2131                                          * dirtying pages, and we might have
2132                                          * synced a lot of newly appeared dirty
2133                                          * pages, but have not synced all of the
2134                                          * old dirty pages.
2135                                          */
2136                                         goto out;
2137                         }
2138                 }
2139                 pagevec_release(&pvec);
2140                 cond_resched();
2141         }
2142         return 0;
2143 ret_extent_tail:
2144         ret = MPAGE_DA_EXTENT_TAIL;
2145 out:
2146         pagevec_release(&pvec);
2147         cond_resched();
2148         return ret;
2149 }
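/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * "logical" computation in write_cache_pages_da() converts a page index
 * to the index of the first file block in that page with a single
 * shift.  Assuming 4K pages and 1K blocks (i_blkbits == 10), each page
 * covers four blocks:
 */
#if 0
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned page_shift = 12;		/* assumed 4K pages */
	unsigned i_blkbits = 10;		/* assumed 1K blocks */
	uint64_t page_index = 3;

	uint64_t logical = page_index << (page_shift - i_blkbits);
	printf("page %llu starts at logical block %llu\n",
	       (unsigned long long) page_index,
	       (unsigned long long) logical);	/* block 12 */
	return 0;
}
#endif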
2150
2151
2152 static int ext4_da_writepages(struct address_space *mapping,
2153                               struct writeback_control *wbc)
2154 {
2155         pgoff_t index;
2156         int range_whole = 0;
2157         handle_t *handle = NULL;
2158         struct mpage_da_data mpd;
2159         struct inode *inode = mapping->host;
2160         int pages_written = 0;
2161         unsigned int max_pages;
2162         int range_cyclic, cycled = 1, io_done = 0;
2163         int needed_blocks, ret = 0;
2164         long desired_nr_to_write, nr_to_writebump = 0;
2165         loff_t range_start = wbc->range_start;
2166         struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2167         pgoff_t done_index = 0;
2168         pgoff_t end;
2169         struct blk_plug plug;
2170
2171         trace_ext4_da_writepages(inode, wbc);
2172
2173         /*
2174          * No pages to write? This is mainly a kludge to avoid starting
2175          * a transaction for special inodes like the journal inode on last iput()
2176          * because that could violate lock ordering on umount
2177          */
2178         if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2179                 return 0;
2180
2181         /*
2182          * If the filesystem has aborted, it is read-only, so return
2183          * right away instead of dumping stack traces later on that
2184          * will obscure the real source of the problem.  We test
2185          * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2186          * the latter could be true if the filesystem is mounted
2187          * read-only, and in that case, ext4_da_writepages should
2188          * *never* be called, so if that ever happens, we would want
2189          * the stack trace.
2190          */
2191         if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED))
2192                 return -EROFS;
2193
2194         if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2195                 range_whole = 1;
2196
2197         range_cyclic = wbc->range_cyclic;
2198         if (wbc->range_cyclic) {
2199                 index = mapping->writeback_index;
2200                 if (index)
2201                         cycled = 0;
2202                 wbc->range_start = index << PAGE_CACHE_SHIFT;
2203                 wbc->range_end  = LLONG_MAX;
2204                 wbc->range_cyclic = 0;
2205                 end = -1;
2206         } else {
2207                 index = wbc->range_start >> PAGE_CACHE_SHIFT;
2208                 end = wbc->range_end >> PAGE_CACHE_SHIFT;
2209         }
2210
2211         /*
2212          * This works around two forms of stupidity.  The first is in
2213          * the writeback code, which caps the maximum number of pages
2214          * written to be 1024 pages.  This is wrong on multiple
2215          * levels; different architectures have a different page size,
2216          * which changes the maximum amount of data which gets
2217          * written.  Secondly, 4 megabytes is way too small.  XFS
2218          * forces this value to be 16 megabytes by multiplying
2219          * nr_to_write parameter by four, and then relies on its
2220          * allocator to allocate larger extents to make them
2221          * contiguous.  Unfortunately this brings us to the second
2222          * stupidity, which is that ext4's mballoc code only allocates
2223          * at most 2048 blocks.  So we force contiguous writes up to
2224          * the number of dirty blocks in the inode, or
2225          * sbi->s_max_writeback_mb_bump, whichever is smaller.
2226          */
2227         max_pages = sbi->s_max_writeback_mb_bump << (20 - PAGE_CACHE_SHIFT);
2228         if (!range_cyclic && range_whole) {
2229                 if (wbc->nr_to_write == LONG_MAX)
2230                         desired_nr_to_write = wbc->nr_to_write;
2231                 else
2232                         desired_nr_to_write = wbc->nr_to_write * 8;
2233         } else
2234                 desired_nr_to_write = ext4_num_dirty_pages(inode, index,
2235                                                            max_pages);
2236         if (desired_nr_to_write > max_pages)
2237                 desired_nr_to_write = max_pages;
2238
2239         if (wbc->nr_to_write < desired_nr_to_write) {
2240                 nr_to_writebump = desired_nr_to_write - wbc->nr_to_write;
2241                 wbc->nr_to_write = desired_nr_to_write;
2242         }
2243
2244 retry:
2245         if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2246                 tag_pages_for_writeback(mapping, index, end);
2247
2248         blk_start_plug(&plug);
2249         while (!ret && wbc->nr_to_write > 0) {
2250
2251                 /*
2252                  * We insert one extent at a time, so we need the
2253                  * credits for a single extent allocation.
2254                  * Journalled mode is currently not supported
2255                  * by delalloc.
2256                  */
2257                 BUG_ON(ext4_should_journal_data(inode));
2258                 needed_blocks = ext4_da_writepages_trans_blocks(inode);
2259
2260                 /* start a new transaction*/
2261                 handle = ext4_journal_start(inode, needed_blocks);
2262                 if (IS_ERR(handle)) {
2263                         ret = PTR_ERR(handle);
2264                         ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2265                                "%ld pages, ino %lu; err %d", __func__,
2266                                 wbc->nr_to_write, inode->i_ino, ret);
2267                         goto out_writepages;
2268                 }
2269
2270                 /*
2271                  * Now call write_cache_pages_da() to find the next
2272                  * contiguous region of logical blocks that need
2273                  * blocks to be allocated by ext4 and submit them.
2274                  */
2275                 ret = write_cache_pages_da(mapping, wbc, &mpd, &done_index);
2276                 /*
2277                  * If we have a contiguous extent of pages and we
2278                  * haven't done the I/O yet, map the blocks and submit
2279                  * them for I/O.
2280                  */
2281                 if (!mpd.io_done && mpd.next_page != mpd.first_page) {
2282                         mpage_da_map_and_submit(&mpd);
2283                         ret = MPAGE_DA_EXTENT_TAIL;
2284                 }
2285                 trace_ext4_da_write_pages(inode, &mpd);
2286                 wbc->nr_to_write -= mpd.pages_written;
2287
2288                 ext4_journal_stop(handle);
2289
2290                 if ((mpd.retval == -ENOSPC) && sbi->s_journal) {
2291                         /* commit the transaction which would
2292                          * free blocks released in the transaction
2293                          * and try again
2294                          */
2295                         jbd2_journal_force_commit_nested(sbi->s_journal);
2296                         ret = 0;
2297                 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
2298                         /*
2299                          * Got one extent; now try with the rest of the pages.
2300                          * If mpd.retval is set to -EIO, the journal is aborted.
2301                          * So we don't need to write any more.
2302                          */
2303                         pages_written += mpd.pages_written;
2304                         ret = mpd.retval;
2305                         io_done = 1;
2306                 } else if (wbc->nr_to_write)
2307                         /*
2308                          * There is no more writeout needed,
2309                          * or we requested a non-blocking writeout
2310                          * and found the device congested.
2311                          */
2312                         break;
2313         }
2314         blk_finish_plug(&plug);
2315         if (!io_done && !cycled) {
2316                 cycled = 1;
2317                 index = 0;
2318                 wbc->range_start = index << PAGE_CACHE_SHIFT;
2319                 wbc->range_end  = mapping->writeback_index - 1;
2320                 goto retry;
2321         }
2322
2323         /* Update index */
2324         wbc->range_cyclic = range_cyclic;
2325         if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
2326                 /*
2327                  * set the writeback_index so that range_cyclic
2328                  * mode will write it back later
2329                  */
2330                 mapping->writeback_index = done_index;
2331
2332 out_writepages:
2333         wbc->nr_to_write -= nr_to_writebump;
2334         wbc->range_start = range_start;
2335         trace_ext4_da_writepages_result(inode, wbc, ret, pages_written);
2336         return ret;
2337 }
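/*
 * Illustrative sketch (hypothetical, not part of this file): the
 * nr_to_write bump in ext4_da_writepages() converts a megabyte cap
 * into pages with a shift (1 MB == 2^20 bytes).  Assuming 4K pages and
 * an assumed 128 MB value for s_max_writeback_mb_bump:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned page_shift = 12;		/* 4K pages */
	unsigned long mb_bump = 128;		/* assumed tunable value */

	unsigned long max_pages = mb_bump << (20 - page_shift);

	long nr_to_write = 1024;		/* typical writeback chunk */
	long desired = nr_to_write * 8;		/* the range_whole case */
	if (desired > (long) max_pages)
		desired = max_pages;

	printf("max_pages=%lu desired=%ld bump=%ld\n",
	       max_pages, desired, desired - nr_to_write);
	/* max_pages=32768 desired=8192 bump=7168 */
	return 0;
}
#endif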
2338
2339 #define FALL_BACK_TO_NONDELALLOC 1
2340 static int ext4_nonda_switch(struct super_block *sb)
2341 {
2342         s64 free_blocks, dirty_blocks;
2343         struct ext4_sb_info *sbi = EXT4_SB(sb);
2344
2345         /*
2346          * Switch to non-delalloc mode if we are running low
2347          * on free blocks. The free block accounting via percpu
2348          * counters can get slightly wrong with percpu_counter_batch getting
2349          * accumulated on each CPU without updating the global counters.
2350          * Delalloc needs accurate free block accounting, so switch
2351          * to non-delalloc when we are near the error range.
2352          */
2353         free_blocks  = EXT4_C2B(sbi,
2354                 percpu_counter_read_positive(&sbi->s_freeclusters_counter));
2355         dirty_blocks = percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2356         if (2 * free_blocks < 3 * dirty_blocks ||
2357                 free_blocks < (dirty_blocks + EXT4_FREECLUSTERS_WATERMARK)) {
2358                 /*
2359                  * free block count is less than 150% of dirty blocks,
2360                  * or free blocks are less than the watermark
2361                  */
2362                 return 1;
2363         }
2364         /*
2365          * Even if we don't switch but are nearing capacity,
2366          * start pushing delalloc when 1/2 of free blocks are dirty.
2367          */
2368         if (free_blocks < 2 * dirty_blocks)
2369                 writeback_inodes_sb_if_idle(sb);
2370
2371         return 0;
2372 }
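/*
 * Illustrative sketch (hypothetical, not part of this file): the switch
 * condition in ext4_nonda_switch() keeps everything in integers;
 * "2 * free < 3 * dirty" is exactly "free < 150% of dirty".  Sample
 * numbers, with an assumed watermark of 1024 blocks:
 */
#if 0
#include <stdio.h>

static int should_switch_nonda(long long free, long long dirty,
			       long long watermark)
{
	return 2 * free < 3 * dirty || free < dirty + watermark;
}

int main(void)
{
	/* free = 1400, dirty = 1000: 2800 < 3000, so switch */
	printf("%d\n", should_switch_nonda(1400, 1000, 1024));	/* 1 */
	/* free = 10000, dirty = 1000: plenty of headroom */
	printf("%d\n", should_switch_nonda(10000, 1000, 1024));	/* 0 */
	return 0;
}
#endif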
2373
2374 static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2375                                loff_t pos, unsigned len, unsigned flags,
2376                                struct page **pagep, void **fsdata)
2377 {
2378         int ret, retries = 0;
2379         struct page *page;
2380         pgoff_t index;
2381         struct inode *inode = mapping->host;
2382         handle_t *handle;
2383         loff_t page_len;
2384
2385         index = pos >> PAGE_CACHE_SHIFT;
2386
2387         if (ext4_nonda_switch(inode->i_sb)) {
2388                 *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2389                 return ext4_write_begin(file, mapping, pos,
2390                                         len, flags, pagep, fsdata);
2391         }
2392         *fsdata = (void *)0;
2393         trace_ext4_da_write_begin(inode, pos, len, flags);
2394 retry:
2395         /*
2396          * With delayed allocation, we don't log the i_disksize update
2397          * if there is delayed block allocation. But we still need
2398          * to journal the i_disksize update if we write to the end
2399          * of a file which has an already mapped buffer.
2400          */
2401         handle = ext4_journal_start(inode, 1);
2402         if (IS_ERR(handle)) {
2403                 ret = PTR_ERR(handle);
2404                 goto out;
2405         }
2406         /* We cannot recurse into the filesystem as the transaction is already
2407          * started */
2408         flags |= AOP_FLAG_NOFS;
2409
2410         page = grab_cache_page_write_begin(mapping, index, flags);
2411         if (!page) {
2412                 ext4_journal_stop(handle);
2413                 ret = -ENOMEM;
2414                 goto out;
2415         }
2416         *pagep = page;
2417
2418         ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2419         if (ret < 0) {
2420                 unlock_page(page);
2421                 ext4_journal_stop(handle);
2422                 page_cache_release(page);
2423                 /*
2424                  * block_write_begin may have instantiated a few blocks
2425                  * outside i_size.  Trim these off again. Don't need
2426                  * i_size_read because we hold i_mutex.
2427                  */
2428                 if (pos + len > inode->i_size)
2429                         ext4_truncate_failed_write(inode);
2430         } else {
2431                 page_len = pos & (PAGE_CACHE_SIZE - 1);
2432                 if (page_len > 0) {
2433                         ret = ext4_discard_partial_page_buffers_no_lock(handle,
2434                                 inode, page, pos - page_len, page_len,
2435                                 EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
2436                 }
2437         }
2438
2439         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
2440                 goto retry;
2441 out:
2442         return ret;
2443 }
2444
2445 /*
2446  * Check if we should update i_disksize
2447  * when writing to the end of the file but not requiring block allocation
2448  */
2449 static int ext4_da_should_update_i_disksize(struct page *page,
2450                                             unsigned long offset)
2451 {
2452         struct buffer_head *bh;
2453         struct inode *inode = page->mapping->host;
2454         unsigned int idx;
2455         int i;
2456
2457         bh = page_buffers(page);
2458         idx = offset >> inode->i_blkbits;
2459
2460         for (i = 0; i < idx; i++)
2461                 bh = bh->b_this_page;
2462
2463         if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
2464                 return 0;
2465         return 1;
2466 }
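/*
 * Illustrative sketch (hypothetical, not part of this file): finding
 * the buffer_head that covers a byte offset, as above, is a shift plus
 * a walk of the page's circular buffer list.  A userspace mock with
 * four 1K buffers per 4K page:
 */
#if 0
#include <stdio.h>

struct bh { int nr; struct bh *b_this_page; };

int main(void)
{
	struct bh bufs[4];
	unsigned i, i_blkbits = 10;		/* assumed 1K blocks */

	for (i = 0; i < 4; i++) {		/* build the circular list */
		bufs[i].nr = i;
		bufs[i].b_this_page = &bufs[(i + 1) % 4];
	}

	unsigned long offset = 2500;		/* byte offset within page */
	unsigned idx = offset >> i_blkbits;	/* 2500 >> 10 == 2 */

	struct bh *bh = &bufs[0];
	for (i = 0; i < idx; i++)
		bh = bh->b_this_page;

	printf("offset %lu falls in buffer %d\n", offset, bh->nr); /* 2 */
	return 0;
}
#endif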
2467
2468 static int ext4_da_write_end(struct file *file,
2469                              struct address_space *mapping,
2470                              loff_t pos, unsigned len, unsigned copied,
2471                              struct page *page, void *fsdata)
2472 {
2473         struct inode *inode = mapping->host;
2474         int ret = 0, ret2;
2475         handle_t *handle = ext4_journal_current_handle();
2476         loff_t new_i_size;
2477         unsigned long start, end;
2478         int write_mode = (int)(unsigned long)fsdata;
2479         loff_t page_len;
2480
2481         if (write_mode == FALL_BACK_TO_NONDELALLOC) {
2482                 if (ext4_should_order_data(inode)) {
2483                         return ext4_ordered_write_end(file, mapping, pos,
2484                                         len, copied, page, fsdata);
2485                 } else if (ext4_should_writeback_data(inode)) {
2486                         return ext4_writeback_write_end(file, mapping, pos,
2487                                         len, copied, page, fsdata);
2488                 } else {
2489                         BUG();
2490                 }
2491         }
2492
2493         trace_ext4_da_write_end(inode, pos, len, copied);
2494         start = pos & (PAGE_CACHE_SIZE - 1);
2495         end = start + copied - 1;
2496
2497         /*
2498          * generic_write_end() will run mark_inode_dirty() if i_size
2499          * changes.  So let's piggyback the i_disksize mark_inode_dirty
2500          * into that.
2501          */
2502
2503         new_i_size = pos + copied;
2504         if (new_i_size > EXT4_I(inode)->i_disksize) {
2505                 if (ext4_da_should_update_i_disksize(page, end)) {
2506                         down_write(&EXT4_I(inode)->i_data_sem);
2507                         if (new_i_size > EXT4_I(inode)->i_disksize) {
2508                                 /*
2509                                  * Updating i_disksize when extending file
2510                                  * without needing block allocation
2511                                  */
2512                                 if (ext4_should_order_data(inode))
2513                                         ret = ext4_jbd2_file_inode(handle,
2514                                                                    inode);
2515
2516                                 EXT4_I(inode)->i_disksize = new_i_size;
2517                         }
2518                         up_write(&EXT4_I(inode)->i_data_sem);
2519                         /* We need to mark the inode dirty even if
2520                          * new_i_size is less than inode->i_size
2521                          * but greater than i_disksize (hint: delalloc).
2522                          */
2523                         ext4_mark_inode_dirty(handle, inode);
2524                 }
2525         }
2526         ret2 = generic_write_end(file, mapping, pos, len, copied,
2527                                                         page, fsdata);
2528
2529         page_len = PAGE_CACHE_SIZE -
2530                         ((pos + copied - 1) & (PAGE_CACHE_SIZE - 1));
2531
2532         if (page_len > 0) {
2533                 ret = ext4_discard_partial_page_buffers_no_lock(handle,
2534                         inode, page, pos + copied - 1, page_len,
2535                         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
2536         }
2537
2538         copied = ret2;
2539         if (ret2 < 0)
2540                 ret = ret2;
2541         ret2 = ext4_journal_stop(handle);
2542         if (!ret)
2543                 ret = ret2;
2544
2545         return ret ? ret : copied;
2546 }
2547
2548 static void ext4_da_invalidatepage(struct page *page, unsigned long offset)
2549 {
2550         /*
2551          * Drop reserved blocks
2552          */
2553         BUG_ON(!PageLocked(page));
2554         if (!page_has_buffers(page))
2555                 goto out;
2556
2557         ext4_da_page_release_reservation(page, offset);
2558
2559 out:
2560         ext4_invalidatepage(page, offset);
2561
2562         return;
2563 }
2564
2565 /*
2566  * Force all delayed allocation blocks to be allocated for a given inode.
2567  */
2568 int ext4_alloc_da_blocks(struct inode *inode)
2569 {
2570         trace_ext4_alloc_da_blocks(inode);
2571
2572         if (!EXT4_I(inode)->i_reserved_data_blocks &&
2573             !EXT4_I(inode)->i_reserved_meta_blocks)
2574                 return 0;
2575
2576         /*
2577          * We do something simple for now.  The filemap_flush() will
2578          * also start triggering a write of the data blocks, which is
2579          * not strictly speaking necessary (and for users of
2580          * laptop_mode, not even desirable).  However, to do otherwise
2581          * would require replicating code paths in:
2582          *
2583          * ext4_da_writepages() ->
2584          *    write_cache_pages_da() -->
2585          *        mpage_add_bh_to_extent()
2586          *        mpage_da_map_and_submit()
2588          *
2589          * The problem is that write_cache_pages(), located in
2590          * mm/page-writeback.c, marks pages clean in preparation for
2591          * doing I/O, which is not desirable if we're not planning on
2592          * doing I/O at all.
2593          *
2594          * We could call write_cache_pages(), and then redirty all of
2595          * the pages by calling redirty_page_for_writepage() but that
2596          * would be ugly in the extreme.  So instead we would need to
2597          * replicate parts of the code in the above functions,
2598          * simplifying them because we wouldn't actually intend to
2599          * write out the pages, but rather only collect contiguous
2600          * logical block extents, call the multi-block allocator, and
2601          * then update the buffer heads with the block allocations.
2602          *
2603          * For now, though, we'll cheat by calling filemap_flush(),
2604          * which will map the blocks, and start the I/O, but not
2605          * actually wait for the I/O to complete.
2606          */
2607         return filemap_flush(inode->i_mapping);
2608 }
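
/*
 * A minimal usage sketch of the strategy described above: force the
 * delayed blocks to be allocated without waiting for the I/O to finish.
 * ex_force_da_alloc() is a hypothetical wrapper, not an ext4 interface.
 */
static int ex_force_da_alloc(struct inode *inode)
{
        /* map the delayed blocks and start (but do not wait for) writeback */
        int err = ext4_alloc_da_blocks(inode);

        /*
         * Deliberately no filemap_write_and_wait() here; the goal is
         * block allocation, not data integrity.
         */
        return err;
}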
2609
2610 /*
2611  * bmap() is special.  It gets used by applications such as lilo and by
2612  * the swapper to find the on-disk block of a specific piece of data.
2613  *
2614  * Naturally, this is dangerous if the block concerned is still in the
2615  * journal.  If somebody makes a swapfile on an ext4 data-journaling
2616  * filesystem and enables swap, then they may get a nasty shock when the
2617  * data getting swapped to that swapfile suddenly gets overwritten by
2618  * the original zeros written out previously to the journal and
2619  * awaiting writeback in the kernel's buffer cache.
2620  *
2621  * So, if we see any bmap calls here on a modified, data-journaled file,
2622  * take extra steps to flush any blocks which might be in the cache.
2623  */
2624 static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2625 {
2626         struct inode *inode = mapping->host;
2627         journal_t *journal;
2628         int err;
2629
2630         if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2631                         test_opt(inode->i_sb, DELALLOC)) {
2632                 /*
2633                  * With delalloc we want to sync the file
2634                  * so that we can make sure we allocate
2635                  * blocks for the file
2636                  */
2637                 filemap_write_and_wait(mapping);
2638         }
2639
2640         if (EXT4_JOURNAL(inode) &&
2641             ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2642                 /*
2643                  * This is a REALLY heavyweight approach, but the use of
2644                  * bmap on dirty files is expected to be extremely rare:
2645                  * only if we run lilo or swapon on a freshly made file
2646                  * do we expect this to happen.
2647                  *
2648                  * (bmap requires CAP_SYS_RAWIO so this does not
2649                  * represent an unprivileged user DOS attack --- we'd be
2650                  * in trouble if mortal users could trigger this path at
2651                  * will.)
2652                  *
2653                  * NB. EXT4_STATE_JDATA is not set on files other than
2654                  * regular files.  If somebody wants to bmap a directory
2655                  * or symlink and gets confused because the buffer
2656                  * hasn't yet been flushed to disk, they deserve
2657                  * everything they get.
2658                  */
2659
2660                 ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2661                 journal = EXT4_JOURNAL(inode);
2662                 jbd2_journal_lock_updates(journal);
2663                 err = jbd2_journal_flush(journal);
2664                 jbd2_journal_unlock_updates(journal);
2665
2666                 if (err)
2667                         return 0;
2668         }
2669
2670         return generic_block_bmap(mapping, block, ext4_get_block);
2671 }
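
/*
 * Standalone userspace sketch (not kernel code) of the bmap consumer
 * discussed above: a lilo-style block lookup via the FIBMAP ioctl,
 * which ends up in ext4_bmap().  Requires CAP_SYS_RAWIO; error
 * handling is omitted and ex_first_block() is a hypothetical name.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int ex_first_block(const char *path)
{
        int blk = 0;                    /* in: logical block 0 */
        int fd = open(path, O_RDONLY);

        ioctl(fd, FIBMAP, &blk);        /* out: physical block number */
        close(fd);
        return blk;
}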
2672
2673 static int ext4_readpage(struct file *file, struct page *page)
2674 {
2675         trace_ext4_readpage(page);
2676         return mpage_readpage(page, ext4_get_block);
2677 }
2678
2679 static int
2680 ext4_readpages(struct file *file, struct address_space *mapping,
2681                 struct list_head *pages, unsigned nr_pages)
2682 {
2683         return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2684 }
2685
2686 static void ext4_invalidatepage_free_endio(struct page *page, unsigned long offset)
2687 {
2688         struct buffer_head *head, *bh;
2689         unsigned int curr_off = 0;
2690
2691         if (!page_has_buffers(page))
2692                 return;
2693         head = bh = page_buffers(page);
2694         do {
2695                 if (offset <= curr_off && test_clear_buffer_uninit(bh)
2696                                         && bh->b_private) {
2697                         ext4_free_io_end(bh->b_private);
2698                         bh->b_private = NULL;
2699                         bh->b_end_io = NULL;
2700                 }
2701                 curr_off = curr_off + bh->b_size;
2702                 bh = bh->b_this_page;
2703         } while (bh != head);
2704 }
2705
2706 static void ext4_invalidatepage(struct page *page, unsigned long offset)
2707 {
2708         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2709
2710         trace_ext4_invalidatepage(page, offset);
2711
2712         /*
2713          * free any io_end structure allocated for buffers to be discarded
2714          */
2715         if (ext4_should_dioread_nolock(page->mapping->host))
2716                 ext4_invalidatepage_free_endio(page, offset);
2717         /*
2718          * If it's a full truncate we just forget about the pending dirtying
2719          */
2720         if (offset == 0)
2721                 ClearPageChecked(page);
2722
2723         if (journal)
2724                 jbd2_journal_invalidatepage(journal, page, offset);
2725         else
2726                 block_invalidatepage(page, offset);
2727 }
2728
2729 static int ext4_releasepage(struct page *page, gfp_t wait)
2730 {
2731         journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2732
2733         trace_ext4_releasepage(page);
2734
2735         WARN_ON(PageChecked(page));
2736         if (!page_has_buffers(page))
2737                 return 0;
2738         if (journal)
2739                 return jbd2_journal_try_to_free_buffers(journal, page, wait);
2740         else
2741                 return try_to_free_buffers(page);
2742 }
2743
2744 /*
2745  * ext4_get_block used when preparing for a DIO write or buffer write.
2746  * We allocate an uninitialized extent if blocks haven't been allocated.
2747  * The extent will be converted to initialized after the IO is complete.
2748  */
2749 static int ext4_get_block_write(struct inode *inode, sector_t iblock,
2750                    struct buffer_head *bh_result, int create)
2751 {
2752         ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
2753                    inode->i_ino, create);
2754         return _ext4_get_block(inode, iblock, bh_result,
2755                                EXT4_GET_BLOCKS_IO_CREATE_EXT);
2756 }
2757
2758 static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
2759                             ssize_t size, void *private, int ret,
2760                             bool is_async)
2761 {
2762         struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
2763         ext4_io_end_t *io_end = iocb->private;
2764         struct workqueue_struct *wq;
2765         unsigned long flags;
2766         struct ext4_inode_info *ei;
2767
2768         /* if this is not async direct IO or a DIO with a 0 byte write, just return */
2769         if (!io_end || !size)
2770                 goto out;
2771
2772         ext_debug("ext4_end_io_dio(): io_end 0x%p "
2773                   "for inode %lu, iocb 0x%p, offset %llu, size %llu\n",
2774                   iocb->private, io_end->inode->i_ino, iocb, offset,
2775                   size);
2776
2777         /* if not aio dio with unwritten extents, just free io and return */
2778         if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2779                 ext4_free_io_end(io_end);
2780                 iocb->private = NULL;
2781 out:
2782                 if (is_async)
2783                         aio_complete(iocb, ret, 0);
2784                 inode_dio_done(inode);
2785                 return;
2786         }
2787
2788         io_end->offset = offset;
2789         io_end->size = size;
2790         if (is_async) {
2791                 io_end->iocb = iocb;
2792                 io_end->result = ret;
2793         }
2794         wq = EXT4_SB(io_end->inode->i_sb)->dio_unwritten_wq;
2795
2796         /* Add the io_end to per-inode completed aio dio list */
2797         ei = EXT4_I(io_end->inode);
2798         spin_lock_irqsave(&ei->i_completed_io_lock, flags);
2799         list_add_tail(&io_end->list, &ei->i_completed_io_list);
2800         spin_unlock_irqrestore(&ei->i_completed_io_lock, flags);
2801
2802         /* queue the work to convert unwritten extents to written */
2803         queue_work(wq, &io_end->work);
2804         iocb->private = NULL;
2805
2806         /* XXX: probably should move into the real I/O completion handler */
2807         inode_dio_done(inode);
2808 }
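
/*
 * Hedged sketch of the completion side that the queue_work() call above
 * feeds: a workqueue handler dequeues the io_end and converts the
 * unwritten extents.  ex_end_io_work() is illustrative only; the real
 * handler also takes i_completed_io_lock and can requeue on failure.
 */
static void ex_end_io_work(struct work_struct *work)
{
        ext4_io_end_t *io_end = container_of(work, ext4_io_end_t, work);

        /* convert the range that the just-completed DIO filled in */
        ext4_convert_unwritten_extents(io_end->inode, io_end->offset,
                                       io_end->size);
        list_del_init(&io_end->list);   /* locking elided for brevity */
        ext4_free_io_end(io_end);
}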
2809
2810 static void ext4_end_io_buffer_write(struct buffer_head *bh, int uptodate)
2811 {
2812         ext4_io_end_t *io_end = bh->b_private;
2813         struct workqueue_struct *wq;
2814         struct inode *inode;
2815         unsigned long flags;
2816
2817         if (!test_clear_buffer_uninit(bh) || !io_end)
2818                 goto out;
2819
2820         if (!(io_end->inode->i_sb->s_flags & MS_ACTIVE)) {
2821                 printk(KERN_WARNING "sb umounted, discard end_io request for inode %lu\n",
2822                         io_end->inode->i_ino);
2823                 ext4_free_io_end(io_end);
2824                 goto out;
2825         }
2826
2827         /*
2828          * It may be over-defensive to check EXT4_IO_END_UNWRITTEN here,
2829          * but being more careful is always safe against future changes.
2830          */
2831         inode = io_end->inode;
2832         if (!(io_end->flag & EXT4_IO_END_UNWRITTEN)) {
2833                 io_end->flag |= EXT4_IO_END_UNWRITTEN;
2834                 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten);
2835         }
2836
2837         /* Add the io_end to per-inode completed io list */
2838         spin_lock_irqsave(&EXT4_I(inode)->i_completed_io_lock, flags);
2839         list_add_tail(&io_end->list, &EXT4_I(inode)->i_completed_io_list);
2840         spin_unlock_irqrestore(&EXT4_I(inode)->i_completed_io_lock, flags);
2841
2842         wq = EXT4_SB(inode->i_sb)->dio_unwritten_wq;
2843         /* queue the work to convert unwritten extents to written */
2844         queue_work(wq, &io_end->work);
2845 out:
2846         bh->b_private = NULL;
2847         bh->b_end_io = NULL;
2848         clear_buffer_uninit(bh);
2849         end_buffer_async_write(bh, uptodate);
2850 }
2851
2852 static int ext4_set_bh_endio(struct buffer_head *bh, struct inode *inode)
2853 {
2854         ext4_io_end_t *io_end;
2855         struct page *page = bh->b_page;
2856         loff_t offset = (sector_t)page->index << PAGE_CACHE_SHIFT;
2857         size_t size = bh->b_size;
2858
2859 retry:
2860         io_end = ext4_init_io_end(inode, GFP_ATOMIC);
2861         if (!io_end) {
2862                 pr_warn_ratelimited("%s: allocation failed\n", __func__);
2863                 schedule();
2864                 goto retry;
2865         }
2866         io_end->offset = offset;
2867         io_end->size = size;
2868         /*
2869          * We need to hold a reference to the page to make sure it
2870          * doesn't get evicted before ext4_end_io_work() has a chance
2871          * to convert the extent from unwritten to written.
2872          */
2873         io_end->page = page;
2874         get_page(io_end->page);
2875
2876         bh->b_private = io_end;
2877         bh->b_end_io = ext4_end_io_buffer_write;
2878         return 0;
2879 }
2880
2881 /*
2882  * For ext4 extent files, ext4 will do direct-io writes to holes,
2883  * preallocated extents, and writes that extend the file, with no need
2884  * to fall back to buffered IO.
2885  *
2886  * For holes, we fallocate those blocks and mark them as uninitialized.
2887  * If those blocks were preallocated, we make sure they are split, but
2888  * still keep the range to be written as uninitialized.
2889  *
2890  * The unwritten extents will be converted to written when DIO is completed.
2891  * For async direct IO, since the IO may still be pending when we return, we
2892  * set up an end_io callback function, which will do the conversion
2893  * when the async direct IO is completed.
2894  *
2895  * If the O_DIRECT write will extend the file then add this inode to the
2896  * orphan list, so recovery will truncate it back to the original size
2897  * if the machine crashes during the write.
2898  *
2899  */
2900 static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
2901                               const struct iovec *iov, loff_t offset,
2902                               unsigned long nr_segs)
2903 {
2904         struct file *file = iocb->ki_filp;
2905         struct inode *inode = file->f_mapping->host;
2906         ssize_t ret;
2907         size_t count = iov_length(iov, nr_segs);
2908
2909         loff_t final_size = offset + count;
2910         if (rw == WRITE && final_size <= inode->i_size) {
2911                 /*
2912                  * We can write directly to holes and to fallocated extents.
2913                  *
2914                  * Allocated blocks that fill a hole are marked as uninitialized
2915                  * to prevent a parallel buffered read from exposing stale data
2916                  * before the DIO completes the data IO.
2917                  *
2918                  * As for previously fallocated extents, ext4's get_block
2919                  * will simply mark the buffer mapped but still
2920                  * keep the extents uninitialized.
2921                  *
2922                  * For the non-AIO case, we will convert those unwritten extents
2923                  * to written after returning from blockdev_direct_IO.
2924                  *
2925                  * For async DIO, the conversion needs to be deferred until
2926                  * the IO is completed. The ext4 end_io callback function
2927                  * will be called to take care of the conversion work.
2928                  * Here, for the async case, we allocate an io_end structure to
2929                  * hook to the iocb.
2930                  */
2931                 iocb->private = NULL;
2932                 EXT4_I(inode)->cur_aio_dio = NULL;
2933                 if (!is_sync_kiocb(iocb)) {
2934                         iocb->private = ext4_init_io_end(inode, GFP_NOFS);
2935                         if (!iocb->private)
2936                                 return -ENOMEM;
2937                         /*
2938                          * We save the io structure for the current async
2939                          * direct IO, so that later ext4_map_blocks()
2940                          * can flag the io structure if there are
2941                          * unwritten extents that need to be converted
2942                          * when the IO is completed.
2943                          */
2944                         EXT4_I(inode)->cur_aio_dio = iocb->private;
2945                 }
2946
2947                 ret = __blockdev_direct_IO(rw, iocb, inode,
2948                                          inode->i_sb->s_bdev, iov,
2949                                          offset, nr_segs,
2950                                          ext4_get_block_write,
2951                                          ext4_end_io_dio,
2952                                          NULL,
2953                                          DIO_LOCKING | DIO_SKIP_HOLES);
2954                 if (iocb->private)
2955                         EXT4_I(inode)->cur_aio_dio = NULL;
2956                 /*
2957                  * The io_end structure takes a reference to the inode;
2958                  * that structure needs to be destroyed and the
2959                  * reference to the inode needs to be dropped when the IO
2960                  * is complete, even for a 0 byte write or a failure.
2961                  *
2962                  * In the successful AIO DIO case, the io_end structure will be
2963                  * destroyed and the reference to the inode will be dropped
2964                  * after the end_io callback function is called.
2965                  *
2966                  * In the 0 byte write or error case, since the
2967                  * VFS direct IO won't invoke the end_io callback function,
2968                  * we need to free the end_io structure here.
2969                  */
2970                 if (ret != -EIOCBQUEUED && ret <= 0 && iocb->private) {
2971                         ext4_free_io_end(iocb->private);
2972                         iocb->private = NULL;
2973                 } else if (ret > 0 && ext4_test_inode_state(inode,
2974                                                 EXT4_STATE_DIO_UNWRITTEN)) {
2975                         int err;
2976                         /*
2977                          * For the non-AIO case, since the IO has already
2978                          * completed, we can do the conversion right here.
2979                          */
2980                         err = ext4_convert_unwritten_extents(inode,
2981                                                              offset, ret);
2982                         if (err < 0)
2983                                 ret = err;
2984                         ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
2985                 }
2986                 return ret;
2987         }
2988
2989         /* for writes past the end of the file, we fall back to the old way */
2990         return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
2991 }
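
/*
 * Standalone userspace sketch (not kernel code) of the fast path above:
 * an aligned O_DIRECT write into a preallocated (unwritten) region,
 * which ext4_ext_direct_IO() services without falling back to buffered
 * IO.  Assumes a 4096-byte block size; error handling is omitted.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int ex_dio_write(const char *path)
{
        void *buf;
        int fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0644);

        posix_fallocate(fd, 0, 4096);      /* preallocated, still unwritten */
        posix_memalign(&buf, 4096, 4096);  /* O_DIRECT wants aligned buffers */
        memset(buf, 0xab, 4096);
        pwrite(fd, buf, 4096, 0);          /* extent converted at completion */
        free(buf);
        return close(fd);
}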
2992
2993 static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
2994                               const struct iovec *iov, loff_t offset,
2995                               unsigned long nr_segs)
2996 {
2997         struct file *file = iocb->ki_filp;
2998         struct inode *inode = file->f_mapping->host;
2999         ssize_t ret;
3000
3001         /*
3002          * If we are doing data journalling we don't support O_DIRECT
3003          */
3004         if (ext4_should_journal_data(inode))
3005                 return 0;
3006
3007         trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3008         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3009                 ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3010         else
3011                 ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3012         trace_ext4_direct_IO_exit(inode, offset,
3013                                 iov_length(iov, nr_segs), rw, ret);
3014         return ret;
3015 }
3016
3017 /*
3018  * Pages can be marked dirty completely asynchronously from ext4's journalling
3019  * activity.  By filemap_sync_pte(), try_to_unmap_one(), etc.  We cannot do
3020  * much here because ->set_page_dirty is called under VFS locks.  The page is
3021  * not necessarily locked.
3022  *
3023  * We cannot just dirty the page and leave attached buffers clean, because the
3024  * buffers' dirty state is "definitive".  We cannot just set the buffers dirty
3025  * or jbddirty because all the journalling code will explode.
3026  *
3027  * So what we do is to mark the page "pending dirty" and next time writepage
3028  * is called, propagate that into the buffers appropriately.
3029  */
3030 static int ext4_journalled_set_page_dirty(struct page *page)
3031 {
3032         SetPageChecked(page);
3033         return __set_page_dirty_nobuffers(page);
3034 }
3035
3036 static const struct address_space_operations ext4_ordered_aops = {
3037         .readpage               = ext4_readpage,
3038         .readpages              = ext4_readpages,
3039         .writepage              = ext4_writepage,
3040         .write_begin            = ext4_write_begin,
3041         .write_end              = ext4_ordered_write_end,
3042         .bmap                   = ext4_bmap,
3043         .invalidatepage         = ext4_invalidatepage,
3044         .releasepage            = ext4_releasepage,
3045         .direct_IO              = ext4_direct_IO,
3046         .migratepage            = buffer_migrate_page,
3047         .is_partially_uptodate  = block_is_partially_uptodate,
3048         .error_remove_page      = generic_error_remove_page,
3049 };
3050
3051 static const struct address_space_operations ext4_writeback_aops = {
3052         .readpage               = ext4_readpage,
3053         .readpages              = ext4_readpages,
3054         .writepage              = ext4_writepage,
3055         .write_begin            = ext4_write_begin,
3056         .write_end              = ext4_writeback_write_end,
3057         .bmap                   = ext4_bmap,
3058         .invalidatepage         = ext4_invalidatepage,
3059         .releasepage            = ext4_releasepage,
3060         .direct_IO              = ext4_direct_IO,
3061         .migratepage            = buffer_migrate_page,
3062         .is_partially_uptodate  = block_is_partially_uptodate,
3063         .error_remove_page      = generic_error_remove_page,
3064 };
3065
3066 static const struct address_space_operations ext4_journalled_aops = {
3067         .readpage               = ext4_readpage,
3068         .readpages              = ext4_readpages,
3069         .writepage              = ext4_writepage,
3070         .write_begin            = ext4_write_begin,
3071         .write_end              = ext4_journalled_write_end,
3072         .set_page_dirty         = ext4_journalled_set_page_dirty,
3073         .bmap                   = ext4_bmap,
3074         .invalidatepage         = ext4_invalidatepage,
3075         .releasepage            = ext4_releasepage,
3076         .direct_IO              = ext4_direct_IO,
3077         .is_partially_uptodate  = block_is_partially_uptodate,
3078         .error_remove_page      = generic_error_remove_page,
3079 };
3080
3081 static const struct address_space_operations ext4_da_aops = {
3082         .readpage               = ext4_readpage,
3083         .readpages              = ext4_readpages,
3084         .writepage              = ext4_writepage,
3085         .writepages             = ext4_da_writepages,
3086         .write_begin            = ext4_da_write_begin,
3087         .write_end              = ext4_da_write_end,
3088         .bmap                   = ext4_bmap,
3089         .invalidatepage         = ext4_da_invalidatepage,
3090         .releasepage            = ext4_releasepage,
3091         .direct_IO              = ext4_direct_IO,
3092         .migratepage            = buffer_migrate_page,
3093         .is_partially_uptodate  = block_is_partially_uptodate,
3094         .error_remove_page      = generic_error_remove_page,
3095 };
3096
3097 void ext4_set_aops(struct inode *inode)
3098 {
3099         if (ext4_should_order_data(inode) &&
3100                 test_opt(inode->i_sb, DELALLOC))
3101                 inode->i_mapping->a_ops = &ext4_da_aops;
3102         else if (ext4_should_order_data(inode))
3103                 inode->i_mapping->a_ops = &ext4_ordered_aops;
3104         else if (ext4_should_writeback_data(inode) &&
3105                  test_opt(inode->i_sb, DELALLOC))
3106                 inode->i_mapping->a_ops = &ext4_da_aops;
3107         else if (ext4_should_writeback_data(inode))
3108                 inode->i_mapping->a_ops = &ext4_writeback_aops;
3109         else
3110                 inode->i_mapping->a_ops = &ext4_journalled_aops;
3111 }
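
/*
 * A hedged summary of the dispatch above, keyed on the data journaling
 * mode and the delalloc mount option:
 *
 *      data=ordered,   delalloc    -> ext4_da_aops
 *      data=ordered,   nodelalloc  -> ext4_ordered_aops
 *      data=writeback, delalloc    -> ext4_da_aops
 *      data=writeback, nodelalloc  -> ext4_writeback_aops
 *      data=journal    (any)       -> ext4_journalled_aops
 */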
3112
3113
3114 /*
3115  * ext4_discard_partial_page_buffers()
3116  * Wrapper function for ext4_discard_partial_page_buffers_no_lock.
3117  * This function finds and locks the page containing the offset
3118  * "from" and passes it to ext4_discard_partial_page_buffers_no_lock.
3119  * Calling functions that already have the page locked should call
3120  * ext4_discard_partial_page_buffers_no_lock directly.
3121  */
3122 int ext4_discard_partial_page_buffers(handle_t *handle,
3123                 struct address_space *mapping, loff_t from,
3124                 loff_t length, int flags)
3125 {
3126         struct inode *inode = mapping->host;
3127         struct page *page;
3128         int err = 0;
3129
3130         page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3131                                    mapping_gfp_mask(mapping) & ~__GFP_FS);
3132         if (!page)
3133                 return -EINVAL;
3134
3135         err = ext4_discard_partial_page_buffers_no_lock(handle, inode, page,
3136                 from, length, flags);
3137
3138         unlock_page(page);
3139         page_cache_release(page);
3140         return err;
3141 }
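
/*
 * A minimal usage sketch, assuming blocks beyond a new end-of-file have
 * just been released: zero the rest of the page containing new_size,
 * touching only regions whose buffers are already unmapped.
 * ex_zero_eof_tail() is hypothetical.
 */
static int ex_zero_eof_tail(handle_t *handle, struct inode *inode,
                            loff_t new_size)
{
        loff_t tail = PAGE_CACHE_SIZE - (new_size & (PAGE_CACHE_SIZE - 1));

        return ext4_discard_partial_page_buffers(handle, inode->i_mapping,
                        new_size, tail,
                        EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED);
}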
3142
3143 /*
3144  * ext4_discard_partial_page_buffers_no_lock()
3145  * Zeros a page range of length 'length' starting from offset 'from'.
3146  * Buffer heads that correspond to the block-aligned regions of the
3147  * zeroed range will be unmapped.  Non-block-aligned regions
3148  * will have the corresponding buffer head mapped if needed so that
3149  * the region of the page can be updated with the partial zero out.
3150  *
3151  * This function assumes that the page has already been locked.
3152  * The range to be discarded must be contained within the given page.
3153  * If the specified range exceeds the end of the page it will be shortened
3154  * to the end of the page that corresponds to 'from'.  This function is
3155  * appropriate for updating a page and its buffer heads to be unmapped and
3156  * zeroed for blocks that have been either released, or are going to be
3157  * released.
3158  *
3159  * handle: The journal handle
3160  * inode:  The files inode
3161  * page:   A locked page that contains the offset "from"
3162  * from:   The starting byte offset (from the beginning of the file)
3163  *         to begin discarding
3164  * len:    The length of bytes to discard
3165  * flags:  Optional flags that may be used:
3166  *
3167  *         EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED
3168  *         Only zero the regions of the page whose buffer heads
3169  *         have already been unmapped.  This flag is appropriate
3170  *         for updating the contents of a page whose blocks may
3171  *         have already been released, and we only want to zero
3172  *         out the regions that correspond to those released blocks.
3173  *
3174  * Returns zero on success or negative on failure.
3175  */
3176 int ext4_discard_partial_page_buffers_no_lock(handle_t *handle,
3177                 struct inode *inode, struct page *page, loff_t from,
3178                 loff_t length, int flags)
3179 {
3180         ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3181         unsigned int offset = from & (PAGE_CACHE_SIZE-1);
3182         unsigned int blocksize, max, pos;
3183         unsigned int end_of_block, range_to_discard;
3184         ext4_lblk_t iblock;
3185         struct buffer_head *bh;
3186         int err = 0;
3187
3188         blocksize = inode->i_sb->s_blocksize;
3189         max = PAGE_CACHE_SIZE - offset;
3190
3191         if (index != page->index)
3192                 return -EINVAL;
3193
3194         /*
3195          * correct length if it does not fall between
3196          * 'from' and the end of the page
3197          */
3198         if (length > max || length < 0)
3199                 length = max;
3200
3201         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3202
3203         if (!page_has_buffers(page)) {
3204                 /*
3205                  * If the range to be discarded covers a partial block
3206                  * we need to get the page buffers.  This is because
3207                  * partial blocks cannot be released and the page needs
3208                  * to be updated with the contents of the block before
3209                  * we write the zeros on top of it.
3210                  */
3211                 if ((from & (blocksize - 1)) ||
3212                     ((from + length) & (blocksize - 1))) {
3213                         create_empty_buffers(page, blocksize, 0);
3214                 } else {
3215                         /*
3216                          * If there are no partial blocks,
3217                          * there is nothing to update,
3218                          * so we can return now
3219                          */
3220                         return 0;
3221                 }
3222         }
3223
3224         /* Find the buffer that contains "offset" */
3225         bh = page_buffers(page);
3226         pos = blocksize;
3227         while (offset >= pos) {
3228                 bh = bh->b_this_page;
3229                 iblock++;
3230                 pos += blocksize;
3231         }
3232
3233         pos = offset;
3234         while (pos < offset + length) {
3235                 err = 0;
3236
3237                 /* The length of space left to zero and unmap */
3238                 range_to_discard = offset + length - pos;
3239
3240                 /* The length of space until the end of the block */
3241                 end_of_block = blocksize - (pos & (blocksize-1));
3242
3243                 /*
3244                  * Do not unmap or zero past end of block
3245                  * for this buffer head
3246                  */
3247                 if (range_to_discard > end_of_block)
3248                         range_to_discard = end_of_block;
3249
3250
3251                 /*
3252                  * Skip this buffer head if we are only zeroing unmapped
3253                  * regions of the page
3254                  */
3255                 if (flags & EXT4_DISCARD_PARTIAL_PG_ZERO_UNMAPPED &&
3256                         buffer_mapped(bh))
3257                                 goto next;
3258
3259                 /* If the range is block aligned, unmap */
3260                 if (range_to_discard == blocksize) {
3261                         clear_buffer_dirty(bh);
3262                         bh->b_bdev = NULL;
3263                         clear_buffer_mapped(bh);
3264                         clear_buffer_req(bh);
3265                         clear_buffer_new(bh);
3266                         clear_buffer_delay(bh);
3267                         clear_buffer_unwritten(bh);
3268                         clear_buffer_uptodate(bh);
3269                         zero_user(page, pos, range_to_discard);
3270                         BUFFER_TRACE(bh, "Buffer discarded");
3271                         goto next;
3272                 }
3273
3274                 /*
3275                  * If this block is not completely contained in the range
3276                  * to be discarded, then it is not going to be released. Because
3277                  * we need to keep this block, we need to make sure this part
3278                  * of the page is uptodate before we modify it by writing
3279                  * partial zeros on it.
3280                  */
3281                 if (!buffer_mapped(bh)) {
3282                         /*
3283                          * Buffer head must be mapped before we can read
3284                          * from the block
3285                          */
3286                         BUFFER_TRACE(bh, "unmapped");
3287                         ext4_get_block(inode, iblock, bh, 0);
3288                         /* unmapped? It's a hole - nothing to do */
3289                         if (!buffer_mapped(bh)) {
3290                                 BUFFER_TRACE(bh, "still unmapped");
3291                                 goto next;
3292                         }
3293                 }
3294
3295                 /* Ok, it's mapped. Make sure it's up-to-date */
3296                 if (PageUptodate(page))
3297                         set_buffer_uptodate(bh);
3298
3299                 if (!buffer_uptodate(bh)) {
3300                         err = -EIO;
3301                         ll_rw_block(READ, 1, &bh);
3302                         wait_on_buffer(bh);
3303                         /* Uhhuh. Read error. Complain and punt. */
3304                         if (!buffer_uptodate(bh))
3305                                 goto next;
3306                 }
3307
3308                 if (ext4_should_journal_data(inode)) {
3309                         BUFFER_TRACE(bh, "get write access");
3310                         err = ext4_journal_get_write_access(handle, bh);
3311                         if (err)
3312                                 goto next;
3313                 }
3314
3315                 zero_user(page, pos, range_to_discard);
3316
3317                 err = 0;
3318                 if (ext4_should_journal_data(inode)) {
3319                         err = ext4_handle_dirty_metadata(handle, inode, bh);
3320                 } else
3321                         mark_buffer_dirty(bh);
3322
3323                 BUFFER_TRACE(bh, "Partial buffer zeroed");
3324 next:
3325                 bh = bh->b_this_page;
3326                 iblock++;
3327                 pos += range_to_discard;
3328         }
3329
3330         return err;
3331 }
3332
3333 /*
3334  * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3335  * up to the end of the block which corresponds to `from'.
3336  * This is required during truncate. We need to physically zero the tail end
3337  * of that block so it doesn't yield old data if the file is later grown.
3338  */
3339 int ext4_block_truncate_page(handle_t *handle,
3340                 struct address_space *mapping, loff_t from)
3341 {
3342         unsigned offset = from & (PAGE_CACHE_SIZE-1);
3343         unsigned length;
3344         unsigned blocksize;
3345         struct inode *inode = mapping->host;
3346
3347         blocksize = inode->i_sb->s_blocksize;
3348         length = blocksize - (offset & (blocksize - 1));
3349
3350         return ext4_block_zero_page_range(handle, mapping, from, length);
3351 }
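
/*
 * Worked example of the arithmetic above (hedged, illustrative values):
 * with a 4096-byte block size, truncating at from = 10000 gives
 * offset & (blocksize - 1) = 1808, so length = 4096 - 1808 = 2288 bytes
 * are zeroed, i.e. the tail of the block from byte 1808 up to the next
 * block boundary.
 */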
3352
3353 /*
3354  * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3355  * starting from file offset 'from'.  The range to be zeroed must
3356  * be contained within one block.  If the specified range exceeds
3357  * the end of the block it will be shortened to the end of the block
3358  * that corresponds to 'from'.
3359  */
3360 int ext4_block_zero_page_range(handle_t *handle,
3361                 struct address_space *mapping, loff_t from, loff_t length)
3362 {
3363         ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3364         unsigned offset = from & (PAGE_CACHE_SIZE-1);
3365         unsigned blocksize, max, pos;
3366         ext4_lblk_t iblock;
3367         struct inode *inode = mapping->host;
3368         struct buffer_head *bh;
3369         struct page *page;
3370         int err = 0;
3371
3372         page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3373                                    mapping_gfp_mask(mapping) & ~__GFP_FS);
3374         if (!page)
3375                 return -EINVAL;
3376
3377         blocksize = inode->i_sb->s_blocksize;
3378         max = blocksize - (offset & (blocksize - 1));
3379
3380         /*
3381          * correct length if it does not fall between
3382          * 'from' and the end of the block
3383          */
3384         if (length > max || length < 0)
3385                 length = max;
3386
3387         iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3388
3389         if (!page_has_buffers(page))
3390                 create_empty_buffers(page, blocksize, 0);
3391
3392         /* Find the buffer that contains "offset" */
3393         bh = page_buffers(page);
3394         pos = blocksize;
3395         while (offset >= pos) {
3396                 bh = bh->b_this_page;
3397                 iblock++;
3398                 pos += blocksize;
3399         }
3400
3401         err = 0;
3402         if (buffer_freed(bh)) {
3403                 BUFFER_TRACE(bh, "freed: skip");
3404                 goto unlock;
3405         }
3406
3407         if (!buffer_mapped(bh)) {
3408                 BUFFER_TRACE(bh, "unmapped");
3409                 ext4_get_block(inode, iblock, bh, 0);
3410                 /* unmapped? It's a hole - nothing to do */
3411                 if (!buffer_mapped(bh)) {
3412                         BUFFER_TRACE(bh, "still unmapped");
3413                         goto unlock;
3414                 }
3415         }
3416
3417         /* Ok, it's mapped. Make sure it's up-to-date */
3418         if (PageUptodate(page))
3419                 set_buffer_uptodate(bh);
3420
3421         if (!buffer_uptodate(bh)) {
3422                 err = -EIO;
3423                 ll_rw_block(READ, 1, &bh);
3424                 wait_on_buffer(bh);
3425                 /* Uhhuh. Read error. Complain and punt. */
3426                 if (!buffer_uptodate(bh))
3427                         goto unlock;
3428         }
3429
3430         if (ext4_should_journal_data(inode)) {
3431                 BUFFER_TRACE(bh, "get write access");
3432                 err = ext4_journal_get_write_access(handle, bh);
3433                 if (err)
3434                         goto unlock;
3435         }
3436
3437         zero_user(page, offset, length);
3438
3439         BUFFER_TRACE(bh, "zeroed end of block");
3440
3441         err = 0;
3442         if (ext4_should_journal_data(inode)) {
3443                 err = ext4_handle_dirty_metadata(handle, inode, bh);
3444         } else
3445                 mark_buffer_dirty(bh);
3446
3447 unlock:
3448         unlock_page(page);
3449         page_cache_release(page);
3450         return err;
3451 }
3452
3453 int ext4_can_truncate(struct inode *inode)
3454 {
3455         if (S_ISREG(inode->i_mode))
3456                 return 1;
3457         if (S_ISDIR(inode->i_mode))
3458                 return 1;
3459         if (S_ISLNK(inode->i_mode))
3460                 return !ext4_inode_is_fast_symlink(inode);
3461         return 0;
3462 }
3463
3464 /*
3465  * ext4_punch_hole: punches a hole in a file by releasing the blocks
3466  * associated with the given offset and length
3467  *
3468  * @inode:  File inode
3469  * @offset: The offset where the hole will begin
3470  * @len:    The length of the hole
3471  *
3472  * Returns: 0 on success or negative on failure
3473  */
3474
3475 int ext4_punch_hole(struct file *file, loff_t offset, loff_t length)
3476 {
3477         struct inode *inode = file->f_path.dentry->d_inode;
3478         if (!S_ISREG(inode->i_mode))
3479                 return -ENOTSUPP;
3480
3481         if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3482                 /* TODO: Add support for non extent hole punching */
3483                 return -ENOTSUPP;
3484         }
3485
3486         if (EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) {
3487                 /* TODO: Add support for bigalloc file systems */
3488                 return -ENOTSUPP;
3489         }
3490
3491         return ext4_ext_punch_hole(file, offset, length);
3492 }
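
/*
 * Standalone userspace sketch (not kernel code): the fallocate(2) call
 * that reaches ext4_punch_hole() above.  FALLOC_FL_PUNCH_HOLE must be
 * combined with FALLOC_FL_KEEP_SIZE; error handling is omitted.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>

int ex_punch(int fd, off_t offset, off_t len)
{
        return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
                         offset, len);
}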
3493
3494 /*
3495  * ext4_truncate()
3496  *
3497  * We block out ext4_get_block() block instantiations across the entire
3498  * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3499  * simultaneously on behalf of the same inode.
3500  *
3501  * As we work through the truncate and commit bits of it to the journal there
3502  * is one core, guiding principle: the file's tree must always be consistent on
3503  * disk.  We must be able to restart the truncate after a crash.
3504  *
3505  * The file's tree may be transiently inconsistent in memory (although it
3506  * probably isn't), but whenever we close off and commit a journal transaction,
3507  * the contents of (the filesystem + the journal) must be consistent and
3508  * restartable.  It's pretty simple, really: bottom up, right to left (although
3509  * left-to-right works OK too).
3510  *
3511  * Note that at recovery time, journal replay occurs *before* the restart of
3512  * truncate against the orphan inode list.
3513  *
3514  * The committed inode has the new, desired i_size (which is the same as
3515  * i_disksize in this case).  After a crash, ext4_orphan_cleanup() will see
3516  * that this inode's truncate did not complete and it will again call
3517  * ext4_truncate() to have another go.  So there will be instantiated blocks
3518  * to the right of the truncation point in a crashed ext4 filesystem.  But
3519  * that's fine - as long as they are linked from the inode, the post-crash
3520  * ext4_truncate() run will find them and release them.
3521  */
3522 void ext4_truncate(struct inode *inode)
3523 {
3524         trace_ext4_truncate_enter(inode);
3525
3526         if (!ext4_can_truncate(inode))
3527                 return;
3528
3529         ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3530
3531         if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3532                 ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3533
3534         if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3535                 ext4_ext_truncate(inode);
3536         else
3537                 ext4_ind_truncate(inode);
3538
3539         trace_ext4_truncate_exit(inode);
3540 }
3541
3542 /*
3543  * ext4_get_inode_loc returns with an extra refcount against the inode's
3544  * underlying buffer_head on success. If 'in_mem' is true, we have all
3545  * data in memory that is needed to recreate the on-disk version of this
3546  * inode.
3547  */
3548 static int __ext4_get_inode_loc(struct inode *inode,
3549                                 struct ext4_iloc *iloc, int in_mem)
3550 {
3551         struct ext4_group_desc  *gdp;
3552         struct buffer_head      *bh;
3553         struct super_block      *sb = inode->i_sb;
3554         ext4_fsblk_t            block;
3555         int                     inodes_per_block, inode_offset;
3556
3557         iloc->bh = NULL;
3558         if (!ext4_valid_inum(sb, inode->i_ino))
3559                 return -EIO;
3560
3561         iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3562         gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3563         if (!gdp)
3564                 return -EIO;
3565
3566         /*
3567          * Figure out the offset within the block group inode table
3568          */
3569         inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3570         inode_offset = ((inode->i_ino - 1) %
3571                         EXT4_INODES_PER_GROUP(sb));
3572         block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3573         iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3574
3575         bh = sb_getblk(sb, block);
3576         if (!bh) {
3577                 EXT4_ERROR_INODE_BLOCK(inode, block,
3578                                        "unable to read itable block");
3579                 return -EIO;
3580         }
3581         if (!buffer_uptodate(bh)) {
3582                 lock_buffer(bh);
3583
3584                 /*
3585                  * If the buffer has the write error flag, we have failed
3586                  * to write out another inode in the same block.  In this
3587                  * case, we don't have to read the block because we may
3588                  * read the old inode data successfully.
3589                  */
3590                 if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3591                         set_buffer_uptodate(bh);
3592
3593                 if (buffer_uptodate(bh)) {
3594                         /* someone brought it uptodate while we waited */
3595                         unlock_buffer(bh);
3596                         goto has_buffer;
3597                 }
3598
3599                 /*
3600                  * If we have all information of the inode in memory and this
3601                  * is the only valid inode in the block, we need not read the
3602                  * block.
3603                  */
3604                 if (in_mem) {
3605                         struct buffer_head *bitmap_bh;
3606                         int i, start;
3607
3608                         start = inode_offset & ~(inodes_per_block - 1);
3609
3610                         /* Is the inode bitmap in cache? */
3611                         bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3612                         if (!bitmap_bh)
3613                                 goto make_io;
3614
3615                         /*
3616                          * If the inode bitmap isn't in cache then the
3617                          * optimisation may end up performing two reads instead
3618                          * of one, so skip it.
3619                          */
3620                         if (!buffer_uptodate(bitmap_bh)) {
3621                                 brelse(bitmap_bh);
3622                                 goto make_io;
3623                         }
3624                         for (i = start; i < start + inodes_per_block; i++) {
3625                                 if (i == inode_offset)
3626                                         continue;
3627                                 if (ext4_test_bit(i, bitmap_bh->b_data))
3628                                         break;
3629                         }
3630                         brelse(bitmap_bh);
3631                         if (i == start + inodes_per_block) {
3632                                 /* all other inodes are free, so skip I/O */
3633                                 memset(bh->b_data, 0, bh->b_size);
3634                                 set_buffer_uptodate(bh);
3635                                 unlock_buffer(bh);
3636                                 goto has_buffer;
3637                         }
3638                 }
3639
3640 make_io:
3641                 /*
3642                  * If we need to do any I/O, try to pre-readahead extra
3643                  * blocks from the inode table.
3644                  */
3645                 if (EXT4_SB(sb)->s_inode_readahead_blks) {
3646                         ext4_fsblk_t b, end, table;
3647                         unsigned num;
3648
3649                         table = ext4_inode_table(sb, gdp);
3650                         /* s_inode_readahead_blks is always a power of 2 */
3651                         b = block & ~(EXT4_SB(sb)->s_inode_readahead_blks-1);
3652                         if (table > b)
3653                                 b = table;
3654                         end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3655                         num = EXT4_INODES_PER_GROUP(sb);
3656                         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3657                                        EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
3658                                 num -= ext4_itable_unused_count(sb, gdp);
3659                         table += num / inodes_per_block;
3660                         if (end > table)
3661                                 end = table;
3662                         while (b <= end)
3663                                 sb_breadahead(sb, b++);
3664                 }
3665
3666                 /*
3667                  * There are other valid inodes in the buffer, this inode
3668                  * has in-inode xattrs, or we don't have this inode in memory.
3669                  * Read the block from disk.
3670                  */
3671                 trace_ext4_load_inode(inode);
3672                 get_bh(bh);
3673                 bh->b_end_io = end_buffer_read_sync;
3674                 submit_bh(READ_META, bh);
3675                 wait_on_buffer(bh);
3676                 if (!buffer_uptodate(bh)) {
3677                         EXT4_ERROR_INODE_BLOCK(inode, block,
3678                                                "unable to read itable block");
3679                         brelse(bh);
3680                         return -EIO;
3681                 }
3682         }
3683 has_buffer:
3684         iloc->bh = bh;
3685         return 0;
3686 }
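
/*
 * Worked example of the location arithmetic above (hedged, illustrative
 * geometry): with 8192 inodes per group, 256-byte inodes and 4096-byte
 * blocks (16 inodes per block), ino = 12345 lands in block group
 * (12345 - 1) / 8192 = 1, at index 12344 % 8192 = 4152 within the
 * group; that is block 4152 / 16 = 259 of the group's inode table, at
 * byte offset (4152 % 16) * 256 = 2048 within the block.
 */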
3687
3688 int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3689 {
3690         /* We have all inode data except xattrs in memory here. */
3691         return __ext4_get_inode_loc(inode, iloc,
3692                 !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3693 }
3694
3695 void ext4_set_inode_flags(struct inode *inode)
3696 {
3697         unsigned int flags = EXT4_I(inode)->i_flags;
3698
3699         inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
3700         if (flags & EXT4_SYNC_FL)
3701                 inode->i_flags |= S_SYNC;
3702         if (flags & EXT4_APPEND_FL)
3703                 inode->i_flags |= S_APPEND;
3704         if (flags & EXT4_IMMUTABLE_FL)
3705                 inode->i_flags |= S_IMMUTABLE;
3706         if (flags & EXT4_NOATIME_FL)
3707                 inode->i_flags |= S_NOATIME;
3708         if (flags & EXT4_DIRSYNC_FL)
3709                 inode->i_flags |= S_DIRSYNC;
3710 }
3711
3712 /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3713 void ext4_get_inode_flags(struct ext4_inode_info *ei)
3714 {
3715         unsigned int vfs_fl;
3716         unsigned long old_fl, new_fl;
3717
3718         do {
3719                 vfs_fl = ei->vfs_inode.i_flags;
3720                 old_fl = ei->i_flags;
3721                 new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3722                                 EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3723                                 EXT4_DIRSYNC_FL);
3724                 if (vfs_fl & S_SYNC)
3725                         new_fl |= EXT4_SYNC_FL;
3726                 if (vfs_fl & S_APPEND)
3727                         new_fl |= EXT4_APPEND_FL;
3728                 if (vfs_fl & S_IMMUTABLE)
3729                         new_fl |= EXT4_IMMUTABLE_FL;
3730                 if (vfs_fl & S_NOATIME)
3731                         new_fl |= EXT4_NOATIME_FL;
3732                 if (vfs_fl & S_DIRSYNC)
3733                         new_fl |= EXT4_DIRSYNC_FL;
3734         } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3735 }
3736
3737 static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3738                                   struct ext4_inode_info *ei)
3739 {
3740         blkcnt_t i_blocks;
3741         struct inode *inode = &(ei->vfs_inode);
3742         struct super_block *sb = inode->i_sb;
3743
3744         if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3745                                 EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3746                 /* we are using combined 48 bit field */
3747                 i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3748                                         le32_to_cpu(raw_inode->i_blocks_lo);
3749                 if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3750                         /* i_blocks is in units of file system blocks */
3751                         return i_blocks << (inode->i_blkbits - 9);
3752                 } else {
3753                         return i_blocks;
3754                 }
3755         } else {
3756                 return le32_to_cpu(raw_inode->i_blocks_lo);
3757         }
3758 }
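
/*
 * Worked example (hedged): with the huge_file feature and the
 * HUGE_FILE inode flag set, i_blocks counts filesystem blocks, so
 * i_blocks_high:i_blocks_lo = 0x0001:0x00000000 on a 4K-block
 * filesystem yields (1ULL << 32) << (12 - 9) = 2^35 512-byte units,
 * i.e. 16 TiB of usage.
 */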
3759
3760 struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3761 {
3762         struct ext4_iloc iloc;
3763         struct ext4_inode *raw_inode;
3764         struct ext4_inode_info *ei;
3765         struct inode *inode;
3766         journal_t *journal = EXT4_SB(sb)->s_journal;
3767         long ret;
3768         int block;
3769
3770         inode = iget_locked(sb, ino);
3771         if (!inode)
3772                 return ERR_PTR(-ENOMEM);
3773         if (!(inode->i_state & I_NEW))
3774                 return inode;
3775
3776         ei = EXT4_I(inode);
3777         iloc.bh = NULL;
3778
3779         ret = __ext4_get_inode_loc(inode, &iloc, 0);
3780         if (ret < 0)
3781                 goto bad_inode;
3782         raw_inode = ext4_raw_inode(&iloc);
3783         inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3784         inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3785         inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
3786         if (!(test_opt(inode->i_sb, NO_UID32))) {
3787                 inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
3788                 inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
3789         }
3790         inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
3791
3792         ext4_clear_state_flags(ei);     /* Only relevant on 32-bit archs */
3793         ei->i_dir_start_lookup = 0;
3794         ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
3795         /* We now have enough fields to check if the inode was active or not.
3796          * This is needed because nfsd might try to access dead inodes;
3797          * the test is the same one that e2fsck uses.
3798          * NeilBrown 1999oct15
3799          */
3800         if (inode->i_nlink == 0) {
3801                 if (inode->i_mode == 0 ||
3802                     !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) {
3803                         /* this inode is deleted */
3804                         ret = -ESTALE;
3805                         goto bad_inode;
3806                 }
3807                 /* The only unlinked inodes we let through here have
3808                  * valid i_mode and are being read by the orphan
3809                  * recovery code: that's fine, we're about to complete
3810                  * the process of deleting those. */
3811         }
3812         ei->i_flags = le32_to_cpu(raw_inode->i_flags);
3813         inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
3814         ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
3815         if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
3816                 ei->i_file_acl |=
3817                         ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
3818         inode->i_size = ext4_isize(raw_inode);
3819         ei->i_disksize = inode->i_size;
3820 #ifdef CONFIG_QUOTA
3821         ei->i_reserved_quota = 0;
3822 #endif
3823         inode->i_generation = le32_to_cpu(raw_inode->i_generation);
3824         ei->i_block_group = iloc.block_group;
3825         ei->i_last_alloc_group = ~0;
3826         /*
3827          * NOTE! The in-memory inode i_data array is in little-endian order
3828          * even on big-endian machines: we do NOT byteswap the block numbers!
3829          */
3830         for (block = 0; block < EXT4_N_BLOCKS; block++)
3831                 ei->i_data[block] = raw_inode->i_block[block];
3832         INIT_LIST_HEAD(&ei->i_orphan);
3833
3834         /*
3835          * Set transaction id's of transactions that have to be committed
3836          * to finish f[data]sync. We set them to currently running transaction
3837          * as we cannot be sure that the inode or some of its metadata isn't
3838          * part of the transaction - the inode could have been reclaimed and
3839          * now it is reread from disk.
3840          */
3841         if (journal) {
3842                 transaction_t *transaction;
3843                 tid_t tid;
3844
3845                 read_lock(&journal->j_state_lock);
3846                 if (journal->j_running_transaction)
3847                         transaction = journal->j_running_transaction;
3848                 else
3849                         transaction = journal->j_committing_transaction;
3850                 if (transaction)
3851                         tid = transaction->t_tid;
3852                 else
3853                         tid = journal->j_commit_sequence;
3854                 read_unlock(&journal->j_state_lock);
3855                 ei->i_sync_tid = tid;
3856                 ei->i_datasync_tid = tid;
3857         }
3858
3859         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3860                 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3861                 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3862                     EXT4_INODE_SIZE(inode->i_sb)) {
3863                         ret = -EIO;
3864                         goto bad_inode;
3865                 }
3866                 if (ei->i_extra_isize == 0) {
3867                         /* The extra space is currently unused. Use it. */
3868                         ei->i_extra_isize = sizeof(struct ext4_inode) -
3869                                             EXT4_GOOD_OLD_INODE_SIZE;
3870                 } else {
3871                         __le32 *magic = (void *)raw_inode +
3872                                         EXT4_GOOD_OLD_INODE_SIZE +
3873                                         ei->i_extra_isize;
3874                         if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3875                                 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3876                 }
3877         } else
3878                 ei->i_extra_isize = 0;
3879
3880         EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3881         EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
3882         EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
3883         EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
3884
3885         inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
3886         if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3887                 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
3888                         inode->i_version |=
3889                         (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
3890         }
3891
3892         ret = 0;
3893         if (ei->i_file_acl &&
3894             !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
3895                 EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
3896                                  ei->i_file_acl);
3897                 ret = -EIO;
3898                 goto bad_inode;
3899         } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
3900                 if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3901                     (S_ISLNK(inode->i_mode) &&
3902                      !ext4_inode_is_fast_symlink(inode)))
3903                         /* Validate extent which is part of inode */
3904                         ret = ext4_ext_check_inode(inode);
3905         } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
3906                    (S_ISLNK(inode->i_mode) &&
3907                     !ext4_inode_is_fast_symlink(inode))) {
3908                 /* Validate block references which are part of inode */
3909                 ret = ext4_ind_check_inode(inode);
3910         }
3911         if (ret)
3912                 goto bad_inode;
3913
3914         if (S_ISREG(inode->i_mode)) {
3915                 inode->i_op = &ext4_file_inode_operations;
3916                 inode->i_fop = &ext4_file_operations;
3917                 ext4_set_aops(inode);
3918         } else if (S_ISDIR(inode->i_mode)) {
3919                 inode->i_op = &ext4_dir_inode_operations;
3920                 inode->i_fop = &ext4_dir_operations;
3921         } else if (S_ISLNK(inode->i_mode)) {
3922                 if (ext4_inode_is_fast_symlink(inode)) {
3923                         inode->i_op = &ext4_fast_symlink_inode_operations;
3924                         nd_terminate_link(ei->i_data, inode->i_size,
3925                                 sizeof(ei->i_data) - 1);
3926                 } else {
3927                         inode->i_op = &ext4_symlink_inode_operations;
3928                         ext4_set_aops(inode);
3929                 }
3930         } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
3931               S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
3932                 inode->i_op = &ext4_special_inode_operations;
3933                 if (raw_inode->i_block[0])
3934                         init_special_inode(inode, inode->i_mode,
3935                            old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
3936                 else
3937                         init_special_inode(inode, inode->i_mode,
3938                            new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
3939         } else {
3940                 ret = -EIO;
3941                 EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
3942                 goto bad_inode;
3943         }
3944         brelse(iloc.bh);
3945         ext4_set_inode_flags(inode);
3946         unlock_new_inode(inode);
3947         return inode;
3948
3949 bad_inode:
3950         brelse(iloc.bh);
3951         iget_failed(inode);
3952         return ERR_PTR(ret);
3953 }
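
/*
 * Caller-side sketch (illustrative, not part of this file's logic): the
 * inode-read routine ending above returns an ERR_PTR() on failure rather
 * than NULL, so callers are expected to test the result with IS_ERR(),
 * along the lines of:
 *
 *	inode = ext4_iget(sb, ino);
 *	if (IS_ERR(inode))
 *		return ERR_CAST(inode);
 */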
3954
3955 static int ext4_inode_blocks_set(handle_t *handle,
3956                                 struct ext4_inode *raw_inode,
3957                                 struct ext4_inode_info *ei)
3958 {
3959         struct inode *inode = &(ei->vfs_inode);
3960         u64 i_blocks = inode->i_blocks;
3961         struct super_block *sb = inode->i_sb;
3962
3963         if (i_blocks <= ~0U) {
3964                 /*
3965                  * i_blocks can be represented in a 32 bit variable
3966                  * as a multiple of 512 bytes
3967                  */
3968                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3969                 raw_inode->i_blocks_high = 0;
3970                 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3971                 return 0;
3972         }
3973         if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
3974                 return -EFBIG;
3975
3976         if (i_blocks <= 0xffffffffffffULL) {
3977                 /*
3978                  * i_blocks can be represented in a 48 bit variable
3979                  * as a multiple of 512 bytes
3980                  */
3981                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3982                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3983                 ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3984         } else {
3985                 ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
3986                 /* i_blocks is stored in units of the file system block size */
3987                 i_blocks = i_blocks >> (inode->i_blkbits - 9);
3988                 raw_inode->i_blocks_lo   = cpu_to_le32(i_blocks);
3989                 raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
3990         }
3991         return 0;
3992 }
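
/*
 * Worked example (illustrative numbers): a file using 2^33 512-byte
 * units (4 TiB of data) does not fit the 32-bit case above, so with the
 * huge_file feature it is stored as i_blocks_lo = 0 and
 * i_blocks_high = 2 (2^33 >> 32).  Only when even 48 bits would
 * overflow is EXT4_INODE_HUGE_FILE set and i_blocks kept in file
 * system block units instead of 512-byte units.
 */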
3993
3994 /*
3995  * Post the struct inode info into an on-disk inode location in the
3996  * buffer-cache.  This gobbles the caller's reference to the
3997  * buffer_head in the inode location struct.
3998  *
3999  * The caller must have write access to iloc->bh.
4000  */
4001 static int ext4_do_update_inode(handle_t *handle,
4002                                 struct inode *inode,
4003                                 struct ext4_iloc *iloc)
4004 {
4005         struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4006         struct ext4_inode_info *ei = EXT4_I(inode);
4007         struct buffer_head *bh = iloc->bh;
4008         int err = 0, rc, block;
4009
4010         /* For fields not tracked in the in-memory inode,
4011          * initialise them to zero for new inodes. */
4012         if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4013                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4014
4015         ext4_get_inode_flags(ei);
4016         raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4017         if (!(test_opt(inode->i_sb, NO_UID32))) {
4018                 raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
4019                 raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
4020 /*
4021  * Fix up interoperability with old kernels. Otherwise, old inodes get
4022  * re-used with the upper 16 bits of the uid/gid intact
4023  */
4024                 if (!ei->i_dtime) {
4025                         raw_inode->i_uid_high =
4026                                 cpu_to_le16(high_16_bits(inode->i_uid));
4027                         raw_inode->i_gid_high =
4028                                 cpu_to_le16(high_16_bits(inode->i_gid));
4029                 } else {
4030                         raw_inode->i_uid_high = 0;
4031                         raw_inode->i_gid_high = 0;
4032                 }
4033         } else {
4034                 raw_inode->i_uid_low =
4035                         cpu_to_le16(fs_high2lowuid(inode->i_uid));
4036                 raw_inode->i_gid_low =
4037                         cpu_to_le16(fs_high2lowgid(inode->i_gid));
4038                 raw_inode->i_uid_high = 0;
4039                 raw_inode->i_gid_high = 0;
4040         }
4041         raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4042
4043         EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4044         EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4045         EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4046         EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4047
4048         if (ext4_inode_blocks_set(handle, raw_inode, ei))
4049                 goto out_brelse;
4050         raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4051         raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4052         if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4053             cpu_to_le32(EXT4_OS_HURD))
4054                 raw_inode->i_file_acl_high =
4055                         cpu_to_le16(ei->i_file_acl >> 32);
4056         raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4057         ext4_isize_set(raw_inode, ei->i_disksize);
4058         if (ei->i_disksize > 0x7fffffffULL) {
4059                 struct super_block *sb = inode->i_sb;
4060                 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4061                                 EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4062                                 EXT4_SB(sb)->s_es->s_rev_level ==
4063                                 cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4064                         /* If this is the first large file
4065                          * created, add a flag to the superblock.
4066                          */
4067                         err = ext4_journal_get_write_access(handle,
4068                                         EXT4_SB(sb)->s_sbh);
4069                         if (err)
4070                                 goto out_brelse;
4071                         ext4_update_dynamic_rev(sb);
4072                         EXT4_SET_RO_COMPAT_FEATURE(sb,
4073                                         EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4074                         sb->s_dirt = 1;
4075                         ext4_handle_sync(handle);
4076                         err = ext4_handle_dirty_metadata(handle, NULL,
4077                                         EXT4_SB(sb)->s_sbh);
4078                 }
4079         }
4080         raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4081         if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4082                 if (old_valid_dev(inode->i_rdev)) {
4083                         raw_inode->i_block[0] =
4084                                 cpu_to_le32(old_encode_dev(inode->i_rdev));
4085                         raw_inode->i_block[1] = 0;
4086                 } else {
4087                         raw_inode->i_block[0] = 0;
4088                         raw_inode->i_block[1] =
4089                                 cpu_to_le32(new_encode_dev(inode->i_rdev));
4090                         raw_inode->i_block[2] = 0;
4091                 }
4092         } else
4093                 for (block = 0; block < EXT4_N_BLOCKS; block++)
4094                         raw_inode->i_block[block] = ei->i_data[block];
4095
4096         raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4097         if (ei->i_extra_isize) {
4098                 if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4099                         raw_inode->i_version_hi =
4100                         cpu_to_le32(inode->i_version >> 32);
4101                 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4102         }
4103
4104         BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4105         rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4106         if (!err)
4107                 err = rc;
4108         ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4109
4110         ext4_update_inode_fsync_trans(handle, inode, 0);
4111 out_brelse:
4112         brelse(bh);
4113         ext4_std_error(inode->i_sb, err);
4114         return err;
4115 }
4116
4117 /*
4118  * ext4_write_inode()
4119  *
4120  * We are called from a few places:
4121  *
4122  * - Within generic_file_write() for O_SYNC files.
4123  *   Here, there will be no transaction running. We wait for any running
4124  *   transaction to commit.
4125  *
4126  * - Within sys_sync(), kupdate and such.
4127  *   We wait on commit, if told to.
4128  *
4129  * - Within prune_icache() (PF_MEMALLOC == true)
4130  *   Here we simply return.  We can't afford to block kswapd on the
4131  *   journal commit.
4132  *
4133  * In all cases it is actually safe for us to return without doing anything,
4134  * because the inode has been copied into a raw inode buffer in
4135  * ext4_mark_inode_dirty().  This is a correctness thing for O_SYNC and for
4136  * knfsd.
4137  *
4138  * Note that we are absolutely dependent upon all inode dirtiers doing the
4139  * right thing: they *must* call mark_inode_dirty() after dirtying info in
4140  * which we are interested.
4141  *
4142  * It would be a bug for them to not do this.  The code:
4143  *
4144  *      mark_inode_dirty(inode)
4145  *      stuff();
4146  *      inode->i_size = expr;
4147  *
4148  * is in error because a kswapd-driven write_inode() could occur while
4149  * `stuff()' is running, and the new i_size will be lost.  Plus the inode
4150  * will no longer be on the superblock's dirty inode list.
4151  */
4152 int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4153 {
4154         int err;
4155
4156         if (current->flags & PF_MEMALLOC)
4157                 return 0;
4158
4159         if (EXT4_SB(inode->i_sb)->s_journal) {
4160                 if (ext4_journal_current_handle()) {
4161                         jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4162                         dump_stack();
4163                         return -EIO;
4164                 }
4165
4166                 if (wbc->sync_mode != WB_SYNC_ALL)
4167                         return 0;
4168
4169                 err = ext4_force_commit(inode->i_sb);
4170         } else {
4171                 struct ext4_iloc iloc;
4172
4173                 err = __ext4_get_inode_loc(inode, &iloc, 0);
4174                 if (err)
4175                         return err;
4176                 if (wbc->sync_mode == WB_SYNC_ALL)
4177                         sync_dirty_buffer(iloc.bh);
4178                 if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4179                         EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4180                                          "IO error syncing inode");
4181                         err = -EIO;
4182                 }
4183                 brelse(iloc.bh);
4184         }
4185         return err;
4186 }
4187
4188 /*
4189  * ext4_setattr()
4190  *
4191  * Called from notify_change.
4192  *
4193  * We want to trap VFS attempts to truncate the file as soon as
4194  * possible.  In particular, we want to make sure that when the VFS
4195  * shrinks i_size, we put the inode on the orphan list and modify
4196  * i_disksize immediately, so that during the subsequent flushing of
4197  * dirty pages and freeing of disk blocks, we can guarantee that any
4198  * commit will leave the blocks being flushed in an unused state on
4199  * disk.  (On recovery, the inode will get truncated and the blocks will
4200  * be freed, so we have a strong guarantee that no future commit will
4201  * leave these blocks visible to the user.)
4202  *
4203  * Another thing we have to ensure is that if we are in ordered mode
4204  * and the inode is still attached to the committing transaction, then
4205  * we must start writeout of all the dirty pages which are being truncated.
4206  * This way we are sure that all the data written in the previous
4207  * transaction is already on disk (truncate waits for pages under
4208  * writeback).
4209  *
4210  * Called with inode->i_mutex down.
4211  */
4212 int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4213 {
4214         struct inode *inode = dentry->d_inode;
4215         int error, rc = 0;
4216         int orphan = 0;
4217         const unsigned int ia_valid = attr->ia_valid;
4218
4219         error = inode_change_ok(inode, attr);
4220         if (error)
4221                 return error;
4222
4223         if (is_quota_modification(inode, attr))
4224                 dquot_initialize(inode);
4225         if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
4226                 (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
4227                 handle_t *handle;
4228
4229                 /* (user+group)*(old+new) structure, inode write (sb,
4230                  * inode block, ? - but truncate inode update has it) */
4231                 handle = ext4_journal_start(inode, (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb)+
4232                                         EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb))+3);
4233                 if (IS_ERR(handle)) {
4234                         error = PTR_ERR(handle);
4235                         goto err_out;
4236                 }
4237                 error = dquot_transfer(inode, attr);
4238                 if (error) {
4239                         ext4_journal_stop(handle);
4240                         return error;
4241                 }
4242                 /* Update corresponding info in inode so that everything is in
4243                  * one transaction */
4244                 if (attr->ia_valid & ATTR_UID)
4245                         inode->i_uid = attr->ia_uid;
4246                 if (attr->ia_valid & ATTR_GID)
4247                         inode->i_gid = attr->ia_gid;
4248                 error = ext4_mark_inode_dirty(handle, inode);
4249                 ext4_journal_stop(handle);
4250         }
4251
4252         if (attr->ia_valid & ATTR_SIZE) {
4253                 inode_dio_wait(inode);
4254
4255                 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4256                         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4257
4258                         if (attr->ia_size > sbi->s_bitmap_maxbytes)
4259                                 return -EFBIG;
4260                 }
4261         }
4262
4263         if (S_ISREG(inode->i_mode) &&
4264             attr->ia_valid & ATTR_SIZE &&
4265             (attr->ia_size < inode->i_size)) {
4266                 handle_t *handle;
4267
4268                 handle = ext4_journal_start(inode, 3);
4269                 if (IS_ERR(handle)) {
4270                         error = PTR_ERR(handle);
4271                         goto err_out;
4272                 }
4273                 if (ext4_handle_valid(handle)) {
4274                         error = ext4_orphan_add(handle, inode);
4275                         orphan = 1;
4276                 }
4277                 EXT4_I(inode)->i_disksize = attr->ia_size;
4278                 rc = ext4_mark_inode_dirty(handle, inode);
4279                 if (!error)
4280                         error = rc;
4281                 ext4_journal_stop(handle);
4282
4283                 if (ext4_should_order_data(inode)) {
4284                         error = ext4_begin_ordered_truncate(inode,
4285                                                             attr->ia_size);
4286                         if (error) {
4287                                 /* Do as much error cleanup as possible */
4288                                 handle = ext4_journal_start(inode, 3);
4289                                 if (IS_ERR(handle)) {
4290                                         ext4_orphan_del(NULL, inode);
4291                                         goto err_out;
4292                                 }
4293                                 ext4_orphan_del(handle, inode);
4294                                 orphan = 0;
4295                                 ext4_journal_stop(handle);
4296                                 goto err_out;
4297                         }
4298                 }
4299         }
4300
4301         if (attr->ia_valid & ATTR_SIZE) {
4302                 if (attr->ia_size != i_size_read(inode)) {
4303                         truncate_setsize(inode, attr->ia_size);
4304                         ext4_truncate(inode);
4305                 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))
4306                         ext4_truncate(inode);
4307         }
4308
4309         if (!rc) {
4310                 setattr_copy(inode, attr);
4311                 mark_inode_dirty(inode);
4312         }
4313
4314         /*
4315          * If the call to ext4_truncate failed to get a transaction handle at
4316          * all, we need to clean up the in-core orphan list manually.
4317          */
4318         if (orphan && inode->i_nlink)
4319                 ext4_orphan_del(NULL, inode);
4320
4321         if (!rc && (ia_valid & ATTR_MODE))
4322                 rc = ext4_acl_chmod(inode);
4323
4324 err_out:
4325         ext4_std_error(inode->i_sb, error);
4326         if (!error)
4327                 error = rc;
4328         return error;
4329 }
4330
4331 int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4332                  struct kstat *stat)
4333 {
4334         struct inode *inode;
4335         unsigned long delalloc_blocks;
4336
4337         inode = dentry->d_inode;
4338         generic_fillattr(inode, stat);
4339
4340         /*
4341          * We can't update i_blocks if the block allocation is delayed;
4342          * otherwise, in the case of a system crash before the real block
4343          * allocation is done, we will have i_blocks inconsistent with
4344          * on-disk file blocks.
4345          * We always keep i_blocks updated together with real
4346          * allocation. But so as not to confuse the user, stat
4347          * will return the blocks that include the delayed allocation
4348          * blocks for this file.
4349          */
4350         delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
4351
4352         stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4353         return 0;
4354 }
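
/*
 * Worked example (illustrative): with a 4KiB block size
 * (s_blocksize_bits == 12) and 3 delayed-allocation blocks
 * outstanding, the adjustment above adds (3 << 12) >> 9 == 24
 * 512-byte sectors to stat->blocks, so userspace already sees the
 * space the file will consume once the blocks are really allocated.
 */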
4355
4356 static int ext4_index_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4357 {
4358         if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4359                 return ext4_ind_trans_blocks(inode, nrblocks, chunk);
4360         return ext4_ext_index_trans_blocks(inode, nrblocks, chunk);
4361 }
4362
4363 /*
4364  * Account for index blocks, block group bitmaps and block group
4365  * descriptor blocks if both data blocks and index blocks are modified.
4366  * In the worst case, the index blocks are spread over different block groups.
4367  *
4368  * If the data blocks are discontiguous, they may spread over different
4369  * block groups too. Even if they are contiguous, with flexbg they
4370  * could still cross a block group boundary.
4371  *
4372  * Also account for superblock, inode, quota and xattr blocks.
4373  */
4374 static int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
4375 {
4376         ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4377         int gdpblocks;
4378         int idxblocks;
4379         int ret = 0;
4380
4381         /*
4382          * How many index blocks do we need to touch to modify nrblocks?
4383          * The "Chunk" flag indicates whether the nrblocks are
4384          * physically contiguous on disk.
4385          *
4386          * Direct IO and fallocate call get_block to allocate a
4387          * single extent at a time, so they can set the "Chunk" flag.
4388          */
4389         idxblocks = ext4_index_trans_blocks(inode, nrblocks, chunk);
4390
4391         ret = idxblocks;
4392
4393         /*
4394          * Now let's see how many group bitmaps and group descriptors need
4395          * to be accounted for.
4396          */
4397         groups = idxblocks;
4398         if (chunk)
4399                 groups += 1;
4400         else
4401                 groups += nrblocks;
4402
4403         gdpblocks = groups;
4404         if (groups > ngroups)
4405                 groups = ngroups;
4406         if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4407                 gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4408
4409         /* bitmaps and block group descriptor blocks */
4410         ret += groups + gdpblocks;
4411
4412         /* Blocks for super block, inode, quota and xattr blocks */
4413         ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4414
4415         return ret;
4416 }
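
/*
 * Worked example (illustrative): for a contiguous chunk (chunk == 1)
 * that needs idxblocks == 2 index blocks, groups = 2 + 1 = 3, so in
 * the worst case we reserve 2 (index) + 3 (bitmaps) + 3 (group
 * descriptors) + EXT4_META_TRANS_BLOCKS() credits, with the bitmap
 * and descriptor terms clamped to the file system's actual group and
 * descriptor-block counts.
 */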
4417
4418 /*
4419  * Calculate the total number of credits to reserve to fit
4420  * the modification of a single page into a single transaction,
4421  * which may include multiple chunks of block allocations.
4422  *
4423  * This could be called via ext4_write_begin()
4424  *
4425  * We need to consider the worst case, when we allocate
4426  * one new block per extent.
4427  */
4428 int ext4_writepage_trans_blocks(struct inode *inode)
4429 {
4430         int bpp = ext4_journal_blocks_per_page(inode);
4431         int ret;
4432
4433         ret = ext4_meta_trans_blocks(inode, bpp, 0);
4434
4435         /* Account for data blocks for journalled mode */
4436         if (ext4_should_journal_data(inode))
4437                 ret += bpp;
4438         return ret;
4439 }
4440
4441 /*
4442  * Calculate the journal credits for a chunk of data modification.
4443  *
4444  * This is called from DIO, fallocate or whatever else calls
4445  * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4446  *
4447  * Journal buffers for data blocks are not included here, as DIO
4448  * and fallocate do not need to journal data buffers.
4449  */
4450 int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4451 {
4452         return ext4_meta_trans_blocks(inode, nrblocks, 1);
4453 }
4454
4455 /*
4456  * The caller must have previously called ext4_reserve_inode_write().
4457  * Given this, we know that the caller already has write access to iloc->bh.
4458  */
4459 int ext4_mark_iloc_dirty(handle_t *handle,
4460                          struct inode *inode, struct ext4_iloc *iloc)
4461 {
4462         int err = 0;
4463
4464         if (test_opt(inode->i_sb, I_VERSION))
4465                 inode_inc_iversion(inode);
4466
4467         /* the do_update_inode consumes one bh->b_count */
4468         get_bh(iloc->bh);
4469
4470         /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4471         err = ext4_do_update_inode(handle, inode, iloc);
4472         put_bh(iloc->bh);
4473         return err;
4474 }
4475
4476 /*
4477  * On success, we end up with an outstanding reference count against
4478  * iloc->bh.  This _must_ be cleaned up later.
4479  */
4480
4481 int
4482 ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4483                          struct ext4_iloc *iloc)
4484 {
4485         int err;
4486
4487         err = ext4_get_inode_loc(inode, iloc);
4488         if (!err) {
4489                 BUFFER_TRACE(iloc->bh, "get_write_access");
4490                 err = ext4_journal_get_write_access(handle, iloc->bh);
4491                 if (err) {
4492                         brelse(iloc->bh);
4493                         iloc->bh = NULL;
4494                 }
4495         }
4496         ext4_std_error(inode->i_sb, err);
4497         return err;
4498 }
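
/*
 * Typical pairing (illustrative sketch): callers reserve, modify the
 * in-core inode, then mark the iloc dirty, e.g.:
 *
 *	err = ext4_reserve_inode_write(handle, inode, &iloc);
 *	if (!err) {
 *		... update the in-core inode ...
 *		err = ext4_mark_iloc_dirty(handle, inode, &iloc);
 *	}
 */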
4499
4500 /*
4501  * Expand an inode by new_extra_isize bytes.
4502  * Returns 0 on success or negative error number on failure.
4503  */
4504 static int ext4_expand_extra_isize(struct inode *inode,
4505                                    unsigned int new_extra_isize,
4506                                    struct ext4_iloc iloc,
4507                                    handle_t *handle)
4508 {
4509         struct ext4_inode *raw_inode;
4510         struct ext4_xattr_ibody_header *header;
4511
4512         if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4513                 return 0;
4514
4515         raw_inode = ext4_raw_inode(&iloc);
4516
4517         header = IHDR(inode, raw_inode);
4518
4519         /* No extended attributes present */
4520         if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4521             header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4522                 memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4523                         new_extra_isize);
4524                 EXT4_I(inode)->i_extra_isize = new_extra_isize;
4525                 return 0;
4526         }
4527
4528         /* try to expand with EAs present */
4529         return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4530                                           raw_inode, handle);
4531 }
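
/*
 * Illustrative layout: with a 256-byte on-disk inode, everything past
 * EXT4_GOOD_OLD_INODE_SIZE (128 bytes) is i_extra_isize bytes of
 * extended fields followed, if present, by in-inode extended
 * attributes.  Growing i_extra_isize therefore means shifting those
 * xattrs towards the end of the inode, which is what
 * ext4_expand_extra_isize_ea() takes care of.
 */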
4532
4533 /*
4534  * What we do here is to mark the in-core inode as clean with respect to inode
4535  * dirtiness (it may still be data-dirty).
4536  * This means that the in-core inode may be reaped by prune_icache
4537  * without having to perform any I/O.  This is a very good thing,
4538  * because *any* task may call prune_icache - even ones which
4539  * have a transaction open against a different journal.
4540  *
4541  * Is this cheating?  Not really.  Sure, we haven't written the
4542  * inode out, but prune_icache isn't a user-visible syncing function.
4543  * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4544  * we start and wait on commits.
4545  *
4546  * Is this efficient/effective?  Well, we're being nice to the system
4547  * by cleaning up our inodes proactively so they can be reaped
4548  * without I/O.  But we are potentially leaving up to five seconds'
4549  * worth of inodes floating about which prune_icache wants us to
4550  * write out.  One way to fix that would be to get prune_icache()
4551  * to do a write_super() to free up some memory.  It has the desired
4552  * effect.
4553  */
4554 int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4555 {
4556         struct ext4_iloc iloc;
4557         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4558         static unsigned int mnt_count;
4559         int err, ret;
4560
4561         might_sleep();
4562         trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4563         err = ext4_reserve_inode_write(handle, inode, &iloc);
4564         if (ext4_handle_valid(handle) &&
4565             EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4566             !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4567                 /*
4568                  * We need extra buffer credits since we may write into EA block
4569                  * with this same handle. If journal_extend fails, then it will
4570                  * only result in a minor loss of functionality for that inode.
4571                  * If this is felt to be critical, then e2fsck should be run to
4572                  * force a large enough s_min_extra_isize.
4573                  */
4574                 if ((jbd2_journal_extend(handle,
4575                              EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
4576                         ret = ext4_expand_extra_isize(inode,
4577                                                       sbi->s_want_extra_isize,
4578                                                       iloc, handle);
4579                         if (ret) {
4580                                 ext4_set_inode_state(inode,
4581                                                      EXT4_STATE_NO_EXPAND);
4582                                 if (mnt_count !=
4583                                         le16_to_cpu(sbi->s_es->s_mnt_count)) {
4584                                         ext4_warning(inode->i_sb,
4585                                         "Unable to expand inode %lu. Delete"
4586                                         " some EAs or run e2fsck.",
4587                                         inode->i_ino);
4588                                         mnt_count =
4589                                           le16_to_cpu(sbi->s_es->s_mnt_count);
4590                                 }
4591                         }
4592                 }
4593         }
4594         if (!err)
4595                 err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4596         return err;
4597 }
4598
4599 /*
4600  * ext4_dirty_inode() is called from __mark_inode_dirty()
4601  *
4602  * We're really interested in the case where a file is being extended.
4603  * i_size has been changed by generic_commit_write() and we thus need
4604  * to include the updated inode in the current transaction.
4605  *
4606  * Also, dquot_alloc_block() will always dirty the inode when blocks
4607  * are allocated to the file.
4608  *
4609  * If the inode is marked synchronous, we don't honour that here - doing
4610  * so would cause a commit on atime updates, which we don't bother doing.
4611  * We handle synchronous inodes at the highest possible level.
4612  */
4613 void ext4_dirty_inode(struct inode *inode, int flags)
4614 {
4615         handle_t *handle;
4616
4617         handle = ext4_journal_start(inode, 2);
4618         if (IS_ERR(handle))
4619                 goto out;
4620
4621         ext4_mark_inode_dirty(handle, inode);
4622
4623         ext4_journal_stop(handle);
4624 out:
4625         return;
4626 }
4627
4628 #if 0
4629 /*
4630  * Bind an inode's backing buffer_head into this transaction, to prevent
4631  * it from being flushed to disk early.  Unlike
4632  * ext4_reserve_inode_write, this leaves behind no bh reference and
4633  * returns no iloc structure, so the caller needs to repeat the iloc
4634  * lookup to mark the inode dirty later.
4635  */
4636 static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4637 {
4638         struct ext4_iloc iloc;
4639
4640         int err = 0;
4641         if (handle) {
4642                 err = ext4_get_inode_loc(inode, &iloc);
4643                 if (!err) {
4644                         BUFFER_TRACE(iloc.bh, "get_write_access");
4645                         err = jbd2_journal_get_write_access(handle, iloc.bh);
4646                         if (!err)
4647                                 err = ext4_handle_dirty_metadata(handle,
4648                                                                  NULL,
4649                                                                  iloc.bh);
4650                         brelse(iloc.bh);
4651                 }
4652         }
4653         ext4_std_error(inode->i_sb, err);
4654         return err;
4655 }
4656 #endif
4657
4658 int ext4_change_inode_journal_flag(struct inode *inode, int val)
4659 {
4660         journal_t *journal;
4661         handle_t *handle;
4662         int err;
4663
4664         /*
4665          * We have to be very careful here: changing a data block's
4666          * journaling status dynamically is dangerous.  If we write a
4667          * data block to the journal, change the status and then delete
4668          * that block, we risk forgetting to revoke the old log record
4669          * from the journal and so a subsequent replay can corrupt data.
4670          * So, first we make sure that the journal is empty and that
4671          * nobody is changing anything.
4672          */
4673
4674         journal = EXT4_JOURNAL(inode);
4675         if (!journal)
4676                 return 0;
4677         if (is_journal_aborted(journal))
4678                 return -EROFS;
4679
4680         jbd2_journal_lock_updates(journal);
4681         jbd2_journal_flush(journal);
4682
4683         /*
4684          * OK, there are no updates running now, and all cached data is
4685          * synced to disk.  We are now in a completely consistent state
4686          * which doesn't have anything in the journal, and we know that
4687          * no filesystem updates are running, so it is safe to modify
4688          * the inode's in-core data-journaling state flag now.
4689          */
4690
4691         if (val)
4692                 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4693         else
4694                 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4695         ext4_set_aops(inode);
4696
4697         jbd2_journal_unlock_updates(journal);
4698
4699         /* Finally we can mark the inode as dirty. */
4700
4701         handle = ext4_journal_start(inode, 1);
4702         if (IS_ERR(handle))
4703                 return PTR_ERR(handle);
4704
4705         err = ext4_mark_inode_dirty(handle, inode);
4706         ext4_handle_sync(handle);
4707         ext4_journal_stop(handle);
4708         ext4_std_error(inode->i_sb, err);
4709
4710         return err;
4711 }
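
/*
 * Note (assumption about the caller): this is typically reached when
 * userspace toggles the journalled-data attribute on a file, e.g. via
 * chattr +j / -j, which ends up in the FS_IOC_SETFLAGS ioctl path.
 */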
4712
4713 static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
4714 {
4715         return !buffer_mapped(bh);
4716 }
4717
4718 int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
4719 {
4720         struct page *page = vmf->page;
4721         loff_t size;
4722         unsigned long len;
4723         int ret;
4724         struct file *file = vma->vm_file;
4725         struct inode *inode = file->f_path.dentry->d_inode;
4726         struct address_space *mapping = inode->i_mapping;
4727         handle_t *handle;
4728         get_block_t *get_block;
4729         int retries = 0;
4730
4731         /*
4732          * This check is racy but catches the common case. We rely on
4733          * __block_page_mkwrite() to do a reliable check.
4734          */
4735         vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
4736         /* Delalloc case is easy... */
4737         if (test_opt(inode->i_sb, DELALLOC) &&
4738             !ext4_should_journal_data(inode) &&
4739             !ext4_nonda_switch(inode->i_sb)) {
4740                 do {
4741                         ret = __block_page_mkwrite(vma, vmf,
4742                                                    ext4_da_get_block_prep);
4743                 } while (ret == -ENOSPC &&
4744                        ext4_should_retry_alloc(inode->i_sb, &retries));
4745                 goto out_ret;
4746         }
4747
4748         lock_page(page);
4749         size = i_size_read(inode);
4750         /* Page got truncated from under us? */
4751         if (page->mapping != mapping || page_offset(page) > size) {
4752                 unlock_page(page);
4753                 ret = VM_FAULT_NOPAGE;
4754                 goto out;
4755         }
4756
4757         if (page->index == size >> PAGE_CACHE_SHIFT)
4758                 len = size & ~PAGE_CACHE_MASK;
4759         else
4760                 len = PAGE_CACHE_SIZE;
4761         /*
4762          * Return if we have all the buffers mapped. This avoids the need to do
4763          * journal_start/journal_stop which can block and take a long time.
4764          */
4765         if (page_has_buffers(page)) {
4766                 if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
4767                                         ext4_bh_unmapped)) {
4768                         /* Wait so that we don't change page under IO */
4769                         wait_on_page_writeback(page);
4770                         ret = VM_FAULT_LOCKED;
4771                         goto out;
4772                 }
4773         }
4774         unlock_page(page);
4775         /* OK, we need to fill the hole... */
4776         if (ext4_should_dioread_nolock(inode))
4777                 get_block = ext4_get_block_write;
4778         else
4779                 get_block = ext4_get_block;
4780 retry_alloc:
4781         handle = ext4_journal_start(inode, ext4_writepage_trans_blocks(inode));
4782         if (IS_ERR(handle)) {
4783                 ret = VM_FAULT_SIGBUS;
4784                 goto out;
4785         }
4786         ret = __block_page_mkwrite(vma, vmf, get_block);
4787         if (!ret && ext4_should_journal_data(inode)) {
4788                 if (walk_page_buffers(handle, page_buffers(page), 0,
4789                           PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
4790                         unlock_page(page);
4791                         ret = VM_FAULT_SIGBUS;
4792                         goto out;
4793                 }
4794                 ext4_set_inode_state(inode, EXT4_STATE_JDATA);
4795         }
4796         ext4_journal_stop(handle);
4797         if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
4798                 goto retry_alloc;
4799 out_ret:
4800         ret = block_page_mkwrite_return(ret);
4801 out:
4802         return ret;
4803 }
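
/*
 * Note (assumption about the wiring): ext4_page_mkwrite() is installed
 * as the ->page_mkwrite handler in ext4's vm_operations_struct, so it
 * runs on the first write fault to a shared-writable mapped page,
 * giving ext4 a chance to allocate blocks and start a transaction
 * before the page is dirtied.
 */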