gfs2: Clean up gfs2_unstuff_dinode
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 493a83e..84ec053 100644
@@ -210,7 +210,7 @@ void gfs2_set_inode_flags(struct inode *inode)
 
 /**
  * do_gfs2_set_flags - set flags on an inode
- * @filp: file pointer
+ * @inode: The inode
  * @reqflags: The flags to set
  * @mask: Indicates which flags are valid
  * @fsflags: The FS_* inode flags passed in
@@ -427,22 +427,25 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
        struct gfs2_alloc_parms ap = { .aflags = 0, };
        u64 offset = page_offset(page);
        unsigned int data_blocks, ind_blocks, rblocks;
+       vm_fault_t ret = VM_FAULT_LOCKED;
        struct gfs2_holder gh;
        unsigned int length;
        loff_t size;
-       int ret;
+       int err;
 
        sb_start_pagefault(inode->i_sb);
 
        gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
-       ret = gfs2_glock_nq(&gh);
-       if (ret)
+       err = gfs2_glock_nq(&gh);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
                goto out_uninit;
+       }
 
        /* Check page index against inode size */
        size = i_size_read(inode);
        if (offset >= size) {
-               ret = -EINVAL;
+               ret = VM_FAULT_SIGBUS;
                goto out_unlock;
        }
 
@@ -450,8 +453,8 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
        file_update_time(vmf->vma->vm_file);
 
        /* page is wholly or partially inside EOF */
-       if (offset > size - PAGE_SIZE)
-               length = offset_in_page(size);
+       if (size - offset < PAGE_SIZE)
+               length = size - offset;
        else
                length = PAGE_SIZE;
 
@@ -469,24 +472,30 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
            !gfs2_write_alloc_required(ip, offset, length)) {
                lock_page(page);
                if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
-                       ret = -EAGAIN;
+                       ret = VM_FAULT_NOPAGE;
                        unlock_page(page);
                }
                goto out_unlock;
        }
 
-       ret = gfs2_rindex_update(sdp);
-       if (ret)
+       err = gfs2_rindex_update(sdp);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
                goto out_unlock;
+       }
 
        gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks);
        ap.target = data_blocks + ind_blocks;
-       ret = gfs2_quota_lock_check(ip, &ap);
-       if (ret)
+       err = gfs2_quota_lock_check(ip, &ap);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
                goto out_unlock;
-       ret = gfs2_inplace_reserve(ip, &ap);
-       if (ret)
+       }
+       err = gfs2_inplace_reserve(ip, &ap);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
                goto out_quota_unlock;
+       }
 
        rblocks = RES_DINODE + ind_blocks;
        if (gfs2_is_jdata(ip))
@@ -495,28 +504,38 @@ static vm_fault_t gfs2_page_mkwrite(struct vm_fault *vmf)
                rblocks += RES_STATFS + RES_QUOTA;
                rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
        }
-       ret = gfs2_trans_begin(sdp, rblocks, 0);
-       if (ret)
+       err = gfs2_trans_begin(sdp, rblocks, 0);
+       if (err) {
+               ret = block_page_mkwrite_return(err);
                goto out_trans_fail;
+       }
+
+       /* Unstuff, if required, and allocate backing blocks for page */
+       if (gfs2_is_stuffed(ip)) {
+               err = gfs2_unstuff_dinode(ip);
+               if (err) {
+                       ret = block_page_mkwrite_return(err);
+                       goto out_trans_end;
+               }
+       }
 
        lock_page(page);
-       ret = -EAGAIN;
        /* If truncated, we must retry the operation, we may have raced
         * with the glock demotion code.
         */
-       if (!PageUptodate(page) || page->mapping != inode->i_mapping)
-               goto out_trans_end;
+       if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
+               ret = VM_FAULT_NOPAGE;
+               goto out_page_locked;
+       }
 
-       /* Unstuff, if required, and allocate backing blocks for page */
-       ret = 0;
-       if (gfs2_is_stuffed(ip))
-               ret = gfs2_unstuff_dinode(ip, page);
-       if (ret == 0)
-               ret = gfs2_allocate_page_backing(page, length);
+       err = gfs2_allocate_page_backing(page, length);
+       if (err)
+               ret = block_page_mkwrite_return(err);
 
-out_trans_end:
-       if (ret)
+out_page_locked:
+       if (ret != VM_FAULT_LOCKED)
                unlock_page(page);
+out_trans_end:
        gfs2_trans_end(sdp);
 out_trans_fail:
        gfs2_inplace_release(ip);
@@ -526,12 +545,12 @@ out_unlock:
        gfs2_glock_dq(&gh);
 out_uninit:
        gfs2_holder_uninit(&gh);
-       if (ret == 0) {
+       if (ret == VM_FAULT_LOCKED) {
                set_page_dirty(page);
                wait_for_stable_page(page);
        }
        sb_end_pagefault(inode->i_sb);
-       return block_page_mkwrite_return(ret);
+       return ret;
 }
 
 static vm_fault_t gfs2_fault(struct vm_fault *vmf)
@@ -962,7 +981,7 @@ static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
        gfs2_trans_add_meta(ip->i_gl, dibh);
 
        if (gfs2_is_stuffed(ip)) {
-               error = gfs2_unstuff_dinode(ip, NULL);
+               error = gfs2_unstuff_dinode(ip);
                if (unlikely(error))
                        goto out;
        }
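
For reference, the error-handling pattern in the gfs2_page_mkwrite() hunks above funnels every helper's errno-style return (now kept in "err") through block_page_mkwrite_return(), so the function always hands the fault core a vm_fault_t in "ret" instead of a raw -Exxx value. Below is a minimal, self-contained userspace sketch of that errno-to-vm_fault_t translation; the VM_FAULT_* constants and the exact errno cases are simplified stand-ins for the real kernel definitions, not a copy of <linux/buffer_head.h>.

/* Standalone sketch of the errno -> vm_fault_t translation used above.
 * The constants are illustrative stand-ins, not the kernel's real values.
 */
#include <errno.h>
#include <stdio.h>

typedef unsigned int vm_fault_t;

#define VM_FAULT_LOCKED 0x0001	/* page is locked and ready to dirty   */
#define VM_FAULT_NOPAGE 0x0002	/* no page installed; fault will retry */
#define VM_FAULT_OOM    0x0004	/* out of memory                       */
#define VM_FAULT_SIGBUS 0x0008	/* hard failure, deliver SIGBUS        */

/* Mirrors the intent of block_page_mkwrite_return(): 0 means the page
 * is still locked and writable; any error is mapped to a fault code
 * that the page-fault core understands.
 */
static vm_fault_t sketch_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO, ... */
	return VM_FAULT_SIGBUS;
}

int main(void)
{
	int samples[] = { 0, -EAGAIN, -ENOMEM, -ENOSPC };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("err %4d -> vm_fault_t 0x%04x\n",
		       samples[i], sketch_page_mkwrite_return(samples[i]));
	return 0;
}

With this mapping in mind, the patch's use of a separate "err" for helper returns and a vm_fault_t "ret" (initialized to VM_FAULT_LOCKED) keeps the success path and the unlock logic at out_page_locked consistent: the page is only left locked and dirtied when ret is still VM_FAULT_LOCKED at the end.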