         * Note: assumes we have exclusive access to this mapping either
         *       through inode->i_mutex or some other mechanism.
         */
-       if (page->index == 0)
-               invalidate_inode_pages2_range(inode->i_mapping, PAGE_CACHE_SIZE, -1);
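+       /*
+        * invalidate_inode_pages2_range() takes page indices, not byte
+        * offsets: start at index 1 to drop every cached page except
+        * page 0.  If the invalidation fails, fall back to flagging
+        * the whole mapping for invalidation on next access.
+        */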
+       if (page->index == 0 && invalidate_inode_pages2_range(inode->i_mapping, 1, -1) < 0) {
+               /* Should never happen */
+               nfs_zap_mapping(inode, inode->i_mapping);
+       }
        unlock_page(page);
        return 0;
  error:
 
                        if (dreq->commit_data != NULL)
                                nfs_commit_free(dreq->commit_data);
                        nfs_direct_free_writedata(dreq);
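+                       /*
+                        * The direct write bypassed the page cache, so
+                        * flag the mapping for revalidation on next use.
+                        */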
+                       nfs_zap_mapping(inode, inode->i_mapping);
                        nfs_direct_complete(dreq);
        }
 }
 {
        nfs_end_data_update(inode);
        nfs_direct_free_writedata(dreq);
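+       /* The direct write is done: flag the mapping for revalidation */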
+       nfs_zap_mapping(inode, inode->i_mapping);
        nfs_direct_complete(dreq);
 }
 #endif
 
        retval = nfs_direct_write(iocb, (unsigned long) buf, count, pos);
 
-       /*
-        * XXX: nfs_end_data_update() already ensures this file's
-        *      cached data is subsequently invalidated.  Do we really
-        *      need to call invalidate_inode_pages2() again here?
-        *
-        *      For aio writes, this invalidation will almost certainly
-        *      occur before the writes complete.  Kind of racey.
-        */
-       if (mapping->nrpages)
-               invalidate_inode_pages2(mapping);
-
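+       /*
+        * The mapping is now zapped in nfs_direct_write_complete(), after
+        * the write has actually finished, so aio writes no longer risk
+        * having their data invalidated before completion.
+        */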
        if (retval > 0)
                iocb->ki_pos = pos + retval;
 
 
        spin_unlock(&inode->i_lock);
 }
 
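+/*
+ * nfs_zap_mapping - flag a mapping for invalidation on next access
+ *
+ * Sets NFS_INO_INVALID_DATA so that nfs_revalidate_mapping() will
+ * flush and invalidate the page cache the next time it runs.
+ */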
+void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
+{
+       if (mapping->nrpages != 0) {
+               spin_lock(&inode->i_lock);
+               NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
+               spin_unlock(&inode->i_lock);
+       }
+}
+
 static void nfs_zap_acl_cache(struct inode *inode)
 {
        void (*clear_acl_cache)(struct inode *);
        if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
                        || nfs_attribute_timeout(inode))
                ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
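+       /* Give up if the attribute revalidation failed */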
+       if (ret < 0)
+               goto out;
 
        if (nfsi->cache_validity & NFS_INO_INVALID_DATA) {
-               nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
-               if (S_ISREG(inode->i_mode))
-                       nfs_sync_mapping(mapping);
-               invalidate_inode_pages2(mapping);
-
+               if (mapping->nrpages != 0) {
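+                       /* Flush dirty pages first so invalidation cannot lose writes */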
+                       if (S_ISREG(inode->i_mode)) {
+                               ret = nfs_sync_mapping(mapping);
+                               if (ret < 0)
+                                       goto out;
+                       }
+                       ret = invalidate_inode_pages2(mapping);
+                       if (ret < 0)
+                               goto out;
+               }
                spin_lock(&inode->i_lock);
                nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
                if (S_ISDIR(inode->i_mode)) {
                        memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
                        /* This ensures we revalidate child dentries */
                        nfsi->cache_change_attribute = jiffies;
                }
                spin_unlock(&inode->i_lock);
 
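+               /* Only bump the invalidation counter once it has succeeded */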
+               nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
                dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
                                inode->i_sb->s_id,
                                (long long)NFS_FILEID(inode));
        }
+out:
        return ret;
 }
 
 
  * linux/fs/nfs/inode.c
  */
 extern int nfs_sync_mapping(struct address_space *mapping);
+extern void nfs_zap_mapping(struct inode *inode, struct address_space *mapping);
 extern void nfs_zap_caches(struct inode *);
 extern struct inode *nfs_fhget(struct super_block *, struct nfs_fh *,
                                struct nfs_fattr *);