xfs: move xfs_get_extsz_hint() and kill xfs_rw.h
1 /*
2  * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3  * All Rights Reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it would be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write the Free Software Foundation,
16  * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17  */
18
19 #include "xfs.h"
20 #include "xfs_fs.h"
21 #include "xfs_types.h"
22 #include "xfs_bit.h"
23 #include "xfs_log.h"
24 #include "xfs_trans.h"
25 #include "xfs_sb.h"
26 #include "xfs_ag.h"
27 #include "xfs_dir2.h"
28 #include "xfs_mount.h"
29 #include "xfs_da_btree.h"
30 #include "xfs_bmap_btree.h"
31 #include "xfs_ialloc_btree.h"
32 #include "xfs_dinode.h"
33 #include "xfs_inode.h"
34 #include "xfs_inode_item.h"
35 #include "xfs_itable.h"
36 #include "xfs_ialloc.h"
37 #include "xfs_alloc.h"
38 #include "xfs_bmap.h"
39 #include "xfs_acl.h"
40 #include "xfs_attr.h"
41 #include "xfs_error.h"
42 #include "xfs_quota.h"
43 #include "xfs_utils.h"
44 #include "xfs_rtalloc.h"
45 #include "xfs_trans_space.h"
46 #include "xfs_log_priv.h"
47 #include "xfs_filestream.h"
48 #include "xfs_vnodeops.h"
49 #include "xfs_trace.h"
50
51 /*
52  * The maximum pathlen is 1024 bytes. Since the minimum file system
53  * blocksize is 512 bytes, we can get a max of 2 extents back from
54  * bmapi.
55  */
56 #define SYMLINK_MAPS 2
57
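/*
 * Read the target of a symlink whose value lives in mapped blocks rather
 * than inline in the inode data fork; used by xfs_readlink() below.
 */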
58 STATIC int
59 xfs_readlink_bmap(
60         xfs_inode_t     *ip,
61         char            *link)
62 {
63         xfs_mount_t     *mp = ip->i_mount;
64         int             pathlen = ip->i_d.di_size;
65         int             nmaps = SYMLINK_MAPS;
66         xfs_bmbt_irec_t mval[SYMLINK_MAPS];
67         xfs_daddr_t     d;
68         int             byte_cnt;
69         int             n;
70         xfs_buf_t       *bp;
71         int             error = 0;
72
73         error = xfs_bmapi_read(ip, 0, XFS_B_TO_FSB(mp, pathlen), mval, &nmaps,
74                                0);
75         if (error)
76                 goto out;
77
78         for (n = 0; n < nmaps; n++) {
79                 d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
80                 byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
81
82                 bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
83                                   XBF_MAPPED);
84                 if (!bp)
85                         return XFS_ERROR(ENOMEM);
86                 error = bp->b_error;
87                 if (error) {
88                         xfs_buf_ioerror_alert(bp, __func__);
89                         xfs_buf_relse(bp);
90                         goto out;
91                 }
92                 if (pathlen < byte_cnt)
93                         byte_cnt = pathlen;
94                 pathlen -= byte_cnt;
95
96                 memcpy(link, bp->b_addr, byte_cnt);
97                 xfs_buf_relse(bp);
98         }
99
100         link[ip->i_d.di_size] = '\0';
101         error = 0;
102
103  out:
104         return error;
105 }
106
107 int
108 xfs_readlink(
109         xfs_inode_t     *ip,
110         char            *link)
111 {
112         xfs_mount_t     *mp = ip->i_mount;
113         xfs_fsize_t     pathlen;
114         int             error = 0;
115
116         trace_xfs_readlink(ip);
117
118         if (XFS_FORCED_SHUTDOWN(mp))
119                 return XFS_ERROR(EIO);
120
121         xfs_ilock(ip, XFS_ILOCK_SHARED);
122
123         pathlen = ip->i_d.di_size;
124         if (!pathlen)
125                 goto out;
126
127         if (pathlen < 0 || pathlen > MAXPATHLEN) {
128                 xfs_alert(mp, "%s: inode (%llu) bad symlink length (%lld)",
129                          __func__, (unsigned long long) ip->i_ino,
130                          (long long) pathlen);
131                 ASSERT(0);
132                 error = XFS_ERROR(EFSCORRUPTED);
133                 goto out;
134         }
135
136
137         if (ip->i_df.if_flags & XFS_IFINLINE) {
138                 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
139                 link[pathlen] = '\0';
140         } else {
141                 error = xfs_readlink_bmap(ip, link);
142         }
143
144  out:
145         xfs_iunlock(ip, XFS_ILOCK_SHARED);
146         return error;
147 }
148
149 /*
150  * Flags for xfs_free_eofblocks
151  */
152 #define XFS_FREE_EOF_TRYLOCK    (1<<0)
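/*
 * XFS_FREE_EOF_TRYLOCK makes xfs_free_eofblocks() take the iolock with
 * xfs_ilock_nowait() and quietly return 0 if the lock is unavailable;
 * xfs_release() uses it so the last close never blocks (or deadlocks
 * against the mmap_sem) waiting for the iolock.
 */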
153
154 /*
155  * This is called by xfs_inactive to free any blocks beyond eof
156  * when the link count isn't zero and by xfs_dm_punch_hole() when
157  * punching a hole to EOF.
158  */
159 STATIC int
160 xfs_free_eofblocks(
161         xfs_mount_t     *mp,
162         xfs_inode_t     *ip,
163         int             flags)
164 {
165         xfs_trans_t     *tp;
166         int             error;
167         xfs_fileoff_t   end_fsb;
168         xfs_fileoff_t   last_fsb;
169         xfs_filblks_t   map_len;
170         int             nimaps;
171         xfs_bmbt_irec_t imap;
172
173         /*
174          * Figure out if there are any blocks beyond the end
175          * of the file.  If not, then there is nothing to do.
176          */
177         end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
178         last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
179         if (last_fsb <= end_fsb)
180                 return 0;
181         map_len = last_fsb - end_fsb;
182
183         nimaps = 1;
184         xfs_ilock(ip, XFS_ILOCK_SHARED);
185         error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
186         xfs_iunlock(ip, XFS_ILOCK_SHARED);
187
188         if (!error && (nimaps != 0) &&
189             (imap.br_startblock != HOLESTARTBLOCK ||
190              ip->i_delayed_blks)) {
191                 /*
192                  * Attach the dquots to the inode up front.
193                  */
194                 error = xfs_qm_dqattach(ip, 0);
195                 if (error)
196                         return error;
197
198                 /*
199                  * There are blocks after the end of file.
200                  * Free them up now by truncating the file to
201                  * its current size.
202                  */
203                 tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
204
205                 if (flags & XFS_FREE_EOF_TRYLOCK) {
206                         if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
207                                 xfs_trans_cancel(tp, 0);
208                                 return 0;
209                         }
210                 } else {
211                         xfs_ilock(ip, XFS_IOLOCK_EXCL);
212                 }
213
214                 error = xfs_trans_reserve(tp, 0,
215                                           XFS_ITRUNCATE_LOG_RES(mp),
216                                           0, XFS_TRANS_PERM_LOG_RES,
217                                           XFS_ITRUNCATE_LOG_COUNT);
218                 if (error) {
219                         ASSERT(XFS_FORCED_SHUTDOWN(mp));
220                         xfs_trans_cancel(tp, 0);
221                         xfs_iunlock(ip, XFS_IOLOCK_EXCL);
222                         return error;
223                 }
224
225                 xfs_ilock(ip, XFS_ILOCK_EXCL);
226                 xfs_trans_ijoin(tp, ip, 0);
227
228                 /*
229                  * Do not update the on-disk file size.  If we update the
230                  * on-disk file size and then the system crashes before the
231                  * contents of the file are flushed to disk then the files
232                  * may be full of holes (ie NULL files bug).
233                  */
234                 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
235                                               XFS_ISIZE(ip));
236                 if (error) {
237                         /*
238                          * If we get an error at this point we simply don't
239                          * bother truncating the file.
240                          */
241                         xfs_trans_cancel(tp,
242                                          (XFS_TRANS_RELEASE_LOG_RES |
243                                           XFS_TRANS_ABORT));
244                 } else {
245                         error = xfs_trans_commit(tp,
246                                                 XFS_TRANS_RELEASE_LOG_RES);
247                 }
248                 xfs_iunlock(ip, XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL);
249         }
250         return error;
251 }
252
253 /*
254  * Free a symlink that has blocks associated with it.
255  */
256 STATIC int
257 xfs_inactive_symlink_rmt(
258         xfs_inode_t     *ip,
259         xfs_trans_t     **tpp)
260 {
261         xfs_buf_t       *bp;
262         int             committed;
263         int             done;
264         int             error;
265         xfs_fsblock_t   first_block;
266         xfs_bmap_free_t free_list;
267         int             i;
268         xfs_mount_t     *mp;
269         xfs_bmbt_irec_t mval[SYMLINK_MAPS];
270         int             nmaps;
271         xfs_trans_t     *ntp;
272         int             size;
273         xfs_trans_t     *tp;
274
275         tp = *tpp;
276         mp = ip->i_mount;
277         ASSERT(ip->i_d.di_size > XFS_IFORK_DSIZE(ip));
278         /*
279          * We're freeing a symlink that has some
280          * blocks allocated to it.  Free the
281          * blocks here.  We know that we've got
282          * either 1 or 2 extents and that we can
283          * free them all in one bunmapi call.
284          */
285         ASSERT(ip->i_d.di_nextents > 0 && ip->i_d.di_nextents <= 2);
286         if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
287                         XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
288                 ASSERT(XFS_FORCED_SHUTDOWN(mp));
289                 xfs_trans_cancel(tp, 0);
290                 *tpp = NULL;
291                 return error;
292         }
293         /*
294          * Lock the inode, fix the size, and join it to the transaction.
295          * Hold it so that in the normal path we still have it locked for
296          * the second transaction.  In the error paths we need it
297          * held so the cancel won't rele it, see below.
298          */
299         xfs_ilock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
300         size = (int)ip->i_d.di_size;
301         ip->i_d.di_size = 0;
302         xfs_trans_ijoin(tp, ip, 0);
303         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
304         /*
305          * Find the block(s) so we can inval and unmap them.
306          */
307         done = 0;
308         xfs_bmap_init(&free_list, &first_block);
309         nmaps = ARRAY_SIZE(mval);
310         error = xfs_bmapi_read(ip, 0, XFS_B_TO_FSB(mp, size),
311                                 mval, &nmaps, 0);
312         if (error)
313                 goto error0;
314         /*
315          * Invalidate the block(s).
316          */
317         for (i = 0; i < nmaps; i++) {
318                 bp = xfs_trans_get_buf(tp, mp->m_ddev_targp,
319                         XFS_FSB_TO_DADDR(mp, mval[i].br_startblock),
320                         XFS_FSB_TO_BB(mp, mval[i].br_blockcount), 0);
321                 if (!bp) {
322                         error = ENOMEM;
323                         goto error1;
324                 }
325                 xfs_trans_binval(tp, bp);
326         }
327         /*
328          * Unmap the dead block(s) to the free_list.
329          */
330         if ((error = xfs_bunmapi(tp, ip, 0, size, XFS_BMAPI_METADATA, nmaps,
331                         &first_block, &free_list, &done)))
332                 goto error1;
333         ASSERT(done);
334         /*
335          * Commit the first transaction.  This logs the EFI and the inode.
336          */
337         if ((error = xfs_bmap_finish(&tp, &free_list, &committed)))
338                 goto error1;
339         /*
340          * The transaction must have been committed, since there were
341          * actually extents freed by xfs_bunmapi.  See xfs_bmap_finish.
342          * The new tp has the extent freeing and EFDs.
343          */
344         ASSERT(committed);
345         /*
346          * The first xact was committed, so add the inode to the new one.
347          * Mark it dirty so it will be logged and moved forward in the log as
348          * part of every commit.
349          */
350         xfs_trans_ijoin(tp, ip, 0);
351         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
352         /*
353          * Get a new, empty transaction to return to our caller.
354          */
355         ntp = xfs_trans_dup(tp);
356         /*
357          * Commit the transaction containing extent freeing and EFDs.
358          * If we get an error on the commit here or on the reserve below,
359          * we need to unlock the inode since the new transaction doesn't
360          * have the inode attached.
361          */
362         error = xfs_trans_commit(tp, 0);
363         tp = ntp;
364         if (error) {
365                 ASSERT(XFS_FORCED_SHUTDOWN(mp));
366                 goto error0;
367         }
368         /*
369          * transaction commit worked ok so we can drop the extra ticket
370          * reference that we gained in xfs_trans_dup()
371          */
372         xfs_log_ticket_put(tp->t_ticket);
373
374         /*
375          * Remove the memory for extent descriptions (just bookkeeping).
376          */
377         if (ip->i_df.if_bytes)
378                 xfs_idata_realloc(ip, -ip->i_df.if_bytes, XFS_DATA_FORK);
379         ASSERT(ip->i_df.if_bytes == 0);
380         /*
381          * Put an itruncate log reservation in the new transaction
382          * for our caller.
383          */
384         if ((error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
385                         XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT))) {
386                 ASSERT(XFS_FORCED_SHUTDOWN(mp));
387                 goto error0;
388         }
389         /*
390          * Return with the inode locked but not joined to the transaction.
391          */
392         *tpp = tp;
393         return 0;
394
395  error1:
396         xfs_bmap_cancel(&free_list);
397  error0:
398         /*
399          * Have to come here with the inode locked and either
400          * (held and in the transaction) or (not in the transaction).
401          * If the inode isn't held then cancel would iput it, but
402          * that's wrong since this is inactive and the vnode ref
403          * count is 0 already.
404          * Cancel won't do anything to the inode if held, but it still
405          * needs to be locked until the cancel is done, if it was
406          * joined to the transaction.
407          */
408         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
409         xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
410         *tpp = NULL;
411         return error;
412
413 }
414
415 STATIC int
416 xfs_inactive_symlink_local(
417         xfs_inode_t     *ip,
418         xfs_trans_t     **tpp)
419 {
420         int             error;
421
422         ASSERT(ip->i_d.di_size <= XFS_IFORK_DSIZE(ip));
423         /*
424          * We're freeing a symlink which fit into
425          * the inode.  Just free the memory used
426          * to hold the old symlink.
427          */
428         error = xfs_trans_reserve(*tpp, 0,
429                                   XFS_ITRUNCATE_LOG_RES(ip->i_mount),
430                                   0, XFS_TRANS_PERM_LOG_RES,
431                                   XFS_ITRUNCATE_LOG_COUNT);
432
433         if (error) {
434                 xfs_trans_cancel(*tpp, 0);
435                 *tpp = NULL;
436                 return error;
437         }
438         xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
439
440         /*
441          * Zero length symlinks _can_ exist.
442          */
443         if (ip->i_df.if_bytes > 0) {
444                 xfs_idata_realloc(ip,
445                                   -(ip->i_df.if_bytes),
446                                   XFS_DATA_FORK);
447                 ASSERT(ip->i_df.if_bytes == 0);
448         }
449         return 0;
450 }
451
452 STATIC int
453 xfs_inactive_attrs(
454         xfs_inode_t     *ip,
455         xfs_trans_t     **tpp)
456 {
457         xfs_trans_t     *tp;
458         int             error;
459         xfs_mount_t     *mp;
460
461         ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
462         tp = *tpp;
463         mp = ip->i_mount;
464         ASSERT(ip->i_d.di_forkoff != 0);
465         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
466         xfs_iunlock(ip, XFS_ILOCK_EXCL);
467         if (error)
468                 goto error_unlock;
469
470         error = xfs_attr_inactive(ip);
471         if (error)
472                 goto error_unlock;
473
474         tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
475         error = xfs_trans_reserve(tp, 0,
476                                   XFS_IFREE_LOG_RES(mp),
477                                   0, XFS_TRANS_PERM_LOG_RES,
478                                   XFS_INACTIVE_LOG_COUNT);
479         if (error)
480                 goto error_cancel;
481
482         xfs_ilock(ip, XFS_ILOCK_EXCL);
483         xfs_trans_ijoin(tp, ip, 0);
484         xfs_idestroy_fork(ip, XFS_ATTR_FORK);
485
486         ASSERT(ip->i_d.di_anextents == 0);
487
488         *tpp = tp;
489         return 0;
490
491 error_cancel:
492         ASSERT(XFS_FORCED_SHUTDOWN(mp));
493         xfs_trans_cancel(tp, 0);
494 error_unlock:
495         *tpp = NULL;
496         xfs_iunlock(ip, XFS_IOLOCK_EXCL);
497         return error;
498 }
499
500 int
501 xfs_release(
502         xfs_inode_t     *ip)
503 {
504         xfs_mount_t     *mp = ip->i_mount;
505         int             error;
506
507         if (!S_ISREG(ip->i_d.di_mode) || (ip->i_d.di_mode == 0))
508                 return 0;
509
510         /* If this is a read-only mount, don't do this (would generate I/O) */
511         if (mp->m_flags & XFS_MOUNT_RDONLY)
512                 return 0;
513
514         if (!XFS_FORCED_SHUTDOWN(mp)) {
515                 int truncated;
516
517                 /*
518                  * If we are using filestreams, and we have an unlinked
519                  * file that we are processing the last close on, then nothing
520                  * will be able to reopen and write to this file. Purge this
521                  * inode from the filestreams cache so that it doesn't delay
522                  * teardown of the inode.
523                  */
524                 if ((ip->i_d.di_nlink == 0) && xfs_inode_is_filestream(ip))
525                         xfs_filestream_deassociate(ip);
526
527                 /*
528                  * If we previously truncated this file and removed old data
529                  * in the process, we want to initiate "early" writeout on
530                  * the last close.  This is an attempt to combat the notorious
531                  * NULL files problem which is particularly noticeable from a
532                  * truncate down, buffered (re-)write (delalloc), followed by
533                  * a crash.  What we are effectively doing here is
534                  * significantly reducing the time window where we'd otherwise
535                  * be exposed to that problem.
536                  */
537                 truncated = xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED);
538                 if (truncated) {
539                         xfs_iflags_clear(ip, XFS_IDIRTY_RELEASE);
540                         if (VN_DIRTY(VFS_I(ip)) && ip->i_delayed_blks > 0)
541                                 xfs_flush_pages(ip, 0, -1, XBF_ASYNC, FI_NONE);
542                 }
543         }
544
545         if (ip->i_d.di_nlink == 0)
546                 return 0;
547
548         if ((S_ISREG(ip->i_d.di_mode) &&
549              (VFS_I(ip)->i_size > 0 ||
550               (VN_CACHED(VFS_I(ip)) > 0 || ip->i_delayed_blks > 0)) &&
551              (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
552             (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
553
554                 /*
555                  * If we can't get the iolock just skip truncating the blocks
556                  * past EOF because we could deadlock with the mmap_sem
557                  * otherwise.  We'll get another chance to drop them once the
558                  * last reference to the inode is dropped, so we'll never leak
559                  * blocks permanently.
560                  *
561                  * Further, if the inode is being opened, written and
562                  * closed frequently and we have delayed allocation blocks
563                  * outstanding (e.g. streaming writes from the NFS server),
564                  * then truncating the blocks past EOF will cause
565                  * fragmentation to occur.
566                  *
567                  * In this case don't do the truncation, either, but we have to
568                  * be careful how we detect this case. Blocks beyond EOF show
569                  * up as i_delayed_blks even when the inode is clean, so we
570                  * need to truncate them away first before checking for a dirty
571                  * release. Hence on the first dirty close we will still remove
572                  * the speculative allocation, but after that we will leave it
573                  * in place.
574                  */
575                 if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
576                         return 0;
577
578                 error = xfs_free_eofblocks(mp, ip,
579                                            XFS_FREE_EOF_TRYLOCK);
580                 if (error)
581                         return error;
582
583                 /* delalloc blocks after truncation means it really is dirty */
584                 if (ip->i_delayed_blks)
585                         xfs_iflags_set(ip, XFS_IDIRTY_RELEASE);
586         }
587         return 0;
588 }
589
590 /*
591  * xfs_inactive
592  *
593  * This is called when the vnode reference count for the vnode
594  * goes to zero.  If the file has been unlinked, then it must
595  * now be truncated.  Also, we clear all of the read-ahead state
596  * kept for the inode here since the file is now closed.
597  */
598 int
599 xfs_inactive(
600         xfs_inode_t     *ip)
601 {
602         xfs_bmap_free_t free_list;
603         xfs_fsblock_t   first_block;
604         int             committed;
605         xfs_trans_t     *tp;
606         xfs_mount_t     *mp;
607         int             error;
608         int             truncate;
609
610         /*
611          * If the inode is already free, then there can be nothing
612          * to clean up here.
613          */
614         if (ip->i_d.di_mode == 0 || is_bad_inode(VFS_I(ip))) {
615                 ASSERT(ip->i_df.if_real_bytes == 0);
616                 ASSERT(ip->i_df.if_broot_bytes == 0);
617                 return VN_INACTIVE_CACHE;
618         }
619
620         /*
621          * Only do a truncate if it's a regular file with
622          * some actual space in it.  It's OK to look at the
623          * inode's fields without the lock because we're the
624          * only one with a reference to the inode.
625          */
626         truncate = ((ip->i_d.di_nlink == 0) &&
627             ((ip->i_d.di_size != 0) || XFS_ISIZE(ip) != 0 ||
628              (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
629             S_ISREG(ip->i_d.di_mode));
630
631         mp = ip->i_mount;
632
633         error = 0;
634
635         /* If this is a read-only mount, don't do this (would generate I/O) */
636         if (mp->m_flags & XFS_MOUNT_RDONLY)
637                 goto out;
638
639         if (ip->i_d.di_nlink != 0) {
640                 if ((S_ISREG(ip->i_d.di_mode) &&
641                     (VFS_I(ip)->i_size > 0 ||
642                      (VN_CACHED(VFS_I(ip)) > 0 || ip->i_delayed_blks > 0)) &&
643                     (ip->i_df.if_flags & XFS_IFEXTENTS) &&
644                     (!(ip->i_d.di_flags &
645                                 (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
646                      ip->i_delayed_blks != 0))) {
647                         error = xfs_free_eofblocks(mp, ip, 0);
648                         if (error)
649                                 return VN_INACTIVE_CACHE;
650                 }
651                 goto out;
652         }
653
654         ASSERT(ip->i_d.di_nlink == 0);
655
656         error = xfs_qm_dqattach(ip, 0);
657         if (error)
658                 return VN_INACTIVE_CACHE;
659
660         tp = xfs_trans_alloc(mp, XFS_TRANS_INACTIVE);
661         if (truncate) {
662                 xfs_ilock(ip, XFS_IOLOCK_EXCL);
663
664                 error = xfs_trans_reserve(tp, 0,
665                                           XFS_ITRUNCATE_LOG_RES(mp),
666                                           0, XFS_TRANS_PERM_LOG_RES,
667                                           XFS_ITRUNCATE_LOG_COUNT);
668                 if (error) {
669                         /* Don't call itruncate_cleanup */
670                         ASSERT(XFS_FORCED_SHUTDOWN(mp));
671                         xfs_trans_cancel(tp, 0);
672                         xfs_iunlock(ip, XFS_IOLOCK_EXCL);
673                         return VN_INACTIVE_CACHE;
674                 }
675
676                 xfs_ilock(ip, XFS_ILOCK_EXCL);
677                 xfs_trans_ijoin(tp, ip, 0);
678
679                 ip->i_d.di_size = 0;
680                 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
681
682                 error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
683                 if (error) {
684                         xfs_trans_cancel(tp,
685                                 XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
686                         xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
687                         return VN_INACTIVE_CACHE;
688                 }
689
690                 ASSERT(ip->i_d.di_nextents == 0);
691         } else if (S_ISLNK(ip->i_d.di_mode)) {
692
693                 /*
694                  * If we get an error while cleaning up a
695                  * symlink we bail out.
696                  */
697                 error = (ip->i_d.di_size > XFS_IFORK_DSIZE(ip)) ?
698                         xfs_inactive_symlink_rmt(ip, &tp) :
699                         xfs_inactive_symlink_local(ip, &tp);
700
701                 if (error) {
702                         ASSERT(tp == NULL);
703                         return VN_INACTIVE_CACHE;
704                 }
705
706                 xfs_trans_ijoin(tp, ip, 0);
707         } else {
708                 error = xfs_trans_reserve(tp, 0,
709                                           XFS_IFREE_LOG_RES(mp),
710                                           0, XFS_TRANS_PERM_LOG_RES,
711                                           XFS_INACTIVE_LOG_COUNT);
712                 if (error) {
713                         ASSERT(XFS_FORCED_SHUTDOWN(mp));
714                         xfs_trans_cancel(tp, 0);
715                         return VN_INACTIVE_CACHE;
716                 }
717
718                 xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
719                 xfs_trans_ijoin(tp, ip, 0);
720         }
721
722         /*
723          * If there are attributes associated with the file
724          * then blow them away now.  The code calls a routine
725          * that recursively deconstructs the attribute fork.
726          * We need to just commit the current transaction
727          * because we can't use it for xfs_attr_inactive().
728          */
729         if (ip->i_d.di_anextents > 0) {
730                 error = xfs_inactive_attrs(ip, &tp);
731                 /*
732                  * If we got an error, the transaction is already
733                  * cancelled, and the inode is unlocked. Just get out.
734                  */
735                  if (error)
736                          return VN_INACTIVE_CACHE;
737         } else if (ip->i_afp) {
738                 xfs_idestroy_fork(ip, XFS_ATTR_FORK);
739         }
740
741         /*
742          * Free the inode.
743          */
744         xfs_bmap_init(&free_list, &first_block);
745         error = xfs_ifree(tp, ip, &free_list);
746         if (error) {
747                 /*
748                  * If we fail to free the inode, shut down.  The cancel
749                  * might do that, we need to make sure.  Otherwise the
750                  * inode might be lost for a long time or forever.
751                  */
752                 if (!XFS_FORCED_SHUTDOWN(mp)) {
753                         xfs_notice(mp, "%s: xfs_ifree returned error %d",
754                                 __func__, error);
755                         xfs_force_shutdown(mp, SHUTDOWN_META_IO_ERROR);
756                 }
757                 xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
758         } else {
759                 /*
760                  * Credit the quota account(s). The inode is gone.
761                  */
762                 xfs_trans_mod_dquot_byino(tp, ip, XFS_TRANS_DQ_ICOUNT, -1);
763
764                 /*
765                  * Just ignore errors at this point.  There is nothing we can
766                  * do except to try to keep going. Make sure it's not a silent
767                  * error.
768                  */
769                 error = xfs_bmap_finish(&tp,  &free_list, &committed);
770                 if (error)
771                         xfs_notice(mp, "%s: xfs_bmap_finish returned error %d",
772                                 __func__, error);
773                 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
774                 if (error)
775                         xfs_notice(mp, "%s: xfs_trans_commit returned error %d",
776                                 __func__, error);
777         }
778
779         /*
780          * Release the dquots held by inode, if any.
781          */
782         xfs_qm_dqdetach(ip);
783         xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
784
785  out:
786         return VN_INACTIVE_CACHE;
787 }
788
789 /*
790  * Looks up an inode from "name". If ci_name is not NULL, then a CI match
791  * is allowed, otherwise it has to be an exact match. If a CI match is found,
792  * ci_name->name will point to the actual name (caller must free) or
793  * will be set to NULL if an exact match is found.
794  */
795 int
796 xfs_lookup(
797         xfs_inode_t             *dp,
798         struct xfs_name         *name,
799         xfs_inode_t             **ipp,
800         struct xfs_name         *ci_name)
801 {
802         xfs_ino_t               inum;
803         int                     error;
804         uint                    lock_mode;
805
806         trace_xfs_lookup(dp, name);
807
808         if (XFS_FORCED_SHUTDOWN(dp->i_mount))
809                 return XFS_ERROR(EIO);
810
811         lock_mode = xfs_ilock_map_shared(dp);
812         error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
813         xfs_iunlock_map_shared(dp, lock_mode);
814
815         if (error)
816                 goto out;
817
818         error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp);
819         if (error)
820                 goto out_free_name;
821
822         return 0;
823
824 out_free_name:
825         if (ci_name)
826                 kmem_free(ci_name->name);
827 out:
828         *ipp = NULL;
829         return error;
830 }
831
832 int
833 xfs_create(
834         xfs_inode_t             *dp,
835         struct xfs_name         *name,
836         umode_t                 mode,
837         xfs_dev_t               rdev,
838         xfs_inode_t             **ipp)
839 {
840         int                     is_dir = S_ISDIR(mode);
841         struct xfs_mount        *mp = dp->i_mount;
842         struct xfs_inode        *ip = NULL;
843         struct xfs_trans        *tp = NULL;
844         int                     error;
845         xfs_bmap_free_t         free_list;
846         xfs_fsblock_t           first_block;
847         boolean_t               unlock_dp_on_error = B_FALSE;
848         uint                    cancel_flags;
849         int                     committed;
850         prid_t                  prid;
851         struct xfs_dquot        *udqp = NULL;
852         struct xfs_dquot        *gdqp = NULL;
853         uint                    resblks;
854         uint                    log_res;
855         uint                    log_count;
856
857         trace_xfs_create(dp, name);
858
859         if (XFS_FORCED_SHUTDOWN(mp))
860                 return XFS_ERROR(EIO);
861
862         if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
863                 prid = xfs_get_projid(dp);
864         else
865                 prid = XFS_PROJID_DEFAULT;
866
867         /*
868          * Make sure that we have allocated dquot(s) on disk.
869          */
870         error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
871                         XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
872         if (error)
873                 return error;
874
875         if (is_dir) {
876                 rdev = 0;
877                 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
878                 log_res = XFS_MKDIR_LOG_RES(mp);
879                 log_count = XFS_MKDIR_LOG_COUNT;
880                 tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
881         } else {
882                 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
883                 log_res = XFS_CREATE_LOG_RES(mp);
884                 log_count = XFS_CREATE_LOG_COUNT;
885                 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
886         }
887
888         cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
889
890         /*
891          * Initially assume that the file does not exist and
892          * reserve the resources for that case.  If that is not
893          * the case we'll drop the one we have and get a more
894          * appropriate transaction later.
895          */
896         error = xfs_trans_reserve(tp, resblks, log_res, 0,
897                         XFS_TRANS_PERM_LOG_RES, log_count);
898         if (error == ENOSPC) {
899                 /* flush outstanding delalloc blocks and retry */
900                 xfs_flush_inodes(dp);
901                 error = xfs_trans_reserve(tp, resblks, log_res, 0,
902                                 XFS_TRANS_PERM_LOG_RES, log_count);
903         }
904         if (error == ENOSPC) {
905                 /* No space at all so try a "no-allocation" reservation */
906                 resblks = 0;
907                 error = xfs_trans_reserve(tp, 0, log_res, 0,
908                                 XFS_TRANS_PERM_LOG_RES, log_count);
909         }
910         if (error) {
911                 cancel_flags = 0;
912                 goto out_trans_cancel;
913         }
914
915         xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
916         unlock_dp_on_error = B_TRUE;
917
918         xfs_bmap_init(&free_list, &first_block);
919
920         /*
921          * Reserve disk quota and the inode.
922          */
923         error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
924         if (error)
925                 goto out_trans_cancel;
926
927         error = xfs_dir_canenter(tp, dp, name, resblks);
928         if (error)
929                 goto out_trans_cancel;
930
931         /*
932          * A newly created regular or special file just has one directory
933          * entry pointing to it, but a directory also has the "." entry
934          * pointing to itself.
935          */
936         error = xfs_dir_ialloc(&tp, dp, mode, is_dir ? 2 : 1, rdev,
937                                prid, resblks > 0, &ip, &committed);
938         if (error) {
939                 if (error == ENOSPC)
940                         goto out_trans_cancel;
941                 goto out_trans_abort;
942         }
943
944         /*
945          * Now we join the directory inode to the transaction.  We do not do it
946          * earlier because xfs_dir_ialloc might commit the previous transaction
947          * (and release all the locks).  An error from here on will result in
948          * the transaction cancel unlocking dp so don't do it explicitly in the
949          * error path.
950          */
951         xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
952         unlock_dp_on_error = B_FALSE;
953
954         error = xfs_dir_createname(tp, dp, name, ip->i_ino,
955                                         &first_block, &free_list, resblks ?
956                                         resblks - XFS_IALLOC_SPACE_RES(mp) : 0);
957         if (error) {
958                 ASSERT(error != ENOSPC);
959                 goto out_trans_abort;
960         }
961         xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
962         xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
963
964         if (is_dir) {
965                 error = xfs_dir_init(tp, ip, dp);
966                 if (error)
967                         goto out_bmap_cancel;
968
969                 error = xfs_bumplink(tp, dp);
970                 if (error)
971                         goto out_bmap_cancel;
972         }
973
974         /*
975          * If this is a synchronous mount, make sure that the
976          * create transaction goes to disk before returning to
977          * the user.
978          */
979         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
980                 xfs_trans_set_sync(tp);
981
982         /*
983          * Attach the dquot(s) to the inodes and modify them incore.
984          * The ids of the inode couldn't have changed since the new
985          * inode has been locked ever since it was created.
986          */
987         xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
988
989         error = xfs_bmap_finish(&tp, &free_list, &committed);
990         if (error)
991                 goto out_bmap_cancel;
992
993         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
994         if (error)
995                 goto out_release_inode;
996
997         xfs_qm_dqrele(udqp);
998         xfs_qm_dqrele(gdqp);
999
1000         *ipp = ip;
1001         return 0;
1002
1003  out_bmap_cancel:
1004         xfs_bmap_cancel(&free_list);
1005  out_trans_abort:
1006         cancel_flags |= XFS_TRANS_ABORT;
1007  out_trans_cancel:
1008         xfs_trans_cancel(tp, cancel_flags);
1009  out_release_inode:
1010         /*
1011          * Wait until after the current transaction is aborted to
1012          * release the inode.  This prevents recursive transactions
1013          * and deadlocks from xfs_inactive.
1014          */
1015         if (ip)
1016                 IRELE(ip);
1017
1018         xfs_qm_dqrele(udqp);
1019         xfs_qm_dqrele(gdqp);
1020
1021         if (unlock_dp_on_error)
1022                 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1023         return error;
1024 }
1025
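/*
 * Debug-only counters, updated in xfs_lock_inodes() below: they record how
 * often a multi-inode lock succeeded outright and how many trylock retries
 * were needed when an already-locked inode was found in the AIL.
 */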
1026 #ifdef DEBUG
1027 int xfs_locked_n;
1028 int xfs_small_retries;
1029 int xfs_middle_retries;
1030 int xfs_lots_retries;
1031 int xfs_lock_delays;
1032 #endif
1033
1034 /*
1035  * Bump the subclass so xfs_lock_inodes() acquires each lock with
1036  * a different value
1037  */
1038 static inline int
1039 xfs_lock_inumorder(int lock_mode, int subclass)
1040 {
1041         if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
1042                 lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_IOLOCK_SHIFT;
1043         if (lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL))
1044                 lock_mode |= (subclass + XFS_LOCK_INUMORDER) << XFS_ILOCK_SHIFT;
1045
1046         return lock_mode;
1047 }
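/*
 * For example, when xfs_lock_inodes() below takes XFS_ILOCK_EXCL on three
 * inodes in ascending inode number order it passes subclasses 0, 1 and 2,
 * so lockdep sees each ilock acquired at a distinct subclass instead of
 * flagging the nesting as recursive locking of a single class.
 */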
1048
1049 /*
1050  * The following routine will lock n inodes in exclusive mode.
1051  * We assume the caller calls us with the inodes in i_ino order.
1052  *
1053  * We need to detect deadlock where an inode that we lock
1054  * is in the AIL and we start waiting for another inode that is locked
1055  * by a thread in a long running transaction (such as truncate). This can
1056  * result in deadlock since the long running trans might need to wait
1057  * for the inode we just locked in order to push the tail and free space
1058  * in the log.
1059  */
1060 void
1061 xfs_lock_inodes(
1062         xfs_inode_t     **ips,
1063         int             inodes,
1064         uint            lock_mode)
1065 {
1066         int             attempts = 0, i, j, try_lock;
1067         xfs_log_item_t  *lp;
1068
1069         ASSERT(ips && (inodes >= 2)); /* we need at least two */
1070
1071         try_lock = 0;
1072         i = 0;
1073
1074 again:
1075         for (; i < inodes; i++) {
1076                 ASSERT(ips[i]);
1077
1078                 if (i && (ips[i] == ips[i-1]))  /* Already locked */
1079                         continue;
1080
1081                 /*
1082                  * If try_lock is not set yet, check that none of the
1083                  * already-locked inodes are in the AIL.
1084                  * If any are, set try_lock to be used later.
1085                  */
1086
1087                 if (!try_lock) {
1088                         for (j = (i - 1); j >= 0 && !try_lock; j--) {
1089                                 lp = (xfs_log_item_t *)ips[j]->i_itemp;
1090                                 if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1091                                         try_lock++;
1092                                 }
1093                         }
1094                 }
1095
1096                 /*
1097                  * If any of the inodes we have already locked is in the AIL,
1098                  * we must TRY to get the second and subsequent locks. If
1099                  * we can't get one of them, we must release all the locks
1100                  * we hold and try again.
1101                  */
1102
1103                 if (try_lock) {
1104                         /* try_lock must be 0 if i is 0. */
1105                         /*
1106                          * try_lock means we have an inode locked
1107                          * that is in the AIL.
1108                          */
1109                         ASSERT(i != 0);
1110                         if (!xfs_ilock_nowait(ips[i], xfs_lock_inumorder(lock_mode, i))) {
1111                                 attempts++;
1112
1113                                 /*
1114                                  * Unlock all previous guys and try again.
1115                                  * xfs_iunlock will try to push the tail
1116                                  * if the inode is in the AIL.
1117                                  */
1118
1119                                 for(j = i - 1; j >= 0; j--) {
1120
1121                                         /*
1122                                          * Check to see if we've already
1123                                          * unlocked this one.
1124                                          * Not the first one going back,
1125                                          * and the inode ptr is the same.
1126                                          */
1127                                         if ((j != (i - 1)) && ips[j] ==
1128                                                                 ips[j+1])
1129                                                 continue;
1130
1131                                         xfs_iunlock(ips[j], lock_mode);
1132                                 }
1133
1134                                 if ((attempts % 5) == 0) {
1135                                         delay(1); /* Don't just spin the CPU */
1136 #ifdef DEBUG
1137                                         xfs_lock_delays++;
1138 #endif
1139                                 }
1140                                 i = 0;
1141                                 try_lock = 0;
1142                                 goto again;
1143                         }
1144                 } else {
1145                         xfs_ilock(ips[i], xfs_lock_inumorder(lock_mode, i));
1146                 }
1147         }
1148
1149 #ifdef DEBUG
1150         if (attempts) {
1151                 if (attempts < 5) xfs_small_retries++;
1152                 else if (attempts < 100) xfs_middle_retries++;
1153                 else xfs_lots_retries++;
1154         } else {
1155                 xfs_locked_n++;
1156         }
1157 #endif
1158 }
1159
1160 /*
1161  * xfs_lock_two_inodes() can only be used to take one type of lock
1162  * at a time - the iolock or the ilock, but not both at once. If
1163  * we lock both at once, lockdep will report false positives saying
1164  * we have violated locking orders.
1165  */
1166 void
1167 xfs_lock_two_inodes(
1168         xfs_inode_t             *ip0,
1169         xfs_inode_t             *ip1,
1170         uint                    lock_mode)
1171 {
1172         xfs_inode_t             *temp;
1173         int                     attempts = 0;
1174         xfs_log_item_t          *lp;
1175
1176         if (lock_mode & (XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL))
1177                 ASSERT((lock_mode & (XFS_ILOCK_SHARED|XFS_ILOCK_EXCL)) == 0);
1178         ASSERT(ip0->i_ino != ip1->i_ino);
1179
1180         if (ip0->i_ino > ip1->i_ino) {
1181                 temp = ip0;
1182                 ip0 = ip1;
1183                 ip1 = temp;
1184         }
1185
1186  again:
1187         xfs_ilock(ip0, xfs_lock_inumorder(lock_mode, 0));
1188
1189         /*
1190          * If the first lock we have locked is in the AIL, we must TRY to get
1191          * the second lock. If we can't get it, we must release the first one
1192          * and try again.
1193          */
1194         lp = (xfs_log_item_t *)ip0->i_itemp;
1195         if (lp && (lp->li_flags & XFS_LI_IN_AIL)) {
1196                 if (!xfs_ilock_nowait(ip1, xfs_lock_inumorder(lock_mode, 1))) {
1197                         xfs_iunlock(ip0, lock_mode);
1198                         if ((++attempts % 5) == 0)
1199                                 delay(1); /* Don't just spin the CPU */
1200                         goto again;
1201                 }
1202         } else {
1203                 xfs_ilock(ip1, xfs_lock_inumorder(lock_mode, 1));
1204         }
1205 }
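/*
 * Typical usage, as in the callers below (xfs_remove() and xfs_link()),
 * is to lock the two inodes involved with a single lock type, e.g.:
 *
 *	xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
 *
 * Mixing ILOCK and IOLOCK flags in one call is not allowed, as noted above.
 */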
1206
1207 int
1208 xfs_remove(
1209         xfs_inode_t             *dp,
1210         struct xfs_name         *name,
1211         xfs_inode_t             *ip)
1212 {
1213         xfs_mount_t             *mp = dp->i_mount;
1214         xfs_trans_t             *tp = NULL;
1215         int                     is_dir = S_ISDIR(ip->i_d.di_mode);
1216         int                     error = 0;
1217         xfs_bmap_free_t         free_list;
1218         xfs_fsblock_t           first_block;
1219         int                     cancel_flags;
1220         int                     committed;
1221         int                     link_zero;
1222         uint                    resblks;
1223         uint                    log_count;
1224
1225         trace_xfs_remove(dp, name);
1226
1227         if (XFS_FORCED_SHUTDOWN(mp))
1228                 return XFS_ERROR(EIO);
1229
1230         error = xfs_qm_dqattach(dp, 0);
1231         if (error)
1232                 goto std_return;
1233
1234         error = xfs_qm_dqattach(ip, 0);
1235         if (error)
1236                 goto std_return;
1237
1238         if (is_dir) {
1239                 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
1240                 log_count = XFS_DEFAULT_LOG_COUNT;
1241         } else {
1242                 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
1243                 log_count = XFS_REMOVE_LOG_COUNT;
1244         }
1245         cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1246
1247         /*
1248          * We try to get the real space reservation first,
1249          * allowing for directory btree deletion(s) implying
1250          * possible bmap insert(s).  If we can't get the space
1251          * reservation then we use 0 instead, and avoid the bmap
1252          * btree insert(s) in the directory code: if a bmap insert
1253          * tries to happen, the directory code instead trims the LAST
1254          * block from the directory.
1255          */
1256         resblks = XFS_REMOVE_SPACE_RES(mp);
1257         error = xfs_trans_reserve(tp, resblks, XFS_REMOVE_LOG_RES(mp), 0,
1258                                   XFS_TRANS_PERM_LOG_RES, log_count);
1259         if (error == ENOSPC) {
1260                 resblks = 0;
1261                 error = xfs_trans_reserve(tp, 0, XFS_REMOVE_LOG_RES(mp), 0,
1262                                           XFS_TRANS_PERM_LOG_RES, log_count);
1263         }
1264         if (error) {
1265                 ASSERT(error != ENOSPC);
1266                 cancel_flags = 0;
1267                 goto out_trans_cancel;
1268         }
1269
1270         xfs_lock_two_inodes(dp, ip, XFS_ILOCK_EXCL);
1271
1272         xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1273         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1274
1275         /*
1276          * If we're removing a directory perform some additional validation.
1277          */
1278         if (is_dir) {
1279                 ASSERT(ip->i_d.di_nlink >= 2);
1280                 if (ip->i_d.di_nlink != 2) {
1281                         error = XFS_ERROR(ENOTEMPTY);
1282                         goto out_trans_cancel;
1283                 }
1284                 if (!xfs_dir_isempty(ip)) {
1285                         error = XFS_ERROR(ENOTEMPTY);
1286                         goto out_trans_cancel;
1287                 }
1288         }
1289
1290         xfs_bmap_init(&free_list, &first_block);
1291         error = xfs_dir_removename(tp, dp, name, ip->i_ino,
1292                                         &first_block, &free_list, resblks);
1293         if (error) {
1294                 ASSERT(error != ENOENT);
1295                 goto out_bmap_cancel;
1296         }
1297         xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1298
1299         if (is_dir) {
1300                 /*
1301                  * Drop the link from ip's "..".
1302                  */
1303                 error = xfs_droplink(tp, dp);
1304                 if (error)
1305                         goto out_bmap_cancel;
1306
1307                 /*
1308                  * Drop the "." link from ip to self.
1309                  */
1310                 error = xfs_droplink(tp, ip);
1311                 if (error)
1312                         goto out_bmap_cancel;
1313         } else {
1314                 /*
1315                  * When removing a non-directory we need to log the parent
1316                  * inode here.  For a directory this is done implicitly
1317                  * by the xfs_droplink call for the ".." entry.
1318                  */
1319                 xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1320         }
1321
1322         /*
1323          * Drop the link from dp to ip.
1324          */
1325         error = xfs_droplink(tp, ip);
1326         if (error)
1327                 goto out_bmap_cancel;
1328
1329         /*
1330          * Determine if this is the last link while
1331          * we are in the transaction.
1332          */
1333         link_zero = (ip->i_d.di_nlink == 0);
1334
1335         /*
1336          * If this is a synchronous mount, make sure that the
1337          * remove transaction goes to disk before returning to
1338          * the user.
1339          */
1340         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC))
1341                 xfs_trans_set_sync(tp);
1342
1343         error = xfs_bmap_finish(&tp, &free_list, &committed);
1344         if (error)
1345                 goto out_bmap_cancel;
1346
1347         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1348         if (error)
1349                 goto std_return;
1350
1351         /*
1352          * If we are using filestreams, kill the stream association.
1353          * If the file is still open it may get a new one but that
1354          * will get killed on last close in xfs_close() so we don't
1355          * have to worry about that.
1356          */
1357         if (!is_dir && link_zero && xfs_inode_is_filestream(ip))
1358                 xfs_filestream_deassociate(ip);
1359
1360         return 0;
1361
1362  out_bmap_cancel:
1363         xfs_bmap_cancel(&free_list);
1364         cancel_flags |= XFS_TRANS_ABORT;
1365  out_trans_cancel:
1366         xfs_trans_cancel(tp, cancel_flags);
1367  std_return:
1368         return error;
1369 }
1370
1371 int
1372 xfs_link(
1373         xfs_inode_t             *tdp,
1374         xfs_inode_t             *sip,
1375         struct xfs_name         *target_name)
1376 {
1377         xfs_mount_t             *mp = tdp->i_mount;
1378         xfs_trans_t             *tp;
1379         int                     error;
1380         xfs_bmap_free_t         free_list;
1381         xfs_fsblock_t           first_block;
1382         int                     cancel_flags;
1383         int                     committed;
1384         int                     resblks;
1385
1386         trace_xfs_link(tdp, target_name);
1387
1388         ASSERT(!S_ISDIR(sip->i_d.di_mode));
1389
1390         if (XFS_FORCED_SHUTDOWN(mp))
1391                 return XFS_ERROR(EIO);
1392
1393         error = xfs_qm_dqattach(sip, 0);
1394         if (error)
1395                 goto std_return;
1396
1397         error = xfs_qm_dqattach(tdp, 0);
1398         if (error)
1399                 goto std_return;
1400
1401         tp = xfs_trans_alloc(mp, XFS_TRANS_LINK);
1402         cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1403         resblks = XFS_LINK_SPACE_RES(mp, target_name->len);
1404         error = xfs_trans_reserve(tp, resblks, XFS_LINK_LOG_RES(mp), 0,
1405                         XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
1406         if (error == ENOSPC) {
1407                 resblks = 0;
1408                 error = xfs_trans_reserve(tp, 0, XFS_LINK_LOG_RES(mp), 0,
1409                                 XFS_TRANS_PERM_LOG_RES, XFS_LINK_LOG_COUNT);
1410         }
1411         if (error) {
1412                 cancel_flags = 0;
1413                 goto error_return;
1414         }
1415
1416         xfs_lock_two_inodes(sip, tdp, XFS_ILOCK_EXCL);
1417
1418         xfs_trans_ijoin(tp, sip, XFS_ILOCK_EXCL);
1419         xfs_trans_ijoin(tp, tdp, XFS_ILOCK_EXCL);
1420
1421         /*
1422          * If we are using project inheritance, we only allow hard link
1423          * creation in our tree when the project IDs are the same; else
1424          * the tree quota mechanism could be circumvented.
1425          */
1426         if (unlikely((tdp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
1427                      (xfs_get_projid(tdp) != xfs_get_projid(sip)))) {
1428                 error = XFS_ERROR(EXDEV);
1429                 goto error_return;
1430         }
1431
1432         error = xfs_dir_canenter(tp, tdp, target_name, resblks);
1433         if (error)
1434                 goto error_return;
1435
1436         xfs_bmap_init(&free_list, &first_block);
1437
1438         error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
1439                                         &first_block, &free_list, resblks);
1440         if (error)
1441                 goto abort_return;
1442         xfs_trans_ichgtime(tp, tdp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1443         xfs_trans_log_inode(tp, tdp, XFS_ILOG_CORE);
1444
1445         error = xfs_bumplink(tp, sip);
1446         if (error)
1447                 goto abort_return;
1448
1449         /*
1450          * If this is a synchronous mount, make sure that the
1451          * link transaction goes to disk before returning to
1452          * the user.
1453          */
1454         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
1455                 xfs_trans_set_sync(tp);
1456         }
1457
1458         error = xfs_bmap_finish (&tp, &free_list, &committed);
1459         if (error) {
1460                 xfs_bmap_cancel(&free_list);
1461                 goto abort_return;
1462         }
1463
1464         return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1465
1466  abort_return:
1467         cancel_flags |= XFS_TRANS_ABORT;
1468  error_return:
1469         xfs_trans_cancel(tp, cancel_flags);
1470  std_return:
1471         return error;
1472 }
1473
1474 int
1475 xfs_symlink(
1476         xfs_inode_t             *dp,
1477         struct xfs_name         *link_name,
1478         const char              *target_path,
1479         umode_t                 mode,
1480         xfs_inode_t             **ipp)
1481 {
1482         xfs_mount_t             *mp = dp->i_mount;
1483         xfs_trans_t             *tp;
1484         xfs_inode_t             *ip;
1485         int                     error;
1486         int                     pathlen;
1487         xfs_bmap_free_t         free_list;
1488         xfs_fsblock_t           first_block;
1489         boolean_t               unlock_dp_on_error = B_FALSE;
1490         uint                    cancel_flags;
1491         int                     committed;
1492         xfs_fileoff_t           first_fsb;
1493         xfs_filblks_t           fs_blocks;
1494         int                     nmaps;
1495         xfs_bmbt_irec_t         mval[SYMLINK_MAPS];
1496         xfs_daddr_t             d;
1497         const char              *cur_chunk;
1498         int                     byte_cnt;
1499         int                     n;
1500         xfs_buf_t               *bp;
1501         prid_t                  prid;
1502         struct xfs_dquot        *udqp, *gdqp;
1503         uint                    resblks;
1504
1505         *ipp = NULL;
1506         error = 0;
1507         ip = NULL;
1508         tp = NULL;
1509
1510         trace_xfs_symlink(dp, link_name);
1511
1512         if (XFS_FORCED_SHUTDOWN(mp))
1513                 return XFS_ERROR(EIO);
1514
1515         /*
1516          * Check the total length of the target path name.
1517          */
1518         pathlen = strlen(target_path);
1519         if (pathlen >= MAXPATHLEN)      /* total string too long */
1520                 return XFS_ERROR(ENAMETOOLONG);
1521
1522         udqp = gdqp = NULL;
1523         if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1524                 prid = xfs_get_projid(dp);
1525         else
1526                 prid = XFS_PROJID_DEFAULT;
1527
1528         /*
1529          * Make sure that we have allocated dquot(s) on disk.
1530          */
1531         error = xfs_qm_vop_dqalloc(dp, current_fsuid(), current_fsgid(), prid,
1532                         XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT, &udqp, &gdqp);
1533         if (error)
1534                 goto std_return;
1535
1536         tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
1537         cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
1538         /*
1539          * Will the symlink fit into the inode data fork?
1540          * There can't be any attributes yet, so we get the whole variable part.
1541          */
1542         if (pathlen <= XFS_LITINO(mp))
1543                 fs_blocks = 0;
1544         else
1545                 fs_blocks = XFS_B_TO_FSB(mp, pathlen);
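             /*
              * For example, if XFS_LITINO(mp) were, say, 156 bytes, a 100-byte
              * target path would be stored inline (fs_blocks = 0), while a
              * 300-byte path on a 4k-block filesystem would need one remote
              * block (illustrative sizes).
              */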
1546         resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
1547         error = xfs_trans_reserve(tp, resblks, XFS_SYMLINK_LOG_RES(mp), 0,
1548                         XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
1549         if (error == ENOSPC && fs_blocks == 0) {
1550                 resblks = 0;
1551                 error = xfs_trans_reserve(tp, 0, XFS_SYMLINK_LOG_RES(mp), 0,
1552                                 XFS_TRANS_PERM_LOG_RES, XFS_SYMLINK_LOG_COUNT);
1553         }
1554         if (error) {
1555                 cancel_flags = 0;
1556                 goto error_return;
1557         }
1558
1559         xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
1560         unlock_dp_on_error = B_TRUE;
1561
1562         /*
1563          * Check whether the directory allows new symlinks or not.
1564          */
1565         if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
1566                 error = XFS_ERROR(EPERM);
1567                 goto error_return;
1568         }
1569
1570         /*
1571          * Reserve disk quota: blocks and inode.
1572          */
1573         error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp, resblks, 1, 0);
1574         if (error)
1575                 goto error_return;
1576
1577         /*
1578          * Check for ability to enter directory entry, if no space reserved.
1579          * If no space was reserved, check that the new directory entry fits.
1580         error = xfs_dir_canenter(tp, dp, link_name, resblks);
1581         if (error)
1582                 goto error_return;
1583         /*
1584          * Initialize the bmap freelist prior to calling either
1585          * bmapi or the directory create code.
1586          */
1587         xfs_bmap_init(&free_list, &first_block);
1588
1589         /*
1590          * Allocate an inode for the symlink.
1591          */
1592         error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
1593                                prid, resblks > 0, &ip, NULL);
1594         if (error) {
1595                 if (error == ENOSPC)
1596                         goto error_return;
1597                 goto error1;
1598         }
1599
1600         /*
1601          * An error after we've joined dp to the transaction will result in the
1602          * transaction cancel unlocking dp so don't do it explicitly in the
1603          * error path.
1604          */
1605         xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
1606         unlock_dp_on_error = B_FALSE;
1607
1608         /*
1609          * Also attach the dquot(s) to it, if applicable.
1610          */
1611         xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp);
1612
1613         if (resblks)
1614                 resblks -= XFS_IALLOC_SPACE_RES(mp);
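             /*
              * resblks now excludes XFS_IALLOC_SPACE_RES(mp), the worst-case
              * block usage of the inode allocation above; what remains covers
              * the remote symlink blocks and the new directory entry below.
              */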
1615         /*
1616          * If the symlink will fit into the inode, write it inline.
1617          */
1618         if (pathlen <= XFS_IFORK_DSIZE(ip)) {
1619                 xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
1620                 memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
1621                 ip->i_d.di_size = pathlen;
1622
1623                 /*
1624                  * The inode was initially created in extent format.
1625                  */
1626                 ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
1627                 ip->i_df.if_flags |= XFS_IFINLINE;
1628
1629                 ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
1630                 xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);
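                     /*
                      * The target now lives entirely in the inode's data fork
                      * (local format), so xfs_readlink() can return it without
                      * reading any separate disk blocks.
                      */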
1631
1632         } else {
1633                 first_fsb = 0;
1634                 nmaps = SYMLINK_MAPS;
1635
1636                 error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
1637                                   XFS_BMAPI_METADATA, &first_block, resblks,
1638                                   mval, &nmaps, &free_list);
1639                 if (error)
1640                         goto error2;
1641
1642                 if (resblks)
1643                         resblks -= fs_blocks;
1644                 ip->i_d.di_size = pathlen;
1645                 xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1646
1647                 cur_chunk = target_path;
1648                 for (n = 0; n < nmaps; n++) {
1649                         d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
1650                         byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
1651                         bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
1652                                                BTOBB(byte_cnt), 0);
1653                         if (!bp) {
1654                                 error = ENOMEM;
1655                                 goto error2;
1656                         }
1657                         if (pathlen < byte_cnt) {
1658                                 byte_cnt = pathlen;
1659                         }
1660                         pathlen -= byte_cnt;
1661
1662                         memcpy(bp->b_addr, cur_chunk, byte_cnt);
1663                         cur_chunk += byte_cnt;
1664
1665                         xfs_trans_log_buf(tp, bp, 0, byte_cnt - 1);
1666                 }
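                     /*
                      * Each chunk was logged with xfs_trans_log_buf(), so the
                      * symlink contents reach disk through the log as part of
                      * this transaction rather than via a separate data write.
                      */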
1667         }
1668
1669         /*
1670          * Create the directory entry for the symlink.
1671          */
1672         error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
1673                                         &first_block, &free_list, resblks);
1674         if (error)
1675                 goto error2;
1676         xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
1677         xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);
1678
1679         /*
1680          * If this is a synchronous mount, make sure that the
1681          * symlink transaction goes to disk before returning to
1682          * the user.
1683          */
1684         if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
1685                 xfs_trans_set_sync(tp);
1686         }
1687
1688         error = xfs_bmap_finish(&tp, &free_list, &committed);
1689         if (error) {
1690                 goto error2;
1691         }
1692         error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1693         xfs_qm_dqrele(udqp);
1694         xfs_qm_dqrele(gdqp);
1695
1696         *ipp = ip;
1697         return 0;
1698
1699  error2:
1700         IRELE(ip);
1701  error1:
1702         xfs_bmap_cancel(&free_list);
1703         cancel_flags |= XFS_TRANS_ABORT;
1704  error_return:
1705         xfs_trans_cancel(tp, cancel_flags);
1706         xfs_qm_dqrele(udqp);
1707         xfs_qm_dqrele(gdqp);
1708
1709         if (unlock_dp_on_error)
1710                 xfs_iunlock(dp, XFS_ILOCK_EXCL);
1711  std_return:
1712         return error;
1713 }
1714
1715 int
1716 xfs_set_dmattrs(
1717         xfs_inode_t     *ip,
1718         u_int           evmask,
1719         u_int16_t       state)
1720 {
1721         xfs_mount_t     *mp = ip->i_mount;
1722         xfs_trans_t     *tp;
1723         int             error;
1724
1725         if (!capable(CAP_SYS_ADMIN))
1726                 return XFS_ERROR(EPERM);
1727
1728         if (XFS_FORCED_SHUTDOWN(mp))
1729                 return XFS_ERROR(EIO);
1730
1731         tp = xfs_trans_alloc(mp, XFS_TRANS_SET_DMATTRS);
1732         error = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
1733         if (error) {
1734                 xfs_trans_cancel(tp, 0);
1735                 return error;
1736         }
1737         xfs_ilock(ip, XFS_ILOCK_EXCL);
1738         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
1739
1740         ip->i_d.di_dmevmask = evmask;
1741         ip->i_d.di_dmstate  = state;
1742
1743         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1744         error = xfs_trans_commit(tp, 0);
1745
1746         return error;
1747 }
1748
1749 /*
1750  * xfs_alloc_file_space()
1751  *      This routine allocates disk space for the given file.
1752  *
1753  *      If alloc_type == 0, this request is for an ALLOCSP type
1754  *      request which will change the file size.  The size change
1755  *      itself is applied afterwards by xfs_setattr_size().
1756  *
1757  *      If alloc_type != 0, it carries XFS_BMAPI_* flags (such as
1758  *      XFS_BMAPI_PREALLOC for a RESVSP type request) that are
1759  *      passed through to xfs_bmapi_write() for the allocation;
1760  *      in that case the file size is left unchanged by this
1761  *      routine.
1762  *
1763  * RETURNS:
1764  *       0 on success
1765  *      errno on error
1766  *
1767  */
1768 STATIC int
1769 xfs_alloc_file_space(
1770         xfs_inode_t             *ip,
1771         xfs_off_t               offset,
1772         xfs_off_t               len,
1773         int                     alloc_type,
1774         int                     attr_flags)
1775 {
1776         xfs_mount_t             *mp = ip->i_mount;
1777         xfs_off_t               count;
1778         xfs_filblks_t           allocated_fsb;
1779         xfs_filblks_t           allocatesize_fsb;
1780         xfs_extlen_t            extsz, temp;
1781         xfs_fileoff_t           startoffset_fsb;
1782         xfs_fsblock_t           firstfsb;
1783         int                     nimaps;
1784         int                     quota_flag;
1785         int                     rt;
1786         xfs_trans_t             *tp;
1787         xfs_bmbt_irec_t         imaps[1], *imapp;
1788         xfs_bmap_free_t         free_list;
1789         uint                    qblocks, resblks, resrtextents;
1790         int                     committed;
1791         int                     error;
1792
1793         trace_xfs_alloc_file_space(ip);
1794
1795         if (XFS_FORCED_SHUTDOWN(mp))
1796                 return XFS_ERROR(EIO);
1797
1798         error = xfs_qm_dqattach(ip, 0);
1799         if (error)
1800                 return error;
1801
1802         if (len <= 0)
1803                 return XFS_ERROR(EINVAL);
1804
1805         rt = XFS_IS_REALTIME_INODE(ip);
1806         extsz = xfs_get_extsz_hint(ip);
1807
1808         count = len;
1809         imapp = &imaps[0];
1810         nimaps = 1;
1811         startoffset_fsb = XFS_B_TO_FSBT(mp, offset);
1812         allocatesize_fsb = XFS_B_TO_FSB(mp, count);
1813
1814         /*
1815          * Allocate file space until done or until there is an error
1816          */
1817         while (allocatesize_fsb && !error) {
1818                 xfs_fileoff_t   s, e;
1819
1820                 /*
1821                  * Determine space reservations for data/realtime.
1822                  */
1823                 if (unlikely(extsz)) {
1824                         s = startoffset_fsb;
1825                         do_div(s, extsz);
1826                         s *= extsz;
1827                         e = startoffset_fsb + allocatesize_fsb;
1828                         if ((temp = do_mod(startoffset_fsb, extsz)))
1829                                 e += temp;
1830                         if ((temp = do_mod(e, extsz)))
1831                                 e += extsz - temp;
1832                 } else {
1833                         s = 0;
1834                         e = allocatesize_fsb;
1835                 }
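                     /*
                      * For example (illustrative values): with extsz = 16,
                      * startoffset_fsb = 32 and allocatesize_fsb = 10, s stays
                      * at 32 and e is rounded up from 42 to 48, so the request
                      * is sized to a whole extent-size-aligned chunk.
                      */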
1836
1837                 /*
1838                  * The transaction reservation is limited to a 32-bit block
1839                  * count, hence we need to limit the number of blocks we are
1840                  * trying to reserve to avoid an overflow. We can't allocate
1841                  * more than @nimaps extents, and an extent is limited on disk
1842                  * to MAXEXTLEN (21 bits), so use that to enforce the limit.
1843                  */
1844                 resblks = min_t(xfs_fileoff_t, (e - s), (MAXEXTLEN * nimaps));
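                     /*
                      * With nimaps == 1 this caps each iteration at MAXEXTLEN
                      * (2^21 - 1) blocks; larger requests are satisfied over
                      * multiple trips around the enclosing loop.
                      */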
1845                 if (unlikely(rt)) {
1846                         resrtextents = qblocks = resblks;
1847                         resrtextents /= mp->m_sb.sb_rextsize;
1848                         resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
1849                         quota_flag = XFS_QMOPT_RES_RTBLKS;
1850                 } else {
1851                         resrtextents = 0;
1852                         resblks = qblocks = XFS_DIOSTRAT_SPACE_RES(mp, resblks);
1853                         quota_flag = XFS_QMOPT_RES_REGBLKS;
1854                 }
1855
1856                 /*
1857          * Allocate and set up the transaction.
1858                  */
1859                 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
1860                 error = xfs_trans_reserve(tp, resblks,
1861                                           XFS_WRITE_LOG_RES(mp), resrtextents,
1862                                           XFS_TRANS_PERM_LOG_RES,
1863                                           XFS_WRITE_LOG_COUNT);
1864                 /*
1865                  * Check for running out of space
1866                  */
1867                 if (error) {
1868                         /*
1869                          * Free the transaction structure.
1870                          */
1871                         ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
1872                         xfs_trans_cancel(tp, 0);
1873                         break;
1874                 }
1875                 xfs_ilock(ip, XFS_ILOCK_EXCL);
1876                 error = xfs_trans_reserve_quota_nblks(tp, ip, qblocks,
1877                                                       0, quota_flag);
1878                 if (error)
1879                         goto error1;
1880
1881                 xfs_trans_ijoin(tp, ip, 0);
1882
1883                 xfs_bmap_init(&free_list, &firstfsb);
1884                 error = xfs_bmapi_write(tp, ip, startoffset_fsb,
1885                                         allocatesize_fsb, alloc_type, &firstfsb,
1886                                         0, imapp, &nimaps, &free_list);
1887                 if (error) {
1888                         goto error0;
1889                 }
1890
1891                 /*
1892                  * Complete the transaction
1893                  */
1894                 error = xfs_bmap_finish(&tp, &free_list, &committed);
1895                 if (error) {
1896                         goto error0;
1897                 }
1898
1899                 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
1900                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
1901                 if (error) {
1902                         break;
1903                 }
1904
1905                 if (nimaps == 0) {
1906                         error = XFS_ERROR(ENOSPC);
1907                         break;
1908                 }
1909
1910                 allocated_fsb = imapp->br_blockcount;
1911
1912                 startoffset_fsb += allocated_fsb;
1913                 allocatesize_fsb -= allocated_fsb;
1914         }
1915
1916         return error;
1917
1918 error0: /* Cancel bmap, unlock inode, unreserve quota blocks, cancel trans */
1919         xfs_bmap_cancel(&free_list);
1920         xfs_trans_unreserve_quota_nblks(tp, ip, qblocks, 0, quota_flag);
1921
1922 error1: /* Just cancel transaction */
1923         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
1924         xfs_iunlock(ip, XFS_ILOCK_EXCL);
1925         return error;
1926 }
1927
1928 /*
1929  * Zero file bytes between startoff and endoff inclusive.
1930  * The iolock is held exclusive and no blocks are buffered.
1931  *
1932  * This function is used by xfs_free_file_space() to zero
1933  * partial blocks when the range to free is not block aligned.
1934  * When unreserving space with boundaries that are not block
1935  * aligned we round up the start and round down the end
1936  * boundaries and then use this function to zero the parts of
1937  * the blocks that got dropped during the rounding.
1938  */
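     /*
      * For example (illustrative numbers, 4096-byte blocks): freeing bytes
      * 1000-8999 unmaps only the fully covered block 1 (bytes 4096-8191);
      * this function then zeroes the leftover bytes 1000-4095 and 8192-8999.
      */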
1939 STATIC int
1940 xfs_zero_remaining_bytes(
1941         xfs_inode_t             *ip,
1942         xfs_off_t               startoff,
1943         xfs_off_t               endoff)
1944 {
1945         xfs_bmbt_irec_t         imap;
1946         xfs_fileoff_t           offset_fsb;
1947         xfs_off_t               lastoffset;
1948         xfs_off_t               offset;
1949         xfs_buf_t               *bp;
1950         xfs_mount_t             *mp = ip->i_mount;
1951         int                     nimap;
1952         int                     error = 0;
1953
1954         /*
1955          * Avoid doing I/O beyond eof - it's not necessary
1956          * since nothing can read beyond eof.  The space will
1957          * be zeroed when the file is extended anyway.
1958          */
1959         if (startoff >= XFS_ISIZE(ip))
1960                 return 0;
1961
1962         if (endoff > XFS_ISIZE(ip))
1963                 endoff = XFS_ISIZE(ip);
1964
1965         bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
1966                                         mp->m_rtdev_targp : mp->m_ddev_targp,
1967                                   BTOBB(mp->m_sb.sb_blocksize), 0);
1968         if (!bp)
1969                 return XFS_ERROR(ENOMEM);
1970
1971         xfs_buf_unlock(bp);
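             /*
              * The uncached buffer is only a block-sized scratch buffer for
              * the read-modify-write cycle below; it never enters the buffer
              * cache, so it is safe to drop its lock here.
              */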
1972
1973         for (offset = startoff; offset <= endoff; offset = lastoffset + 1) {
1974                 offset_fsb = XFS_B_TO_FSBT(mp, offset);
1975                 nimap = 1;
1976                 error = xfs_bmapi_read(ip, offset_fsb, 1, &imap, &nimap, 0);
1977                 if (error || nimap < 1)
1978                         break;
1979                 ASSERT(imap.br_blockcount >= 1);
1980                 ASSERT(imap.br_startoff == offset_fsb);
1981                 lastoffset = XFS_FSB_TO_B(mp, imap.br_startoff + 1) - 1;
1982                 if (lastoffset > endoff)
1983                         lastoffset = endoff;
1984                 if (imap.br_startblock == HOLESTARTBLOCK)
1985                         continue;
1986                 ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
1987                 if (imap.br_state == XFS_EXT_UNWRITTEN)
1988                         continue;
1989                 XFS_BUF_UNDONE(bp);
1990                 XFS_BUF_UNWRITE(bp);
1991                 XFS_BUF_READ(bp);
1992                 XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
1993                 xfsbdstrat(mp, bp);
1994                 error = xfs_buf_iowait(bp);
1995                 if (error) {
1996                         xfs_buf_ioerror_alert(bp,
1997                                         "xfs_zero_remaining_bytes(read)");
1998                         break;
1999                 }
2000                 memset(bp->b_addr +
2001                         (offset - XFS_FSB_TO_B(mp, imap.br_startoff)),
2002                       0, lastoffset - offset + 1);
2003                 XFS_BUF_UNDONE(bp);
2004                 XFS_BUF_UNREAD(bp);
2005                 XFS_BUF_WRITE(bp);
2006                 xfsbdstrat(mp, bp);
2007                 error = xfs_buf_iowait(bp);
2008                 if (error) {
2009                         xfs_buf_ioerror_alert(bp,
2010                                         "xfs_zero_remaining_bytes(write)");
2011                         break;
2012                 }
2013         }
2014         xfs_buf_free(bp);
2015         return error;
2016 }
2017
2018 /*
2019  * xfs_free_file_space()
2020  *      This routine frees disk space for the given file.
2021  *
2022  *      This routine is only called by xfs_change_file_space
2023  *      for an UNRESVSP type call.
2024  *
2025  * RETURNS:
2026  *       0 on success
2027  *      errno on error
2028  *
2029  */
2030 STATIC int
2031 xfs_free_file_space(
2032         xfs_inode_t             *ip,
2033         xfs_off_t               offset,
2034         xfs_off_t               len,
2035         int                     attr_flags)
2036 {
2037         int                     committed;
2038         int                     done;
2039         xfs_fileoff_t           endoffset_fsb;
2040         int                     error;
2041         xfs_fsblock_t           firstfsb;
2042         xfs_bmap_free_t         free_list;
2043         xfs_bmbt_irec_t         imap;
2044         xfs_off_t               ioffset;
2045         xfs_extlen_t            mod = 0;
2046         xfs_mount_t             *mp;
2047         int                     nimap;
2048         uint                    resblks;
2049         uint                    rounding;
2050         int                     rt;
2051         xfs_fileoff_t           startoffset_fsb;
2052         xfs_trans_t             *tp;
2053         int                     need_iolock = 1;
2054
2055         mp = ip->i_mount;
2056
2057         trace_xfs_free_file_space(ip);
2058
2059         error = xfs_qm_dqattach(ip, 0);
2060         if (error)
2061                 return error;
2062
2063         error = 0;
2064         if (len <= 0)   /* if nothing being freed */
2065                 return error;
2066         rt = XFS_IS_REALTIME_INODE(ip);
2067         startoffset_fsb = XFS_B_TO_FSB(mp, offset);
2068         endoffset_fsb = XFS_B_TO_FSBT(mp, offset + len);
2069
2070         if (attr_flags & XFS_ATTR_NOLOCK)
2071                 need_iolock = 0;
2072         if (need_iolock) {
2073                 xfs_ilock(ip, XFS_IOLOCK_EXCL);
2074                 /* wait for the completion of any pending DIOs */
2075                 inode_dio_wait(VFS_I(ip));
2076         }
2077
2078         rounding = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE);
2079         ioffset = offset & ~(rounding - 1);
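             /*
              * For example, with 512-byte blocks and 4k pages the rounding is
              * 4k, so ioffset is the containing page boundary and whole pages
              * can be flushed and invalidated below.
              */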
2080
2081         if (VN_CACHED(VFS_I(ip)) != 0) {
2082                 error = xfs_flushinval_pages(ip, ioffset, -1, FI_REMAPF_LOCKED);
2083                 if (error)
2084                         goto out_unlock_iolock;
2085         }
2086
2087         /*
2088          * Need to zero the stuff we're not freeing, on disk.
2089          * If it's a realtime file & can't use unwritten extents then we
2090          * actually need to zero the extent edges.  Otherwise xfs_bunmapi
2091          * will take care of it for us.
2092          */
2093         if (rt && !xfs_sb_version_hasextflgbit(&mp->m_sb)) {
2094                 nimap = 1;
2095                 error = xfs_bmapi_read(ip, startoffset_fsb, 1,
2096                                         &imap, &nimap, 0);
2097                 if (error)
2098                         goto out_unlock_iolock;
2099                 ASSERT(nimap == 0 || nimap == 1);
2100                 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
2101                         xfs_daddr_t     block;
2102
2103                         ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
2104                         block = imap.br_startblock;
2105                         mod = do_div(block, mp->m_sb.sb_rextsize);
2106                         if (mod)
2107                                 startoffset_fsb += mp->m_sb.sb_rextsize - mod;
2108                 }
2109                 nimap = 1;
2110                 error = xfs_bmapi_read(ip, endoffset_fsb - 1, 1,
2111                                         &imap, &nimap, 0);
2112                 if (error)
2113                         goto out_unlock_iolock;
2114                 ASSERT(nimap == 0 || nimap == 1);
2115                 if (nimap && imap.br_startblock != HOLESTARTBLOCK) {
2116                         ASSERT(imap.br_startblock != DELAYSTARTBLOCK);
2117                         mod++;
2118                         if (mod && (mod != mp->m_sb.sb_rextsize))
2119                                 endoffset_fsb -= mod;
2120                 }
2121         }
2122         if ((done = (endoffset_fsb <= startoffset_fsb)))
2123                 /*
2124                  * One contiguous piece to clear
2125                  */
2126                 error = xfs_zero_remaining_bytes(ip, offset, offset + len - 1);
2127         else {
2128                 /*
2129                  * Some full blocks, possibly two pieces to clear
2130                  */
2131                 if (offset < XFS_FSB_TO_B(mp, startoffset_fsb))
2132                         error = xfs_zero_remaining_bytes(ip, offset,
2133                                 XFS_FSB_TO_B(mp, startoffset_fsb) - 1);
2134                 if (!error &&
2135                     XFS_FSB_TO_B(mp, endoffset_fsb) < offset + len)
2136                         error = xfs_zero_remaining_bytes(ip,
2137                                 XFS_FSB_TO_B(mp, endoffset_fsb),
2138                                 offset + len - 1);
2139         }
2140
2141         /*
2142          * free file space until done or until there is an error
2143          */
2144         resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
2145         while (!error && !done) {
2146
2147                 /*
2148                  * Allocate and set up the transaction. Allow this
2149                  * transaction to dip into the reserve blocks to ensure
2150                  * the freeing of the space succeeds at ENOSPC.
2151                  */
2152                 tp = xfs_trans_alloc(mp, XFS_TRANS_DIOSTRAT);
2153                 tp->t_flags |= XFS_TRANS_RESERVE;
2154                 error = xfs_trans_reserve(tp,
2155                                           resblks,
2156                                           XFS_WRITE_LOG_RES(mp),
2157                                           0,
2158                                           XFS_TRANS_PERM_LOG_RES,
2159                                           XFS_WRITE_LOG_COUNT);
2160
2161                 /*
2162                  * check for running out of space
2163                  */
2164                 if (error) {
2165                         /*
2166                          * Free the transaction structure.
2167                          */
2168                         ASSERT(error == ENOSPC || XFS_FORCED_SHUTDOWN(mp));
2169                         xfs_trans_cancel(tp, 0);
2170                         break;
2171                 }
2172                 xfs_ilock(ip, XFS_ILOCK_EXCL);
2173                 error = xfs_trans_reserve_quota(tp, mp,
2174                                 ip->i_udquot, ip->i_gdquot,
2175                                 resblks, 0, XFS_QMOPT_RES_REGBLKS);
2176                 if (error)
2177                         goto error1;
2178
2179                 xfs_trans_ijoin(tp, ip, 0);
2180
2181                 /*
2182                  * issue the bunmapi() call to free the blocks
2183                  */
2184                 xfs_bmap_init(&free_list, &firstfsb);
2185                 error = xfs_bunmapi(tp, ip, startoffset_fsb,
2186                                   endoffset_fsb - startoffset_fsb,
2187                                   0, 2, &firstfsb, &free_list, &done);
2188                 if (error) {
2189                         goto error0;
2190                 }
2191
2192                 /*
2193                  * complete the transaction
2194                  */
2195                 error = xfs_bmap_finish(&tp, &free_list, &committed);
2196                 if (error) {
2197                         goto error0;
2198                 }
2199
2200                 error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
2201                 xfs_iunlock(ip, XFS_ILOCK_EXCL);
2202         }
2203
2204  out_unlock_iolock:
2205         if (need_iolock)
2206                 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
2207         return error;
2208
2209  error0:
2210         xfs_bmap_cancel(&free_list);
2211  error1:
2212         xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
2213         xfs_iunlock(ip, need_iolock ? (XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL) :
2214                     XFS_ILOCK_EXCL);
2215         return error;
2216 }
2217
2218 /*
2219  * xfs_change_file_space()
2220  *      This routine allocates or frees disk space for the given file.
2221  *      The user specified parameters are checked for alignment and size
2222  *      limitations.
2223  *
2224  * RETURNS:
2225  *       0 on success
2226  *      errno on error
2227  *
2228  */
2229 int
2230 xfs_change_file_space(
2231         xfs_inode_t     *ip,
2232         int             cmd,
2233         xfs_flock64_t   *bf,
2234         xfs_off_t       offset,
2235         int             attr_flags)
2236 {
2237         xfs_mount_t     *mp = ip->i_mount;
2238         int             clrprealloc;
2239         int             error;
2240         xfs_fsize_t     fsize;
2241         int             setprealloc;
2242         xfs_off_t       startoffset;
2243         xfs_off_t       llen;
2244         xfs_trans_t     *tp;
2245         struct iattr    iattr;
2246         int             prealloc_type;
2247
2248         if (!S_ISREG(ip->i_d.di_mode))
2249                 return XFS_ERROR(EINVAL);
2250
2251         switch (bf->l_whence) {
2252         case 0: /*SEEK_SET*/
2253                 break;
2254         case 1: /*SEEK_CUR*/
2255                 bf->l_start += offset;
2256                 break;
2257         case 2: /*SEEK_END*/
2258                 bf->l_start += XFS_ISIZE(ip);
2259                 break;
2260         default:
2261                 return XFS_ERROR(EINVAL);
2262         }
2263
2264         llen = bf->l_len > 0 ? bf->l_len - 1 : bf->l_len;
2265
2266         if (   (bf->l_start < 0)
2267             || (bf->l_start > XFS_MAXIOFFSET(mp))
2268             || (bf->l_start + llen < 0)
2269             || (bf->l_start + llen > XFS_MAXIOFFSET(mp)))
2270                 return XFS_ERROR(EINVAL);
2271
2272         bf->l_whence = 0;
2273
2274         startoffset = bf->l_start;
2275         fsize = XFS_ISIZE(ip);
2276
2277         /*
2278          * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve
2279          * file space.
2280          * These calls do NOT zero the data space allocated to the file,
2281          * nor do they change the file size.
2282          *
2283          * XFS_IOC_ALLOCSP and XFS_IOC_FREESP will allocate and free file
2284          * space.
2285          * These calls cause the new file data to be zeroed and the file
2286          * size to be changed.
2287          */
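         /*
          * For example (illustrative sizes): XFS_IOC_RESVSP for 1MB at
          * offset 0 preallocates the space but leaves the file size
          * alone, while XFS_IOC_ALLOCSP with l_start at 1MB grows the
          * file to 1MB, allocating and zeroing the new range.
          */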
2288         setprealloc = clrprealloc = 0;
2289         prealloc_type = XFS_BMAPI_PREALLOC;
2290
2291         switch (cmd) {
2292         case XFS_IOC_ZERO_RANGE:
2293                 prealloc_type |= XFS_BMAPI_CONVERT;
2294                 xfs_tosspages(ip, startoffset, startoffset + bf->l_len, 0);
2295                 /* FALLTHRU */
2296         case XFS_IOC_RESVSP:
2297         case XFS_IOC_RESVSP64:
2298                 error = xfs_alloc_file_space(ip, startoffset, bf->l_len,
2299                                                 prealloc_type, attr_flags);
2300                 if (error)
2301                         return error;
2302                 setprealloc = 1;
2303                 break;
2304
2305         case XFS_IOC_UNRESVSP:
2306         case XFS_IOC_UNRESVSP64:
2307                 if ((error = xfs_free_file_space(ip, startoffset, bf->l_len,
2308                                                                 attr_flags)))
2309                         return error;
2310                 break;
2311
2312         case XFS_IOC_ALLOCSP:
2313         case XFS_IOC_ALLOCSP64:
2314         case XFS_IOC_FREESP:
2315         case XFS_IOC_FREESP64:
2316                 /*
2317                  * These operations actually do IO when extending the file, but
2318                  * the allocation is done separately from the zeroing that is
2319                  * done. This set of operations needs to be serialised against
2320                  * other IO operations, such as truncate and buffered IO. We
2321                  * need to take the IOLOCK here to serialise the allocation and
2322                  * zeroing IO to prevent other IOLOCK holders (e.g. getbmap,
2323                  * truncate, direct IO) from racing against the transient
2324                  * allocated but not written state we can have here.
2325                  */
2326                 xfs_ilock(ip, XFS_IOLOCK_EXCL);
2327                 if (startoffset > fsize) {
2328                         error = xfs_alloc_file_space(ip, fsize,
2329                                         startoffset - fsize, 0,
2330                                         attr_flags | XFS_ATTR_NOLOCK);
2331                         if (error) {
2332                                 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
2333                                 break;
2334                         }
2335                 }
2336
2337                 iattr.ia_valid = ATTR_SIZE;
2338                 iattr.ia_size = startoffset;
2339
2340                 error = xfs_setattr_size(ip, &iattr,
2341                                          attr_flags | XFS_ATTR_NOLOCK);
2342                 xfs_iunlock(ip, XFS_IOLOCK_EXCL);
2343
2344                 if (error)
2345                         return error;
2346
2347                 clrprealloc = 1;
2348                 break;
2349
2350         default:
2351                 ASSERT(0);
2352                 return XFS_ERROR(EINVAL);
2353         }
2354
2355         /*
2356          * update the inode timestamp, mode, and prealloc flag bits
2357          */
2358         tp = xfs_trans_alloc(mp, XFS_TRANS_WRITEID);
2359
2360         if ((error = xfs_trans_reserve(tp, 0, XFS_WRITEID_LOG_RES(mp),
2361                                       0, 0, 0))) {
2362                 /* ASSERT(0); */
2363                 xfs_trans_cancel(tp, 0);
2364                 return error;
2365         }
2366
2367         xfs_ilock(ip, XFS_ILOCK_EXCL);
2368         xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
2369
2370         if ((attr_flags & XFS_ATTR_DMI) == 0) {
2371                 ip->i_d.di_mode &= ~S_ISUID;
2372
2373                 /*
2374                  * Note that we don't have to worry about mandatory
2375                  * file locking being disabled here because we only
2376                  * clear the S_ISGID bit if the Group execute bit is
2377                  * on, but if it was on then mandatory locking wouldn't
2378                  * have been enabled.
2379                  */
2380                 if (ip->i_d.di_mode & S_IXGRP)
2381                         ip->i_d.di_mode &= ~S_ISGID;
2382
2383                 xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
2384         }
2385         if (setprealloc)
2386                 ip->i_d.di_flags |= XFS_DIFLAG_PREALLOC;
2387         else if (clrprealloc)
2388                 ip->i_d.di_flags &= ~XFS_DIFLAG_PREALLOC;
2389
2390         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2391         if (attr_flags & XFS_ATTR_SYNC)
2392                 xfs_trans_set_sync(tp);
2393         return xfs_trans_commit(tp, 0);
2394 }