// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/fs/hfsplus/inode.c
 *
 * Copyright (C) 2001
 * Brad Boyer (flar@allandria.com)
 * (C) 2003 Ardis Technologies <roman@ardistech.com>
 *
 * Inode handling routines
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/mpage.h>
#include <linux/sched.h>
#include <linux/cred.h>
#include <linux/uio.h>
#include <linux/fileattr.h>

#include "hfsplus_fs.h"
#include "hfsplus_raw.h"
#include "xattr.h"

static int hfsplus_read_folio(struct file *file, struct folio *folio)
{
        return block_read_full_folio(folio, hfsplus_get_block);
}

static int hfsplus_writepage(struct page *page, struct writeback_control *wbc)
{
        return block_write_full_page(page, hfsplus_get_block, wbc);
}

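/*
 * Error path for an extending write: drop any page cache beyond the old
 * i_size and trim the allocated extents back to match it.
 */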
static void hfsplus_write_failed(struct address_space *mapping, loff_t to)
{
        struct inode *inode = mapping->host;

        if (to > inode->i_size) {
                truncate_pagecache(inode, inode->i_size);
                hfsplus_file_truncate(inode);
        }
}

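/*
 * cont_write_begin() zero-fills the range between the current end of data
 * (tracked in phys_size) and the write position; if starting the write
 * fails, any blocks instantiated beyond i_size are trimmed again.
 */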
int hfsplus_write_begin(struct file *file, struct address_space *mapping,
                loff_t pos, unsigned len, struct page **pagep, void **fsdata)
{
        int ret;

        *pagep = NULL;
        ret = cont_write_begin(file, mapping, pos, len, pagep, fsdata,
                                hfsplus_get_block,
                                &HFSPLUS_I(mapping->host)->phys_size);
        if (unlikely(ret))
                hfsplus_write_failed(mapping, pos + len);

        return ret;
}

static sector_t hfsplus_bmap(struct address_space *mapping, sector_t block)
{
        return generic_block_bmap(mapping, block, hfsplus_get_block);
}

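/*
 * Folios here belong to one of the special B-tree inodes (extents,
 * catalog or attributes).  A folio can only be released if none of the
 * cached bnodes backed by it are still referenced; unreferenced bnodes
 * found in the hash are unhashed and freed first so that
 * try_to_free_buffers() can succeed.
 */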
static bool hfsplus_release_folio(struct folio *folio, gfp_t mask)
{
        struct inode *inode = folio->mapping->host;
        struct super_block *sb = inode->i_sb;
        struct hfs_btree *tree;
        struct hfs_bnode *node;
        u32 nidx;
        int i;
        bool res = true;

        switch (inode->i_ino) {
        case HFSPLUS_EXT_CNID:
                tree = HFSPLUS_SB(sb)->ext_tree;
                break;
        case HFSPLUS_CAT_CNID:
                tree = HFSPLUS_SB(sb)->cat_tree;
                break;
        case HFSPLUS_ATTR_CNID:
                tree = HFSPLUS_SB(sb)->attr_tree;
                break;
        default:
                BUG();
                return false;
        }
        if (!tree)
                return false;
        if (tree->node_size >= PAGE_SIZE) {
                nidx = folio->index >>
                        (tree->node_size_shift - PAGE_SHIFT);
                spin_lock(&tree->hash_lock);
                node = hfs_bnode_findhash(tree, nidx);
                if (!node)
                        ;
                else if (atomic_read(&node->refcnt))
                        res = false;
                if (res && node) {
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                }
                spin_unlock(&tree->hash_lock);
        } else {
                nidx = folio->index <<
                        (PAGE_SHIFT - tree->node_size_shift);
                i = 1 << (PAGE_SHIFT - tree->node_size_shift);
                spin_lock(&tree->hash_lock);
                do {
                        node = hfs_bnode_findhash(tree, nidx++);
                        if (!node)
                                continue;
                        if (atomic_read(&node->refcnt)) {
                                res = false;
                                break;
                        }
                        hfs_bnode_unhash(node);
                        hfs_bnode_free(node);
                } while (--i && nidx < tree->node_count);
                spin_unlock(&tree->hash_lock);
        }
        return res ? try_to_free_buffers(folio) : false;
}

static ssize_t hfsplus_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
        struct file *file = iocb->ki_filp;
        struct address_space *mapping = file->f_mapping;
        struct inode *inode = mapping->host;
        size_t count = iov_iter_count(iter);
        ssize_t ret;

        ret = blockdev_direct_IO(iocb, inode, iter, hfsplus_get_block);

        /*
         * In case of error extending write may have instantiated a few
         * blocks outside i_size. Trim these off again.
         */
        if (unlikely(iov_iter_rw(iter) == WRITE && ret < 0)) {
                loff_t isize = i_size_read(inode);
                loff_t end = iocb->ki_pos + count;

                if (end > isize)
                        hfsplus_write_failed(mapping, end);
        }

        return ret;
}

static int hfsplus_writepages(struct address_space *mapping,
                              struct writeback_control *wbc)
{
        return mpage_writepages(mapping, wbc, hfsplus_get_block);
}

const struct address_space_operations hfsplus_btree_aops = {
        .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio     = hfsplus_read_folio,
        .writepage      = hfsplus_writepage,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
        .release_folio  = hfsplus_release_folio,
};

const struct address_space_operations hfsplus_aops = {
        .dirty_folio    = block_dirty_folio,
        .invalidate_folio = block_invalidate_folio,
        .read_folio     = hfsplus_read_folio,
        .writepage      = hfsplus_writepage,
        .write_begin    = hfsplus_write_begin,
        .write_end      = generic_write_end,
        .bmap           = hfsplus_bmap,
        .direct_IO      = hfsplus_direct_IO,
        .writepages     = hfsplus_writepages,
};

const struct dentry_operations hfsplus_dentry_operations = {
        .d_hash       = hfsplus_hash_dentry,
        .d_compare    = hfsplus_compare_dentry,
};

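/*
 * Translate an on-disk permission record into i_uid/i_gid/i_mode, falling
 * back to the uid/gid/umask mount options when the record is empty, and
 * mirror the immutable and append-only rootflags into i_flags.
 */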
static void hfsplus_get_perms(struct inode *inode,
                struct hfsplus_perm *perms, int dir)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        u16 mode;

        mode = be16_to_cpu(perms->mode);

        i_uid_write(inode, be32_to_cpu(perms->owner));
        if (!i_uid_read(inode) && !mode)
                inode->i_uid = sbi->uid;

        i_gid_write(inode, be32_to_cpu(perms->group));
        if (!i_gid_read(inode) && !mode)
                inode->i_gid = sbi->gid;

        if (dir) {
                mode = mode ? (mode & S_IALLUGO) : (S_IRWXUGO & ~(sbi->umask));
                mode |= S_IFDIR;
        } else if (!mode)
                mode = S_IFREG | ((S_IRUGO|S_IWUGO) & ~(sbi->umask));
        inode->i_mode = mode;

        HFSPLUS_I(inode)->userflags = perms->userflags;
        if (perms->rootflags & HFSPLUS_FLG_IMMUTABLE)
                inode->i_flags |= S_IMMUTABLE;
        else
                inode->i_flags &= ~S_IMMUTABLE;
        if (perms->rootflags & HFSPLUS_FLG_APPEND)
                inode->i_flags |= S_APPEND;
        else
                inode->i_flags &= ~S_APPEND;
}

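/*
 * Opens are counted on the main inode (resource-fork opens are redirected
 * to it); files larger than MAX_NON_LFS require O_LARGEFILE.
 */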
static int hfsplus_file_open(struct inode *inode, struct file *file)
{
        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode)->rsrc_inode;
        if (!(file->f_flags & O_LARGEFILE) && i_size_read(inode) > MAX_NON_LFS)
                return -EOVERFLOW;
        atomic_inc(&HFSPLUS_I(inode)->opencnt);
        return 0;
}

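/*
 * On the last close, truncate the fork back to i_size and, if the inode
 * was marked S_DEAD while it was open, remove its catalog record from the
 * hidden directory and delete it for good.
 */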
static int hfsplus_file_release(struct inode *inode, struct file *file)
{
        struct super_block *sb = inode->i_sb;

        if (HFSPLUS_IS_RSRC(inode))
                inode = HFSPLUS_I(inode)->rsrc_inode;
        if (atomic_dec_and_test(&HFSPLUS_I(inode)->opencnt)) {
                inode_lock(inode);
                hfsplus_file_truncate(inode);
                if (inode->i_flags & S_DEAD) {
                        hfsplus_delete_cat(inode->i_ino,
                                           HFSPLUS_SB(sb)->hidden_dir, NULL);
                        hfsplus_delete_inode(inode);
                }
                inode_unlock(inode);
        }
        return 0;
}

static int hfsplus_setattr(struct user_namespace *mnt_userns,
                           struct dentry *dentry, struct iattr *attr)
{
        struct inode *inode = d_inode(dentry);
        int error;

        error = setattr_prepare(&init_user_ns, dentry, attr);
        if (error)
                return error;

        if ((attr->ia_valid & ATTR_SIZE) &&
            attr->ia_size != i_size_read(inode)) {
                inode_dio_wait(inode);
                if (attr->ia_size > inode->i_size) {
                        error = generic_cont_expand_simple(inode,
                                                           attr->ia_size);
                        if (error)
                                return error;
                }
                truncate_setsize(inode, attr->ia_size);
                hfsplus_file_truncate(inode);
                inode->i_mtime = inode->i_ctime = current_time(inode);
        }

        setattr_copy(&init_user_ns, inode, attr);
        mark_inode_dirty(inode);

        return 0;
}

int hfsplus_getattr(struct user_namespace *mnt_userns, const struct path *path,
                    struct kstat *stat, u32 request_mask,
                    unsigned int query_flags)
{
        struct inode *inode = d_inode(path->dentry);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);

        if (request_mask & STATX_BTIME) {
                stat->result_mask |= STATX_BTIME;
                stat->btime = hfsp_mt2ut(hip->create_date);
        }

        if (inode->i_flags & S_APPEND)
                stat->attributes |= STATX_ATTR_APPEND;
        if (inode->i_flags & S_IMMUTABLE)
                stat->attributes |= STATX_ATTR_IMMUTABLE;
        if (hip->userflags & HFSPLUS_FLG_NODUMP)
                stat->attributes |= STATX_ATTR_NODUMP;

        stat->attributes_mask |= STATX_ATTR_APPEND | STATX_ATTR_IMMUTABLE |
                                 STATX_ATTR_NODUMP;

        generic_fillattr(&init_user_ns, inode, stat);
        return 0;
}

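/*
 * Flush the file's data pages, sync its metadata into the catalog and
 * extent records, write out whichever special B-trees it dirtied, and
 * finally flush the block device unless barriers are disabled.
 */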
int hfsplus_file_fsync(struct file *file, loff_t start, loff_t end,
                       int datasync)
{
        struct inode *inode = file->f_mapping->host;
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
        int error = 0, error2;

        error = file_write_and_wait_range(file, start, end);
        if (error)
                return error;
        inode_lock(inode);

        /*
         * Sync inode metadata into the catalog and extent trees.
         */
        sync_inode_metadata(inode, 1);

        /*
         * And explicitly write out the btrees.
         */
        if (test_and_clear_bit(HFSPLUS_I_CAT_DIRTY, &hip->flags))
                error = filemap_write_and_wait(sbi->cat_tree->inode->i_mapping);

        if (test_and_clear_bit(HFSPLUS_I_EXT_DIRTY, &hip->flags)) {
                error2 =
                        filemap_write_and_wait(sbi->ext_tree->inode->i_mapping);
                if (!error)
                        error = error2;
        }

        if (test_and_clear_bit(HFSPLUS_I_ATTR_DIRTY, &hip->flags)) {
                if (sbi->attr_tree) {
                        error2 =
                                filemap_write_and_wait(
                                            sbi->attr_tree->inode->i_mapping);
                        if (!error)
                                error = error2;
                } else {
                        pr_err("sync non-existent attributes tree\n");
                }
        }

        if (test_and_clear_bit(HFSPLUS_I_ALLOC_DIRTY, &hip->flags)) {
                error2 = filemap_write_and_wait(sbi->alloc_file->i_mapping);
                if (!error)
                        error = error2;
        }

        if (!test_bit(HFSPLUS_SB_NOBARRIER, &sbi->flags))
                blkdev_issue_flush(inode->i_sb->s_bdev);

        inode_unlock(inode);

        return error;
}

static const struct inode_operations hfsplus_file_inode_operations = {
        .setattr        = hfsplus_setattr,
        .getattr        = hfsplus_getattr,
        .listxattr      = hfsplus_listxattr,
        .fileattr_get   = hfsplus_fileattr_get,
        .fileattr_set   = hfsplus_fileattr_set,
};

static const struct file_operations hfsplus_file_operations = {
        .llseek         = generic_file_llseek,
        .read_iter      = generic_file_read_iter,
        .write_iter     = generic_file_write_iter,
        .mmap           = generic_file_mmap,
        .splice_read    = generic_file_splice_read,
        .fsync          = hfsplus_file_fsync,
        .open           = hfsplus_file_open,
        .release        = hfsplus_file_release,
        .unlocked_ioctl = hfsplus_ioctl,
};

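/*
 * Allocate and initialise an in-core inode for a new catalog object,
 * giving it the next free CNID and the operations matching its type.
 */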
struct inode *hfsplus_new_inode(struct super_block *sb, struct inode *dir,
                                umode_t mode)
{
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct inode *inode = new_inode(sb);
        struct hfsplus_inode_info *hip;

        if (!inode)
                return NULL;

        inode->i_ino = sbi->next_cnid++;
        inode_init_owner(&init_user_ns, inode, dir, mode);
        set_nlink(inode, 1);
        inode->i_mtime = inode->i_atime = inode->i_ctime = current_time(inode);

        hip = HFSPLUS_I(inode);
        INIT_LIST_HEAD(&hip->open_dir_list);
        spin_lock_init(&hip->open_dir_lock);
        mutex_init(&hip->extents_lock);
        atomic_set(&hip->opencnt, 0);
        hip->extent_state = 0;
        hip->flags = 0;
        hip->userflags = 0;
        hip->subfolders = 0;
        memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
        memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
        hip->alloc_blocks = 0;
        hip->first_blocks = 0;
        hip->cached_start = 0;
        hip->cached_blocks = 0;
        hip->phys_size = 0;
        hip->fs_blocks = 0;
        hip->rsrc_inode = NULL;
        if (S_ISDIR(inode->i_mode)) {
                inode->i_size = 2;
                sbi->folder_count++;
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (S_ISREG(inode->i_mode)) {
                sbi->file_count++;
                inode->i_op = &hfsplus_file_inode_operations;
                inode->i_fop = &hfsplus_file_operations;
                inode->i_mapping->a_ops = &hfsplus_aops;
                hip->clump_blocks = sbi->data_clump_blocks;
        } else if (S_ISLNK(inode->i_mode)) {
                sbi->file_count++;
                inode->i_op = &page_symlink_inode_operations;
                inode_nohighmem(inode);
                inode->i_mapping->a_ops = &hfsplus_aops;
                hip->clump_blocks = 1;
        } else
                sbi->file_count++;
        insert_inode_hash(inode);
        mark_inode_dirty(inode);
        hfsplus_mark_mdb_dirty(sb);

        return inode;
}

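/*
 * Free the blocks of a deleted file or symlink and adjust the volume's
 * folder/file counts.
 */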
void hfsplus_delete_inode(struct inode *inode)
{
        struct super_block *sb = inode->i_sb;

        if (S_ISDIR(inode->i_mode)) {
                HFSPLUS_SB(sb)->folder_count--;
                hfsplus_mark_mdb_dirty(sb);
                return;
        }
        HFSPLUS_SB(sb)->file_count--;
        if (S_ISREG(inode->i_mode)) {
                if (!inode->i_nlink) {
                        inode->i_size = 0;
                        hfsplus_file_truncate(inode);
                }
        } else if (S_ISLNK(inode->i_mode)) {
                inode->i_size = 0;
                hfsplus_file_truncate(inode);
        }
        hfsplus_mark_mdb_dirty(sb);
}

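/*
 * Load a fork descriptor into the inode: the first extent record, the
 * total block and byte counts, and the clump size to use for allocation
 * (falling back to the per-volume defaults).
 */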
void hfsplus_inode_read_fork(struct inode *inode, struct hfsplus_fork_raw *fork)
{
        struct super_block *sb = inode->i_sb;
        struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        u32 count;
        int i;

        memcpy(&hip->first_extents, &fork->extents, sizeof(hfsplus_extent_rec));
        for (count = 0, i = 0; i < 8; i++)
                count += be32_to_cpu(fork->extents[i].block_count);
        hip->first_blocks = count;
        memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
        hip->cached_start = 0;
        hip->cached_blocks = 0;

        hip->alloc_blocks = be32_to_cpu(fork->total_blocks);
        hip->phys_size = inode->i_size = be64_to_cpu(fork->total_size);
        hip->fs_blocks =
                (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
        inode_set_bytes(inode, hip->fs_blocks << sb->s_blocksize_bits);
        hip->clump_blocks =
                be32_to_cpu(fork->clump_size) >> sbi->alloc_blksz_shift;
        if (!hip->clump_blocks) {
                hip->clump_blocks = HFSPLUS_IS_RSRC(inode) ?
                        sbi->rsrc_clump_blocks :
                        sbi->data_clump_blocks;
        }
}

void hfsplus_inode_write_fork(struct inode *inode,
                struct hfsplus_fork_raw *fork)
{
        memcpy(&fork->extents, &HFSPLUS_I(inode)->first_extents,
               sizeof(hfsplus_extent_rec));
        fork->total_size = cpu_to_be64(inode->i_size);
        fork->total_blocks = cpu_to_be32(HFSPLUS_I(inode)->alloc_blocks);
}

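/*
 * Initialise an inode from its catalog record: permissions, timestamps
 * and link count, plus the appropriate fork for file entries.
 */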
int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
{
        hfsplus_cat_entry entry;
        int res = 0;
        u16 type;

        type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);

        HFSPLUS_I(inode)->linkid = 0;
        if (type == HFSPLUS_FOLDER) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_folder));
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                                        sizeof(struct hfsplus_cat_folder));
                hfsplus_get_perms(inode, &folder->permissions, 1);
                set_nlink(inode, 1);
                inode->i_size = 2 + be32_to_cpu(folder->valence);
                inode->i_atime = hfsp_mt2ut(folder->access_date);
                inode->i_mtime = hfsp_mt2ut(folder->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
                HFSPLUS_I(inode)->create_date = folder->create_date;
                HFSPLUS_I(inode)->fs_blocks = 0;
                if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
                        HFSPLUS_I(inode)->subfolders =
                                be32_to_cpu(folder->subfolders);
                }
                inode->i_op = &hfsplus_dir_inode_operations;
                inode->i_fop = &hfsplus_dir_operations;
        } else if (type == HFSPLUS_FILE) {
                struct hfsplus_cat_file *file = &entry.file;

                WARN_ON(fd->entrylength < sizeof(struct hfsplus_cat_file));
                hfs_bnode_read(fd->bnode, &entry, fd->entryoffset,
                                        sizeof(struct hfsplus_cat_file));

                hfsplus_inode_read_fork(inode, HFSPLUS_IS_RSRC(inode) ?
                                        &file->rsrc_fork : &file->data_fork);
                hfsplus_get_perms(inode, &file->permissions, 0);
                set_nlink(inode, 1);
                if (S_ISREG(inode->i_mode)) {
                        if (file->permissions.dev)
                                set_nlink(inode,
                                          be32_to_cpu(file->permissions.dev));
                        inode->i_op = &hfsplus_file_inode_operations;
                        inode->i_fop = &hfsplus_file_operations;
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else if (S_ISLNK(inode->i_mode)) {
                        inode->i_op = &page_symlink_inode_operations;
                        inode_nohighmem(inode);
                        inode->i_mapping->a_ops = &hfsplus_aops;
                } else {
                        init_special_inode(inode, inode->i_mode,
                                           be32_to_cpu(file->permissions.dev));
                }
                inode->i_atime = hfsp_mt2ut(file->access_date);
                inode->i_mtime = hfsp_mt2ut(file->content_mod_date);
                inode->i_ctime = hfsp_mt2ut(file->attribute_mod_date);
                HFSPLUS_I(inode)->create_date = file->create_date;
        } else {
                pr_err("bad catalog entry used to create inode\n");
                res = -EIO;
        }
        return res;
}

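/*
 * Write the in-core inode state back into its catalog record.  Resource
 * fork inodes only update their fork data; folders and plain files also
 * refresh permissions, timestamps and flags.
 */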
int hfsplus_cat_write_inode(struct inode *inode)
{
        struct inode *main_inode = inode;
        struct hfs_find_data fd;
        hfsplus_cat_entry entry;

        if (HFSPLUS_IS_RSRC(inode))
                main_inode = HFSPLUS_I(inode)->rsrc_inode;

        if (!main_inode->i_nlink)
                return 0;

        if (hfs_find_init(HFSPLUS_SB(main_inode->i_sb)->cat_tree, &fd))
                /* panic? */
                return -EIO;

        if (hfsplus_find_cat(main_inode->i_sb, main_inode->i_ino, &fd))
                /* panic? */
                goto out;

        if (S_ISDIR(main_inode->i_mode)) {
                struct hfsplus_cat_folder *folder = &entry.folder;

                WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_folder));
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                        sizeof(struct hfsplus_cat_folder));
                /* simple node checks? */
                hfsplus_cat_set_perms(inode, &folder->permissions);
                folder->access_date = hfsp_ut2mt(inode->i_atime);
                folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
                folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
                folder->valence = cpu_to_be32(inode->i_size - 2);
                if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
                        folder->subfolders =
                                cpu_to_be32(HFSPLUS_I(inode)->subfolders);
                }
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                         sizeof(struct hfsplus_cat_folder));
        } else if (HFSPLUS_IS_RSRC(inode)) {
                struct hfsplus_cat_file *file = &entry.file;

                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                               sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->rsrc_fork);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                sizeof(struct hfsplus_cat_file));
        } else {
                struct hfsplus_cat_file *file = &entry.file;

                WARN_ON(fd.entrylength < sizeof(struct hfsplus_cat_file));
                hfs_bnode_read(fd.bnode, &entry, fd.entryoffset,
                                        sizeof(struct hfsplus_cat_file));
                hfsplus_inode_write_fork(inode, &file->data_fork);
                hfsplus_cat_set_perms(inode, &file->permissions);
                if (HFSPLUS_FLG_IMMUTABLE &
                                (file->permissions.rootflags |
                                        file->permissions.userflags))
                        file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED);
                else
                        file->flags &= cpu_to_be16(~HFSPLUS_FILE_LOCKED);
                file->access_date = hfsp_ut2mt(inode->i_atime);
                file->content_mod_date = hfsp_ut2mt(inode->i_mtime);
                file->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
                hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
                                         sizeof(struct hfsplus_cat_file));
        }

        set_bit(HFSPLUS_I_CAT_DIRTY, &HFSPLUS_I(inode)->flags);
out:
        hfs_find_exit(&fd);
        return 0;
}

int hfsplus_fileattr_get(struct dentry *dentry, struct fileattr *fa)
{
        struct inode *inode = d_inode(dentry);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        unsigned int flags = 0;

        if (inode->i_flags & S_IMMUTABLE)
                flags |= FS_IMMUTABLE_FL;
        if (inode->i_flags & S_APPEND)
                flags |= FS_APPEND_FL;
        if (hip->userflags & HFSPLUS_FLG_NODUMP)
                flags |= FS_NODUMP_FL;

        fileattr_fill_flags(fa, flags);

        return 0;
}

int hfsplus_fileattr_set(struct user_namespace *mnt_userns,
                         struct dentry *dentry, struct fileattr *fa)
{
        struct inode *inode = d_inode(dentry);
        struct hfsplus_inode_info *hip = HFSPLUS_I(inode);
        unsigned int new_fl = 0;

        if (fileattr_has_fsx(fa))
                return -EOPNOTSUPP;

        /* don't silently ignore unsupported ext2 flags */
        if (fa->flags & ~(FS_IMMUTABLE_FL|FS_APPEND_FL|FS_NODUMP_FL))
                return -EOPNOTSUPP;

        if (fa->flags & FS_IMMUTABLE_FL)
                new_fl |= S_IMMUTABLE;

        if (fa->flags & FS_APPEND_FL)
                new_fl |= S_APPEND;

        inode_set_flags(inode, new_fl, S_IMMUTABLE | S_APPEND);

        if (fa->flags & FS_NODUMP_FL)
                hip->userflags |= HFSPLUS_FLG_NODUMP;
        else
                hip->userflags &= ~HFSPLUS_FLG_NODUMP;

        inode->i_ctime = current_time(inode);
        mark_inode_dirty(inode);

        return 0;
}