Merge tag 'mm-nonmm-stable-2024-03-14-09-36' of git://git.kernel.org/pub/scm/linux...
[linux-2.6-microblaze.git] / fs / nilfs2 / sufile.c
index 0a81194..6748218 100644
@@ -48,7 +48,7 @@ nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
 {
        __u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;
 
-       do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
+       t = div64_ul(t, nilfs_sufile_segment_usages_per_block(sufile));
        return (unsigned long)t;
 }
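The hunk above is the one non-kmap change in this patch: the in-place do_div() macro is replaced by div64_ul(). As a rough, hypothetical sketch of the difference (example_blkoff() and its per_block parameter are made up for illustration and are not part of sufile.c): do_div(n, base) divides the 64-bit lvalue n in place by a 32-bit base and evaluates to the remainder, so an unsigned long divisor gets narrowed to 32 bits, whereas div64_ul() takes an unsigned long divisor and simply returns the quotient.

/*
 * Minimal sketch only; the function name and arguments are hypothetical.
 */
#include <linux/types.h>        /* __u64 */
#include <linux/math64.h>       /* div64_ul() */
#include <asm/div64.h>          /* do_div() */

static unsigned long example_blkoff(__u64 segnum, unsigned long per_block)
{
        __u64 t = segnum;

        /* old idiom: t becomes the quotient, the macro evaluates to the remainder */
        /* do_div(t, per_block); */

        /* new idiom: 64-bit dividend, unsigned long divisor, quotient returned */
        t = div64_ul(t, per_block);

        return (unsigned long)t;
}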
 
@@ -107,11 +107,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
        struct nilfs_sufile_header *header;
        void *kaddr;
 
-       kaddr = kmap_atomic(header_bh->b_page);
+       kaddr = kmap_local_page(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
 
        mark_buffer_dirty(header_bh);
 }
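Every remaining hunk applies the same mechanical conversion: each kmap_atomic()/kunmap_atomic() pair becomes kmap_local_page()/kunmap_local(). Below is a condensed sketch of the pattern, modeled on nilfs_sufile_mod_counter() above but with a made-up header structure and function name. kmap_atomic() disables pagefaults (and, outside PREEMPT_RT, preemption) until the matching unmap, while kmap_local_page() only establishes a per-task local mapping, so the code between map and unmap no longer has to run atomically; nested local mappings must be released in reverse order.

/*
 * Condensed, hypothetical stand-in for the pattern used throughout this
 * file; example_header and example_mod_counter() are not real nilfs2 names.
 */
#include <linux/types.h>
#include <linux/highmem.h>      /* kmap_local_page(), kunmap_local() */
#include <linux/buffer_head.h>  /* bh_offset(), mark_buffer_dirty() */
#include <asm/byteorder.h>      /* le64_add_cpu() */

struct example_header {
        __le64 eh_counter;
};

static void example_mod_counter(struct buffer_head *bh, u64 add)
{
        struct example_header *header;
        void *kaddr;

        /* old: kaddr = kmap_atomic(bh->b_page);  -- pagefaults/preemption off */
        kaddr = kmap_local_page(bh->b_page);    /* per-task mapping, preemption stays on */

        header = kaddr + bh_offset(bh);
        le64_add_cpu(&header->eh_counter, add);

        /* old: kunmap_atomic(kaddr); */
        kunmap_local(kaddr);    /* nested local mappings are released in reverse order */

        mark_buffer_dirty(bh);
}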
@@ -315,10 +315,10 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
        ret = nilfs_sufile_get_header_block(sufile, &header_bh);
        if (ret < 0)
                goto out_sem;
-       kaddr = kmap_atomic(header_bh->b_page);
+       kaddr = kmap_local_page(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        last_alloc = le64_to_cpu(header->sh_last_alloc);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
 
        nsegments = nilfs_sufile_get_nsegments(sufile);
        maxsegnum = sui->allocmax;
@@ -352,7 +352,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                                                           &su_bh);
                if (ret < 0)
                        goto out_header;
-               kaddr = kmap_atomic(su_bh->b_page);
+               kaddr = kmap_local_page(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
 
@@ -363,14 +363,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                                continue;
                        /* found a clean segment */
                        nilfs_segment_usage_set_dirty(su);
-                       kunmap_atomic(kaddr);
+                       kunmap_local(kaddr);
 
-                       kaddr = kmap_atomic(header_bh->b_page);
+                       kaddr = kmap_local_page(header_bh->b_page);
                        header = kaddr + bh_offset(header_bh);
                        le64_add_cpu(&header->sh_ncleansegs, -1);
                        le64_add_cpu(&header->sh_ndirtysegs, 1);
                        header->sh_last_alloc = cpu_to_le64(segnum);
-                       kunmap_atomic(kaddr);
+                       kunmap_local(kaddr);
 
                        sui->ncleansegs--;
                        mark_buffer_dirty(header_bh);
@@ -384,7 +384,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
                        goto out_header;
                }
 
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                brelse(su_bh);
        }
 
@@ -406,16 +406,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
        struct nilfs_segment_usage *su;
        void *kaddr;
 
-       kaddr = kmap_atomic(su_bh->b_page);
+       kaddr = kmap_local_page(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (unlikely(!nilfs_segment_usage_clean(su))) {
                nilfs_warn(sufile->i_sb, "%s: segment %llu must be clean",
                           __func__, (unsigned long long)segnum);
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                return;
        }
        nilfs_segment_usage_set_dirty(su);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
 
        nilfs_sufile_mod_counter(header_bh, -1, 1);
        NILFS_SUI(sufile)->ncleansegs--;
@@ -432,11 +432,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
        void *kaddr;
        int clean, dirty;
 
-       kaddr = kmap_atomic(su_bh->b_page);
+       kaddr = kmap_local_page(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
            su->su_nblocks == cpu_to_le32(0)) {
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                return;
        }
        clean = nilfs_segment_usage_clean(su);
@@ -446,7 +446,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
        su->su_lastmod = cpu_to_le64(0);
        su->su_nblocks = cpu_to_le32(0);
        su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
 
        nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
        NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -463,12 +463,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
        void *kaddr;
        int sudirty;
 
-       kaddr = kmap_atomic(su_bh->b_page);
+       kaddr = kmap_local_page(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_clean(su)) {
                nilfs_warn(sufile->i_sb, "%s: segment %llu is already clean",
                           __func__, (unsigned long long)segnum);
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                return;
        }
        if (unlikely(nilfs_segment_usage_error(su)))
@@ -481,7 +481,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
                           (unsigned long long)segnum);
 
        nilfs_segment_usage_set_clean(su);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
        mark_buffer_dirty(su_bh);
 
        nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -509,12 +509,12 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
        if (ret)
                goto out_sem;
 
-       kaddr = kmap_atomic(bh->b_page);
+       kaddr = kmap_local_page(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        if (unlikely(nilfs_segment_usage_error(su))) {
                struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
 
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                brelse(bh);
                if (nilfs_segment_is_active(nilfs, segnum)) {
                        nilfs_error(sufile->i_sb,
@@ -532,7 +532,7 @@ int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
                ret = -EIO;
        } else {
                nilfs_segment_usage_set_dirty(su);
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                mark_buffer_dirty(bh);
                nilfs_mdt_mark_dirty(sufile);
                brelse(bh);
@@ -562,7 +562,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
        if (ret < 0)
                goto out_sem;
 
-       kaddr = kmap_atomic(bh->b_page);
+       kaddr = kmap_local_page(bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
        if (modtime) {
                /*
@@ -573,7 +573,7 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
                su->su_lastmod = cpu_to_le64(modtime);
        }
        su->su_nblocks = cpu_to_le32(nblocks);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
 
        mark_buffer_dirty(bh);
        nilfs_mdt_mark_dirty(sufile);
@@ -614,7 +614,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
        if (ret < 0)
                goto out_sem;
 
-       kaddr = kmap_atomic(header_bh->b_page);
+       kaddr = kmap_local_page(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
        sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
@@ -624,7 +624,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
        spin_lock(&nilfs->ns_last_segment_lock);
        sustat->ss_prot_seq = nilfs->ns_prot_seq;
        spin_unlock(&nilfs->ns_last_segment_lock);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
        brelse(header_bh);
 
  out_sem:
@@ -640,15 +640,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
        void *kaddr;
        int suclean;
 
-       kaddr = kmap_atomic(su_bh->b_page);
+       kaddr = kmap_local_page(su_bh->b_page);
        su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
        if (nilfs_segment_usage_error(su)) {
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                return;
        }
        suclean = nilfs_segment_usage_clean(su);
        nilfs_segment_usage_set_error(su);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
 
        if (suclean) {
                nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -717,7 +717,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
                        /* hole */
                        continue;
                }
-               kaddr = kmap_atomic(su_bh->b_page);
+               kaddr = kmap_local_page(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                su2 = su;
@@ -726,7 +726,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
                             ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
                            nilfs_segment_is_active(nilfs, segnum + j)) {
                                ret = -EBUSY;
-                               kunmap_atomic(kaddr);
+                               kunmap_local(kaddr);
                                brelse(su_bh);
                                goto out_header;
                        }
@@ -738,7 +738,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
                                nc++;
                        }
                }
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                if (nc > 0) {
                        mark_buffer_dirty(su_bh);
                        ncleaned += nc;
@@ -823,10 +823,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
                sui->allocmin = 0;
        }
 
-       kaddr = kmap_atomic(header_bh->b_page);
+       kaddr = kmap_local_page(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
 
        mark_buffer_dirty(header_bh);
        nilfs_mdt_mark_dirty(sufile);
@@ -891,7 +891,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                        continue;
                }
 
-               kaddr = kmap_atomic(su_bh->b_page);
+               kaddr = kmap_local_page(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, segnum, su_bh, kaddr);
                for (j = 0; j < n;
@@ -904,7 +904,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
                                si->sui_flags |=
                                        BIT(NILFS_SEGMENT_USAGE_ACTIVE);
                }
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                brelse(su_bh);
        }
        ret = nsegs;
@@ -973,7 +973,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
                goto out_header;
 
        for (;;) {
-               kaddr = kmap_atomic(bh->b_page);
+               kaddr = kmap_local_page(bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(
                        sufile, sup->sup_segnum, bh, kaddr);
 
@@ -1010,7 +1010,7 @@ ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
                        su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
                }
 
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
 
                sup = (void *)sup + supsz;
                if (sup >= supend)
@@ -1115,7 +1115,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
                        continue;
                }
 
-               kaddr = kmap_atomic(su_bh->b_page);
+               kaddr = kmap_local_page(su_bh->b_page);
                su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
                                su_bh, kaddr);
                for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
@@ -1145,7 +1145,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
                        }
 
                        if (nblocks >= minlen) {
-                               kunmap_atomic(kaddr);
+                               kunmap_local(kaddr);
 
                                ret = blkdev_issue_discard(nilfs->ns_bdev,
                                                start * sects_per_block,
@@ -1157,7 +1157,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
                                }
 
                                ndiscarded += nblocks;
-                               kaddr = kmap_atomic(su_bh->b_page);
+                               kaddr = kmap_local_page(su_bh->b_page);
                                su = nilfs_sufile_block_get_segment_usage(
                                        sufile, segnum, su_bh, kaddr);
                        }
@@ -1166,7 +1166,7 @@ int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
                        start = seg_start;
                        nblocks = seg_end - seg_start + 1;
                }
-               kunmap_atomic(kaddr);
+               kunmap_local(kaddr);
                put_bh(su_bh);
        }
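One detail of the trim path worth noting: the conversion keeps the existing shape of nilfs_sufile_trim_fs(), where the mapping is dropped before blkdev_issue_discard() and re-established afterwards, so the discard itself never runs with the segment-usage page mapped. A stripped-down, hypothetical sketch of that shape follows (example_trim_block() and issue_discard() stand in for the real loop and the real discard call).

/* Hypothetical sketch of the unmap -> discard -> remap sequence above. */
#include <linux/highmem.h>
#include <linux/buffer_head.h>

static int example_trim_block(struct buffer_head *su_bh, int (*issue_discard)(void))
{
        void *kaddr;
        int ret;

        kaddr = kmap_local_page(su_bh->b_page);
        /* ... scan segment usage entries through kaddr, find a range to trim ... */
        kunmap_local(kaddr);            /* drop the mapping before the blocking call */

        ret = issue_discard();          /* blkdev_issue_discard() in the real code */
        if (ret < 0)
                return ret;

        kaddr = kmap_local_page(su_bh->b_page); /* remap and keep scanning */
        /* ... */
        kunmap_local(kaddr);
        return 0;
}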
 
@@ -1246,10 +1246,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
                goto failed;
 
        sui = NILFS_SUI(sufile);
-       kaddr = kmap_atomic(header_bh->b_page);
+       kaddr = kmap_local_page(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
-       kunmap_atomic(kaddr);
+       kunmap_local(kaddr);
        brelse(header_bh);
 
        sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;