btrfs: simplify the btrfs_csum_one_bio calling convention
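
btrfs_csum_one_bio() used to be handed the inode, the bio, the file
offset and a bool selecting the single-ordered-extent mode, as in the
old call site in btrfs_submit_compressed_write():

    btrfs_csum_one_bio(inode, bio, start, true);

All of that context is now carried by struct btrfs_bio: the inode is
attached at allocation time (btrfs_bio_alloc() grows an inode
argument), the submitter records the file offset in bbio->file_offset,
and the one_ordered bool is replaced by the REQ_BTRFS_ONE_ORDERED op
flag, so the call shrinks to:

    btrfs_csum_one_bio(btrfs_bio(bio));

As the later hunks show, the compressed read path also stops doing its
own checksum work: btrfs_lookup_bio_sums() and the per-sector
verify/repair loop in end_compressed_bio_read() go away, on the
assumption (from the rest of this series) that checksum lookup and
verification now happen below btrfs_submit_bio().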
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 5122ca7..7999c28 100644
@@ -164,52 +164,15 @@ static void finish_compressed_bio_read(struct compressed_bio *cb)
        kfree(cb);
 }
 
-/*
- * Verify the checksums and kick off repair if needed on the uncompressed data
- * before decompressing it into the original bio and freeing the uncompressed
- * pages.
- */
 static void end_compressed_bio_read(struct btrfs_bio *bbio)
 {
        struct compressed_bio *cb = bbio->private;
-       struct inode *inode = cb->inode;
-       struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
-       struct btrfs_inode *bi = BTRFS_I(inode);
-       bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) &&
-                   !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
-       blk_status_t status = bbio->bio.bi_status;
-       struct bvec_iter iter;
-       struct bio_vec bv;
-       u32 offset;
-
-       btrfs_bio_for_each_sector(fs_info, bv, bbio, iter, offset) {
-               u64 start = bbio->file_offset + offset;
-
-               if (!status &&
-                   (!csum || !btrfs_check_data_csum(bi, bbio, offset,
-                                                    bv.bv_page, bv.bv_offset))) {
-                       btrfs_clean_io_failure(bi, start, bv.bv_page,
-                                              bv.bv_offset);
-               } else {
-                       int ret;
-
-                       refcount_inc(&cb->pending_ios);
-                       ret = btrfs_repair_one_sector(BTRFS_I(inode), bbio, offset,
-                                                     bv.bv_page, bv.bv_offset,
-                                                     true);
-                       if (ret) {
-                               refcount_dec(&cb->pending_ios);
-                               status = errno_to_blk_status(ret);
-                       }
-               }
-       }
 
-       if (status)
-               cb->status = status;
+       if (bbio->bio.bi_status)
+               cb->status = bbio->bio.bi_status;
 
        if (refcount_dec_and_test(&cb->pending_ios))
                finish_compressed_bio_read(cb);
-       btrfs_bio_free_csum(bbio);
        bio_put(&bbio->bio);
 }
 
@@ -344,7 +307,8 @@ static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_byte
        struct bio *bio;
        int ret;
 
-       bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, endio_func, cb);
+       bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, BTRFS_I(cb->inode), endio_func,
+                             cb);
        bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
 
        em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
@@ -393,7 +357,8 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
        blk_status_t ret = BLK_STS_OK;
        int skip_sum = inode->flags & BTRFS_INODE_NODATASUM;
        const bool use_append = btrfs_use_zone_append(inode, disk_start);
-       const enum req_op bio_op = use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE;
+       const enum req_op bio_op = REQ_BTRFS_ONE_ORDERED |
+                                  (use_append ? REQ_OP_ZONE_APPEND : REQ_OP_WRITE);
 
        ASSERT(IS_ALIGNED(start, fs_info->sectorsize) &&
               IS_ALIGNED(len, fs_info->sectorsize));
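
REQ_BTRFS_ONE_ORDERED is not defined in this file; a plausible shape
for it, as an assumption based on how filesystems borrow the
driver-private request flag (the real definition lives elsewhere in
this series):

    /* Assumed definition: reuse the driver-private bio flag to mark a
     * bio whose data all belongs to a single ordered extent.  It would
     * have to be cleared again before the bio reaches the block layer. */
    #define REQ_BTRFS_ONE_ORDERED   REQ_DRV

For btrfs_csum_one_bio() this flag plays the role of the old
one_ordered bool argument.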
@@ -431,6 +396,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
                                ret = errno_to_blk_status(PTR_ERR(bio));
                                break;
                        }
+                       btrfs_bio(bio)->file_offset = start;
                        if (blkcg_css)
                                bio->bi_opf |= REQ_CGROUP_PUNT;
                }
@@ -472,7 +438,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
 
                if (submit) {
                        if (!skip_sum) {
-                               ret = btrfs_csum_one_bio(inode, bio, start, true);
+                               ret = btrfs_csum_one_bio(btrfs_bio(bio));
                                if (ret) {
                                        btrfs_bio_end_io(btrfs_bio(bio), ret);
                                        break;
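
The old and new calls above imply this prototype change (a sketch; the
parameter names are assumptions, not taken from this diff):

    -blk_status_t btrfs_csum_one_bio(struct btrfs_inode *inode,
    -                                struct bio *bio, u64 offset,
    -                                bool one_ordered);
    +blk_status_t btrfs_csum_one_bio(struct btrfs_bio *bbio);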
@@ -788,24 +754,14 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
                        submit = true;
 
                if (submit) {
-                       /* Save the original iter for read repair */
-                       if (bio_op(comp_bio) == REQ_OP_READ)
-                               btrfs_bio(comp_bio)->iter = comp_bio->bi_iter;
-
                        /*
                         * Save the initial offset of this chunk, as there
                         * is no direct correlation between compressed pages and
                         * the original file offset.  The field is only used for
-                        * priting error messages.
+                        * printing error messages.
                         */
                        btrfs_bio(comp_bio)->file_offset = file_offset;
 
-                       ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
-                       if (ret) {
-                               btrfs_bio_end_io(btrfs_bio(comp_bio), ret);
-                               break;
-                       }
-
                        ASSERT(comp_bio->bi_iter.bi_size);
                        btrfs_submit_bio(fs_info, comp_bio, mirror_num);
                        comp_bio = NULL;
@@ -1609,7 +1565,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
        index_end = end >> PAGE_SHIFT;
 
        /* Don't miss unaligned end */
-       if (!IS_ALIGNED(end, PAGE_SIZE))
+       if (!PAGE_ALIGNED(end))
                index_end++;
 
        curr_sample_pos = 0;
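
The IS_ALIGNED() to PAGE_ALIGNED() switch above is a no-op cleanup:
PAGE_ALIGNED() is the generic mm helper

    #define PAGE_ALIGNED(addr)  IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

so the check is unchanged, just spelled with the dedicated macro.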
@@ -1642,7 +1598,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
  *
 * For now it's a naive and optimistic 'return true', we'll extend the logic to
  * quickly (compared to direct compression) detect data characteristics
- * (compressible/uncompressible) to avoid wasting CPU time on uncompressible
+ * (compressible/incompressible) to avoid wasting CPU time on incompressible
  * data.
  *
  * The following types of analysis can be performed: