return err;
}
- /* Normal + sparse files. */
- return mpage_read_folio(folio, ntfs_get_block);
- iomap_read_folio(&ntfs_iomap_ops, &ctx);
++ iomap_read_folio(&ntfs_iomap_ops, &ctx, NULL);
+ return 0;
}
static void ntfs_readahead(struct readahead_control *rac)
return;
}
- valid = ni->i_valid;
- pos = readahead_pos(rac);
- iomap_readahead(&ntfs_iomap_ops, &ctx);
++ iomap_readahead(&ntfs_iomap_ops, &ctx, NULL);
+ }
- if (valid < i_size_read(inode) && pos <= valid &&
- valid < pos + readahead_length(rac)) {
- /* Range cross 'valid'. Read it page by page. */
- return;
+ int ntfs_set_size(struct inode *inode, u64 new_size)
+ {
+ struct super_block *sb = inode->i_sb;
+ struct ntfs_sb_info *sbi = sb->s_fs_info;
+ struct ntfs_inode *ni = ntfs_i(inode);
+ int err;
+
+ /* Check for maximum file size. */
+ if (is_sparsed(ni) || is_compressed(ni)) {
+ if (new_size > sbi->maxbytes_sparse) {
+ return -EFBIG;
+ }
+ } else if (new_size > sbi->maxbytes) {
+ return -EFBIG;
}
- mpage_readahead(rac, ntfs_get_block);
- }
+ ni_lock(ni);
+ down_write(&ni->file.run_lock);
- static int ntfs_get_block_direct_IO_R(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
- return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
- bh_result, create, GET_BLOCK_DIRECT_IO_R);
- }
+ err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
+ &ni->i_valid, true);
- static int ntfs_get_block_direct_IO_W(struct inode *inode, sector_t iblock,
- struct buffer_head *bh_result, int create)
- {
- return ntfs_get_block_vbo(inode, (u64)iblock << inode->i_blkbits,
- bh_result, create, GET_BLOCK_DIRECT_IO_W);
+ if (!err) {
+ i_size_write(inode, new_size);
+ mark_inode_dirty(inode);
+ }
+
+ up_write(&ni->file.run_lock);
+ ni_unlock(ni);
+
+ return err;
}
- static ssize_t ntfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
+ /*
+  * Special marker value passed via the 'srcmap' argument so that
+  * ntfs_iomap_begin can detect a call from ntfs_writeback_range.
+  */
+ #define WB_NO_DA (struct iomap *)1
+ /*
+  * ntfs_iomap_begin - Map a file byte range (vbo) to disk blocks (lbo).
+  *
+  * Used by:
+  * - iomap_zero_range
+  * - iomap_truncate_page
+  * - iomap_dio_rw
+  * - iomap_file_buffered_write
+  * - iomap_bmap
+  * - iomap_fiemap
+  * - iomap_bio_read_folio
+  * - iomap_bio_readahead
+  */
+ static int ntfs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
+ unsigned int flags, struct iomap *iomap,
+ struct iomap *srcmap)
{
- struct file *file = iocb->ki_filp;
- struct address_space *mapping = file->f_mapping;
- struct inode *inode = mapping->host;
struct ntfs_inode *ni = ntfs_i(inode);
- loff_t vbo = iocb->ki_pos;
- loff_t end;
- int wr = iov_iter_rw(iter) & WRITE;
- size_t iter_count = iov_iter_count(iter);
- loff_t valid;
- ssize_t ret;
+ struct ntfs_sb_info *sbi = ni->mi.sbi;
+ u8 cluster_bits = sbi->cluster_bits;
+ CLST vcn = offset >> cluster_bits;
+ u32 off = offset & sbi->cluster_mask;
+ bool rw = flags & IOMAP_WRITE;
+ loff_t endbyte = offset + length;
+ void *res = NULL;
+ int err;
+ CLST lcn, clen, clen_max = 1;
+ bool new_clst = false;
+ bool no_da;
+ bool zero = false;
+ if (unlikely(ntfs3_forced_shutdown(sbi->sb)))
+ return -EIO;
- if (is_resident(ni)) {
- /* Switch to buffered write. */
- ret = 0;
- goto out;
+ if (flags & IOMAP_REPORT) {
+ if (offset > ntfs_get_maxbytes(ni)) {
+ /* called from fiemap/bmap. */
+ return -EINVAL;
+ }
+
+ if (offset >= inode->i_size) {
+ /* special code for report. */
+ return -ENOENT;
+ }
}
- if (is_compressed(ni)) {
- ret = 0;
- goto out;
+
+ if (IOMAP_ZERO == flags && (endbyte & sbi->cluster_mask)) {
+ rw = true;
+ } else if (rw) {
+ clen_max = bytes_to_cluster(sbi, endbyte) - vcn;
}
- ret = blockdev_direct_IO(iocb, inode, iter,
- wr ? ntfs_get_block_direct_IO_W :
- ntfs_get_block_direct_IO_R);
+ /*
+  * Force cluster allocation for direct I/O writes or writeback_range.
+  * NOTE: attr_data_get_block allocates clusters only for sparse files;
+  * normal files allocate clusters in attr_set_size.
+  */
+ no_da = flags == (IOMAP_DIRECT | IOMAP_WRITE) || srcmap == WB_NO_DA;
- if (ret > 0)
- end = vbo + ret;
- else if (wr && ret == -EIOCBQUEUED)
- end = vbo + iter_count;
- else
- goto out;
+ err = attr_data_get_block(ni, vcn, clen_max, &lcn, &clen,
+ rw ? &new_clst : NULL, zero, &res, no_da);
- valid = ni->i_valid;
- if (wr) {
- if (end > valid && !S_ISBLK(inode->i_mode)) {
- ni->i_valid = end;
- mark_inode_dirty(inode);
+ if (err) {
+ return err;
+ }
+
+ if (lcn == EOF_LCN) {
+ /* Request is beyond the end of the file. */
+ if (flags & IOMAP_REPORT) {
+ /* special code for report. */
+ return -ENOENT;
+ }
+
+ if (rw) {
+ /* should never be here. */
+ return -EINVAL;
}
- } else if (vbo < valid && valid < end) {
- /* Fix page. */
- iov_iter_revert(iter, end - valid);
- iov_iter_zero(end - valid, iter);
+ lcn = SPARSE_LCN;
}
- out:
- return ret;
+ iomap->flags = new_clst ? IOMAP_F_NEW : 0;
+
+ if (lcn == RESIDENT_LCN) {
+ if (offset >= clen) {
+ kfree(res);
+ if (flags & IOMAP_REPORT) {
+ /* special code for report. */
+ return -ENOENT;
+ }
+ return -EFAULT;
+ }
+
+ iomap->private = iomap->inline_data = res;
+ iomap->type = IOMAP_INLINE;
+ iomap->offset = 0;
+ iomap->length = clen; /* resident size in bytes. */
+ return 0;
+ }
+
+ if (!clen) {
+ /* broken file? */
+ return -EINVAL;
+ }
+
+ iomap->bdev = inode->i_sb->s_bdev;
+ iomap->offset = offset;
+ iomap->length = ((loff_t)clen << cluster_bits) - off;
+
+ if (lcn == COMPRESSED_LCN) {
+ /* should never be here. */
+ return -EOPNOTSUPP;
+ }
+
+ if (lcn == DELALLOC_LCN) {
+ iomap->type = IOMAP_DELALLOC;
+ iomap->addr = IOMAP_NULL_ADDR;
+ } else {
+
+ /* Translate clusters into bytes. */
+ iomap->addr = ((loff_t)lcn << cluster_bits) + off;
+ if (length && iomap->length > length)
+ iomap->length = length;
+ else
+ endbyte = offset + iomap->length;
+
+ if (lcn == SPARSE_LCN) {
+ iomap->addr = IOMAP_NULL_ADDR;
+ iomap->type = IOMAP_HOLE;
+ // if (IOMAP_ZERO == flags && !off) {
+ // iomap->length = (endbyte - offset) &
+ // sbi->cluster_mask_inv;
+ // }
+ } else if (endbyte <= ni->i_valid) {
+ iomap->type = IOMAP_MAPPED;
+ } else if (offset < ni->i_valid) {
+ iomap->type = IOMAP_MAPPED;
+ if (flags & IOMAP_REPORT)
+ iomap->length = ni->i_valid - offset;
+ } else if (rw || (flags & IOMAP_ZERO)) {
+ iomap->type = IOMAP_MAPPED;
+ } else {
+ iomap->type = IOMAP_UNWRITTEN;
+ }
+ }
+
+ if ((flags & IOMAP_ZERO) &&
+ (iomap->type == IOMAP_MAPPED || iomap->type == IOMAP_DELALLOC)) {
+ /* Avoid too large requests. */
+ u32 tail;
+ u32 off_a = offset & (PAGE_SIZE - 1);
+ if (off_a)
+ tail = PAGE_SIZE - off_a;
+ else
+ tail = PAGE_SIZE;
+
+ if (iomap->length > tail)
+ iomap->length = tail;
+ }
+
+ return 0;
}
- int ntfs_set_size(struct inode *inode, u64 new_size)
+ static int ntfs_iomap_end(struct inode *inode, loff_t pos, loff_t length,
+ ssize_t written, unsigned int flags,
+ struct iomap *iomap)
{
- struct super_block *sb = inode->i_sb;
- struct ntfs_sb_info *sbi = sb->s_fs_info;
+ int err = 0;
struct ntfs_inode *ni = ntfs_i(inode);
- int err;
+ loff_t endbyte = pos + written;
- /* Check for maximum file size. */
- if (is_sparsed(ni) || is_compressed(ni)) {
- if (new_size > sbi->maxbytes_sparse) {
- err = -EFBIG;
- goto out;
- }
- } else if (new_size > sbi->maxbytes) {
- err = -EFBIG;
- goto out;
- }
+ if ((flags & IOMAP_WRITE) || (flags & IOMAP_ZERO)) {
+ if (iomap->type == IOMAP_INLINE) {
+ u32 data_size;
+ struct ATTRIB *attr;
+ struct mft_inode *mi;
- ni_lock(ni);
- down_write(&ni->file.run_lock);
+ attr = ni_find_attr(ni, NULL, NULL, ATTR_DATA, NULL, 0,
+ NULL, &mi);
+ if (!attr || attr->non_res) {
+ err = -EINVAL;
+ goto out;
+ }
- err = attr_set_size(ni, ATTR_DATA, NULL, 0, &ni->file.run, new_size,
- &ni->i_valid, true, NULL);
+ data_size = le32_to_cpu(attr->res.data_size);
+ if (!(pos < data_size && endbyte <= data_size)) {
+ err = -EINVAL;
+ goto out;
+ }
- up_write(&ni->file.run_lock);
- ni_unlock(ni);
+ /* Update resident data. */
+ memcpy(resident_data(attr) + pos,
+ iomap_inline_data(iomap, pos), written);
+ mi->dirty = true;
+ ni->i_valid = data_size;
+ } else if (ni->i_valid < endbyte) {
+ ni->i_valid = endbyte;
+ mark_inode_dirty(inode);
+ }
+ }
- mark_inode_dirty(inode);
+ if ((flags & IOMAP_ZERO) &&
+ (iomap->type == IOMAP_MAPPED || iomap->type == IOMAP_DELALLOC)) {
+ /* Pair for code in ntfs_iomap_begin. */
+ balance_dirty_pages_ratelimited(inode->i_mapping);
+ cond_resched();
+ }
out:
+ if (iomap->type == IOMAP_INLINE) {
+ kfree(iomap->private);
+ iomap->private = NULL;
+ }
+
return err;
}