        int page_errors;                /* errno from get_user_pages() */
        int is_async;                   /* is IO async ? */
        bool defer_completion;          /* defer AIO completion to workqueue? */
+       bool should_dirty;              /* if pages should be dirtied */
        int io_error;                   /* IO error in completion path */
        unsigned long refcount;         /* direct_io_worker() and bios */
        struct bio *bio_list;           /* singly linked via bi_private */
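
The new flag records whether the pages under this dio came from user space. Only user-memory (ITER_IOVEC) pages need to be marked dirty after a direct read; ITER_BVEC and ITER_KVEC iterators carry kernel pages (in-kernel direct reads, such as the loop driver's, being a motivating case), and those must not be dirtied here. A minimal userspace sketch of the decision, with illustrative names rather than the kernel's own:

    #include <stdbool.h>
    #include <stdio.h>

    /* Modeled on the iov_iter backing types; names and values are assumptions. */
    enum model_iter_type { MODEL_ITER_IOVEC, MODEL_ITER_KVEC, MODEL_ITER_BVEC };

    /* Only iovec-backed (user memory) reads should dirty their pages;
     * kvec/bvec pages belong to the kernel. */
    static bool model_should_dirty(enum model_iter_type type)
    {
            return type == MODEL_ITER_IOVEC;
    }

    int main(void)
    {
            printf("iovec read dirties pages: %d\n",
                   model_should_dirty(MODEL_ITER_IOVEC));
            printf("bvec read dirties pages:  %d\n",
                   model_should_dirty(MODEL_ITER_BVEC));
            return 0;
    }
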
        dio->refcount++;
        spin_unlock_irqrestore(&dio->bio_lock, flags);
 
-       if (dio->is_async && dio->rw == READ)
+       if (dio->is_async && dio->rw == READ && dio->should_dirty)
                bio_set_pages_dirty(bio);
 
        if (sdio->submit_io)
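
In dio_bio_submit(), the pages behind an async read are dirtied up front: set_page_dirty() is not safe from the interrupt context where async completion runs, so the submit path marks the user pages dirty before issuing the bio, and bio_check_pages_dirty() later re-dirties any page the VM cleaned while the I/O was in flight. The extra dio->should_dirty test skips all of this for kernel-backed iterators.
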
        if (bio->bi_error)
                dio->io_error = -EIO;
 
-       if (dio->is_async && dio->rw == READ) {
+       if (dio->is_async && dio->rw == READ && dio->should_dirty) {
                bio_check_pages_dirty(bio);     /* transfers ownership */
                err = bio->bi_error;
        } else {
                bio_for_each_segment_all(bvec, bio, i) {
                        struct page *page = bvec->bv_page;
 
-                       if (dio->rw == READ && !PageCompound(page))
+                       if (dio->rw == READ && !PageCompound(page) &&
+                                       dio->should_dirty)
                                set_page_dirty_lock(page);
                        page_cache_release(page);
                }
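
The completion path gets the matching change: bio_check_pages_dirty() is still used for async user reads (it re-dirties and releases the pages, hence "transfers ownership"), while the fallback loop now dirties a page only when it is user memory as well as a non-compound read target. With both call sites patched, the decision looks like this end to end; a toy userspace model under assumed names (model_dio, dirty_at_submit and dirty_at_complete are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the few struct dio fields the two predicates read. */
    struct model_dio {
            bool is_async;          /* AIO? */
            bool is_read;           /* dio->rw == READ */
            bool should_dirty;      /* iovec-backed, i.e. user pages */
    };

    /* dio_bio_submit(): async user reads dirty pages before submission. */
    static bool dirty_at_submit(const struct model_dio *d)
    {
            return d->is_async && d->is_read && d->should_dirty;
    }

    /* dio_bio_complete(): sync user reads are dirtied here instead;
     * writes and kernel pages are only released. */
    static bool dirty_at_complete(const struct model_dio *d)
    {
            return d->is_read && d->should_dirty && !dirty_at_submit(d);
    }

    int main(void)
    {
            struct model_dio bvec_read  = { .is_async = true,  .is_read = true,
                                            .should_dirty = false };
            struct model_dio iovec_read = { .is_async = false, .is_read = true,
                                            .should_dirty = true };

            /* Kernel-backed read: neither path dirties the pages any more. */
            printf("bvec:  submit=%d complete=%d\n",
                   dirty_at_submit(&bvec_read), dirty_at_complete(&bvec_read));
            /* Synchronous user read: dirtied at completion, as before. */
            printf("iovec: submit=%d complete=%d\n",
                   dirty_at_submit(&iovec_read), dirty_at_complete(&iovec_read));
            return 0;
    }
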
        spin_lock_init(&dio->bio_lock);
        dio->refcount = 1;
 
+       dio->should_dirty = (iter->type == ITER_IOVEC);
        sdio.iter = iter;
        sdio.final_block_in_request =
                (offset + iov_iter_count(iter)) >> blkbits;
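
Finally, do_blockdev_direct_IO() derives the flag from the iterator type, so only reads into user buffers keep dirtying pages. For reference, a plain O_DIRECT read(2) on a filesystem that uses the generic direct I/O path is exactly the ITER_IOVEC case that still dirties pages; a minimal, self-contained userspace example (the 4096-byte alignment is an assumption that covers common block sizes):

    #define _GNU_SOURCE             /* for O_DIRECT */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
            void *buf;
            ssize_t n;
            int fd;

            if (argc != 2) {
                    fprintf(stderr, "usage: %s <file>\n", argv[0]);
                    return 1;
            }

            /* O_DIRECT requires aligned buffers, offsets and lengths. */
            if (posix_memalign(&buf, 4096, 4096)) {
                    fprintf(stderr, "posix_memalign failed\n");
                    return 1;
            }

            fd = open(argv[1], O_RDONLY | O_DIRECT);
            if (fd < 0) {
                    perror("open");
                    return 1;
            }

            /* This read reaches the direct I/O code with an iovec-backed
             * iterator, so dio->should_dirty is set and the user pages are
             * dirtied once the DMA completes. */
            n = read(fd, buf, 4096);
            printf("read %zd bytes\n", n);

            close(fd);
            free(buf);
            return 0;
    }
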