block: remove i_bdev
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 29f1417..8f13c04 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -2716,11 +2716,7 @@ static struct file *__io_file_get(struct io_submit_state *state, int fd)
 
 static bool io_bdev_nowait(struct block_device *bdev)
 {
-#ifdef CONFIG_BLOCK
        return !bdev || blk_queue_nowait(bdev_get_queue(bdev));
-#else
-       return true;
-#endif
 }
 
 /*
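
Note: dropping the #ifdef here is safe only because every caller now wraps
the call in IS_ENABLED(CONFIG_BLOCK) (see the next hunk). Unlike #ifdef,
that idiom keeps both branches visible to the compiler, which type-checks
them and then folds the constant away, so io_bdev_nowait() no longer needs
a stub for !CONFIG_BLOCK builds. A minimal user-space sketch of the idiom
follows; MY_CONFIG_BLOCK, bdev_nowait() and supports_async() are stand-ins,
not kernel names:

#include <stdbool.h>
#include <stdio.h>

#define MY_CONFIG_BLOCK 1	/* stand-in for IS_ENABLED(CONFIG_BLOCK) */

static bool bdev_nowait(int queue_flags)
{
	return queue_flags & 1;	/* pretend bit 0 means "nowait supported" */
}

static bool supports_async(int queue_flags)
{
	/*
	 * Both operands are compiled even when MY_CONFIG_BLOCK is 0;
	 * constant folding then drops the dead call, so no #ifdef stub
	 * of bdev_nowait() is needed.
	 */
	return MY_CONFIG_BLOCK && bdev_nowait(queue_flags);
}

int main(void)
{
	printf("%d\n", supports_async(1));	/* prints 1 */
	return 0;
}
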
@@ -2733,14 +2729,16 @@ static bool io_file_supports_async(struct file *file, int rw)
        umode_t mode = file_inode(file)->i_mode;
 
        if (S_ISBLK(mode)) {
-               if (io_bdev_nowait(file->f_inode->i_bdev))
+               if (IS_ENABLED(CONFIG_BLOCK) &&
+                   io_bdev_nowait(I_BDEV(file->f_mapping->host)))
                        return true;
                return false;
        }
        if (S_ISCHR(mode) || S_ISSOCK(mode))
                return true;
        if (S_ISREG(mode)) {
-               if (io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
+               if (IS_ENABLED(CONFIG_BLOCK) &&
+                   io_bdev_nowait(file->f_inode->i_sb->s_bdev) &&
                    file->f_op != &io_uring_fops)
                        return true;
                return false;
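
Note: with inode->i_bdev gone, the block device is reached through the
file's mapping host, which for a block device file is the inode embedded
in the bdev's own struct bdev_inode; I_BDEV() is a container_of() accessor
over that embedding. A simplified user-space model follows; the layout
mirrors struct bdev_inode from fs/block_dev.c, but the field contents and
my_I_BDEV() are stand-ins:

#include <stddef.h>
#include <stdio.h>

struct block_device { int queue_flags; };
struct inode { long i_ino; };

/* mirrors struct bdev_inode: the bdev and its inode are one allocation */
struct bdev_inode {
	struct block_device bdev;
	struct inode vfs_inode;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static struct block_device *my_I_BDEV(struct inode *inode)
{
	/* walk back from the embedded inode to the enclosing bdev */
	return &container_of(inode, struct bdev_inode, vfs_inode)->bdev;
}

int main(void)
{
	struct bdev_inode bi = { .bdev = { .queue_flags = 1 } };
	printf("%d\n", my_I_BDEV(&bi.vfs_inode)->queue_flags);	/* prints 1 */
	return 0;
}
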
@@ -3547,8 +3545,7 @@ static int io_write(struct io_kiocb *req, bool force_nonblock,
         * we return to userspace.
         */
        if (req->flags & REQ_F_ISREG) {
-               __sb_start_write(file_inode(req->file)->i_sb,
-                                       SB_FREEZE_WRITE, true);
+               sb_start_write(file_inode(req->file)->i_sb);
                __sb_writers_release(file_inode(req->file)->i_sb,
                                        SB_FREEZE_WRITE);
        }
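
Note: __sb_start_write(sb, SB_FREEZE_WRITE, true) was simply the blocking
form of taking freeze protection, which the sb_start_write() helper now
expresses directly. The __sb_writers_release() that follows drops only the
lockdep ownership, not the reference itself, because the write completes
asynchronously and the freeze reference is put from that other context. A
toy user-space model of the hand-off, assuming a single atomic counter as
a stand-in for the sb->s_writers percpu counters:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int freeze_writers;	/* stand-in for sb->s_writers */

static void my_sb_start_write(void) { atomic_fetch_add(&freeze_writers, 1); }
static void my_sb_end_write(void)   { atomic_fetch_sub(&freeze_writers, 1); }

int main(void)
{
	/* submit context: take the freeze reference... */
	my_sb_start_write();
	/*
	 * ...then disown it for lock-tracking purposes (the kernel's
	 * __sb_writers_release()); the count stays elevated until the
	 * async completion drops it, possibly on another thread.
	 */
	printf("writers in flight: %d\n", atomic_load(&freeze_writers));
	my_sb_end_write();	/* completion context */
	return 0;
}
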
@@ -8470,7 +8467,21 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
 
 static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
 {
-       return io_match_link(container_of(work, struct io_kiocb, work), data);
+       struct io_kiocb *req = container_of(work, struct io_kiocb, work);
+       bool ret;
+
+       if (req->flags & REQ_F_LINK_TIMEOUT) {
+               unsigned long flags;
+               struct io_ring_ctx *ctx = req->ctx;
+
+               /* protect against races with linked timeouts */
+               spin_lock_irqsave(&ctx->completion_lock, flags);
+               ret = io_match_link(req, data);
+               spin_unlock_irqrestore(&ctx->completion_lock, flags);
+       } else {
+               ret = io_match_link(req, data);
+       }
+       return ret;
 }
 
 static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
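
Note: the cancellation callback can run while a linked timeout fires and
detaches itself from the request's link list, so the match must be read
under ctx->completion_lock; only requests flagged REQ_F_LINK_TIMEOUT pay
for the lock. A hedged pthreads sketch of that "lock only the racy case"
shape; struct request, F_HAS_TIMEOUT and the helpers are stand-ins:

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

#define F_HAS_TIMEOUT 0x1

struct request {
	unsigned flags;
	void *link;		/* may be rewritten by a timeout handler */
};

static pthread_mutex_t completion_lock = PTHREAD_MUTEX_INITIALIZER;

static bool match_link(struct request *req, void *data)
{
	return req->link == data;
}

static bool cancel_match(struct request *req, void *data)
{
	bool ret;

	if (req->flags & F_HAS_TIMEOUT) {
		/* a concurrent timeout may unlink: serialize the read */
		pthread_mutex_lock(&completion_lock);
		ret = match_link(req, data);
		pthread_mutex_unlock(&completion_lock);
	} else {
		ret = match_link(req, data);
	}
	return ret;
}

int main(void)
{
	struct request req = { .flags = F_HAS_TIMEOUT, .link = NULL };
	return cancel_match(&req, NULL) ? 0 : 1;
}
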
@@ -9212,6 +9223,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
                 * to a power-of-two, if it isn't already. We do NOT impose
                 * any cq vs sq ring sizing.
                 */
+               p->cq_entries = roundup_pow_of_two(p->cq_entries);
                if (p->cq_entries < p->sq_entries)
                        return -EINVAL;
                if (p->cq_entries > IORING_MAX_CQ_ENTRIES) {
@@ -9219,7 +9231,6 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p,
                                return -EINVAL;
                        p->cq_entries = IORING_MAX_CQ_ENTRIES;
                }
-               p->cq_entries = roundup_pow_of_two(p->cq_entries);
        } else {
                p->cq_entries = 2 * p->sq_entries;
        }
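
Note: sq_entries has already been rounded up to a power of two by this
point, so comparing it against the raw, unrounded cq_entries spuriously
rejected valid sizes; rounding cq_entries first fixes that. A worked
user-space example of the two orderings, with roundup_pow2() standing in
for the kernel's roundup_pow_of_two():

#include <stdio.h>

static unsigned roundup_pow2(unsigned v)
{
	unsigned p = 1;
	while (p < v)
		p <<= 1;
	return p;
}

int main(void)
{
	unsigned sq = roundup_pow2(10);	/* sq_entries is already 16 */
	unsigned cq_req = 10;		/* user-requested CQ size */

	/* old order: raw 10 < rounded 16, a spurious -EINVAL */
	printf("old check rejects: %d\n", cq_req < sq);

	/* new order: round first, then 16 < 16 is false, so accepted */
	printf("new check rejects: %d\n", roundup_pow2(cq_req) < sq);
	return 0;
}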