Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
diff --git a/fs/io_uring.c b/fs/io_uring.c
index bf548af..04c6d05 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -78,6 +78,7 @@
 #include <linux/task_work.h>
 #include <linux/pagemap.h>
 #include <linux/io_uring.h>
+#include <linux/tracehook.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/io_uring.h>
@@ -1499,7 +1500,8 @@ static bool __io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
 	all_flushed = list_empty(&ctx->cq_overflow_list);
 	if (all_flushed) {
 		clear_bit(0, &ctx->check_cq_overflow);
-		ctx->rings->sq_flags &= ~IORING_SQ_CQ_OVERFLOW;
+		WRITE_ONCE(ctx->rings->sq_flags,
+			   ctx->rings->sq_flags & ~IORING_SQ_CQ_OVERFLOW);
 	}
 
 	if (posted)
@@ -1578,7 +1580,9 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	}
 	if (list_empty(&ctx->cq_overflow_list)) {
 		set_bit(0, &ctx->check_cq_overflow);
-		ctx->rings->sq_flags |= IORING_SQ_CQ_OVERFLOW;
+		WRITE_ONCE(ctx->rings->sq_flags,
+			   ctx->rings->sq_flags | IORING_SQ_CQ_OVERFLOW);
+
 	}
 	ocqe->cqe.user_data = user_data;
 	ocqe->cqe.res = res;
@@ -2222,9 +2226,9 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req)
 
 static inline bool io_run_task_work(void)
 {
-	if (current->task_works) {
+	if (test_thread_flag(TIF_NOTIFY_SIGNAL) || current->task_works) {
 		__set_current_state(TASK_RUNNING);
-		task_work_run();
+		tracehook_notify_signal();
 		return true;
 	}
 
@@ -6803,14 +6807,16 @@ static inline void io_ring_set_wakeup_flag(struct io_ring_ctx *ctx)
 {
 	/* Tell userspace we may need a wakeup call */
 	spin_lock_irq(&ctx->completion_lock);
-	ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
+	WRITE_ONCE(ctx->rings->sq_flags,
+		   ctx->rings->sq_flags | IORING_SQ_NEED_WAKEUP);
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
 static inline void io_ring_clear_wakeup_flag(struct io_ring_ctx *ctx)
 {
 	spin_lock_irq(&ctx->completion_lock);
-	ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
+	WRITE_ONCE(ctx->rings->sq_flags,
+		   ctx->rings->sq_flags & ~IORING_SQ_NEED_WAKEUP);
 	spin_unlock_irq(&ctx->completion_lock);
 }
 
@@ -7132,16 +7138,6 @@ static void **io_alloc_page_table(size_t size)
 	return table;
 }
 
-static inline void io_rsrc_ref_lock(struct io_ring_ctx *ctx)
-{
-	spin_lock_bh(&ctx->rsrc_ref_lock);
-}
-
-static inline void io_rsrc_ref_unlock(struct io_ring_ctx *ctx)
-{
-	spin_unlock_bh(&ctx->rsrc_ref_lock);
-}
-
 static void io_rsrc_node_destroy(struct io_rsrc_node *ref_node)
 {
 	percpu_ref_exit(&ref_node->refs);
@@ -7158,9 +7154,9 @@ static void io_rsrc_node_switch(struct io_ring_ctx *ctx,
 		struct io_rsrc_node *rsrc_node = ctx->rsrc_node;
 
 		rsrc_node->rsrc_data = data_to_kill;
-		io_rsrc_ref_lock(ctx);
+		spin_lock_irq(&ctx->rsrc_ref_lock);
 		list_add_tail(&rsrc_node->node, &ctx->rsrc_ref_list);
-		io_rsrc_ref_unlock(ctx);
+		spin_unlock_irq(&ctx->rsrc_ref_lock);
 
 		atomic_inc(&data_to_kill->refs);
 		percpu_ref_kill(&rsrc_node->refs);
@@ -7199,17 +7195,19 @@ static int io_rsrc_ref_quiesce(struct io_rsrc_data *data, struct io_ring_ctx *ct
 		/* kill initial ref, already quiesced if zero */
 		if (atomic_dec_and_test(&data->refs))
 			break;
+		mutex_unlock(&ctx->uring_lock);
 		flush_delayed_work(&ctx->rsrc_put_work);
 		ret = wait_for_completion_interruptible(&data->done);
-		if (!ret)
+		if (!ret) {
+			mutex_lock(&ctx->uring_lock);
 			break;
+		}
 
 		atomic_inc(&data->refs);
 		/* wait for all works potentially completing data->done */
 		flush_delayed_work(&ctx->rsrc_put_work);
 		reinit_completion(&data->done);
-		mutex_unlock(&ctx->uring_lock);
 
 		ret = io_run_task_work_sig();
 		mutex_lock(&ctx->uring_lock);
 	} while (ret >= 0);
@@ -7668,9 +7666,10 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 {
 	struct io_rsrc_node *node = container_of(ref, struct io_rsrc_node, refs);
 	struct io_ring_ctx *ctx = node->rsrc_data->ctx;
+	unsigned long flags;
 	bool first_add = false;
 
-	io_rsrc_ref_lock(ctx);
+	spin_lock_irqsave(&ctx->rsrc_ref_lock, flags);
 	node->done = true;
 
 	while (!list_empty(&ctx->rsrc_ref_list)) {
@@ -7682,7 +7681,7 @@ static void io_rsrc_node_ref_zero(struct percpu_ref *ref)
 		list_del(&node->node);
 		first_add |= llist_add(&node->llist, &ctx->rsrc_put_llist);
 	}
-	io_rsrc_ref_unlock(ctx);
+	spin_unlock_irqrestore(&ctx->rsrc_ref_lock, flags);
 
 	if (first_add)
 		mod_delayed_work(system_wq, &ctx->rsrc_put_work, HZ);
@@ -8653,13 +8652,10 @@ static void io_req_caches_free(struct io_ring_ctx *ctx)
 	mutex_unlock(&ctx->uring_lock);
 }
 
-static bool io_wait_rsrc_data(struct io_rsrc_data *data)
+static void io_wait_rsrc_data(struct io_rsrc_data *data)
 {
-	if (!data)
-		return false;
-	if (!atomic_dec_and_test(&data->refs))
+	if (data && !atomic_dec_and_test(&data->refs))
 		wait_for_completion(&data->done);
-	return true;
 }
 
 static void io_ring_ctx_free(struct io_ring_ctx *ctx)
@@ -8671,10 +8667,14 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
 		ctx->mm_account = NULL;
 	}
 
+	/* __io_rsrc_put_work() may need uring_lock to progress, wait w/o it */
+	io_wait_rsrc_data(ctx->buf_data);
+	io_wait_rsrc_data(ctx->file_data);
+
 	mutex_lock(&ctx->uring_lock);
-	if (io_wait_rsrc_data(ctx->buf_data))
+	if (ctx->buf_data)
 		__io_sqe_buffers_unregister(ctx);
-	if (io_wait_rsrc_data(ctx->file_data))
+	if (ctx->file_data)
 		__io_sqe_files_unregister(ctx);
 	if (ctx->rings)
 		__io_cqring_overflow_flush(ctx, true);