c6x: switch to ->regset_get()
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 5b02491..155f3d8 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -55,7 +55,6 @@
 #include <linux/fdtable.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
-#include <linux/mmu_context.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <linux/kthread.h>
@@ -529,7 +528,6 @@ enum {
 	REQ_F_INFLIGHT_BIT,
 	REQ_F_CUR_POS_BIT,
 	REQ_F_NOWAIT_BIT,
-	REQ_F_IOPOLL_COMPLETED_BIT,
 	REQ_F_LINK_TIMEOUT_BIT,
 	REQ_F_TIMEOUT_BIT,
 	REQ_F_ISREG_BIT,
@@ -574,8 +572,6 @@ enum {
 	REQ_F_CUR_POS		= BIT(REQ_F_CUR_POS_BIT),
 	/* must not punt to workers */
 	REQ_F_NOWAIT		= BIT(REQ_F_NOWAIT_BIT),
-	/* polled IO has completed */
-	REQ_F_IOPOLL_COMPLETED	= BIT(REQ_F_IOPOLL_COMPLETED_BIT),
 	/* has linked timeout */
 	REQ_F_LINK_TIMEOUT	= BIT(REQ_F_LINK_TIMEOUT_BIT),
 	/* timeout request */
@@ -640,6 +636,8 @@ struct io_kiocb {
 	struct io_async_ctx	*io;
 	int			cflags;
 	u8			opcode;
+	/* polled IO has completed */
+	u8			iopoll_completed;
 
 	u16			buf_index;
 
@@ -1798,7 +1796,7 @@ static int io_do_iopoll(struct io_ring_ctx *ctx, unsigned int *nr_events,
 		 * If we find a request that requires polling, break out
 		 * and complete those lists first, if we have entries there.
 		 */
-		if (req->flags & REQ_F_IOPOLL_COMPLETED) {
+		if (READ_ONCE(req->iopoll_completed)) {
 			list_move_tail(&req->list, &done);
 			continue;
 		}
@@ -1979,7 +1977,7 @@ static void io_complete_rw_iopoll(struct kiocb *kiocb, long res, long res2)
 		req_set_fail_links(req);
 	req->result = res;
 	if (res != -EAGAIN)
-		req->flags |= REQ_F_IOPOLL_COMPLETED;
+		WRITE_ONCE(req->iopoll_completed, 1);
 }
 
 /*
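The hunks above are the core of the iopoll change. REQ_F_IOPOLL_COMPLETED used to live in req->flags, a word the request's owner updates with plain, non-atomic read-modify-write, while io_complete_rw_iopoll() may set the completion bit concurrently from the completion path; two unsynchronized writers to one word can lose an update. Moving the status into its own u8 (which also slots in next to the existing opcode byte) gives the completion side exclusive ownership of that byte: it publishes with WRITE_ONCE(), the poll loop reads with READ_ONCE(), and the io_prep_rw() hunk further down clears the byte per request, since a dedicated field is not re-initialized wholesale the way a flags word is. A minimal sketch of the pattern, with illustrative names rather than the kernel's:

	#include <linux/types.h>
	#include <linux/compiler.h>

	/* The completion side owns ->io_done and publishes it with a
	 * single-copy atomic store; the poll side consumes it with a
	 * single-copy atomic load. Neither touches ->flags, so the
	 * owner's plain RMW on ->flags can no longer lose a bit.
	 */
	struct req_sketch {
		unsigned int	flags;		/* owner-only, plain RMW */
		u8		io_done;	/* completion-side only */
	};

	static void complete_side(struct req_sketch *req)
	{
		WRITE_ONCE(req->io_done, 1);
	}

	static bool poll_side(struct req_sketch *req)
	{
		return READ_ONCE(req->io_done);
	}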
@@ -2012,7 +2010,7 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
 	 * For fast devices, IO may have already completed. If it has, add
 	 * it to the front so we find it first.
 	 */
-	if (req->flags & REQ_F_IOPOLL_COMPLETED)
+	if (READ_ONCE(req->iopoll_completed))
 		list_add(&req->list, &ctx->poll_list);
 	else
 		list_add_tail(&req->list, &ctx->poll_list);
@@ -2140,6 +2138,7 @@ static int io_prep_rw(struct io_kiocb *req, const struct io_uring_sqe *sqe,
 		kiocb->ki_flags |= IOCB_HIPRI;
 		kiocb->ki_complete = io_complete_rw_iopoll;
 		req->result = 0;
+		req->iopoll_completed = 0;
 	} else {
 		if (kiocb->ki_flags & IOCB_HIPRI)
 			return -EINVAL;
@@ -5827,7 +5826,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
 	if (io_op_defs[req->opcode].needs_mm && !current->mm) {
 		if (unlikely(!mmget_not_zero(ctx->sqo_mm)))
 			return -EFAULT;
-		use_mm(ctx->sqo_mm);
+		kthread_use_mm(ctx->sqo_mm);
 	}
 
 	sqe_flags = READ_ONCE(sqe->flags);
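use_mm() and unuse_mm() were renamed kthread_use_mm() and kthread_unuse_mm() in the same series, which is also why the <linux/mmu_context.h> include disappears in the first hunk; the new names, and the move to kthread territory, make explicit that only a kernel thread may borrow a userspace mm this way. Roughly, the calling convention looks like the following sketch, where sq_worker() and do_uaccess_work() are placeholders rather than kernel functions:

	#include <linux/kthread.h>
	#include <linux/sched/mm.h>

	extern void do_uaccess_work(void);	/* hypothetical payload */

	static int sq_worker(void *data)
	{
		struct mm_struct *mm = data;	/* caller holds a reference */

		kthread_use_mm(mm);	/* adopt mm; uaccess now targets it */
		do_uaccess_work();	/* copy_from_user()/copy_to_user() */
		kthread_unuse_mm(mm);	/* detach from the borrowed mm */
		mmput(mm);		/* drop the caller's reference */
		return 0;
	}

This matches the io_uring usage above: io_init_req() takes the reference with mmget_not_zero() before kthread_use_mm(), and io_sq_thread_drop_mm() pairs kthread_unuse_mm() with mmput().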
@@ -5942,7 +5941,7 @@ static inline void io_sq_thread_drop_mm(struct io_ring_ctx *ctx)
 	struct mm_struct *mm = current->mm;
 
 	if (mm) {
-		unuse_mm(mm);
+		kthread_unuse_mm(mm);
 		mmput(mm);
 	}
 }
@@ -5951,15 +5950,12 @@ static int io_sq_thread(void *data)
 {
 	struct io_ring_ctx *ctx = data;
 	const struct cred *old_cred;
-	mm_segment_t old_fs;
 	DEFINE_WAIT(wait);
 	unsigned long timeout;
 	int ret = 0;
 
 	complete(&ctx->sq_thread_comp);
 
-	old_fs = get_fs();
-	set_fs(USER_DS);
 	old_cred = override_creds(ctx->creds);
 
 	timeout = jiffies + ctx->sq_thread_idle;
@@ -6064,7 +6060,6 @@ static int io_sq_thread(void *data)
 	if (current->task_works)
 		task_work_run();
 
-	set_fs(old_fs);
 	io_sq_thread_drop_mm(ctx);
 	revert_creds(old_cred);
 
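The SQPOLL thread also stops saving and overriding the address limit. As part of the broader set_fs() removal work, kernel threads were changed to run with USER_DS by default, so the explicit widen-then-restore dance becomes dead code once the thread has adopted the user mm via kthread_use_mm(). For reference, the pattern being deleted is the classic one:

	mm_segment_t old_fs = get_fs();	/* save the current address limit */

	set_fs(USER_DS);	/* allow uaccess to user-space pointers */
	/* ... copy_from_user()/copy_to_user() on behalf of the task ... */
	set_fs(old_fs);		/* restore the saved limit */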
@@ -7148,7 +7143,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 		}
 
 		ret = 0;
-		down_read(&current->mm->mmap_sem);
+		mmap_read_lock(current->mm);
 		pret = pin_user_pages(ubuf, nr_pages,
 				      FOLL_WRITE | FOLL_LONGTERM,
 				      pages, vmas);
@@ -7166,7 +7161,7 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
 		} else {
 			ret = pret < 0 ? pret : -EFAULT;
 		}
-		up_read(&current->mm->mmap_sem);
+		mmap_read_unlock(current->mm);
 		if (ret) {
 			/*
 			 * if we did partial map, or found file backed vmas,
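The last two hunks convert the buffer-registration path to the mmap locking API: open-coded down_read()/up_read() on current->mm->mmap_sem become mmap_read_lock()/mmap_read_unlock(). The wrappers keep callers independent of how the lock is represented, so it can be renamed, instrumented, or swapped out in one place. A small usage sketch; count_vmas() is an illustrative helper, not kernel code, and it walks the linked VMA list of this era's struct mm_struct:

	#include <linux/mm.h>
	#include <linux/mmap_lock.h>

	static long count_vmas(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		long n = 0;

		mmap_read_lock(mm);	/* was: down_read(&mm->mmap_sem) */
		for (vma = mm->mmap; vma; vma = vma->vm_next)
			n++;
		mmap_read_unlock(mm);	/* was: up_read(&mm->mmap_sem) */

		return n;
	}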