#ifndef INTERNAL_IO_WQ_H
#define INTERNAL_IO_WQ_H

#include <linux/io_uring.h>

struct io_wq;
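
/*
 * Flags describing a queued io_wq_work item, stored in work->flags.
 * The IO_WQ_WORK_FILES through IO_WQ_WORK_FSIZE bits record which parts
 * of the submitting task's context the work item needs to run with.
 */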
enum {
	IO_WQ_WORK_CANCEL	= 1,
	IO_WQ_WORK_HASHED	= 2,
	IO_WQ_WORK_UNBOUND	= 4,
	IO_WQ_WORK_CONCURRENT	= 16,

	IO_WQ_WORK_FILES	= 32,
	IO_WQ_WORK_FS		= 64,
	IO_WQ_WORK_MM		= 128,
	IO_WQ_WORK_CREDS	= 256,
	IO_WQ_WORK_BLKCG	= 512,
	IO_WQ_WORK_FSIZE	= 1024,

	IO_WQ_HASH_SHIFT	= 24,	/* upper 8 bits are used for hash key */
};
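
/* Return status of an io_wq_cancel_cb() attempt. */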
enum io_wq_cancel {
	IO_WQ_CANCEL_OK,	/* cancelled before started */
	IO_WQ_CANCEL_RUNNING,	/* found, running, and cancellation attempted */
	IO_WQ_CANCEL_NOTFOUND,	/* work not found */
};
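
/*
 * Minimal singly linked list used for queued work: each work item embeds
 * an io_wq_work_node, and io_wq_work_list tracks head and tail so that
 * appending is O(1).
 */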
struct io_wq_work_node {
	struct io_wq_work_node *next;
};

struct io_wq_work_list {
	struct io_wq_work_node *first;
	struct io_wq_work_node *last;
};
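
/* Link @node right after @pos; if @pos was the tail, @node becomes it. */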
static inline void wq_list_add_after(struct io_wq_work_node *node,
				     struct io_wq_work_node *pos,
				     struct io_wq_work_list *list)
{
	struct io_wq_work_node *next = pos->next;

	pos->next = node;
	node->next = next;
	if (!next)
		list->last = node;
}
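
/*
 * Append @node to @list. The WRITE_ONCE() on ->first pairs with the
 * READ_ONCE() in wq_list_empty() for lockless emptiness checks.
 */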
static inline void wq_list_add_tail(struct io_wq_work_node *node,
				    struct io_wq_work_list *list)
{
	if (!list->first) {
		list->last = node;
		WRITE_ONCE(list->first, node);
	} else {
		list->last->next = node;
		list->last = node;
	}
	node->next = NULL;
}
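
/*
 * Detach the chain of nodes following @prev, up to and including @last.
 * @prev == NULL means the chain starts at the head of the list.
 */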
static inline void wq_list_cut(struct io_wq_work_list *list,
			       struct io_wq_work_node *last,
			       struct io_wq_work_node *prev)
{
	/* first in the list, if prev==NULL */
	if (!prev)
		WRITE_ONCE(list->first, last->next);
	else
		prev->next = last->next;

	if (last == list->last)
		list->last = prev;
	last->next = NULL;
}
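
/* Remove @node, whose predecessor is @prev (NULL if @node is first). */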
static inline void wq_list_del(struct io_wq_work_list *list,
			       struct io_wq_work_node *node,
			       struct io_wq_work_node *prev)
{
	wq_list_cut(list, node, prev);
}
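
/*
 * Walk the list keeping a trailing @prv pointer, which callers need when
 * unlinking the current node with wq_list_del().
 */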
#define wq_list_for_each(pos, prv, head)			\
	for (pos = (head)->first, prv = NULL; pos; prv = pos, pos = (pos)->next)

#define wq_list_empty(list)	(READ_ONCE((list)->first) == NULL)
#define INIT_WQ_LIST(list)	do {				\
	(list)->first = NULL;					\
	(list)->last = NULL;					\
} while (0)
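
/*
 * One unit of queued work. ->identity pins the submitting task's context
 * (mm, files, creds, etc.) for the IO_WQ_WORK_* bits set in ->flags.
 */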
struct io_wq_work {
	struct io_wq_work_node list;
	struct io_identity *identity;
	unsigned flags;
};
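
/* Return the work item linked after @work, or NULL if it is the last. */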
static inline struct io_wq_work *wq_next_work(struct io_wq_work *work)
{
	if (!work->list.next)
		return NULL;

	return container_of(work->list.next, struct io_wq_work, list);
}
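
/*
 * Callbacks supplied by the io_wq user: do_work runs a work item and
 * returns the next linked item to run (if any), free_work releases it.
 */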
typedef void (free_work_fn)(struct io_wq_work *);
typedef struct io_wq_work *(io_wq_work_fn)(struct io_wq_work *);

struct io_wq_data {
	struct user_struct *user;

	io_wq_work_fn *do_work;
	free_work_fn *free_work;
};
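
/* Workqueue lifetime: @bounded caps the number of bound workers. */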
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data);
bool io_wq_get(struct io_wq *wq, struct io_wq_data *data);
void io_wq_destroy(struct io_wq *wq);

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_hash_work(struct io_wq_work *work, void *val);
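
/* True if io_wq_hash_work() marked this work item for serialized execution. */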
static inline bool io_wq_is_hashed(struct io_wq_work *work)
{
	return work->flags & IO_WQ_WORK_HASHED;
}

typedef bool (work_cancel_fn)(struct io_wq_work *, void *);

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
					void *data, bool cancel_all);

struct task_struct *io_wq_get_task(struct io_wq *wq);
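
/*
 * Scheduler hooks: invoked when an io-wq worker is about to block or has
 * resumed, so the pool can adjust its running-worker accounting. They
 * compile to no-ops when io-wq is not built in.
 */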
#if defined(CONFIG_IO_WQ)
extern void io_wq_worker_sleeping(struct task_struct *);
extern void io_wq_worker_running(struct task_struct *);
#else
static inline void io_wq_worker_sleeping(struct task_struct *tsk)
{
}
static inline void io_wq_worker_running(struct task_struct *tsk)
{
}
#endif

/* True if the current task is an io-wq worker thread. */
static inline bool io_wq_current_is_worker(void)
{
	return in_task() && (current->flags & PF_IO_WORKER);
}
#endif