// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"

#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_ERROR		= 1,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};

/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	const struct cred *cur_creds;
	const struct cred *saved_creds;

	struct rcu_head rcu;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)

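/*
 * Worked example (editorial note): the per-wqe hash_map declared below is
 * a single unsigned long used as a bitmap of busy buckets, so the bucket
 * count must not exceed BITS_PER_LONG. On a 64-bit build:
 *
 *	IO_WQ_NR_HASH_BUCKETS == 1u << 6 == 64 buckets
 *
 * and on 32-bit, 1u << 5 == 32 buckets; one bit per bucket either way.
 */
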
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};

/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	struct hlist_node cpuhp_node;

	pid_t task_pid;
};

static enum cpuhp_state io_wq_online;

static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}

static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;

	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}

static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	preempt_disable();
	current->flags &= ~PF_IO_WORKER;
	if (worker->flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(worker->flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);
	worker->flags = 0;
	preempt_enable();

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	acct->nr_workers--;
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);
}

static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;
	return false;
}

/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}

/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}

static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}

static void io_worker_start(struct io_worker *worker)
{
	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	io_wqe_inc_running(worker);
}

/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(worker);
	}
}

/*
 * No work, worker going to sleep. Move to the free list and drop any
 * temporarily overridden credentials.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}
}

static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}

static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}

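/*
 * Illustrative sketch (editorial addition, not part of the original file):
 * suppose work_list holds A -> B -> C -> D, where B and C hash to bucket h
 * and hash_tail[h] == C. A worker claiming B takes the whole contiguous
 * span [B, C] with one wq_list_cut():
 *
 *	before: A -> B -> C -> D, hash_map bit h clear
 *	after:  A -> D, worker runs B then C, hash_map bit h set
 *
 * Per-bucket serialization holds because no other worker can claim bucket
 * h until the bit is cleared again in io_worker_handle_work().
 */
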
static void io_flush_signals(void)
{
	if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
		if (current->task_works)
			task_work_run();
		clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
	}
}

static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->creds);

	worker->cur_creds = work->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}

static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_flush_signals();
		cond_resched();
	}

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);

static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);
			if (work->creds && worker->cur_creds != work->creds)
				io_wq_switch_creds(worker, work);
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				raw_spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}

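/*
 * Illustrative sketch (editorial): for a dependent link A -> B, the inner
 * loop above runs A via wq->do_work(), then wq->free_work(A) hands back B
 * as "linked". If B is not hashed and there is no hashed continuation, B
 * becomes the next iteration's work and runs inline on this worker;
 * otherwise it is re-enqueued with io_wqe_enqueue() so it queues behind
 * any other work already pending in its bucket.
 */
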
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		__io_worker_idle(wqe, worker);
		raw_spin_unlock_irq(&wqe->lock);
		io_flush_signals();
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		if (fatal_signal_pending(current))
			break;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}

/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}

/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * set one up.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&worker->wqe->lock);
	io_wqe_dec_running(worker);
	raw_spin_unlock_irq(&worker->wqe->lock);
}

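/*
 * A minimal sketch of how these hooks are reached (editorial assumption,
 * shown for context; the exact call sites live in kernel/sched/core.c):
 * when a PF_IO_WORKER task blocks or is scheduled back in, the scheduler
 * does roughly
 *
 *	if (tsk->flags & PF_IO_WORKER)
 *		io_wq_worker_sleeping(tsk);	// before blocking
 *	...
 *	if (tsk->flags & PF_IO_WORKER)
 *		io_wq_worker_running(tsk);	// after wakeup
 *
 * which keeps nr_running accurate with no effort on the worker side.
 */
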
static int task_thread(void *data, int index)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_wq *wq = wqe->wq;
	char buf[TASK_COMM_LEN];

	sprintf(buf, "iou-wrk-%d", wq->task_pid);
	set_task_comm(current, buf);

	current->pf_io_worker = worker;
	worker->task = current;

	set_cpus_allowed_ptr(current, cpumask_of_node(wqe->node));
	current->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	io_wqe_worker(data);
	do_exit(0);
}

static int task_thread_bound(void *data)
{
	return task_thread(data, IO_WQ_ACCT_BOUND);
}

static int task_thread_unbound(void *data)
{
	return task_thread(data, IO_WQ_ACCT_UNBOUND);
}

static pid_t fork_thread(int (*fn)(void *), void *arg)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
				CLONE_IO|SIGCHLD;
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return kernel_clone(&args);
}

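/*
 * Note on the kernel_clone() usage above: for kernel-internal clones, the
 * .stack/.stack_size pair is repurposed to carry the thread function and
 * its argument (the same convention kernel_thread() uses), so this is
 * roughly equivalent to the sketch below (editorial paraphrase):
 *
 *	pid = kernel_thread(fn, arg, flags & ~CSIGNAL);
 *
 * except that io-wq spells out the clone flags, so the worker shares fs,
 * files, signal handlers and io context with the forking task.
 */
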
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_worker *worker;
	pid_t pid;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	if (index == IO_WQ_ACCT_BOUND)
		pid = fork_thread(task_thread_bound, worker);
	else
		pid = fork_thread(task_thread_unbound, worker);
	if (pid < 0) {
		kfree(worker);
		return false;
	}
	refcount_inc(&wq->refs);
	return true;
}

static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}

/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}

/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	char buf[TASK_COMM_LEN];
	int node;

	sprintf(buf, "iou-mgr-%d", wq->task_pid);
	set_task_comm(current, buf);
	current->flags |= PF_IO_WORKER;
	wq->manager = current;

	complete(&wq->done);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			raw_spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			raw_spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
		if (fatal_signal_pending(current))
			set_bit(IO_WQ_BIT_EXIT, &wq->state);
	}

	if (refcount_dec_and_test(&wq->refs)) {
		complete(&wq->done);
		do_exit(0);
	}
	/* if ERROR is set and we get here, we have workers to wake */
	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
		rcu_read_lock();
		for_each_node(node)
			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
		rcu_read_unlock();
	}
	do_exit(0);
}

static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}

static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}

static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	unsigned int hash;
	struct io_wq_work *tail;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}

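/*
 * Illustrative sketch (editorial): inserting hashed work keeps each
 * bucket's chain contiguous in work_list. With hash_tail[h] == B and
 * work_list A -> B -> C, a new item N in bucket h goes right after B:
 *
 *	A -> B -> N -> C, hash_tail[h] = N
 *
 * which is what lets io_get_next_work() cut the whole [first, tail] span
 * for bucket h in one operation.
 */
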
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}

void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}

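/*
 * Usage sketch (editorial; the call site is hypothetical): to serialize
 * buffered writes against the same file, a caller hashes work by inode
 * before queueing it, along the lines of:
 *
 *	io_wq_hash_work(&req->work, file_inode(req->file));
 *	io_wq_enqueue(wq, &req->work);
 *
 * Two writes to the same inode then land in the same bucket and run in
 * order, while writes to different inodes can still run in parallel.
 */
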
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    match->fn(worker->cur_work, match->data)) {
		set_notify_signal(worker->task);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}

static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}
	wq_list_del(&wqe->work_list, &work->list, prev);
}

static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}

enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}

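/*
 * Usage sketch (editorial; the predicate below is hypothetical, standing
 * in for whatever match logic the caller supplies):
 *
 *	static bool match_one(struct io_wq_work *work, void *data)
 *	{
 *		return work == data;	// cancel one specific work item
 *	}
 *
 *	ret = io_wq_cancel_cb(wq, match_one, work, false);
 *
 * IO_WQ_CANCEL_OK means the work never ran and is returned as-new;
 * IO_WQ_CANCEL_RUNNING means the executing worker was signalled and its
 * completion will still be posted normally.
 */
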
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task_pid = current->pid;
	init_completion(&wq->done);
	refcount_set(&wq->refs, 1);

	current->flags |= PF_IO_WORKER;
	ret = fork_thread(io_wq_manager, wq);
	current->flags &= ~PF_IO_WORKER;
	if (ret >= 0) {
		wait_for_completion(&wq->done);
		reinit_completion(&wq->done);
		return wq;
	}

	if (refcount_dec_and_test(&wq->refs))
		complete(&wq->done);
err:
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}

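/*
 * Setup sketch (editorial; free_io_work/do_io_work are hypothetical
 * callbacks standing in for the caller's real ones):
 *
 *	struct io_wq_data data = {
 *		.user		= user,
 *		.free_work	= free_io_work,
 *		.do_work	= do_io_work,
 *	};
 *	struct io_wq *wq = io_wq_create(concurrency, &data);
 *
 *	if (IS_ERR(wq))
 *		return PTR_ERR(wq);
 *
 * "bounded" caps workers for bounded (e.g. regular file) work; unbounded
 * work is capped via RLIMIT_NPROC in the acct setup above.
 */
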
void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		wake_up_process(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}

static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}

static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}
subsys_initcall(io_wq_init);