// SPDX-License-Identifier: GPL-2.0
/*
 * Basic worker thread pool for io_uring
 *
 * Copyright (C) 2019 Jens Axboe
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/rculist_nulls.h>
#include <linux/cpu.h>
#include <linux/tracehook.h>

#include "../kernel/sched/sched.h"
#include "io-wq.h"
#define WORKER_IDLE_TIMEOUT	(5 * HZ)

enum {
	IO_WORKER_F_UP		= 1,	/* up and active */
	IO_WORKER_F_RUNNING	= 2,	/* account as running */
	IO_WORKER_F_FREE	= 4,	/* worker on free list */
	IO_WORKER_F_FIXED	= 8,	/* static idle worker */
	IO_WORKER_F_BOUND	= 16,	/* is doing bounded work */
};

enum {
	IO_WQ_BIT_EXIT		= 0,	/* wq exiting */
	IO_WQ_BIT_ERROR		= 1,	/* error on setup */
};

enum {
	IO_WQE_FLAG_STALLED	= 1,	/* stalled on hash */
};
/*
 * One for each thread in a wqe pool
 */
struct io_worker {
	refcount_t ref;
	unsigned flags;
	struct hlist_nulls_node nulls_node;
	struct list_head all_list;
	struct task_struct *task;
	struct io_wqe *wqe;

	struct io_wq_work *cur_work;
	spinlock_t lock;

	const struct cred *cur_creds;
	const struct cred *saved_creds;

	struct rcu_head rcu;
};

#if BITS_PER_LONG == 64
#define IO_WQ_HASH_ORDER	6
#else
#define IO_WQ_HASH_ORDER	5
#endif

#define IO_WQ_NR_HASH_BUCKETS	(1u << IO_WQ_HASH_ORDER)
struct io_wqe_acct {
	unsigned nr_workers;
	unsigned max_workers;
	atomic_t nr_running;
};

enum {
	IO_WQ_ACCT_BOUND,
	IO_WQ_ACCT_UNBOUND,
};

/*
 * Per-node worker thread pool
 */
struct io_wqe {
	struct {
		raw_spinlock_t lock;
		struct io_wq_work_list work_list;
		unsigned long hash_map;
		unsigned flags;
	} ____cacheline_aligned_in_smp;

	int node;
	struct io_wqe_acct acct[2];

	struct hlist_nulls_head free_list;
	struct list_head all_list;

	struct io_wq *wq;
	struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
};
/*
 * Per io_wq state
 */
struct io_wq {
	struct io_wqe **wqes;
	unsigned long state;

	free_work_fn *free_work;
	io_wq_work_fn *do_work;

	struct task_struct *manager;
	struct user_struct *user;
	refcount_t refs;
	struct completion done;

	struct hlist_node cpuhp_node;

	pid_t task_pid;
};

static enum cpuhp_state io_wq_online;
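
/*
 * Worker lookups (e.g. from the free/all lists) take a short-lived
 * reference. The final reference drop wakes the worker task so that
 * io_worker_exit() can finish tearing the worker down.
 */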
static bool io_worker_get(struct io_worker *worker)
{
	return refcount_inc_not_zero(&worker->ref);
}

static void io_worker_release(struct io_worker *worker)
{
	if (refcount_dec_and_test(&worker->ref))
		wake_up_process(worker->task);
}
static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
						   struct io_wq_work *work)
{
	if (work->flags & IO_WQ_WORK_UNBOUND)
		return &wqe->acct[IO_WQ_ACCT_UNBOUND];

	return &wqe->acct[IO_WQ_ACCT_BOUND];
}

static inline struct io_wqe_acct *io_wqe_get_acct(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;

	if (worker->flags & IO_WORKER_F_BOUND)
		return &wqe->acct[IO_WQ_ACCT_BOUND];

	return &wqe->acct[IO_WQ_ACCT_UNBOUND];
}
static void io_worker_exit(struct io_worker *worker)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	unsigned flags;

	/*
	 * If we're not at zero, someone else is holding a brief reference
	 * to the worker. Wait for that to go away.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	if (!refcount_dec_and_test(&worker->ref))
		schedule();
	__set_current_state(TASK_RUNNING);

	current->flags &= ~PF_IO_WORKER;
	flags = worker->flags;
	if (flags & IO_WORKER_F_RUNNING)
		atomic_dec(&acct->nr_running);
	if (!(flags & IO_WORKER_F_BOUND))
		atomic_dec(&wqe->wq->user->processes);

	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}

	raw_spin_lock_irq(&wqe->lock);
	if (flags & IO_WORKER_F_FREE)
		hlist_nulls_del_rcu(&worker->nulls_node);
	list_del_rcu(&worker->all_list);
	acct->nr_workers--;
	raw_spin_unlock_irq(&wqe->lock);

	kfree_rcu(worker, rcu);
	if (refcount_dec_and_test(&wqe->wq->refs))
		complete(&wqe->wq->done);
}
static inline bool io_wqe_run_queue(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	if (!wq_list_empty(&wqe->work_list) &&
	    !(wqe->flags & IO_WQE_FLAG_STALLED))
		return true;

	return false;
}
/*
 * Check head of free list for an available worker. If one isn't available,
 * caller must wake up the wq manager to create one.
 */
static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
	__must_hold(RCU)
{
	struct hlist_nulls_node *n;
	struct io_worker *worker;

	n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
	if (is_a_nulls(n))
		return false;

	worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
	if (io_worker_get(worker)) {
		wake_up_process(worker->task);
		io_worker_release(worker);
		return true;
	}

	return false;
}
/*
 * We need a worker. If we find a free one, we're good. If not, and we're
 * below the max number of workers, wake up the manager to create one.
 */
static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
{
	bool ret;

	/*
	 * Most likely an attempt to queue unbounded work on an io_wq that
	 * wasn't setup with any unbounded workers.
	 */
	WARN_ON_ONCE(!acct->max_workers);

	rcu_read_lock();
	ret = io_wqe_activate_free_worker(wqe);
	rcu_read_unlock();

	if (!ret && acct->nr_workers < acct->max_workers)
		wake_up_process(wqe->wq->manager);
}
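
/*
 * nr_running counts how many workers of this accounting class are
 * currently runnable (not blocked). io_wq_worker_sleeping() and
 * io_wqe_enqueue() use it to decide whether another worker is needed.
 */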
static void io_wqe_inc_running(struct io_worker *worker)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);

	atomic_inc(&acct->nr_running);
}

static void io_wqe_dec_running(struct io_worker *worker)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = io_wqe_get_acct(worker);
	struct io_wqe *wqe = worker->wqe;

	if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
		io_wqe_wake_worker(wqe, acct);
}
static void io_worker_start(struct io_worker *worker)
{
	worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
	io_wqe_inc_running(worker);
}
/*
 * Worker will start processing some work. Move it to the busy list, if
 * it's currently on the freelist
 */
static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
			     struct io_wq_work *work)
	__must_hold(wqe->lock)
{
	bool worker_bound, work_bound;

	if (worker->flags & IO_WORKER_F_FREE) {
		worker->flags &= ~IO_WORKER_F_FREE;
		hlist_nulls_del_init_rcu(&worker->nulls_node);
	}

	/*
	 * If worker is moving from bound to unbound (or vice versa), then
	 * ensure we update the running accounting.
	 */
	worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
	work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
	if (worker_bound != work_bound) {
		io_wqe_dec_running(worker);
		if (work_bound) {
			worker->flags |= IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
			atomic_dec(&wqe->wq->user->processes);
		} else {
			worker->flags &= ~IO_WORKER_F_BOUND;
			wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
			wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
			atomic_inc(&wqe->wq->user->processes);
		}
		io_wqe_inc_running(worker);
	}
}
/*
 * No work, worker going to sleep. Move to freelist, and unuse mm if we
 * have one attached. Dropping the mm may potentially sleep, so we drop
 * the lock in that case and return success. Since the caller has to
 * retry the loop in that case (we changed task state), we don't regrab
 * the lock if we return success.
 */
static void __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
	__must_hold(wqe->lock)
{
	if (!(worker->flags & IO_WORKER_F_FREE)) {
		worker->flags |= IO_WORKER_F_FREE;
		hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	}
	if (worker->saved_creds) {
		revert_creds(worker->saved_creds);
		worker->cur_creds = worker->saved_creds = NULL;
	}
}
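
/*
 * The hash bucket for hashed work is stored in the upper bits of
 * work->flags (see io_wq_hash_work()); recover it here.
 */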
static inline unsigned int io_get_work_hash(struct io_wq_work *work)
{
	return work->flags >> IO_WQ_HASH_SHIFT;
}
static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
	__must_hold(wqe->lock)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work, *tail;
	unsigned int hash;

	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);

		/* not hashed, can run anytime */
		if (!io_wq_is_hashed(work)) {
			wq_list_del(&wqe->work_list, node, prev);
			return work;
		}

		/* hashed, can run if not already running */
		hash = io_get_work_hash(work);
		if (!(wqe->hash_map & BIT(hash))) {
			wqe->hash_map |= BIT(hash);
			/* all items with this hash lie in [work, tail] */
			tail = wqe->hash_tail[hash];
			wqe->hash_tail[hash] = NULL;
			wq_list_cut(&wqe->work_list, &tail->list, prev);
			return work;
		}
	}

	return NULL;
}
static void io_flush_signals(void)
{
	if (unlikely(test_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL))) {
		if (current->task_works)
			task_work_run();
		clear_tsk_thread_flag(current, TIF_NOTIFY_SIGNAL);
	}
}
static void io_wq_switch_creds(struct io_worker *worker,
			       struct io_wq_work *work)
{
	const struct cred *old_creds = override_creds(work->creds);

	worker->cur_creds = work->creds;
	if (worker->saved_creds)
		put_cred(old_creds); /* creds set by previous switch */
	else
		worker->saved_creds = old_creds;
}
static void io_assign_current_work(struct io_worker *worker,
				   struct io_wq_work *work)
{
	if (work) {
		io_flush_signals();
		cond_resched();
	}

	spin_lock_irq(&worker->lock);
	worker->cur_work = work;
	spin_unlock_irq(&worker->lock);
}

static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
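
/*
 * Pull work off the wqe list and run it, including any linked work that
 * becomes runnable as a result. Called with wqe->lock held; the lock is
 * dropped before work is executed and is released on return.
 */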
static void io_worker_handle_work(struct io_worker *worker)
	__releases(wqe->lock)
{
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	do {
		struct io_wq_work *work;
get_next:
		/*
		 * If we got some work, mark us as busy. If we didn't, but
		 * the list isn't empty, it means we stalled on hashed work.
		 * Mark us stalled so we don't keep looking for work when we
		 * can't make progress, any work completion or insertion will
		 * clear the stalled flag.
		 */
		work = io_get_next_work(wqe);
		if (work)
			__io_worker_busy(wqe, worker, work);
		else if (!wq_list_empty(&wqe->work_list))
			wqe->flags |= IO_WQE_FLAG_STALLED;

		raw_spin_unlock_irq(&wqe->lock);
		if (!work)
			break;
		io_assign_current_work(worker, work);

		/* handle a whole dependent link */
		do {
			struct io_wq_work *next_hashed, *linked;
			unsigned int hash = io_get_work_hash(work);

			next_hashed = wq_next_work(work);
			if (work->creds && worker->cur_creds != work->creds)
				io_wq_switch_creds(worker, work);
			wq->do_work(work);
			io_assign_current_work(worker, NULL);

			linked = wq->free_work(work);
			work = next_hashed;
			if (!work && linked && !io_wq_is_hashed(linked)) {
				work = linked;
				linked = NULL;
			}
			io_assign_current_work(worker, work);
			if (linked)
				io_wqe_enqueue(wqe, linked);

			if (hash != -1U && !next_hashed) {
				raw_spin_lock_irq(&wqe->lock);
				wqe->hash_map &= ~BIT_ULL(hash);
				wqe->flags &= ~IO_WQE_FLAG_STALLED;
				/* skip unnecessary unlock-lock wqe->lock */
				if (!work)
					goto get_next;
				raw_spin_unlock_irq(&wqe->lock);
			}
		} while (work);

		raw_spin_lock_irq(&wqe->lock);
	} while (1);
}
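
/*
 * Main loop for a worker thread: run queued work while it is available,
 * otherwise idle on the free list until woken or the idle timeout fires.
 */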
static int io_wqe_worker(void *data)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wq *wq = wqe->wq;

	io_worker_start(worker);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		set_current_state(TASK_INTERRUPTIBLE);
loop:
		raw_spin_lock_irq(&wqe->lock);
		if (io_wqe_run_queue(wqe)) {
			__set_current_state(TASK_RUNNING);
			io_worker_handle_work(worker);
			goto loop;
		}
		__io_worker_idle(wqe, worker);
		raw_spin_unlock_irq(&wqe->lock);
		io_flush_signals();
		if (schedule_timeout(WORKER_IDLE_TIMEOUT))
			continue;
		if (fatal_signal_pending(current))
			break;
		/* timed out, exit unless we're the fixed worker */
		if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
		    !(worker->flags & IO_WORKER_F_FIXED))
			break;
	}

	if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		raw_spin_lock_irq(&wqe->lock);
		if (!wq_list_empty(&wqe->work_list))
			io_worker_handle_work(worker);
		else
			raw_spin_unlock_irq(&wqe->lock);
	}

	io_worker_exit(worker);
	return 0;
}
/*
 * Called when a worker is scheduled in. Mark us as currently running.
 */
void io_wq_worker_running(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (worker->flags & IO_WORKER_F_RUNNING)
		return;
	worker->flags |= IO_WORKER_F_RUNNING;
	io_wqe_inc_running(worker);
}
/*
 * Called when worker is going to sleep. If there are no workers currently
 * running and we have work pending, wake up a free one or have the manager
 * create one.
 */
void io_wq_worker_sleeping(struct task_struct *tsk)
{
	struct io_worker *worker = tsk->pf_io_worker;

	if (!worker)
		return;
	if (!(worker->flags & IO_WORKER_F_UP))
		return;
	if (!(worker->flags & IO_WORKER_F_RUNNING))
		return;

	worker->flags &= ~IO_WORKER_F_RUNNING;

	raw_spin_lock_irq(&worker->wqe->lock);
	io_wqe_dec_running(worker);
	raw_spin_unlock_irq(&worker->wqe->lock);
}
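
/*
 * Set up a newly forked worker task: name it, pin it to its node, link it
 * into the wqe lists and enter the worker loop.
 */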
static int task_thread(void *data, int index)
{
	struct io_worker *worker = data;
	struct io_wqe *wqe = worker->wqe;
	struct io_wqe_acct *acct = &wqe->acct[index];
	struct io_wq *wq = wqe->wq;
	char buf[TASK_COMM_LEN];

	sprintf(buf, "iou-wrk-%d", wq->task_pid);
	set_task_comm(current, buf);

	current->pf_io_worker = worker;
	worker->task = current;

	set_cpus_allowed_ptr(current, cpumask_of_node(wqe->node));
	current->flags |= PF_NO_SETAFFINITY;

	raw_spin_lock_irq(&wqe->lock);
	hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
	list_add_tail_rcu(&worker->all_list, &wqe->all_list);
	worker->flags |= IO_WORKER_F_FREE;
	if (index == IO_WQ_ACCT_BOUND)
		worker->flags |= IO_WORKER_F_BOUND;
	if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
		worker->flags |= IO_WORKER_F_FIXED;
	acct->nr_workers++;
	raw_spin_unlock_irq(&wqe->lock);

	if (index == IO_WQ_ACCT_UNBOUND)
		atomic_inc(&wq->user->processes);

	io_wqe_worker(data);
	do_exit(0);
}

static int task_thread_bound(void *data)
{
	return task_thread(data, IO_WQ_ACCT_BOUND);
}

static int task_thread_unbound(void *data)
{
	return task_thread(data, IO_WQ_ACCT_UNBOUND);
}
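
/*
 * Clone a new kernel thread that shares the issuing task's files, fs,
 * signals and VM, and have it start executing fn(arg).
 */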
static pid_t fork_thread(int (*fn)(void *), void *arg)
{
	unsigned long flags = CLONE_FS|CLONE_FILES|CLONE_SIGHAND|CLONE_THREAD|
				CLONE_IO|SIGCHLD;
	struct kernel_clone_args args = {
		.flags		= ((lower_32_bits(flags) | CLONE_VM |
				    CLONE_UNTRACED) & ~CSIGNAL),
		.exit_signal	= (lower_32_bits(flags) & CSIGNAL),
		.stack		= (unsigned long)fn,
		.stack_size	= (unsigned long)arg,
	};

	return kernel_clone(&args);
}
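
/*
 * Allocate a new io_worker and fork a task for it on the right NUMA node.
 * Returns true if the worker thread was created.
 */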
static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
{
	struct io_worker *worker;
	pid_t pid;

	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
	if (!worker)
		return false;

	refcount_set(&worker->ref, 1);
	worker->nulls_node.pprev = NULL;
	worker->wqe = wqe;
	spin_lock_init(&worker->lock);

	if (index == IO_WQ_ACCT_BOUND)
		pid = fork_thread(task_thread_bound, worker);
	else
		pid = fork_thread(task_thread_unbound, worker);
	if (pid < 0) {
		kfree(worker);
		return false;
	}
	refcount_inc(&wq->refs);
	return true;
}
static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
	__must_hold(wqe->lock)
{
	struct io_wqe_acct *acct = &wqe->acct[index];

	/* if we have available workers or no work, no need */
	if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
		return false;
	return acct->nr_workers < acct->max_workers;
}
/*
 * Iterate the passed in list and call the specific function for each
 * worker that isn't exiting
 */
static bool io_wq_for_each_worker(struct io_wqe *wqe,
				  bool (*func)(struct io_worker *, void *),
				  void *data)
{
	struct io_worker *worker;
	bool ret = false;

	list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
		if (io_worker_get(worker)) {
			/* no task if node is/was offline */
			if (worker->task)
				ret = func(worker, data);
			io_worker_release(worker);
			if (ret)
				break;
		}
	}

	return ret;
}

static bool io_wq_worker_wake(struct io_worker *worker, void *data)
{
	wake_up_process(worker->task);
	return false;
}
/*
 * Manager thread. Tasked with creating new workers, if we need them.
 */
static int io_wq_manager(void *data)
{
	struct io_wq *wq = data;
	char buf[TASK_COMM_LEN];
	int node;

	sprintf(buf, "iou-mgr-%d", wq->task_pid);
	set_task_comm(current, buf);
	current->flags |= PF_IO_WORKER;
	wq->manager = current;

	complete(&wq->done);

	while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
		for_each_node(node) {
			struct io_wqe *wqe = wq->wqes[node];
			bool fork_worker[2] = { false, false };

			if (!node_online(node))
				continue;

			raw_spin_lock_irq(&wqe->lock);
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
				fork_worker[IO_WQ_ACCT_BOUND] = true;
			if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
				fork_worker[IO_WQ_ACCT_UNBOUND] = true;
			raw_spin_unlock_irq(&wqe->lock);
			if (fork_worker[IO_WQ_ACCT_BOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
			if (fork_worker[IO_WQ_ACCT_UNBOUND])
				create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
		}
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ);
		if (fatal_signal_pending(current))
			set_bit(IO_WQ_BIT_EXIT, &wq->state);
	}

	if (refcount_dec_and_test(&wq->refs)) {
		complete(&wq->done);
		do_exit(0);
	}
	/* if ERROR is set and we get here, we have workers to wake */
	if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
		rcu_read_lock();
		for_each_node(node)
			io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
		rcu_read_unlock();
	}
	do_exit(0);
}
static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
			    struct io_wq_work *work)
{
	bool free_worker;

	if (!(work->flags & IO_WQ_WORK_UNBOUND))
		return true;
	if (atomic_read(&acct->nr_running))
		return true;

	rcu_read_lock();
	free_worker = !hlist_nulls_empty(&wqe->free_list);
	rcu_read_unlock();
	if (free_worker)
		return true;

	if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
	    !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
		return false;

	return true;
}
static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
{
	struct io_wq *wq = wqe->wq;

	do {
		work->flags |= IO_WQ_WORK_CANCEL;
		wq->do_work(work);
		work = wq->free_work(work);
	} while (work);
}
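
/*
 * Append work to the wqe list. Hashed work is chained behind the current
 * tail for its hash bucket so a whole hash series stays contiguous.
 */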
static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wq_work *tail;
	unsigned int hash;

	if (!io_wq_is_hashed(work)) {
append:
		wq_list_add_tail(&work->list, &wqe->work_list);
		return;
	}

	hash = io_get_work_hash(work);
	tail = wqe->hash_tail[hash];
	wqe->hash_tail[hash] = work;
	if (!tail)
		goto append;

	wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
}
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
	struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
	int work_flags;
	unsigned long flags;

	/*
	 * Do early check to see if we need a new unbound worker, and if we do,
	 * if we're allowed to do so. This isn't 100% accurate as there's a
	 * gap between this check and incrementing the value, but that's OK.
	 * It's close enough to not be an issue, fork() has the same delay.
	 */
	if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
		io_run_cancel(work, wqe);
		return;
	}

	work_flags = work->flags;
	raw_spin_lock_irqsave(&wqe->lock, flags);
	io_wqe_insert_work(wqe, work);
	wqe->flags &= ~IO_WQE_FLAG_STALLED;
	raw_spin_unlock_irqrestore(&wqe->lock, flags);

	if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
	    !atomic_read(&acct->nr_running))
		io_wqe_wake_worker(wqe, acct);
}
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
{
	struct io_wqe *wqe = wq->wqes[numa_node_id()];

	io_wqe_enqueue(wqe, work);
}

/*
 * Work items that hash to the same value will not be done in parallel.
 * Used to limit concurrent writes, generally hashed by inode.
 */
void io_wq_hash_work(struct io_wq_work *work, void *val)
{
	unsigned int bit;

	bit = hash_ptr(val, IO_WQ_HASH_ORDER);
	work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
}
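
/*
 * Book-keeping for a cancel request: the match callback plus counters for
 * how many pending and running work items were hit.
 */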
struct io_cb_cancel_data {
	work_cancel_fn *fn;
	void *data;
	int nr_running;
	int nr_pending;
	bool cancel_all;
};

static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
	struct io_cb_cancel_data *match = data;
	unsigned long flags;

	/*
	 * Hold the lock to avoid ->cur_work going out of scope, caller
	 * may dereference the passed in work.
	 */
	spin_lock_irqsave(&worker->lock, flags);
	if (worker->cur_work &&
	    match->fn(worker->cur_work, match->data)) {
		set_notify_signal(worker->task);
		match->nr_running++;
	}
	spin_unlock_irqrestore(&worker->lock, flags);

	return match->nr_running && !match->cancel_all;
}
static inline void io_wqe_remove_pending(struct io_wqe *wqe,
					 struct io_wq_work *work,
					 struct io_wq_work_node *prev)
{
	unsigned int hash = io_get_work_hash(work);
	struct io_wq_work *prev_work = NULL;

	if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
		if (prev)
			prev_work = container_of(prev, struct io_wq_work, list);
		if (prev_work && io_get_work_hash(prev_work) == hash)
			wqe->hash_tail[hash] = prev_work;
		else
			wqe->hash_tail[hash] = NULL;
	}

	wq_list_del(&wqe->work_list, &work->list, prev);
}
static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	struct io_wq_work_node *node, *prev;
	struct io_wq_work *work;
	unsigned long flags;

retry:
	raw_spin_lock_irqsave(&wqe->lock, flags);
	wq_list_for_each(node, prev, &wqe->work_list) {
		work = container_of(node, struct io_wq_work, list);
		if (!match->fn(work, match->data))
			continue;
		io_wqe_remove_pending(wqe, work, prev);
		raw_spin_unlock_irqrestore(&wqe->lock, flags);
		io_run_cancel(work, wqe);
		match->nr_pending++;
		if (!match->cancel_all)
			return;

		/* not safe to continue after unlock */
		goto retry;
	}
	raw_spin_unlock_irqrestore(&wqe->lock, flags);
}

static void io_wqe_cancel_running_work(struct io_wqe *wqe,
				       struct io_cb_cancel_data *match)
{
	rcu_read_lock();
	io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
	rcu_read_unlock();
}
enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
				  void *data, bool cancel_all)
{
	struct io_cb_cancel_data match = {
		.fn		= cancel,
		.data		= data,
		.cancel_all	= cancel_all,
	};
	int node;

	/*
	 * First check pending list, if we're lucky we can just remove it
	 * from there. CANCEL_OK means that the work is returned as-new,
	 * no completion will be posted for it.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_pending_work(wqe, &match);
		if (match.nr_pending && !match.cancel_all)
			return IO_WQ_CANCEL_OK;
	}

	/*
	 * Now check if a free (going busy) or busy worker has the work
	 * currently running. If we find it there, we'll return CANCEL_RUNNING
	 * as an indication that we attempt to signal cancellation. The
	 * completion will run normally in this case.
	 */
	for_each_node(node) {
		struct io_wqe *wqe = wq->wqes[node];

		io_wqe_cancel_running_work(wqe, &match);
		if (match.nr_running && !match.cancel_all)
			return IO_WQ_CANCEL_RUNNING;
	}

	if (match.nr_running)
		return IO_WQ_CANCEL_RUNNING;
	if (match.nr_pending)
		return IO_WQ_CANCEL_OK;
	return IO_WQ_CANCEL_NOTFOUND;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
{
	int ret = -ENOMEM, node;
	struct io_wq *wq;

	if (WARN_ON_ONCE(!data->free_work || !data->do_work))
		return ERR_PTR(-EINVAL);

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		return ERR_PTR(-ENOMEM);

	wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
	if (!wq->wqes)
		goto err_wq;

	ret = cpuhp_state_add_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	if (ret)
		goto err_wqes;

	wq->free_work = data->free_work;
	wq->do_work = data->do_work;

	/* caller must already hold a reference to this */
	wq->user = data->user;

	ret = -ENOMEM;
	for_each_node(node) {
		struct io_wqe *wqe;
		int alloc_node = node;

		if (!node_online(alloc_node))
			alloc_node = NUMA_NO_NODE;
		wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
		if (!wqe)
			goto err;
		wq->wqes[node] = wqe;
		wqe->node = alloc_node;
		wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
		atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
		if (wq->user) {
			wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
					task_rlimit(current, RLIMIT_NPROC);
		}
		atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
		wqe->wq = wq;
		raw_spin_lock_init(&wqe->lock);
		INIT_WQ_LIST(&wqe->work_list);
		INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
		INIT_LIST_HEAD(&wqe->all_list);
	}

	wq->task_pid = current->pid;
	init_completion(&wq->done);
	refcount_set(&wq->refs, 1);

	current->flags |= PF_IO_WORKER;
	ret = fork_thread(io_wq_manager, wq);
	current->flags &= ~PF_IO_WORKER;
	if (ret >= 0) {
		wait_for_completion(&wq->done);
		reinit_completion(&wq->done);
		return wq;
	}

	if (refcount_dec_and_test(&wq->refs))
		complete(&wq->done);
err:
	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);
	for_each_node(node)
		kfree(wq->wqes[node]);
err_wqes:
	kfree(wq->wqes);
err_wq:
	kfree(wq);
	return ERR_PTR(ret);
}
void io_wq_destroy(struct io_wq *wq)
{
	int node;

	cpuhp_state_remove_instance_nocalls(io_wq_online, &wq->cpuhp_node);

	set_bit(IO_WQ_BIT_EXIT, &wq->state);
	if (wq->manager)
		wake_up_process(wq->manager);

	rcu_read_lock();
	for_each_node(node)
		io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
	rcu_read_unlock();

	wait_for_completion(&wq->done);

	for_each_node(node)
		kfree(wq->wqes[node]);
	kfree(wq->wqes);
	kfree(wq);
}
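
/*
 * CPU hotplug callback helper: re-pin each worker to the CPUs of its
 * wqe's NUMA node once a CPU comes (back) online.
 */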
static bool io_wq_worker_affinity(struct io_worker *worker, void *data)
{
	struct task_struct *task = worker->task;
	struct rq_flags rf;
	struct rq *rq;

	rq = task_rq_lock(task, &rf);
	do_set_cpus_allowed(task, cpumask_of_node(worker->wqe->node));
	task->flags |= PF_NO_SETAFFINITY;
	task_rq_unlock(rq, task, &rf);
	return false;
}

static int io_wq_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct io_wq *wq = hlist_entry_safe(node, struct io_wq, cpuhp_node);
	int i;

	rcu_read_lock();
	for_each_node(i)
		io_wq_for_each_worker(wq->wqes[i], io_wq_worker_affinity, NULL);
	rcu_read_unlock();
	return 0;
}
static __init int io_wq_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "io-wq/online",
					io_wq_cpu_online, NULL);
	if (ret < 0)
		return ret;
	io_wq_online = ret;
	return 0;
}

subsys_initcall(io_wq_init);