1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Basic worker thread pool for io_uring
4 *
5 * Copyright (C) 2019 Jens Axboe
6 *
7 */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/errno.h>
11 #include <linux/sched/signal.h>
12 #include <linux/mm.h>
13 #include <linux/sched/mm.h>
14 #include <linux/percpu.h>
15 #include <linux/slab.h>
16 #include <linux/kthread.h>
17 #include <linux/rculist_nulls.h>
18 #include <linux/fs_struct.h>
19 #include <linux/task_work.h>
20 #include <linux/blk-cgroup.h>
22 #include "io-wq.h"
24 #define WORKER_IDLE_TIMEOUT (5 * HZ)
26 enum {
27 IO_WORKER_F_UP = 1, /* up and active */
28 IO_WORKER_F_RUNNING = 2, /* account as running */
29 IO_WORKER_F_FREE = 4, /* worker on free list */
30 IO_WORKER_F_FIXED = 8, /* static idle worker */
31 IO_WORKER_F_BOUND = 16, /* is doing bounded work */
32 };
34 enum {
35 IO_WQ_BIT_EXIT = 0, /* wq exiting */
36 IO_WQ_BIT_CANCEL = 1, /* cancel work on list */
37 IO_WQ_BIT_ERROR = 2, /* error on setup */
38 };
40 enum {
41 IO_WQE_FLAG_STALLED = 1, /* stalled on hash */
42 };
44 /*
45 * One for each thread in a wqe pool
46 */
47 struct io_worker {
50 struct hlist_nulls_node nulls_node;
51 struct list_head all_list;
52 struct task_struct *task;
55 struct io_wq_work *cur_work;
60 #ifdef CONFIG_BLK_CGROUP
61 struct cgroup_subsys_state *blkcg_css;
62 #endif
63 const struct cred *cur_creds;
64 const struct cred *saved_creds;
65 struct files_struct *restore_files;
66 struct nsproxy *restore_nsproxy;
67 struct fs_struct *restore_fs;
68 };
70 #if BITS_PER_LONG == 64
71 #define IO_WQ_HASH_ORDER 6
73 #define IO_WQ_HASH_ORDER 5
76 #define IO_WQ_NR_HASH_BUCKETS (1u << IO_WQ_HASH_ORDER)
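/*
 * Illustrative sketch, not part of the original file: io_wq_hash_work()
 * (further down) derives the bucket index from a key pointer with hash_ptr()
 * from <linux/hash.h>, so the result always fits in IO_WQ_HASH_ORDER bits and
 * selects one bit in wqe->hash_map and one slot in wqe->hash_tail[]. The
 * inode argument here is a hypothetical key.
 */
static inline unsigned int example_hash_bucket(struct inode *inode)
{
	/* 0 .. IO_WQ_NR_HASH_BUCKETS - 1 (0..63 on 64-bit kernels) */
	return hash_ptr(inode, IO_WQ_HASH_ORDER);
}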
89 /*
90 * Per-node worker thread pool
91 */
92 struct io_wqe {
93 struct {
94 raw_spinlock_t lock;
95 struct io_wq_work_list work_list;
96 unsigned long hash_map;
97 unsigned flags;
98 } ____cacheline_aligned_in_smp;
101 struct io_wqe_acct acct[2];
103 struct hlist_nulls_head free_list;
104 struct list_head all_list;
107 struct io_wq_work *hash_tail[IO_WQ_NR_HASH_BUCKETS];
108 };
113 struct io_wq {
114 struct io_wqe **wqes;
117 free_work_fn *free_work;
118 io_wq_work_fn *do_work;
120 struct task_struct *manager;
121 struct user_struct *user;
122 refcount_t refs;
123 struct completion done;
128 static bool io_worker_get(struct io_worker *worker)
130 return refcount_inc_not_zero(&worker->ref);
133 static void io_worker_release(struct io_worker *worker)
135 if (refcount_dec_and_test(&worker->ref))
136 wake_up_process(worker->task);
140 * Note: drops the wqe->lock if returning true! The caller must re-acquire
141 * the lock in that case. Some callers need to restart handling if this
142 * happens, so we can't just re-acquire the lock on behalf of the caller.
144 static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
146 bool dropped_lock = false;
148 if (worker->saved_creds) {
149 revert_creds(worker->saved_creds);
150 worker->cur_creds = worker->saved_creds = NULL;
153 if (current->files != worker->restore_files) {
154 __acquire(&wqe->lock);
155 raw_spin_unlock_irq(&wqe->lock);
159 current->files = worker->restore_files;
160 current->nsproxy = worker->restore_nsproxy;
161 task_unlock(current);
164 if (current->fs != worker->restore_fs)
165 current->fs = worker->restore_fs;
168 * If we have an active mm, we need to drop the wq lock before unusing
169 * it. If we do, return true and let the caller retry the idle loop.
173 __acquire(&wqe->lock);
174 raw_spin_unlock_irq(&wqe->lock);
177 __set_current_state(TASK_RUNNING);
178 kthread_unuse_mm(worker->mm);
183 #ifdef CONFIG_BLK_CGROUP
184 if (worker->blkcg_css) {
185 kthread_associate_blkcg(NULL);
186 worker->blkcg_css = NULL;
193 static inline struct io_wqe_acct *io_work_get_acct(struct io_wqe *wqe,
194 struct io_wq_work *work)
196 if (work->flags & IO_WQ_WORK_UNBOUND)
197 return &wqe->acct[IO_WQ_ACCT_UNBOUND];
199 return &wqe->acct[IO_WQ_ACCT_BOUND];
202 static inline struct io_wqe_acct *io_wqe_get_acct(struct io_wqe *wqe,
203 struct io_worker *worker)
205 if (worker->flags & IO_WORKER_F_BOUND)
206 return &wqe->acct[IO_WQ_ACCT_BOUND];
208 return &wqe->acct[IO_WQ_ACCT_UNBOUND];
211 static void io_worker_exit(struct io_worker *worker)
213 struct io_wqe *wqe = worker->wqe;
214 struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
217 * If we're not at zero, someone else is holding a brief reference
218 * to the worker. Wait for that to go away.
220 set_current_state(TASK_INTERRUPTIBLE);
221 if (!refcount_dec_and_test(&worker->ref))
223 __set_current_state(TASK_RUNNING);
226 current->flags &= ~PF_IO_WORKER;
227 if (worker->flags & IO_WORKER_F_RUNNING)
228 atomic_dec(&acct->nr_running);
229 if (!(worker->flags & IO_WORKER_F_BOUND))
230 atomic_dec(&wqe->wq->user->processes);
234 raw_spin_lock_irq(&wqe->lock);
235 hlist_nulls_del_rcu(&worker->nulls_node);
236 list_del_rcu(&worker->all_list);
237 if (__io_worker_unuse(wqe, worker)) {
238 __release(&wqe->lock);
239 raw_spin_lock_irq(&wqe->lock);
242 raw_spin_unlock_irq(&wqe->lock);
244 kfree_rcu(worker, rcu);
245 if (refcount_dec_and_test(&wqe->wq->refs))
246 complete(&wqe->wq->done);
249 static inline bool io_wqe_run_queue(struct io_wqe *wqe)
250 __must_hold(wqe->lock)
252 if (!wq_list_empty(&wqe->work_list) &&
253 !(wqe->flags & IO_WQE_FLAG_STALLED))
259 * Check head of free list for an available worker. If one isn't available,
260 * caller must wake up the wq manager to create one.
262 static bool io_wqe_activate_free_worker(struct io_wqe *wqe)
265 struct hlist_nulls_node *n;
266 struct io_worker *worker;
268 n = rcu_dereference(hlist_nulls_first_rcu(&wqe->free_list));
272 worker = hlist_nulls_entry(n, struct io_worker, nulls_node);
273 if (io_worker_get(worker)) {
274 wake_up_process(worker->task);
275 io_worker_release(worker);
283 * We need a worker. If we find a free one, we're good. If not, and we're
284 * below the max number of workers, wake up the manager to create one.
286 static void io_wqe_wake_worker(struct io_wqe *wqe, struct io_wqe_acct *acct)
291 * Most likely an attempt to queue unbounded work on an io_wq that
292 * wasn't setup with any unbounded workers.
294 WARN_ON_ONCE(!acct->max_workers);
297 ret = io_wqe_activate_free_worker(wqe);
300 if (!ret && acct->nr_workers < acct->max_workers)
301 wake_up_process(wqe->wq->manager);
304 static void io_wqe_inc_running(struct io_wqe *wqe, struct io_worker *worker)
306 struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
308 atomic_inc(&acct->nr_running);
311 static void io_wqe_dec_running(struct io_wqe *wqe, struct io_worker *worker)
312 __must_hold(wqe->lock)
314 struct io_wqe_acct *acct = io_wqe_get_acct(wqe, worker);
316 if (atomic_dec_and_test(&acct->nr_running) && io_wqe_run_queue(wqe))
317 io_wqe_wake_worker(wqe, acct);
320 static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
322 allow_kernel_signal(SIGINT);
324 current->flags |= PF_IO_WORKER;
326 worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
327 worker->restore_files = current->files;
328 worker->restore_nsproxy = current->nsproxy;
329 worker->restore_fs = current->fs;
330 io_wqe_inc_running(wqe, worker);
334 * Worker will start processing some work. Move it to the busy list, if
335 * it's currently on the freelist
337 static void __io_worker_busy(struct io_wqe *wqe, struct io_worker *worker,
338 struct io_wq_work *work)
339 __must_hold(wqe->lock)
341 bool worker_bound, work_bound;
343 if (worker->flags & IO_WORKER_F_FREE) {
344 worker->flags &= ~IO_WORKER_F_FREE;
345 hlist_nulls_del_init_rcu(&worker->nulls_node);
349 * If worker is moving from bound to unbound (or vice versa), then
350 * ensure we update the running accounting.
352 worker_bound = (worker->flags & IO_WORKER_F_BOUND) != 0;
353 work_bound = (work->flags & IO_WQ_WORK_UNBOUND) == 0;
354 if (worker_bound != work_bound) {
355 io_wqe_dec_running(wqe, worker);
357 worker->flags |= IO_WORKER_F_BOUND;
358 wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers--;
359 wqe->acct[IO_WQ_ACCT_BOUND].nr_workers++;
360 atomic_dec(&wqe->wq->user->processes);
362 worker->flags &= ~IO_WORKER_F_BOUND;
363 wqe->acct[IO_WQ_ACCT_UNBOUND].nr_workers++;
364 wqe->acct[IO_WQ_ACCT_BOUND].nr_workers--;
365 atomic_inc(&wqe->wq->user->processes);
367 io_wqe_inc_running(wqe, worker);
372 * No work, worker going to sleep. Move to freelist, and unuse mm if we
373 * have one attached. Dropping the mm may potentially sleep, so we drop
374 * the lock in that case and return success. Since the caller has to
375 * retry the loop in that case (we changed task state), we don't regrab
376 * the lock if we return success.
378 static bool __io_worker_idle(struct io_wqe *wqe, struct io_worker *worker)
379 __must_hold(wqe->lock)
381 if (!(worker->flags & IO_WORKER_F_FREE)) {
382 worker->flags |= IO_WORKER_F_FREE;
383 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
386 return __io_worker_unuse(wqe, worker);
389 static inline unsigned int io_get_work_hash(struct io_wq_work *work)
391 return work->flags >> IO_WQ_HASH_SHIFT;
394 static struct io_wq_work *io_get_next_work(struct io_wqe *wqe)
395 __must_hold(wqe->lock)
397 struct io_wq_work_node *node, *prev;
398 struct io_wq_work *work, *tail;
401 wq_list_for_each(node, prev, &wqe->work_list) {
402 work = container_of(node, struct io_wq_work, list);
404 /* not hashed, can run anytime */
405 if (!io_wq_is_hashed(work)) {
406 wq_list_del(&wqe->work_list, node, prev);
410 /* hashed, can run if not already running */
411 hash = io_get_work_hash(work);
412 if (!(wqe->hash_map & BIT(hash))) {
413 wqe->hash_map |= BIT(hash);
414 /* all items with this hash lie in [work, tail] */
415 tail = wqe->hash_tail[hash];
416 wqe->hash_tail[hash] = NULL;
417 wq_list_cut(&wqe->work_list, &tail->list, prev);
425 static void io_wq_switch_mm(struct io_worker *worker, struct io_wq_work *work)
428 kthread_unuse_mm(worker->mm);
435 if (mmget_not_zero(work->mm)) {
436 kthread_use_mm(work->mm);
437 worker->mm = work->mm;
438 /* hang on to this mm */
443 /* failed grabbing mm, ensure work gets cancelled */
444 work->flags |= IO_WQ_WORK_CANCEL;
447 static inline void io_wq_switch_blkcg(struct io_worker *worker,
448 struct io_wq_work *work)
450 #ifdef CONFIG_BLK_CGROUP
451 if (!(work->flags & IO_WQ_WORK_BLKCG))
453 if (work->blkcg_css != worker->blkcg_css) {
454 kthread_associate_blkcg(work->blkcg_css);
455 worker->blkcg_css = work->blkcg_css;
460 static void io_wq_switch_creds(struct io_worker *worker,
461 struct io_wq_work *work)
463 const struct cred *old_creds = override_creds(work->creds);
465 worker->cur_creds = work->creds;
466 if (worker->saved_creds)
467 put_cred(old_creds); /* creds set by previous switch */
469 worker->saved_creds = old_creds;
472 static void io_impersonate_work(struct io_worker *worker,
473 struct io_wq_work *work)
475 if ((work->flags & IO_WQ_WORK_FILES) && current->files != work->files) {
477 current->files = work->files;
478 current->nsproxy = work->nsproxy;
479 task_unlock(current);
481 if ((work->flags & IO_WQ_WORK_FS) && current->fs != work->fs)
482 current->fs = work->fs;
483 if ((work->flags & IO_WQ_WORK_MM) && work->mm != worker->mm)
484 io_wq_switch_mm(worker, work);
485 if ((work->flags & IO_WQ_WORK_CREDS) && worker->cur_creds != work->creds)
486 io_wq_switch_creds(worker, work);
487 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = work->fsize;
488 io_wq_switch_blkcg(worker, work);
491 static void io_assign_current_work(struct io_worker *worker,
492 struct io_wq_work *work)
495 /* flush pending signals before assigning new work */
496 if (signal_pending(current))
497 flush_signals(current);
501 spin_lock_irq(&worker->lock);
502 worker->cur_work = work;
503 spin_unlock_irq(&worker->lock);
506 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work);
508 static void io_worker_handle_work(struct io_worker *worker)
509 __releases(wqe->lock)
511 struct io_wqe *wqe = worker->wqe;
512 struct io_wq *wq = wqe->wq;
515 struct io_wq_work *work;
518 * If we got some work, mark us as busy. If we didn't, but
519 * the list isn't empty, it means we stalled on hashed work.
520 * Mark us stalled so we don't keep looking for work when we
521 * can't make progress, any work completion or insertion will
522 * clear the stalled flag.
524 work = io_get_next_work(wqe);
526 __io_worker_busy(wqe, worker, work);
527 else if (!wq_list_empty(&wqe->work_list))
528 wqe->flags |= IO_WQE_FLAG_STALLED;
530 raw_spin_unlock_irq(&wqe->lock);
533 io_assign_current_work(worker, work);
535 /* handle a whole dependent link */
537 struct io_wq_work *old_work, *next_hashed, *linked;
538 unsigned int hash = io_get_work_hash(work);
540 next_hashed = wq_next_work(work);
541 io_impersonate_work(worker, work);
543 * OK to set IO_WQ_WORK_CANCEL even for uncancellable
544 * work, the worker function will do the right thing.
546 if (test_bit(IO_WQ_BIT_CANCEL, &wq->state))
547 work->flags |= IO_WQ_WORK_CANCEL;
550 linked = wq->do_work(work);
553 if (!work && linked && !io_wq_is_hashed(linked)) {
557 io_assign_current_work(worker, work);
558 wq->free_work(old_work);
561 io_wqe_enqueue(wqe, linked);
563 if (hash != -1U && !next_hashed) {
564 raw_spin_lock_irq(&wqe->lock);
565 wqe->hash_map &= ~BIT_ULL(hash);
566 wqe->flags &= ~IO_WQE_FLAG_STALLED;
567 /* skip unnecessary unlock-lock wqe->lock */
570 raw_spin_unlock_irq(&wqe->lock);
574 raw_spin_lock_irq(&wqe->lock);
578 static int io_wqe_worker(void *data)
580 struct io_worker *worker = data;
581 struct io_wqe *wqe = worker->wqe;
582 struct io_wq *wq = wqe->wq;
584 io_worker_start(wqe, worker);
586 while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
587 set_current_state(TASK_INTERRUPTIBLE);
589 raw_spin_lock_irq(&wqe->lock);
590 if (io_wqe_run_queue(wqe)) {
591 __set_current_state(TASK_RUNNING);
592 io_worker_handle_work(worker);
595 /* drops the lock on success, retry */
596 if (__io_worker_idle(wqe, worker)) {
597 __release(&wqe->lock);
600 raw_spin_unlock_irq(&wqe->lock);
601 if (signal_pending(current))
602 flush_signals(current);
603 if (schedule_timeout(WORKER_IDLE_TIMEOUT))
605 /* timed out, exit unless we're the fixed worker */
606 if (test_bit(IO_WQ_BIT_EXIT, &wq->state) ||
607 !(worker->flags & IO_WORKER_F_FIXED))
611 if (test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
612 raw_spin_lock_irq(&wqe->lock);
613 if (!wq_list_empty(&wqe->work_list))
614 io_worker_handle_work(worker);
616 raw_spin_unlock_irq(&wqe->lock);
619 io_worker_exit(worker);
624 * Called when a worker is scheduled in. Mark us as currently running.
626 void io_wq_worker_running(struct task_struct *tsk)
628 struct io_worker *worker = kthread_data(tsk);
629 struct io_wqe *wqe = worker->wqe;
631 if (!(worker->flags & IO_WORKER_F_UP))
633 if (worker->flags & IO_WORKER_F_RUNNING)
635 worker->flags |= IO_WORKER_F_RUNNING;
636 io_wqe_inc_running(wqe, worker);
639 /*
640 * Called when worker is going to sleep. If there are no workers currently
641 * running and we have work pending, wake up a free one or have the manager
642 * create one.
643 */
644 void io_wq_worker_sleeping(struct task_struct *tsk)
646 struct io_worker *worker = kthread_data(tsk);
647 struct io_wqe *wqe = worker->wqe;
649 if (!(worker->flags & IO_WORKER_F_UP))
651 if (!(worker->flags & IO_WORKER_F_RUNNING))
654 worker->flags &= ~IO_WORKER_F_RUNNING;
656 raw_spin_lock_irq(&wqe->lock);
657 io_wqe_dec_running(wqe, worker);
658 raw_spin_unlock_irq(&wqe->lock);
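/*
 * Context sketch (assumption, not part of this file): these two hooks are
 * intended to be driven from the scheduler when a PF_IO_WORKER task blocks
 * in schedule() or is scheduled back in, roughly as the dispatcher below
 * shows. The real caller lives in kernel/sched/core.c and its exact shape
 * varies by kernel version.
 */
static inline void example_io_worker_sched_hook(struct task_struct *tsk,
						bool sleeping)
{
	if (!(tsk->flags & PF_IO_WORKER))
		return;
	if (sleeping)
		io_wq_worker_sleeping(tsk);	/* may wake or fork a replacement */
	else
		io_wq_worker_running(tsk);	/* re-account as running */
}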
661 static bool create_io_worker(struct io_wq *wq, struct io_wqe *wqe, int index)
663 struct io_wqe_acct *acct = &wqe->acct[index];
664 struct io_worker *worker;
666 worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, wqe->node);
670 refcount_set(&worker->ref, 1);
671 worker->nulls_node.pprev = NULL;
673 spin_lock_init(&worker->lock);
675 worker->task = kthread_create_on_node(io_wqe_worker, worker, wqe->node,
676 "io_wqe_worker-%d/%d", index, wqe->node);
677 if (IS_ERR(worker->task)) {
681 kthread_bind_mask(worker->task, cpumask_of_node(wqe->node));
683 raw_spin_lock_irq(&wqe->lock);
684 hlist_nulls_add_head_rcu(&worker->nulls_node, &wqe->free_list);
685 list_add_tail_rcu(&worker->all_list, &wqe->all_list);
686 worker->flags |= IO_WORKER_F_FREE;
687 if (index == IO_WQ_ACCT_BOUND)
688 worker->flags |= IO_WORKER_F_BOUND;
689 if (!acct->nr_workers && (worker->flags & IO_WORKER_F_BOUND))
690 worker->flags |= IO_WORKER_F_FIXED;
692 raw_spin_unlock_irq(&wqe->lock);
694 if (index == IO_WQ_ACCT_UNBOUND)
695 atomic_inc(&wq->user->processes);
697 refcount_inc(&wq->refs);
698 wake_up_process(worker->task);
702 static inline bool io_wqe_need_worker(struct io_wqe *wqe, int index)
703 __must_hold(wqe->lock)
705 struct io_wqe_acct *acct = &wqe->acct[index];
707 /* if we have available workers or no work, no need */
708 if (!hlist_nulls_empty(&wqe->free_list) || !io_wqe_run_queue(wqe))
710 return acct->nr_workers < acct->max_workers;
713 static bool io_wqe_worker_send_sig(struct io_worker *worker, void *data)
715 send_sig(SIGINT, worker->task, 1);
720 * Iterate the passed in list and call the specific function for each
721 * worker that isn't exiting
723 static bool io_wq_for_each_worker(struct io_wqe *wqe,
724 bool (*func)(struct io_worker *, void *),
727 struct io_worker *worker;
730 list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
731 if (io_worker_get(worker)) {
732 /* no task if node is/was offline */
734 ret = func(worker, data);
735 io_worker_release(worker);
744 static bool io_wq_worker_wake(struct io_worker *worker, void *data)
746 wake_up_process(worker->task);
751 * Manager thread. Tasked with creating new workers, if we need them.
753 static int io_wq_manager(void *data)
755 struct io_wq *wq = data;
758 /* create fixed workers */
759 refcount_set(&wq->refs, 1);
760 for_each_node(node) {
761 if (!node_online(node))
763 if (create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
765 set_bit(IO_WQ_BIT_ERROR, &wq->state);
766 set_bit(IO_WQ_BIT_EXIT, &wq->state);
772 while (!kthread_should_stop()) {
773 if (current->task_works)
776 for_each_node(node) {
777 struct io_wqe *wqe = wq->wqes[node];
778 bool fork_worker[2] = { false, false };
780 if (!node_online(node))
783 raw_spin_lock_irq(&wqe->lock);
784 if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
785 fork_worker[IO_WQ_ACCT_BOUND] = true;
786 if (io_wqe_need_worker(wqe, IO_WQ_ACCT_UNBOUND))
787 fork_worker[IO_WQ_ACCT_UNBOUND] = true;
788 raw_spin_unlock_irq(&wqe->lock);
789 if (fork_worker[IO_WQ_ACCT_BOUND])
790 create_io_worker(wq, wqe, IO_WQ_ACCT_BOUND);
791 if (fork_worker[IO_WQ_ACCT_UNBOUND])
792 create_io_worker(wq, wqe, IO_WQ_ACCT_UNBOUND);
794 set_current_state(TASK_INTERRUPTIBLE);
795 schedule_timeout(HZ);
798 if (current->task_works)
802 if (refcount_dec_and_test(&wq->refs)) {
806 /* if ERROR is set and we get here, we have workers to wake */
807 if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
810 io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
816 static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
817 struct io_wq_work *work)
821 if (!(work->flags & IO_WQ_WORK_UNBOUND))
823 if (atomic_read(&acct->nr_running))
827 free_worker = !hlist_nulls_empty(&wqe->free_list);
832 if (atomic_read(&wqe->wq->user->processes) >= acct->max_workers &&
833 !(capable(CAP_SYS_RESOURCE) || capable(CAP_SYS_ADMIN)))
839 static void io_run_cancel(struct io_wq_work *work, struct io_wqe *wqe)
841 struct io_wq *wq = wqe->wq;
844 struct io_wq_work *old_work = work;
846 work->flags |= IO_WQ_WORK_CANCEL;
847 work = wq->do_work(work);
848 wq->free_work(old_work);
852 static void io_wqe_insert_work(struct io_wqe *wqe, struct io_wq_work *work)
855 struct io_wq_work *tail;
857 if (!io_wq_is_hashed(work)) {
859 wq_list_add_tail(&work->list, &wqe->work_list);
863 hash = io_get_work_hash(work);
864 tail = wqe->hash_tail[hash];
865 wqe->hash_tail[hash] = work;
869 wq_list_add_after(&work->list, &tail->list, &wqe->work_list);
872 static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
874 struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
879 * Do early check to see if we need a new unbound worker, and if we do,
880 * if we're allowed to do so. This isn't 100% accurate as there's a
881 * gap between this check and incrementing the value, but that's OK.
882 * It's close enough to not be an issue, fork() has the same delay.
884 if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
885 io_run_cancel(work, wqe);
889 work_flags = work->flags;
890 raw_spin_lock_irqsave(&wqe->lock, flags);
891 io_wqe_insert_work(wqe, work);
892 wqe->flags &= ~IO_WQE_FLAG_STALLED;
893 raw_spin_unlock_irqrestore(&wqe->lock, flags);
895 if ((work_flags & IO_WQ_WORK_CONCURRENT) ||
896 !atomic_read(&acct->nr_running))
897 io_wqe_wake_worker(wqe, acct);
900 void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work)
902 struct io_wqe *wqe = wq->wqes[numa_node_id()];
904 io_wqe_enqueue(wqe, work);
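/*
 * Usage sketch (hypothetical caller, not part of this file): the submitter
 * embeds a struct io_wq_work in each request and queues it here for async
 * execution; wq->do_work() then runs it on a worker thread. The example_req
 * type and its fields are assumptions for illustration.
 */
struct example_req {
	struct io_wq_work	work;
	void			*ctx;	/* owning context, used in later sketches */
};

static void example_queue_async(struct io_wq *wq, struct example_req *req)
{
	io_wq_enqueue(wq, &req->work);
}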
908 * Work items that hash to the same value will not be done in parallel.
909 * Used to limit concurrent writes, generally hashed by inode.
911 void io_wq_hash_work(struct io_wq_work *work, void *val)
915 bit = hash_ptr(val, IO_WQ_HASH_ORDER);
916 work->flags |= (IO_WQ_WORK_HASHED | (bit << IO_WQ_HASH_SHIFT));
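/*
 * Usage sketch (hypothetical, reusing example_req from the sketch above):
 * serialise buffered writes to one inode by hashing the work on the inode
 * before queueing it. Work items hashed to the same value run one at a time,
 * in queue order, per the hash_map/hash_tail handling earlier in this file.
 */
static void example_queue_hashed_write(struct io_wq *wq,
				       struct example_req *req,
				       struct inode *inode)
{
	io_wq_hash_work(&req->work, inode);
	io_wq_enqueue(wq, &req->work);
}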
919 void io_wq_cancel_all(struct io_wq *wq)
923 set_bit(IO_WQ_BIT_CANCEL, &wq->state);
926 for_each_node(node) {
927 struct io_wqe *wqe = wq->wqes[node];
929 io_wq_for_each_worker(wqe, io_wqe_worker_send_sig, NULL);
934 struct io_cb_cancel_data {
942 static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
944 struct io_cb_cancel_data *match = data;
948 * Hold the lock to avoid ->cur_work going out of scope, caller
949 * may dereference the passed in work.
951 spin_lock_irqsave(&worker->lock, flags);
952 if (worker->cur_work &&
953 !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
954 match->fn(worker->cur_work, match->data)) {
955 send_sig(SIGINT, worker->task, 1);
958 spin_unlock_irqrestore(&worker->lock, flags);
960 return match->nr_running && !match->cancel_all;
963 static inline void io_wqe_remove_pending(struct io_wqe *wqe,
964 struct io_wq_work *work,
965 struct io_wq_work_node *prev)
967 unsigned int hash = io_get_work_hash(work);
968 struct io_wq_work *prev_work = NULL;
970 if (io_wq_is_hashed(work) && work == wqe->hash_tail[hash]) {
972 prev_work = container_of(prev, struct io_wq_work, list);
973 if (prev_work && io_get_work_hash(prev_work) == hash)
974 wqe->hash_tail[hash] = prev_work;
976 wqe->hash_tail[hash] = NULL;
978 wq_list_del(&wqe->work_list, &work->list, prev);
981 static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
982 struct io_cb_cancel_data *match)
984 struct io_wq_work_node *node, *prev;
985 struct io_wq_work *work;
989 raw_spin_lock_irqsave(&wqe->lock, flags);
990 wq_list_for_each(node, prev, &wqe->work_list) {
991 work = container_of(node, struct io_wq_work, list);
992 if (!match->fn(work, match->data))
994 io_wqe_remove_pending(wqe, work, prev);
995 raw_spin_unlock_irqrestore(&wqe->lock, flags);
996 io_run_cancel(work, wqe);
998 if (!match->cancel_all)
1001 /* not safe to continue after unlock */
1004 raw_spin_unlock_irqrestore(&wqe->lock, flags);
1007 static void io_wqe_cancel_running_work(struct io_wqe *wqe,
1008 struct io_cb_cancel_data *match)
1011 io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
1015 enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
1016 void *data, bool cancel_all)
1018 struct io_cb_cancel_data match = {
1021 .cancel_all = cancel_all,
1026 * First check pending list, if we're lucky we can just remove it
1027 * from there. CANCEL_OK means that the work is returned as-new,
1028 * no completion will be posted for it.
1030 for_each_node(node) {
1031 struct io_wqe *wqe = wq->wqes[node];
1033 io_wqe_cancel_pending_work(wqe, &match);
1034 if (match.nr_pending && !match.cancel_all)
1035 return IO_WQ_CANCEL_OK;
1039 * Now check if a free (going busy) or busy worker has the work
1040 * currently running. If we find it there, we'll return CANCEL_RUNNING
1041 * as an indication that we attempt to signal cancellation. The
1042 * completion will run normally in this case.
1044 for_each_node(node) {
1045 struct io_wqe *wqe = wq->wqes[node];
1047 io_wqe_cancel_running_work(wqe, &match);
1048 if (match.nr_running && !match.cancel_all)
1049 return IO_WQ_CANCEL_RUNNING;
1052 if (match.nr_running)
1053 return IO_WQ_CANCEL_RUNNING;
1054 if (match.nr_pending)
1055 return IO_WQ_CANCEL_OK;
1056 return IO_WQ_CANCEL_NOTFOUND;
1059 static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
1061 return work == data;
1064 enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
1066 return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
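/*
 * Usage sketch (hypothetical): cancel every pending or running work item that
 * belongs to one caller-defined context via the callback form. The
 * example_req/ctx pairing is an assumption carried over from the earlier
 * sketches.
 */
static bool example_match_ctx(struct io_wq_work *work, void *data)
{
	struct example_req *req = container_of(work, struct example_req, work);

	return req->ctx == data;
}

static enum io_wq_cancel example_cancel_ctx(struct io_wq *wq, void *ctx)
{
	/* cancel_all == true: keep scanning even after the first match */
	return io_wq_cancel_cb(wq, example_match_ctx, ctx, true);
}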
1069 struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
1071 int ret = -ENOMEM, node;
1074 if (WARN_ON_ONCE(!data->free_work || !data->do_work))
1075 return ERR_PTR(-EINVAL);
1077 wq = kzalloc(sizeof(*wq), GFP_KERNEL);
1079 return ERR_PTR(-ENOMEM);
1081 wq->wqes = kcalloc(nr_node_ids, sizeof(struct io_wqe *), GFP_KERNEL);
1084 return ERR_PTR(-ENOMEM);
1087 wq->free_work = data->free_work;
1088 wq->do_work = data->do_work;
1090 /* caller must already hold a reference to this */
1091 wq->user = data->user;
1093 for_each_node(node) {
1095 int alloc_node = node;
1097 if (!node_online(alloc_node))
1098 alloc_node = NUMA_NO_NODE;
1099 wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
1102 wq->wqes[node] = wqe;
1103 wqe->node = alloc_node;
1104 wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
1105 atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
1107 wqe->acct[IO_WQ_ACCT_UNBOUND].max_workers =
1108 task_rlimit(current, RLIMIT_NPROC);
1110 atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
1112 raw_spin_lock_init(&wqe->lock);
1113 INIT_WQ_LIST(&wqe->work_list);
1114 INIT_HLIST_NULLS_HEAD(&wqe->free_list, 0);
1115 INIT_LIST_HEAD(&wqe->all_list);
1118 init_completion(&wq->done);
1120 wq->manager = kthread_create(io_wq_manager, wq, "io_wq_manager");
1121 if (!IS_ERR(wq->manager)) {
1122 wake_up_process(wq->manager);
1123 wait_for_completion(&wq->done);
1124 if (test_bit(IO_WQ_BIT_ERROR, &wq->state)) {
1128 refcount_set(&wq->use_refs, 1);
1129 reinit_completion(&wq->done);
1133 ret = PTR_ERR(wq->manager);
1134 complete(&wq->done);
1137 kfree(wq->wqes[node]);
1140 return ERR_PTR(ret);
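/*
 * Setup sketch (hypothetical handlers, not part of this file): a user of
 * io-wq supplies do_work/free_work callbacks and a user_struct for the
 * unbounded-worker accounting, then creates the pool with a cap on bounded
 * workers. io_wq_create() returns an ERR_PTR() on failure.
 */
static struct io_wq_work *example_do_work(struct io_wq_work *work)
{
	/* execute the request; return a linked work item, or NULL if none */
	return NULL;
}

static void example_free_work(struct io_wq_work *work)
{
	/* drop whatever reference/memory the submitter attached to @work */
}

static struct io_wq *example_create_wq(struct user_struct *user,
				       unsigned int bounded)
{
	struct io_wq_data data = {
		.user		= user,
		.free_work	= example_free_work,
		.do_work	= example_do_work,
	};

	return io_wq_create(bounded, &data);
}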
1143 bool io_wq_get(struct io_wq *wq, struct io_wq_data *data)
1145 if (data->free_work != wq->free_work || data->do_work != wq->do_work)
1148 return refcount_inc_not_zero(&wq->use_refs);
1151 static void __io_wq_destroy(struct io_wq *wq)
1155 set_bit(IO_WQ_BIT_EXIT, &wq->state);
1157 kthread_stop(wq->manager);
1161 io_wq_for_each_worker(wq->wqes[node], io_wq_worker_wake, NULL);
1164 wait_for_completion(&wq->done);
1167 kfree(wq->wqes[node]);
1172 void io_wq_destroy(struct io_wq *wq)
1174 if (refcount_dec_and_test(&wq->use_refs))
1175 __io_wq_destroy(wq);
1178 struct task_struct *io_wq_get_task(struct io_wq *wq)
1180 return wq->manager;