/*
 * linux/kernel/workqueue.c
 *
 * Generic mechanism for defining kernel helper threads for running
 * arbitrary tasks in process context.
 *
 * Started by Ingo Molnar, Copyright (C) 2002
 *
 * Derived from the taskqueue/keventd code by:
 *
 *   David Woodhouse <dwmw2@infradead.org>
 *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
 *   Theodore Ts'o <tytso@mit.edu>
 *
 * Made to use alloc_percpu by Christoph Lameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/completion.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/hardirq.h>
#include <linux/mempolicy.h>
#include <linux/freezer.h>
#include <linux/kallsyms.h>
#include <linux/debug_locks.h>
#include <linux/lockdep.h>
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>
/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Set during initialization and read-only afterwards.
 *
 * L: cwq->lock protected.  Access with cwq->lock held.
 *
 * W: workqueue_lock protected.
 */
/*
 * The per-CPU workqueue (if single thread, we always use the first
 * possible cpu).
 */
struct cpu_workqueue_struct {

	spinlock_t lock;

	struct list_head worklist;
	wait_queue_head_t more_work;
	struct work_struct *current_work;

	struct workqueue_struct *wq;		/* I: the owning workqueue */
	struct task_struct *thread;
} ____cacheline_aligned;
/*
 * The externally visible workqueue abstraction is an array of
 * per-CPU workqueues:
 */
struct workqueue_struct {
	unsigned int flags;			/* I: WQ_* flags */
	struct cpu_workqueue_struct *cpu_wq;	/* I: cwq's */
	struct list_head list;			/* W: list of all workqueues */
	const char *name;			/* I: workqueue name */
#ifdef CONFIG_LOCKDEP
	struct lockdep_map lockdep_map;
#endif
};
#ifdef CONFIG_DEBUG_OBJECTS_WORK

static struct debug_obj_descr work_debug_descr;

/*
 * fixup_init is called when:
 * - an active object is initialized
 */
static int work_fixup_init(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_init(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

/*
 * fixup_activate is called when:
 * - an active object is activated
 * - an unknown object is activated (might be a statically initialized object)
 */
static int work_fixup_activate(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_NOTAVAILABLE:
		/*
		 * This is not really a fixup. The work struct was
		 * statically initialized. We just make sure that it
		 * is tracked in the object tracker.
		 */
		if (test_bit(WORK_STRUCT_STATIC_BIT, work_data_bits(work))) {
			debug_object_init(work, &work_debug_descr);
			debug_object_activate(work, &work_debug_descr);
			return 0;
		}
		WARN_ON_ONCE(1);
		return 0;

	case ODEBUG_STATE_ACTIVE:
		WARN_ON(1);
	default:
		return 0;
	}
}

/*
 * fixup_free is called when:
 * - an active object is freed
 */
static int work_fixup_free(void *addr, enum debug_obj_state state)
{
	struct work_struct *work = addr;

	switch (state) {
	case ODEBUG_STATE_ACTIVE:
		cancel_work_sync(work);
		debug_object_free(work, &work_debug_descr);
		return 1;
	default:
		return 0;
	}
}

static struct debug_obj_descr work_debug_descr = {
	.name		= "work_struct",
	.fixup_init	= work_fixup_init,
	.fixup_activate	= work_fixup_activate,
	.fixup_free	= work_fixup_free,
};
static inline void debug_work_activate(struct work_struct *work)
{
	debug_object_activate(work, &work_debug_descr);
}

static inline void debug_work_deactivate(struct work_struct *work)
{
	debug_object_deactivate(work, &work_debug_descr);
}

void __init_work(struct work_struct *work, int onstack)
{
	if (onstack)
		debug_object_init_on_stack(work, &work_debug_descr);
	else
		debug_object_init(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(__init_work);

void destroy_work_on_stack(struct work_struct *work)
{
	debug_object_free(work, &work_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_work_on_stack);

#else
static inline void debug_work_activate(struct work_struct *work) { }
static inline void debug_work_deactivate(struct work_struct *work) { }
#endif
/* Serializes the accesses to the list of workqueues. */
static DEFINE_SPINLOCK(workqueue_lock);
static LIST_HEAD(workqueues);

static int singlethread_cpu __read_mostly;
static const struct cpumask *cpu_singlethread_map __read_mostly;
/*
 * _cpu_down() first removes CPU from cpu_online_map, then CPU_DEAD
 * flushes cwq->worklist. This means that flush_workqueue/wait_on_work
 * which comes in between can't use for_each_online_cpu(). We could
 * use cpu_possible_map, the cpumask below is more a documentation
 * than optimization.
 */
static cpumask_var_t cpu_populated_map __read_mostly;
/* If it's single threaded, it isn't in the list of workqueues. */
static inline bool is_wq_single_threaded(struct workqueue_struct *wq)
{
	return wq->flags & WQ_SINGLE_THREAD;
}

static const struct cpumask *wq_cpu_map(struct workqueue_struct *wq)
{
	return is_wq_single_threaded(wq)
		? cpu_singlethread_map : cpu_populated_map;
}

static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
					    struct workqueue_struct *wq)
{
	if (unlikely(is_wq_single_threaded(wq)))
		cpu = singlethread_cpu;
	return per_cpu_ptr(wq->cpu_wq, cpu);
}
/*
 * Set the workqueue on which a work item is to be run.
 * - Must *only* be called if the pending flag is set.
 */
static inline void set_wq_data(struct work_struct *work,
			       struct cpu_workqueue_struct *cwq,
			       unsigned long extra_flags)
{
	BUG_ON(!work_pending(work));

	atomic_long_set(&work->data, (unsigned long)cwq | work_static(work) |
			WORK_STRUCT_PENDING | extra_flags);
}

/*
 * Clear WORK_STRUCT_PENDING and the workqueue on which it was queued.
 */
static inline void clear_wq_data(struct work_struct *work)
{
	atomic_long_set(&work->data, work_static(work));
}

static inline
struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
{
	return (void *)(atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
}
/**
 * insert_work - insert a work into cwq
 * @cwq: cwq @work belongs to
 * @work: work to insert
 * @head: insertion point
 * @extra_flags: extra WORK_STRUCT_* flags to set
 *
 * Insert @work into @cwq after @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_work(struct cpu_workqueue_struct *cwq,
			struct work_struct *work, struct list_head *head,
			unsigned int extra_flags)
{
	trace_workqueue_insertion(cwq->thread, work);

	/* we own @work, set data and link */
	set_wq_data(work, cwq, extra_flags);

	/*
	 * Ensure that we get the right work->data if we see the
	 * result of list_add() below, see try_to_grab_pending().
	 */
	smp_wmb();

	list_add_tail(&work->entry, head);
	wake_up(&cwq->more_work);
}
static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
			 struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
	unsigned long flags;

	debug_work_activate(work);
	spin_lock_irqsave(&cwq->lock, flags);
	BUG_ON(!list_empty(&work->entry));
	insert_work(cwq, work, &cwq->worklist, 0);
	spin_unlock_irqrestore(&cwq->lock, flags);
}
/**
 * queue_work - queue work on a workqueue
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to the CPU on which it was submitted, but if the CPU dies
 * it can be processed by another CPU.
 */
int queue_work(struct workqueue_struct *wq, struct work_struct *work)
{
	int ret;

	ret = queue_work_on(get_cpu(), wq, work);
	put_cpu();

	return ret;
}
EXPORT_SYMBOL_GPL(queue_work);
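
/*
 * Example (illustrative only, not part of this file): a minimal sketch of
 * how a driver might use queue_work().  The workqueue "my_wq", the work item
 * "my_work" and my_work_fn() are hypothetical names.
 */
#if 0
static struct workqueue_struct *my_wq;	/* created with create_workqueue() */
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* runs in process context and may sleep */
	pr_info("my_work ran\n");
}

static void my_submit(void)
{
	INIT_WORK(&my_work, my_work_fn);
	if (!queue_work(my_wq, &my_work))
		pr_debug("my_work was already pending\n");
}
#endif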
/**
 * queue_work_on - queue work on specific cpu
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @work: work to queue
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 *
 * We queue the work to a specific CPU, the caller must ensure it
 * can't go away.
 */
int
queue_work_on(int cpu, struct workqueue_struct *wq, struct work_struct *work)
{
	int ret = 0;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		__queue_work(cpu, wq, work);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_work_on);
static void delayed_work_timer_fn(unsigned long __data)
{
	struct delayed_work *dwork = (struct delayed_work *)__data;
	struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);

	__queue_work(smp_processor_id(), cwq->wq, &dwork->work);
}
/**
 * queue_delayed_work - queue work on a workqueue after delay
 * @wq: workqueue to use
 * @dwork: delayable work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work(struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	if (delay == 0)
		return queue_work(wq, &dwork->work);

	return queue_delayed_work_on(-1, wq, dwork, delay);
}
EXPORT_SYMBOL_GPL(queue_delayed_work);
/**
 * queue_delayed_work_on - queue work on specific CPU after delay
 * @cpu: CPU number to execute work on
 * @wq: workqueue to use
 * @dwork: work to queue
 * @delay: number of jiffies to wait before queueing
 *
 * Returns 0 if @work was already on a queue, non-zero otherwise.
 */
int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
			struct delayed_work *dwork, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &dwork->timer;
	struct work_struct *work = &dwork->work;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
		BUG_ON(timer_pending(timer));
		BUG_ON(!list_empty(&work->entry));

		timer_stats_timer_set_start_info(&dwork->timer);

		/* This stores cwq for the moment, for the timer_fn */
		set_wq_data(work, get_cwq(raw_smp_processor_id(), wq), 0);
		timer->expires = jiffies + delay;
		timer->data = (unsigned long)dwork;
		timer->function = delayed_work_timer_fn;

		if (unlikely(cpu >= 0))
			add_timer_on(timer, cpu);
		else
			add_timer(timer);
		ret = 1;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(queue_delayed_work_on);
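
/*
 * Example (illustrative only): a hypothetical poll routine deferred by 100ms
 * and re-armed from its own callback.  "my_wq", "my_dwork" and my_poll_fn()
 * are made-up names.
 */
#if 0
static struct workqueue_struct *my_wq;	/* created elsewhere */
static struct delayed_work my_dwork;

static void my_poll_fn(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);

	/* ... do the periodic job, then re-arm ... */
	queue_delayed_work(my_wq, dwork, msecs_to_jiffies(100));
}

static void my_start_polling(void)
{
	INIT_DELAYED_WORK(&my_dwork, my_poll_fn);
	queue_delayed_work(my_wq, &my_dwork, msecs_to_jiffies(100));
}
#endif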
/**
 * process_one_work - process single work
 * @cwq: cwq to process work for
 * @work: work to process
 *
 * Process @work.  This function contains all the logic necessary to
 * process a single work including synchronization against and
 * interaction with other workers on the same cpu, queueing and
 * flushing.  As long as the context requirement is met, any worker can
 * call this function to process a work.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct cpu_workqueue_struct *cwq,
			     struct work_struct *work)
{
	work_func_t f = work->func;
#ifdef CONFIG_LOCKDEP
	/*
	 * It is permissible to free the struct work_struct from
	 * inside the function that is called from it, this we need to
	 * take into account for lockdep too.  To avoid bogus "held
	 * lock freed" warnings as well as problems when looking into
	 * work->lockdep_map, make a copy and use that here.
	 */
	struct lockdep_map lockdep_map = work->lockdep_map;
#endif
	/* claim and process */
	trace_workqueue_execution(cwq->thread, work);
	debug_work_deactivate(work);
	cwq->current_work = work;
	list_del_init(&work->entry);

	spin_unlock_irq(&cwq->lock);

	BUG_ON(get_wq_data(work) != cwq);
	work_clear_pending(work);
	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_acquire(&lockdep_map);
	f(work);
	lock_map_release(&lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
		       "%s/0x%08x/%d\n",
		       current->comm, preempt_count(), task_pid_nr(current));
		printk(KERN_ERR "    last function: ");
		print_symbol("%s\n", (unsigned long)f);
		debug_show_held_locks(current);
		dump_stack();
	}

	spin_lock_irq(&cwq->lock);

	/* we're done with it, release */
	cwq->current_work = NULL;
}
static void run_workqueue(struct cpu_workqueue_struct *cwq)
{
	spin_lock_irq(&cwq->lock);
	while (!list_empty(&cwq->worklist)) {
		struct work_struct *work = list_entry(cwq->worklist.next,
						struct work_struct, entry);
		process_one_work(cwq, work);
	}
	spin_unlock_irq(&cwq->lock);
}
/**
 * worker_thread - the worker thread function
 * @__cwq: cwq to serve
 *
 * The cwq worker thread function.
 */
static int worker_thread(void *__cwq)
{
	struct cpu_workqueue_struct *cwq = __cwq;
	DEFINE_WAIT(wait);

	if (cwq->wq->flags & WQ_FREEZEABLE)
		set_freezable();

	for (;;) {
		prepare_to_wait(&cwq->more_work, &wait, TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !kthread_should_stop() &&
		    list_empty(&cwq->worklist))
			schedule();
		finish_wait(&cwq->more_work, &wait);

		try_to_freeze();

		if (kthread_should_stop())
			break;

		run_workqueue(cwq);
	}

	return 0;
}
struct wq_barrier {
	struct work_struct work;
	struct completion done;
};

static void wq_barrier_func(struct work_struct *work)
{
	struct wq_barrier *barr = container_of(work, struct wq_barrier, work);

	complete(&barr->done);
}
/**
 * insert_wq_barrier - insert a barrier work
 * @cwq: cwq to insert barrier into
 * @barr: wq_barrier to insert
 * @head: insertion point
 *
 * Insert barrier @barr into @cwq before @head.
 *
 * CONTEXT:
 * spin_lock_irq(cwq->lock).
 */
static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
			      struct wq_barrier *barr, struct list_head *head)
{
	/*
	 * debugobject calls are safe here even with cwq->lock locked
	 * as we know for sure that this will not trigger any of the
	 * checks and call back into the fixup functions where we
	 * might deadlock.
	 */
	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
	init_completion(&barr->done);

	debug_work_activate(&barr->work);
	insert_work(cwq, &barr->work, head, 0);
}
static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
{
	int active = 0;
	struct wq_barrier barr;

	WARN_ON(cwq->thread == current);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
		insert_wq_barrier(cwq, &barr, &cwq->worklist);
		active = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (active) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}

	return active;
}
/**
 * flush_workqueue - ensure that any scheduled work has run to completion.
 * @wq: workqueue to flush
 *
 * Forces execution of the workqueue and blocks until its completion.
 * This is typically used in driver shutdown handlers.
 *
 * We sleep until all works which were queued on entry have been handled,
 * but we are not livelocked by new incoming ones.
 */
void flush_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	might_sleep();
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);
	for_each_cpu(cpu, cpu_map)
		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
}
EXPORT_SYMBOL_GPL(flush_workqueue);
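
/*
 * Example (illustrative only): a typical driver shutdown sequence.  Pending
 * work is flushed before the resources it touches are torn down.  "my_wq"
 * and my_shutdown() are hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;

static void my_shutdown(void)
{
	/* caller has ensured no new work will be queued on my_wq */
	flush_workqueue(my_wq);		/* wait for everything already queued */
	destroy_workqueue(my_wq);	/* runs remaining work, then frees it */
}
#endif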
/**
 * flush_work - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns false if @work has already terminated.
 *
 * It is expected that, prior to calling flush_work(), the caller has
 * arranged for the work to not be requeued, otherwise it doesn't make
 * sense to use this function.
 */
int flush_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct list_head *prev;
	struct wq_barrier barr;

	might_sleep();
	cwq = get_wq_data(work);
	if (!cwq)
		return 0;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * See the comment near try_to_grab_pending()->smp_rmb().
		 * If it was re-queued under us we are not going to wait.
		 */
		smp_rmb();
		if (unlikely(cwq != get_wq_data(work)))
			goto already_gone;
		prev = &work->entry;
	} else {
		if (cwq->current_work != work)
			goto already_gone;
		prev = &cwq->worklist;
	}
	insert_wq_barrier(cwq, &barr, prev->next);

	spin_unlock_irq(&cwq->lock);
	wait_for_completion(&barr.done);
	destroy_work_on_stack(&barr.work);
	return 1;
already_gone:
	spin_unlock_irq(&cwq->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(flush_work);
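
/*
 * Example (illustrative only): waiting for one specific work item instead of
 * flushing the whole queue.  "struct my_device" and my_quiesce() are made-up;
 * the callback is assumed to check ->enabled and not re-arm itself.
 */
#if 0
struct my_device {
	bool enabled;
	struct work_struct update_work;
};

static void my_quiesce(struct my_device *my_dev)
{
	my_dev->enabled = false;		/* callback won't re-queue itself */
	flush_work(&my_dev->update_work);	/* wait if it is queued or running */
}
#endif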
/*
 * Upon a successful return (>= 0), the caller "owns" WORK_STRUCT_PENDING bit,
 * so this work can't be re-armed in any way.
 */
static int try_to_grab_pending(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	int ret = -1;

	if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)))
		return 0;

	/*
	 * The queueing is in progress, or it is already queued. Try to
	 * steal it from ->worklist without clearing WORK_STRUCT_PENDING.
	 */
	cwq = get_wq_data(work);
	if (!cwq)
		return ret;

	spin_lock_irq(&cwq->lock);
	if (!list_empty(&work->entry)) {
		/*
		 * This work is queued, but perhaps we locked the wrong cwq.
		 * In that case we must see the new value after rmb(), see
		 * insert_work()->wmb().
		 */
		smp_rmb();
		if (cwq == get_wq_data(work)) {
			debug_work_deactivate(work);
			list_del_init(&work->entry);
			ret = 1;
		}
	}
	spin_unlock_irq(&cwq->lock);

	return ret;
}
static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
				struct work_struct *work)
{
	struct wq_barrier barr;
	int running = 0;

	spin_lock_irq(&cwq->lock);
	if (unlikely(cwq->current_work == work)) {
		insert_wq_barrier(cwq, &barr, cwq->worklist.next);
		running = 1;
	}
	spin_unlock_irq(&cwq->lock);

	if (unlikely(running)) {
		wait_for_completion(&barr.done);
		destroy_work_on_stack(&barr.work);
	}
}
static void wait_on_work(struct work_struct *work)
{
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	const struct cpumask *cpu_map;
	int cpu;

	might_sleep();

	lock_map_acquire(&work->lockdep_map);
	lock_map_release(&work->lockdep_map);

	cwq = get_wq_data(work);
	if (!cwq)
		return;

	wq = cwq->wq;
	cpu_map = wq_cpu_map(wq);

	for_each_cpu(cpu, cpu_map)
		wait_on_cpu_work(get_cwq(cpu, wq), work);
}
static int __cancel_work_timer(struct work_struct *work,
				struct timer_list *timer)
{
	int ret;

	do {
		ret = (timer && likely(del_timer(timer)));
		if (!ret)
			ret = try_to_grab_pending(work);
		wait_on_work(work);
	} while (unlikely(ret < 0));

	clear_wq_data(work);
	return ret;
}
/**
 * cancel_work_sync - block until a work_struct's callback has terminated
 * @work: the work which is to be flushed
 *
 * Returns true if @work was pending.
 *
 * cancel_work_sync() will cancel the work if it is queued. If the work's
 * callback appears to be running, cancel_work_sync() will block until it
 * has completed.
 *
 * It is possible to use this function if the work re-queues itself. It can
 * cancel the work even if it migrates to another workqueue, however in that
 * case it only guarantees that work->func() has completed on the last queued
 * workqueue.
 *
 * cancel_work_sync(&delayed_work->work) should be used only if ->timer is not
 * pending, otherwise it goes into a busy-wait loop until the timer expires.
 *
 * The caller must ensure that the workqueue_struct on which this work was
 * last queued can't be destroyed before this function returns.
 */
int cancel_work_sync(struct work_struct *work)
{
	return __cancel_work_timer(work, NULL);
}
EXPORT_SYMBOL_GPL(cancel_work_sync);
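
/*
 * Example (illustrative only): cancelling work in a device remove path.
 * cancel_work_sync() removes a queued-but-not-started item and waits for a
 * running callback, so the callback can no longer touch the device after it
 * returns.  "struct my_device" and my_remove() are hypothetical.
 */
#if 0
struct my_device {
	struct work_struct reset_work;
	struct delayed_work poll_dwork;
};

static void my_remove(struct my_device *my_dev)
{
	cancel_work_sync(&my_dev->reset_work);		/* plain work item */
	cancel_delayed_work_sync(&my_dev->poll_dwork);	/* timer + work item */
	/* callbacks are guaranteed not to be running anymore */
	kfree(my_dev);
}
#endif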
/**
 * cancel_delayed_work_sync - reliably kill off a delayed work.
 * @dwork: the delayed work struct
 *
 * Returns true if @dwork was pending.
 *
 * It is possible to use this function if @dwork rearms itself via queue_work()
 * or queue_delayed_work(). See also the comment for cancel_work_sync().
 */
int cancel_delayed_work_sync(struct delayed_work *dwork)
{
	return __cancel_work_timer(&dwork->work, &dwork->timer);
}
EXPORT_SYMBOL(cancel_delayed_work_sync);
static struct workqueue_struct *keventd_wq __read_mostly;

/**
 * schedule_work - put work task in global workqueue
 * @work: job to be done
 *
 * Returns zero if @work was already on the kernel-global workqueue and
 * non-zero otherwise.
 *
 * This puts a job in the kernel-global workqueue if it was not already
 * queued and leaves it in the same position on the kernel-global
 * workqueue otherwise.
 */
int schedule_work(struct work_struct *work)
{
	return queue_work(keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work);
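
/*
 * Example (illustrative only): the classic "bottom half" pattern.  An
 * interrupt handler must not sleep, so it defers the sleeping part to the
 * kernel-global workqueue via schedule_work().  Names are hypothetical and
 * my_irq_work is assumed to have been INIT_WORK()ed during probe.
 */
#if 0
static struct work_struct my_irq_work;

static void my_irq_work_fn(struct work_struct *work)
{
	/* process context: may sleep, take mutexes, do I/O, ... */
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* atomic context: just acknowledge the hardware and defer */
	schedule_work(&my_irq_work);
	return IRQ_HANDLED;
}
#endif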
/**
 * schedule_work_on - put work task on a specific cpu
 * @cpu: cpu to put the work task on
 * @work: job to be done
 *
 * This puts a job on a specific cpu.
 */
int schedule_work_on(int cpu, struct work_struct *work)
{
	return queue_work_on(cpu, keventd_wq, work);
}
EXPORT_SYMBOL(schedule_work_on);
/**
 * schedule_delayed_work - put work task in global workqueue after delay
 * @dwork: job to be done
 * @delay: number of jiffies to wait or 0 for immediate execution
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue.
 */
int schedule_delayed_work(struct delayed_work *dwork,
			  unsigned long delay)
{
	return queue_delayed_work(keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work);
/**
 * flush_delayed_work - block until a dwork_struct's callback has terminated
 * @dwork: the delayed work which is to be flushed
 *
 * Any timeout is cancelled, and any pending work is run immediately.
 */
void flush_delayed_work(struct delayed_work *dwork)
{
	if (del_timer_sync(&dwork->timer)) {
		__queue_work(get_cpu(), get_wq_data(&dwork->work)->wq,
			     &dwork->work);
		put_cpu();
	}
	flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
/**
 * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
 * @cpu: cpu to use
 * @dwork: job to be done
 * @delay: number of jiffies to wait
 *
 * After waiting for a given time this puts a job in the kernel-global
 * workqueue on the specified CPU.
 */
int schedule_delayed_work_on(int cpu,
			struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work_on(cpu, keventd_wq, dwork, delay);
}
EXPORT_SYMBOL(schedule_delayed_work_on);
/**
 * schedule_on_each_cpu - call a function on each online CPU from keventd
 * @func: the function to call
 *
 * Returns zero on success.
 * Returns -ve errno on failure.
 *
 * schedule_on_each_cpu() is very slow.
 */
int schedule_on_each_cpu(work_func_t func)
{
	int cpu;
	int orig = -1;
	struct work_struct *works;

	works = alloc_percpu(struct work_struct);
	if (!works)
		return -ENOMEM;

	get_online_cpus();

	/*
	 * When running in keventd don't schedule a work item on
	 * itself.  Can just call directly because the work queue is
	 * already bound.  This also is faster.
	 */
	if (current_is_keventd())
		orig = raw_smp_processor_id();

	for_each_online_cpu(cpu) {
		struct work_struct *work = per_cpu_ptr(works, cpu);

		INIT_WORK(work, func);
		if (cpu != orig)
			schedule_work_on(cpu, work);
	}
	if (orig >= 0)
		func(per_cpu_ptr(works, orig));

	for_each_online_cpu(cpu)
		flush_work(per_cpu_ptr(works, cpu));

	put_online_cpus();
	free_percpu(works);
	return 0;
}
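
/*
 * Example (illustrative only): running a per-cpu cache drain on every online
 * CPU and waiting for all of them to finish.  my_drain_local_cache() is a
 * made-up callback; it runs in keventd context on each CPU in turn.
 */
#if 0
static void my_drain_local_cache(struct work_struct *dummy)
{
	/* operates only on this CPU's data, hence the per-cpu execution */
}

static int my_drain_all_caches(void)
{
	return schedule_on_each_cpu(my_drain_local_cache);	/* may sleep */
}
#endif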
/**
 * flush_scheduled_work - ensure that any scheduled work has run to completion.
 *
 * Forces execution of the kernel-global workqueue and blocks until its
 * completion.
 *
 * Think twice before calling this function!  It's very easy to get into
 * trouble if you don't take great care.  Either of the following situations
 * will lead to deadlock:
 *
 *	One of the work items currently on the workqueue needs to acquire
 *	a lock held by your code or its caller.
 *
 *	Your code is running in the context of a work routine.
 *
 * They will be detected by lockdep when they occur, but the first might not
 * occur very often.  It depends on what work items are on the workqueue and
 * what locks they need, which you have no control over.
 *
 * In most situations flushing the entire workqueue is overkill; you merely
 * need to know that a particular work item isn't queued and isn't running.
 * In such cases you should use cancel_delayed_work_sync() or
 * cancel_work_sync() instead.
 */
void flush_scheduled_work(void)
{
	flush_workqueue(keventd_wq);
}
EXPORT_SYMBOL(flush_scheduled_work);
/**
 * execute_in_process_context - reliably execute the routine with user context
 * @fn:		the function to execute
 * @ew:		guaranteed storage for the execute work structure (must
 *		be available when the work executes)
 *
 * Executes the function immediately if process context is available,
 * otherwise schedules the function for delayed execution.
 *
 * Returns:	0 - function was executed
 *		1 - function was scheduled for execution
 */
int execute_in_process_context(work_func_t fn, struct execute_work *ew)
{
	if (!in_interrupt()) {
		fn(&ew->work);
		return 0;
	}

	INIT_WORK(&ew->work, fn);
	schedule_work(&ew->work);

	return 1;
}
EXPORT_SYMBOL_GPL(execute_in_process_context);
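
/*
 * Example (illustrative only): a release routine that may be called from
 * either process or interrupt context.  execute_in_process_context() runs it
 * inline when possible and defers it otherwise; the execute_work storage must
 * live until the callback runs.  "struct my_object" and my_put() are made up.
 */
#if 0
struct my_object {
	atomic_t refcnt;
	struct execute_work ew;
};

static void my_release_fn(struct work_struct *work)
{
	struct my_object *obj = container_of(work, struct my_object, ew.work);

	kfree(obj);
}

static void my_put(struct my_object *obj)
{
	if (atomic_dec_and_test(&obj->refcnt))
		execute_in_process_context(my_release_fn, &obj->ew);
}
#endif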
int keventd_up(void)
{
	return keventd_wq != NULL;
}

int current_is_keventd(void)
{
	struct cpu_workqueue_struct *cwq;
	int cpu = raw_smp_processor_id(); /* preempt-safe: keventd is per-cpu */
	int ret = 0;

	BUG_ON(!keventd_wq);

	cwq = per_cpu_ptr(keventd_wq->cpu_wq, cpu);
	if (current == cwq->thread)
		ret = 1;

	return ret;
}
static struct cpu_workqueue_struct *
init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
{
	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);

	cwq->wq = wq;
	spin_lock_init(&cwq->lock);
	INIT_LIST_HEAD(&cwq->worklist);
	init_waitqueue_head(&cwq->more_work);

	return cwq;
}
static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct workqueue_struct *wq = cwq->wq;
	const char *fmt = is_wq_single_threaded(wq) ? "%s" : "%s/%d";
	struct task_struct *p;

	p = kthread_create(worker_thread, cwq, fmt, wq->name, cpu);
	/*
	 * Nobody can add the work_struct to this cwq,
	 *	if (caller is __create_workqueue)
	 *		nobody should see this wq
	 *	else // caller is CPU_UP_PREPARE
	 *		cpu is not on cpu_online_map
	 * so we can abort safely.
	 */
	if (IS_ERR(p))
		return PTR_ERR(p);
	cwq->thread = p;

	trace_workqueue_creation(cwq->thread, cpu);

	return 0;
}
static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
{
	struct task_struct *p = cwq->thread;

	if (p != NULL) {
		if (cpu >= 0)
			kthread_bind(p, cpu);
		wake_up_process(p);
	}
}
struct workqueue_struct *__create_workqueue_key(const char *name,
						unsigned int flags,
						struct lock_class_key *key,
						const char *lock_name)
{
	struct workqueue_struct *wq;
	struct cpu_workqueue_struct *cwq;
	int err = 0, cpu;

	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
	if (!wq)
		goto err;

	wq->cpu_wq = alloc_percpu(struct cpu_workqueue_struct);
	if (!wq->cpu_wq)
		goto err;

	wq->flags = flags;
	wq->name = name;
	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
	INIT_LIST_HEAD(&wq->list);

	if (flags & WQ_SINGLE_THREAD) {
		cwq = init_cpu_workqueue(wq, singlethread_cpu);
		err = create_workqueue_thread(cwq, singlethread_cpu);
		start_workqueue_thread(cwq, -1);
	} else {
		cpu_maps_update_begin();
		/*
		 * We must place this wq on list even if the code below fails.
		 * cpu_down(cpu) can remove cpu from cpu_populated_map before
		 * destroy_workqueue() takes the lock, in that case we leak
		 * cwq[cpu]->thread.
		 */
		spin_lock(&workqueue_lock);
		list_add(&wq->list, &workqueues);
		spin_unlock(&workqueue_lock);
		/*
		 * We must initialize cwqs for each possible cpu even if we
		 * are going to call destroy_workqueue() finally. Otherwise
		 * cpu_up() can hit the uninitialized cwq once we drop the
		 * lock.
		 */
		for_each_possible_cpu(cpu) {
			cwq = init_cpu_workqueue(wq, cpu);
			if (err || !cpu_online(cpu))
				continue;
			err = create_workqueue_thread(cwq, cpu);
			start_workqueue_thread(cwq, cpu);
		}
		cpu_maps_update_done();
	}

	if (err) {
		destroy_workqueue(wq);
		wq = NULL;
	}
	return wq;

err:
	if (wq) {
		free_percpu(wq->cpu_wq);
		kfree(wq);
	}
	return NULL;
}
EXPORT_SYMBOL_GPL(__create_workqueue_key);
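
/*
 * Example (illustrative only): this constructor is normally reached through
 * the create_workqueue()/create_singlethread_workqueue() wrappers from
 * linux/workqueue.h.  "my_wq", my_init() and my_exit() are hypothetical.
 */
#if 0
static struct workqueue_struct *my_wq;

static int my_init(void)
{
	my_wq = create_singlethread_workqueue("my_wq");	/* one worker thread */
	if (!my_wq)
		return -ENOMEM;
	return 0;
}

static void my_exit(void)
{
	destroy_workqueue(my_wq);	/* runs remaining work, then frees it */
}
#endif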
static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
{
	/*
	 * Our caller is either destroy_workqueue() or CPU_POST_DEAD,
	 * cpu_add_remove_lock protects cwq->thread.
	 */
	if (cwq->thread == NULL)
		return;

	lock_map_acquire(&cwq->wq->lockdep_map);
	lock_map_release(&cwq->wq->lockdep_map);

	flush_cpu_workqueue(cwq);
	/*
	 * If the caller is CPU_POST_DEAD and cwq->worklist was not empty,
	 * a concurrent flush_workqueue() can insert a barrier after us.
	 * However, in that case run_workqueue() won't return and check
	 * kthread_should_stop() until it flushes all work_struct's.
	 * When ->worklist becomes empty it is safe to exit because no
	 * more work_structs can be queued on this cwq: flush_workqueue
	 * checks list_empty(), and a "normal" queue_work() can't use
	 * a dead CPU.
	 */
	trace_workqueue_destruction(cwq->thread);
	kthread_stop(cwq->thread);
	cwq->thread = NULL;
}
/**
 * destroy_workqueue - safely terminate a workqueue
 * @wq: target workqueue
 *
 * Safely destroy a workqueue. All work currently pending will be done first.
 */
void destroy_workqueue(struct workqueue_struct *wq)
{
	const struct cpumask *cpu_map = wq_cpu_map(wq);
	int cpu;

	cpu_maps_update_begin();
	spin_lock(&workqueue_lock);
	list_del(&wq->list);
	spin_unlock(&workqueue_lock);

	for_each_cpu(cpu, cpu_map)
		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
	cpu_maps_update_done();

	free_percpu(wq->cpu_wq);
	kfree(wq);
}
EXPORT_SYMBOL_GPL(destroy_workqueue);
static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
					    unsigned long action,
					    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct cpu_workqueue_struct *cwq;
	struct workqueue_struct *wq;
	int err = 0;

	action &= ~CPU_TASKS_FROZEN;

	switch (action) {
	case CPU_UP_PREPARE:
		cpumask_set_cpu(cpu, cpu_populated_map);
	}
undo:
	list_for_each_entry(wq, &workqueues, list) {
		cwq = per_cpu_ptr(wq->cpu_wq, cpu);

		switch (action) {
		case CPU_UP_PREPARE:
			err = create_workqueue_thread(cwq, cpu);
			if (!err)
				break;
			printk(KERN_ERR "workqueue [%s] for %i failed\n",
				wq->name, cpu);
			action = CPU_UP_CANCELED;
			err = -ENOMEM;
			goto undo;

		case CPU_ONLINE:
			start_workqueue_thread(cwq, cpu);
			break;

		case CPU_UP_CANCELED:
			start_workqueue_thread(cwq, -1);
		case CPU_POST_DEAD:
			cleanup_workqueue_thread(cwq);
			break;
		}
	}

	switch (action) {
	case CPU_UP_CANCELED:
	case CPU_POST_DEAD:
		cpumask_clear_cpu(cpu, cpu_populated_map);
	}

	return notifier_from_errno(err);
}
#ifdef CONFIG_SMP

struct work_for_cpu {
	struct completion completion;
	long (*fn)(void *);
	void *arg;
	long ret;
};

static int do_work_for_cpu(void *_wfc)
{
	struct work_for_cpu *wfc = _wfc;

	wfc->ret = wfc->fn(wfc->arg);
	complete(&wfc->completion);
	return 0;
}
/**
 * work_on_cpu - run a function in user context on a particular cpu
 * @cpu: the cpu to run on
 * @fn: the function to run
 * @arg: the function arg
 *
 * This will return the value @fn returns.
 * It is up to the caller to ensure that the cpu doesn't go offline.
 * The caller must not hold any locks which would prevent @fn from completing.
 */
long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
{
	struct task_struct *sub_thread;
	struct work_for_cpu wfc = {
		.completion = COMPLETION_INITIALIZER_ONSTACK(wfc.completion),
		.fn = fn,
		.arg = arg,
	};

	sub_thread = kthread_create(do_work_for_cpu, &wfc, "work_for_cpu");
	if (IS_ERR(sub_thread))
		return PTR_ERR(sub_thread);
	kthread_bind(sub_thread, cpu);
	wake_up_process(sub_thread);
	wait_for_completion(&wfc.completion);
	return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
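
/*
 * Example (illustrative only): reading a per-cpu hardware value from the
 * right CPU without migrating the caller.  my_read_local(),
 * this_cpu_read_something() and my_read_on() are hypothetical; the caller is
 * responsible for keeping @cpu online.
 */
#if 0
static long my_read_local(void *arg)
{
	unsigned long *val = arg;

	/* runs in process context on the CPU chosen by the caller */
	*val = this_cpu_read_something();	/* hypothetical per-cpu read */
	return 0;
}

static long my_read_on(unsigned int cpu, unsigned long *val)
{
	/* caller must keep @cpu online, e.g. under get_online_cpus() */
	return work_on_cpu(cpu, my_read_local, val);
}
#endif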
#endif /* CONFIG_SMP */
void __init init_workqueues(void)
{
	alloc_cpumask_var(&cpu_populated_map, GFP_KERNEL);

	cpumask_copy(cpu_populated_map, cpu_online_mask);
	singlethread_cpu = cpumask_first(cpu_possible_mask);
	cpu_singlethread_map = cpumask_of(singlethread_cpu);
	hotcpu_notifier(workqueue_cpu_callback, 0);
	keventd_wq = create_workqueue("events");
	BUG_ON(!keventd_wq);
}