/* Worker thread pool for slow items, such as filesystem lookups or mkdirs
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 *
 * See Documentation/slow-work.txt
 */

#include <linux/module.h>
#include <linux/slow-work.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)	/* cull threads 5s after running out of
					 * things to do */
#define SLOW_WORK_OOM_TIMEOUT (5 * HZ)	/* can't start new threads for 5s after
					 * OOM */

#define SLOW_WORK_THREAD_LIMIT	255	/* abs maximum number of slow-work threads */
static void slow_work_cull_timeout(unsigned long);
static void slow_work_oom_timeout(unsigned long);

static int slow_work_min_threads_sysctl(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
static int slow_work_max_threads_sysctl(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
/*
 * The pool of threads has at least min threads in it as long as someone is
 * using the facility, and may have as many as max.
 *
 * A portion of the pool may be processing very slow operations.
 */
static unsigned slow_work_min_threads = 2;
static unsigned slow_work_max_threads = 4;
static unsigned vslow_work_proportion = 50;	/* % of threads that may process
						 * very slow work */

static const int slow_work_min_min_threads = 2;
static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
static const int slow_work_min_vslow = 1;
static const int slow_work_max_vslow = 99;
ctl_table slow_work_sysctls[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "min-threads",
		.data		= &slow_work_min_threads,
		.maxlen		= sizeof(unsigned),
		.proc_handler	= slow_work_min_threads_sysctl,
		.extra1		= (void *) &slow_work_min_min_threads,
		.extra2		= &slow_work_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "max-threads",
		.data		= &slow_work_max_threads,
		.maxlen		= sizeof(unsigned),
		.proc_handler	= slow_work_max_threads_sysctl,
		.extra1		= &slow_work_min_threads,
		.extra2		= (void *) &slow_work_max_max_threads,
	},
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "vslow-percentage",
		.data		= &vslow_work_proportion,
		.maxlen		= sizeof(unsigned),
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= (void *) &slow_work_min_vslow,
		.extra2		= (void *) &slow_work_max_vslow,
	},
	{ .ctl_name = 0 }
};
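/*
 * Illustration (not part of the original file): assuming this table is hooked
 * up under a "slow-work" directory of the kernel sysctl tree, as described in
 * Documentation/slow-work.txt, the tunables above would be adjusted from
 * userspace roughly like so:
 *
 *	echo 8  > /proc/sys/kernel/slow-work/min-threads
 *	echo 16 > /proc/sys/kernel/slow-work/max-threads
 *	echo 50 > /proc/sys/kernel/slow-work/vslow-percentage
 *
 * The extra1/extra2 pointers make the handlers keep min-threads <= max-threads
 * and keep the very-slow percentage within [1, 99].
 */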
/*
 * The active state of the thread pool
 */
static atomic_t slow_work_thread_count;
static atomic_t vslow_work_executing_count;

static bool slow_work_may_not_start_new_thread;
static bool slow_work_cull;	/* cull a thread due to lack of activity */
static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
static struct slow_work slow_work_new_thread; /* new thread starter */
/*
 * slow work ID allocation (use slow_work_queue_lock)
 */
static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
/*
 * Unregistration tracking to prevent put_ref() from disappearing during module
 * unloading
 */
#ifdef CONFIG_MODULES
static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
static struct module *slow_work_unreg_module;
static struct slow_work *slow_work_unreg_work_item;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
static DEFINE_MUTEX(slow_work_unreg_sync_lock);
#endif
/*
 * The queues of work items and the lock governing access to them. These are
 * shared between all the CPUs. It doesn't make sense to have per-CPU queues
 * as the number of threads bears no relation to the number of CPUs.
 *
 * There are two queues of work items: one for slow work items, and one for
 * very slow work items.
 */
static LIST_HEAD(slow_work_queue);
static LIST_HEAD(vslow_work_queue);
static DEFINE_SPINLOCK(slow_work_queue_lock);
/*
 * The thread controls. A variable used to signal to the threads that they
 * should exit when the queue is empty, a waitqueue used by the threads to wait
 * for signals, and a completion set by the last thread to exit.
 */
static bool slow_work_threads_should_exit;
static DECLARE_WAIT_QUEUE_HEAD(slow_work_thread_wq);
static DECLARE_COMPLETION(slow_work_last_thread_exited);
/*
 * The number of users of the thread pool and its lock. Whilst this is zero we
 * have no threads hanging around, and when it drops back to zero, we wait for
 * all active or queued work items to complete and kill all the threads we do
 * have.
 */
static int slow_work_user_count;
static DEFINE_MUTEX(slow_work_user_lock);
static inline int slow_work_get_ref(struct slow_work *work)
{
	if (work->ops->get_ref)
		return work->ops->get_ref(work);
	return 0;
}

static inline void slow_work_put_ref(struct slow_work *work)
{
	if (work->ops->put_ref)
		work->ops->put_ref(work);
}
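/*
 * Illustration (not part of the original file): a client that embeds a
 * kref-counted object around its slow_work might wire the optional ref ops up
 * like this.  The names my_object, my_get_ref, my_put_ref and
 * my_object_release are hypothetical.
 *
 *	static int my_get_ref(struct slow_work *work)
 *	{
 *		struct my_object *obj = container_of(work, struct my_object, work);
 *		kref_get(&obj->ref);
 *		return 0;
 *	}
 *
 *	static void my_put_ref(struct slow_work *work)
 *	{
 *		struct my_object *obj = container_of(work, struct my_object, work);
 *		kref_put(&obj->ref, my_object_release);
 *	}
 *
 * get_ref() returning a negative error aborts the enqueue with that error; if
 * the ops are left NULL, the helpers above simply do nothing.
 */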
/*
 * Calculate the maximum number of active threads in the pool that are
 * permitted to process very slow work items.
 *
 * The answer is rounded up to at least one, but may not equal or exceed the
 * maximum number of threads in the pool. This means we always have at least
 * one thread that can process slow work items, and we always have at least
 * one thread that won't get tied up doing so.
 */
static unsigned slow_work_calc_vsmax(void)
{
	unsigned vsmax;

	vsmax = atomic_read(&slow_work_thread_count) * vslow_work_proportion;
	vsmax /= 100;
	vsmax = max(vsmax, 1U);
	return min(vsmax, slow_work_max_threads - 1);
}
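/*
 * Worked example (illustrative): with 4 threads running and
 * vslow_work_proportion = 50, vsmax = 4 * 50 / 100 = 2; the clamps then keep
 * the result within [1, slow_work_max_threads - 1], so at least one thread is
 * always left free for ordinary slow work.
 */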
/*
 * Attempt to execute stuff queued on a slow thread. Return true if we managed
 * it, false if there was nothing to do.
 */
static bool slow_work_execute(int id)
{
#ifdef CONFIG_MODULES
	struct module *module;
#endif
	struct slow_work *work = NULL;
	unsigned vsmax;
	bool very_slow;

	vsmax = slow_work_calc_vsmax();

	/* see if we can schedule a new thread to be started if we're not
	 * keeping up with the work */
	if (!waitqueue_active(&slow_work_thread_wq) &&
	    (!list_empty(&slow_work_queue) || !list_empty(&vslow_work_queue)) &&
	    atomic_read(&slow_work_thread_count) < slow_work_max_threads &&
	    !slow_work_may_not_start_new_thread)
		slow_work_enqueue(&slow_work_new_thread);

	/* find something to execute */
	spin_lock_irq(&slow_work_queue_lock);
	if (!list_empty(&vslow_work_queue) &&
	    atomic_read(&vslow_work_executing_count) < vsmax) {
		work = list_entry(vslow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		atomic_inc(&vslow_work_executing_count);
		very_slow = true;
	} else if (!list_empty(&slow_work_queue)) {
		work = list_entry(slow_work_queue.next,
				  struct slow_work, link);
		if (test_and_set_bit_lock(SLOW_WORK_EXECUTING, &work->flags))
			BUG();
		list_del_init(&work->link);
		very_slow = false;
	} else {
		very_slow = false; /* avoid the compiler warning */
	}

#ifdef CONFIG_MODULES
	if (work)
		slow_work_thread_processing[id] = work->owner;
#endif

	spin_unlock_irq(&slow_work_queue_lock);

	if (!work)
		return false;

	if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
		BUG();

	/* don't execute if the work is in the process of being cancelled */
	if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
		work->ops->execute(work);

	if (very_slow)
		atomic_dec(&vslow_work_executing_count);
	clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);

	/* wake up anyone waiting for this work to be complete */
	wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);

	/* if someone tried to enqueue the item whilst we were executing it,
	 * then it'll be left unenqueued to avoid multiple threads trying to
	 * execute it simultaneously
	 *
	 * there is, however, a race between us testing the pending flag and
	 * getting the spinlock, and between the enqueuer setting the pending
	 * flag and getting the spinlock, so we use a deferral bit to tell us
	 * if the enqueuer got there first
	 */
	if (test_bit(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irq(&slow_work_queue_lock);

		if (!test_bit(SLOW_WORK_EXECUTING, &work->flags) &&
		    test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags))
			goto auto_requeue;

		spin_unlock_irq(&slow_work_queue_lock);
	}

	/* sort out the race between module unloading and put_ref() */
	slow_work_put_ref(work);

#ifdef CONFIG_MODULES
	module = slow_work_thread_processing[id];
	slow_work_thread_processing[id] = NULL;

	if (slow_work_unreg_work_item == work ||
	    slow_work_unreg_module == module)
		wake_up_all(&slow_work_unreg_wq);
#endif

	return true;

auto_requeue:
	/* we must complete the enqueue operation
	 * - we transfer our ref on the item back to the appropriate queue
	 * - don't wake another thread up as we're awake already
	 */
	if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
		list_add_tail(&work->link, &vslow_work_queue);
	else
		list_add_tail(&work->link, &slow_work_queue);
	spin_unlock_irq(&slow_work_queue_lock);
#ifdef CONFIG_MODULES
	slow_work_thread_processing[id] = NULL;
#endif
	return true;
}
/**
 * slow_work_enqueue - Schedule a slow work item for processing
 * @work: The work item to queue
 *
 * Schedule a slow work item for processing. If the item is already undergoing
 * execution, this guarantees not to re-enter the execution routine until the
 * first execution finishes.
 *
 * The item is pinned by this function as it retains a reference to it, managed
 * through the item operations. The item is unpinned once it has been
 * executed.
 *
 * An item may hog the thread that is running it for a relatively large amount
 * of time, sufficient, for example, to perform several lookup, mkdir, create
 * and setxattr operations. It may sleep on I/O and may sleep to obtain locks.
 *
 * Conversely, if a number of items are awaiting processing, it may take some
 * time before any given item is given attention. The number of threads in the
 * pool may be increased to deal with demand, but only up to a limit.
 *
 * If SLOW_WORK_VERY_SLOW is set on the work item, then it will be placed in
 * the very slow queue, from which only a portion of the threads will be
 * allowed to pick items to execute. This ensures that very slow items won't
 * overly block ones that are just ordinarily slow.
 *
 * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
 * attempted to be queued).
 */
int slow_work_enqueue(struct slow_work *work)
{
	unsigned long flags;
	int ret;

	if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
		return -ECANCELED;

	BUG_ON(slow_work_user_count <= 0);
	BUG_ON(!work);
	BUG_ON(!work->ops);

	/* when honouring an enqueue request, we only promise that we will run
	 * the work function in the future; we do not promise to run it once
	 * per enqueue request
	 *
	 * we use the PENDING bit to merge together repeat requests without
	 * having to disable IRQs and take the spinlock, whilst still
	 * maintaining our promise
	 */
	if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
		spin_lock_irqsave(&slow_work_queue_lock, flags);

		if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
			goto cancelled;

		/* we promise that we will not attempt to execute the work
		 * function in more than one thread simultaneously
		 *
		 * this, however, leaves us with a problem if we're asked to
		 * enqueue the work whilst someone is executing the work
		 * function as simply queueing the work immediately means that
		 * another thread may try executing it whilst it is already
		 * under execution
		 *
		 * to deal with this, we set the ENQ_DEFERRED bit instead of
		 * enqueueing, and the thread currently executing the work
		 * function will enqueue the work item when the work function
		 * returns and it has cleared the EXECUTING bit
		 */
		if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
			set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
		} else {
			ret = slow_work_get_ref(work);
			if (ret < 0)
				goto failed;
			if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
				list_add_tail(&work->link, &vslow_work_queue);
			else
				list_add_tail(&work->link, &slow_work_queue);
			wake_up(&slow_work_thread_wq);
		}

		spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	}
	return 0;

cancelled:
	ret = -ECANCELED;
failed:
	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return ret;
}
EXPORT_SYMBOL(slow_work_enqueue);
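/*
 * Illustration (not part of the original file): a typical client embeds a
 * struct slow_work in its own object, supplies an ops table and enqueues the
 * item.  The names my_object, my_ops and my_execute are hypothetical.
 *
 *	struct my_object {
 *		struct slow_work work;
 *		...
 *	};
 *
 *	static void my_execute(struct slow_work *work)
 *	{
 *		struct my_object *obj =
 *			container_of(work, struct my_object, work);
 *		... do the slow, possibly sleeping, part here ...
 *	}
 *
 *	static const struct slow_work_ops my_ops = {
 *		.owner		= THIS_MODULE,
 *		.execute	= my_execute,
 *	};
 *
 *	slow_work_init(&obj->work, &my_ops);
 *	ret = slow_work_enqueue(&obj->work);
 *
 * Items expected to hog a thread for a long time would be marked with
 * SLOW_WORK_VERY_SLOW at initialisation so that only a portion of the pool
 * can be tied up by them.
 */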
static int slow_work_wait(void *word)
{
	schedule();
	return 0;
}

/**
 * slow_work_cancel - Cancel a slow work item
 * @work: The work item to cancel
 *
 * This function will cancel a previously enqueued work item. If we cannot
 * cancel the work item, it is guaranteed to have run when this function
 * returns.
 */
void slow_work_cancel(struct slow_work *work)
{
	bool wait = true, put = false;

	set_bit(SLOW_WORK_CANCELLING, &work->flags);

	spin_lock_irq(&slow_work_queue_lock);

	if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
	    !list_empty(&work->link)) {
		/* the link in the pending queue holds a reference on the item
		 * that we will need to release */
		list_del_init(&work->link);
		wait = false;
		put = true;
		clear_bit(SLOW_WORK_PENDING, &work->flags);

	} else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
		/* the executor is holding our only reference on the item, so
		 * we merely need to wait for it to finish executing */
		clear_bit(SLOW_WORK_PENDING, &work->flags);
	}

	spin_unlock_irq(&slow_work_queue_lock);

	/* the EXECUTING flag is set by the executor whilst the spinlock is set
	 * and before the item is dequeued - so assuming the above doesn't
	 * actually dequeue it, simply waiting for the EXECUTING flag to be
	 * released here should be sufficient */
	if (wait)
		wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
			    TASK_UNINTERRUPTIBLE);

	clear_bit(SLOW_WORK_CANCELLING, &work->flags);
	if (put)
		slow_work_put_ref(work);
}
EXPORT_SYMBOL(slow_work_cancel);
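/*
 * Illustration (not part of the original file): teardown paths typically
 * cancel before freeing the containing object, since on return the item is
 * neither queued nor executing.  obj is a hypothetical name.
 *
 *	slow_work_cancel(&obj->work);
 *	kfree(obj);
 *
 * Note that cancellation may sleep (wait_on_bit above), so it must be called
 * from process context.
 */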
/*
 * Schedule a cull of the thread pool at some time in the near future
 */
static void slow_work_schedule_cull(void)
{
	mod_timer(&slow_work_cull_timer,
		  round_jiffies(jiffies + SLOW_WORK_CULL_TIMEOUT));
}
/*
 * Worker thread culling algorithm
 */
static bool slow_work_cull_thread(void)
{
	unsigned long flags;
	bool do_cull = false;

	spin_lock_irqsave(&slow_work_queue_lock, flags);

	if (slow_work_cull) {
		slow_work_cull = false;

		if (list_empty(&slow_work_queue) &&
		    list_empty(&vslow_work_queue) &&
		    atomic_read(&slow_work_thread_count) >
		    slow_work_min_threads) {
			slow_work_schedule_cull();
			do_cull = true;
		}
	}

	spin_unlock_irqrestore(&slow_work_queue_lock, flags);
	return do_cull;
}
/*
 * Determine if there is slow work available for dispatch
 */
static inline bool slow_work_available(int vsmax)
{
	return !list_empty(&slow_work_queue) ||
		(!list_empty(&vslow_work_queue) &&
		 atomic_read(&vslow_work_executing_count) < vsmax);
}
/*
 * Worker thread dispatcher
 */
static int slow_work_thread(void *_data)
{
	int vsmax, id;
	DEFINE_WAIT(wait);

	set_freezable();
	set_user_nice(current, -5);

	/* allocate ourselves an ID */
	spin_lock_irq(&slow_work_queue_lock);
	id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
	BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
	__set_bit(id, slow_work_ids);
	spin_unlock_irq(&slow_work_queue_lock);

	sprintf(current->comm, "kslowd%03u", id);

	for (;;) {
		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		prepare_to_wait_exclusive(&slow_work_thread_wq, &wait,
					  TASK_INTERRUPTIBLE);
		if (!freezing(current) &&
		    !slow_work_threads_should_exit &&
		    !slow_work_available(vsmax) &&
		    !slow_work_cull)
			schedule();
		finish_wait(&slow_work_thread_wq, &wait);

		try_to_freeze();

		vsmax = vslow_work_proportion;
		vsmax *= atomic_read(&slow_work_thread_count);
		vsmax /= 100;

		if (slow_work_available(vsmax) && slow_work_execute(id)) {
			cond_resched();
			if (list_empty(&slow_work_queue) &&
			    list_empty(&vslow_work_queue) &&
			    atomic_read(&slow_work_thread_count) >
			    slow_work_min_threads)
				slow_work_schedule_cull();
			continue;
		}

		if (slow_work_threads_should_exit)
			break;

		if (slow_work_cull && slow_work_cull_thread())
			break;
	}

	spin_lock_irq(&slow_work_queue_lock);
	__clear_bit(id, slow_work_ids);
	spin_unlock_irq(&slow_work_queue_lock);

	if (atomic_dec_and_test(&slow_work_thread_count))
		complete_and_exit(&slow_work_last_thread_exited, 0);
	return 0;
}
/*
 * Handle thread cull timer expiration
 */
static void slow_work_cull_timeout(unsigned long data)
{
	slow_work_cull = true;
	wake_up(&slow_work_thread_wq);
}
/*
 * Start a new slow work thread
 */
static void slow_work_new_thread_execute(struct slow_work *work)
{
	struct task_struct *p;

	if (slow_work_threads_should_exit)
		return;

	if (atomic_read(&slow_work_thread_count) >= slow_work_max_threads)
		return;

	if (!mutex_trylock(&slow_work_user_lock))
		return;

	slow_work_may_not_start_new_thread = true;
	atomic_inc(&slow_work_thread_count);
	p = kthread_run(slow_work_thread, NULL, "kslowd");
	if (IS_ERR(p)) {
		printk(KERN_DEBUG "Slow work thread pool: OOM\n");
		if (atomic_dec_and_test(&slow_work_thread_count))
			BUG(); /* we're running on a slow work thread... */
		mod_timer(&slow_work_oom_timer,
			  round_jiffies(jiffies + SLOW_WORK_OOM_TIMEOUT));
	} else {
		/* ratelimit the starting of new threads */
		mod_timer(&slow_work_oom_timer, jiffies + 1);
	}

	mutex_unlock(&slow_work_user_lock);
}

static const struct slow_work_ops slow_work_new_thread_ops = {
	.owner		= THIS_MODULE,
	.execute	= slow_work_new_thread_execute,
};
/*
 * post-OOM new thread start suppression expiration
 */
static void slow_work_oom_timeout(unsigned long data)
{
	slow_work_may_not_start_new_thread = false;
}
#ifdef CONFIG_SYSCTL
/*
 * Handle adjustment of the minimum number of threads
 */
static int slow_work_min_threads_sysctl(struct ctl_table *table, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to start or stop threads */
			n = atomic_read(&slow_work_thread_count) -
				slow_work_min_threads;

			if (n < 0 && !slow_work_may_not_start_new_thread)
				slow_work_enqueue(&slow_work_new_thread);
			else if (n > 0)
				slow_work_schedule_cull();
		}
		mutex_unlock(&slow_work_user_lock);
	}

	return ret;
}

/*
 * Handle adjustment of the maximum number of threads
 */
static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
					void __user *buffer,
					size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	int n;

	if (ret == 0) {
		mutex_lock(&slow_work_user_lock);
		if (slow_work_user_count > 0) {
			/* see if we need to stop threads */
			n = slow_work_max_threads -
				atomic_read(&slow_work_thread_count);

			if (n < 0)
				slow_work_schedule_cull();
		}
		mutex_unlock(&slow_work_user_lock);
	}

	return ret;
}
#endif /* CONFIG_SYSCTL */
/**
 * slow_work_register_user - Register a user of the facility
 * @module: The module about to make use of the facility
 *
 * Register a user of the facility, starting up the initial threads if there
 * aren't any other users at this point. This will return 0 if successful, or
 * an error if not.
 */
int slow_work_register_user(struct module *module)
{
	struct task_struct *p;
	int loop;

	mutex_lock(&slow_work_user_lock);

	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Starting up\n");
		init_completion(&slow_work_last_thread_exited);

		slow_work_threads_should_exit = false;
		slow_work_init(&slow_work_new_thread,
			       &slow_work_new_thread_ops);
		slow_work_may_not_start_new_thread = false;
		slow_work_cull = false;

		/* start the minimum number of threads */
		for (loop = 0; loop < slow_work_min_threads; loop++) {
			atomic_inc(&slow_work_thread_count);
			p = kthread_run(slow_work_thread, NULL, "kslowd");
			if (IS_ERR(p))
				goto error;
		}
		printk(KERN_NOTICE "Slow work thread pool: Ready\n");
	}

	slow_work_user_count++;
	mutex_unlock(&slow_work_user_lock);
	return 0;

error:
	if (atomic_dec_and_test(&slow_work_thread_count))
		complete(&slow_work_last_thread_exited);
	if (loop > 0) {
		printk(KERN_ERR "Slow work thread pool:"
		       " Aborting startup on ENOMEM\n");
		slow_work_threads_should_exit = true;
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_ERR "Slow work thread pool: Aborted\n");
	}
	mutex_unlock(&slow_work_user_lock);
	return PTR_ERR(p);
}
EXPORT_SYMBOL(slow_work_register_user);
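/*
 * Illustration (not part of the original file): a module that wants to queue
 * slow work registers itself first, typically from its init routine.  The
 * function name my_module_init is hypothetical.
 *
 *	static int __init my_module_init(void)
 *	{
 *		int ret = slow_work_register_user(THIS_MODULE);
 *		if (ret < 0)
 *			return ret;
 *		...
 *		return 0;
 *	}
 */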
/*
 * wait for all outstanding items from the calling module to complete
 * - note that more items may be queued whilst we're waiting
 */
static void slow_work_wait_for_items(struct module *module)
{
	DECLARE_WAITQUEUE(myself, current);
	struct slow_work *work;
	int loop;

	mutex_lock(&slow_work_unreg_sync_lock);
	add_wait_queue(&slow_work_unreg_wq, &myself);

	for (;;) {
		spin_lock_irq(&slow_work_queue_lock);

		/* first of all, we wait for the last queued item in each list
		 * to be processed */
		list_for_each_entry_reverse(work, &vslow_work_queue, link) {
			if (work->owner == module) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				slow_work_unreg_work_item = work;
				goto do_wait;
			}
		}
		list_for_each_entry_reverse(work, &slow_work_queue, link) {
			if (work->owner == module) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				slow_work_unreg_work_item = work;
				goto do_wait;
			}
		}

		/* then we wait for the items being processed to finish */
		slow_work_unreg_module = module;
		for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
			if (slow_work_thread_processing[loop] == module)
				goto do_wait;
		}
		spin_unlock_irq(&slow_work_queue_lock);
		break; /* okay, we're done */

	do_wait:
		spin_unlock_irq(&slow_work_queue_lock);
		schedule();
		slow_work_unreg_work_item = NULL;
		slow_work_unreg_module = NULL;
	}

	remove_wait_queue(&slow_work_unreg_wq, &myself);
	mutex_unlock(&slow_work_unreg_sync_lock);
}
/**
 * slow_work_unregister_user - Unregister a user of the facility
 * @module: The module whose items should be cleared
 *
 * Unregister a user of the facility, killing all the threads if this was the
 * last one.
 *
 * This waits for all the work items belonging to the nominated module to go
 * away before proceeding.
 */
void slow_work_unregister_user(struct module *module)
{
	/* first of all, wait for all outstanding items from the calling module
	 * to complete */
	if (module)
		slow_work_wait_for_items(module);

	/* then we can actually go about shutting down the facility if need
	 * be */
	mutex_lock(&slow_work_user_lock);

	BUG_ON(slow_work_user_count <= 0);

	slow_work_user_count--;
	if (slow_work_user_count == 0) {
		printk(KERN_NOTICE "Slow work thread pool: Shutting down\n");
		slow_work_threads_should_exit = true;
		del_timer_sync(&slow_work_cull_timer);
		del_timer_sync(&slow_work_oom_timer);
		wake_up_all(&slow_work_thread_wq);
		wait_for_completion(&slow_work_last_thread_exited);
		printk(KERN_NOTICE "Slow work thread pool:"
		       " Shut down complete\n");
	}

	mutex_unlock(&slow_work_user_lock);
}
EXPORT_SYMBOL(slow_work_unregister_user);
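/*
 * Illustration (not part of the original file): the matching teardown,
 * typically from module exit, after cancelling any items the module still
 * owns.  The names my_module_exit and obj are hypothetical.
 *
 *	static void __exit my_module_exit(void)
 *	{
 *		slow_work_cancel(&obj->work);
 *		slow_work_unregister_user(THIS_MODULE);
 *	}
 */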
/*
 * Initialise the slow work facility
 */
static int __init init_slow_work(void)
{
	unsigned nr_cpus = num_possible_cpus();

	if (slow_work_max_threads < nr_cpus)
		slow_work_max_threads = nr_cpus;
	if (slow_work_max_max_threads < nr_cpus * 2)
		slow_work_max_max_threads = nr_cpus * 2;
	return 0;
}
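/*
 * Worked example (illustrative): with 8 possible CPUs, the default pool
 * ceiling (slow_work_max_threads = 4) is raised to 8 here; the sysctl upper
 * bound slow_work_max_max_threads already defaults to 255, so the second test
 * only matters if that default were lowered below nr_cpus * 2.
 */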
subsys_initcall(init_slow_work);