net/sunrpc/sched.c (linux-2.6-microblaze.git)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/net/sunrpc/sched.c
4  *
5  * Scheduling for synchronous and asynchronous RPC requests.
6  *
7  * Copyright (C) 1996 Olaf Kirch, <okir@monad.swb.de>
8  *
9  * TCP NFS related read + write fixes
10  * (C) 1999 Dave Airlie, University of Limerick, Ireland <airlied@linux.ie>
11  */
12
13 #include <linux/module.h>
14
15 #include <linux/sched.h>
16 #include <linux/interrupt.h>
17 #include <linux/slab.h>
18 #include <linux/mempool.h>
19 #include <linux/smp.h>
20 #include <linux/spinlock.h>
21 #include <linux/mutex.h>
22 #include <linux/freezer.h>
23 #include <linux/sched/mm.h>
24
25 #include <linux/sunrpc/clnt.h>
26 #include <linux/sunrpc/metrics.h>
27
28 #include "sunrpc.h"
29
30 #define CREATE_TRACE_POINTS
31 #include <trace/events/sunrpc.h>
32
33 /*
34  * RPC slabs and memory pools
35  */
36 #define RPC_BUFFER_MAXSIZE      (2048)
37 #define RPC_BUFFER_POOLSIZE     (8)
38 #define RPC_TASK_POOLSIZE       (8)
39 static struct kmem_cache        *rpc_task_slabp __read_mostly;
40 static struct kmem_cache        *rpc_buffer_slabp __read_mostly;
41 static mempool_t        *rpc_task_mempool __read_mostly;
42 static mempool_t        *rpc_buffer_mempool __read_mostly;
43
44 static void                     rpc_async_schedule(struct work_struct *);
45 static void                      rpc_release_task(struct rpc_task *task);
46 static void __rpc_queue_timer_fn(struct work_struct *);
47
48 /*
49  * RPC tasks sit here while waiting for conditions to improve.
50  */
51 static struct rpc_wait_queue delay_queue;
52
53 /*
54  * rpciod-related stuff
55  */
56 struct workqueue_struct *rpciod_workqueue __read_mostly;
57 struct workqueue_struct *xprtiod_workqueue __read_mostly;
58 EXPORT_SYMBOL_GPL(xprtiod_workqueue);
59
60 unsigned long
61 rpc_task_timeout(const struct rpc_task *task)
62 {
63         unsigned long timeout = READ_ONCE(task->tk_timeout);
64
65         if (timeout != 0) {
66                 unsigned long now = jiffies;
67                 if (time_before(now, timeout))
68                         return timeout - now;
69         }
70         return 0;
71 }
72 EXPORT_SYMBOL_GPL(rpc_task_timeout);
73
74 /*
75  * Disable the timer for a given RPC task. Should be called with
76  * the queue->lock held and bh disabled in order to avoid races within
77  * rpc_run_timer().
78  */
79 static void
80 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
81 {
82         if (list_empty(&task->u.tk_wait.timer_list))
83                 return;
84         task->tk_timeout = 0;
85         list_del(&task->u.tk_wait.timer_list);
86         if (list_empty(&queue->timer_list.list))
87                 cancel_delayed_work(&queue->timer_list.dwork);
88 }
89
90 static void
91 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
92 {
93         unsigned long now = jiffies;
94         queue->timer_list.expires = expires;
95         if (time_before_eq(expires, now))
96                 expires = 0;
97         else
98                 expires -= now;
99         mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires);
100 }
101
102 /*
103  * Set up a timer for the current task.
104  */
105 static void
106 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task,
107                 unsigned long timeout)
108 {
109         task->tk_timeout = timeout;
110         if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires))
111                 rpc_set_queue_timer(queue, timeout);
112         list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
113 }
114
115 static void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int priority)
116 {
117         if (queue->priority != priority) {
118                 queue->priority = priority;
119                 queue->nr = 1U << priority;
120         }
121 }
122
123 static void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
124 {
125         rpc_set_waitqueue_priority(queue, queue->maxpriority);
126 }
127
128 /*
129  * Add a request to a queue list
130  */
131 static void
132 __rpc_list_enqueue_task(struct list_head *q, struct rpc_task *task)
133 {
134         struct rpc_task *t;
135
136         list_for_each_entry(t, q, u.tk_wait.list) {
137                 if (t->tk_owner == task->tk_owner) {
138                         list_add_tail(&task->u.tk_wait.links,
139                                         &t->u.tk_wait.links);
140                         /* Cache the queue head in task->u.tk_wait.list */
141                         task->u.tk_wait.list.next = q;
142                         task->u.tk_wait.list.prev = NULL;
143                         return;
144                 }
145         }
146         INIT_LIST_HEAD(&task->u.tk_wait.links);
147         list_add_tail(&task->u.tk_wait.list, q);
148 }
149
150 /*
151  * Remove request from a queue list
152  */
153 static void
154 __rpc_list_dequeue_task(struct rpc_task *task)
155 {
156         struct list_head *q;
157         struct rpc_task *t;
158
159         if (task->u.tk_wait.list.prev == NULL) {
160                 list_del(&task->u.tk_wait.links);
161                 return;
162         }
163         if (!list_empty(&task->u.tk_wait.links)) {
164                 t = list_first_entry(&task->u.tk_wait.links,
165                                 struct rpc_task,
166                                 u.tk_wait.links);
167                 /* Assume __rpc_list_enqueue_task() cached the queue head */
168                 q = t->u.tk_wait.list.next;
169                 list_add_tail(&t->u.tk_wait.list, q);
170                 list_del(&task->u.tk_wait.links);
171         }
172         list_del(&task->u.tk_wait.list);
173 }
174
175 /*
176  * Add new request to a priority queue.
177  */
178 static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue,
179                 struct rpc_task *task,
180                 unsigned char queue_priority)
181 {
182         if (unlikely(queue_priority > queue->maxpriority))
183                 queue_priority = queue->maxpriority;
184         __rpc_list_enqueue_task(&queue->tasks[queue_priority], task);
185 }
186
187 /*
188  * Add new request to wait queue.
189  *
190  * Swapper tasks always get inserted at the head of the queue.
191  * This should avoid many nasty memory deadlocks and hopefully
192  * improve overall performance.
193  * Everyone else gets appended to the queue to ensure proper FIFO behavior.
194  */
195 static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,
196                 struct rpc_task *task,
197                 unsigned char queue_priority)
198 {
199         INIT_LIST_HEAD(&task->u.tk_wait.timer_list);
200         if (RPC_IS_PRIORITY(queue))
201                 __rpc_add_wait_queue_priority(queue, task, queue_priority);
202         else if (RPC_IS_SWAPPER(task))
203                 list_add(&task->u.tk_wait.list, &queue->tasks[0]);
204         else
205                 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
206         task->tk_waitqueue = queue;
207         queue->qlen++;
208         /* barrier matches the read in rpc_wake_up_task_queue_locked() */
209         smp_wmb();
210         rpc_set_queued(task);
211 }
212
213 /*
214  * Remove request from a priority queue.
215  */
216 static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
217 {
218         __rpc_list_dequeue_task(task);
219 }
220
221 /*
222  * Remove request from queue.
223  * Note: must be called with spin lock held.
224  */
225 static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
226 {
227         __rpc_disable_timer(queue, task);
228         if (RPC_IS_PRIORITY(queue))
229                 __rpc_remove_wait_queue_priority(task);
230         else
231                 list_del(&task->u.tk_wait.list);
232         queue->qlen--;
233 }
234
235 static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
236 {
237         int i;
238
239         spin_lock_init(&queue->lock);
240         for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
241                 INIT_LIST_HEAD(&queue->tasks[i]);
242         queue->maxpriority = nr_queues - 1;
243         rpc_reset_waitqueue_priority(queue);
244         queue->qlen = 0;
245         queue->timer_list.expires = 0;
246         INIT_DELAYED_WORK(&queue->timer_list.dwork, __rpc_queue_timer_fn);
247         INIT_LIST_HEAD(&queue->timer_list.list);
248         rpc_assign_waitqueue_name(queue, qname);
249 }
250
251 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
252 {
253         __rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
254 }
255 EXPORT_SYMBOL_GPL(rpc_init_priority_wait_queue);
256
257 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
258 {
259         __rpc_init_priority_wait_queue(queue, qname, 1);
260 }
261 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
262
263 void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
264 {
265         cancel_delayed_work_sync(&queue->timer_list.dwork);
266 }
267 EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
268
269 static int rpc_wait_bit_killable(struct wait_bit_key *key, int mode)
270 {
271         freezable_schedule_unsafe();
272         if (signal_pending_state(mode, current))
273                 return -ERESTARTSYS;
274         return 0;
275 }
276
277 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG) || IS_ENABLED(CONFIG_TRACEPOINTS)
278 static void rpc_task_set_debuginfo(struct rpc_task *task)
279 {
280         static atomic_t rpc_pid;
281
282         task->tk_pid = atomic_inc_return(&rpc_pid);
283 }
284 #else
285 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
286 {
287 }
288 #endif
289
290 static void rpc_set_active(struct rpc_task *task)
291 {
292         rpc_task_set_debuginfo(task);
293         set_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
294         trace_rpc_task_begin(task, NULL);
295 }
296
297 /*
298  * Mark an RPC call as having completed by clearing the 'active' bit
299  * and then waking up all tasks that were sleeping.
300  */
301 static int rpc_complete_task(struct rpc_task *task)
302 {
303         void *m = &task->tk_runstate;
304         wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE);
305         struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE);
306         unsigned long flags;
307         int ret;
308
309         trace_rpc_task_complete(task, NULL);
310
311         spin_lock_irqsave(&wq->lock, flags);
312         clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate);
313         ret = atomic_dec_and_test(&task->tk_count);
314         if (waitqueue_active(wq))
315                 __wake_up_locked_key(wq, TASK_NORMAL, &k);
316         spin_unlock_irqrestore(&wq->lock, flags);
317         return ret;
318 }
319
320 /*
321  * Allow callers to wait for completion of an RPC call
322  *
323  * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit()
324  * to enforce taking of the wq->lock and hence avoid races with
325  * rpc_complete_task().
326  */
327 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *action)
328 {
329         if (action == NULL)
330                 action = rpc_wait_bit_killable;
331         return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE,
332                         action, TASK_KILLABLE);
333 }
334 EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
335
336 /*
337  * Make an RPC task runnable.
338  *
339  * Note: If the task is ASYNC, and is being made runnable after sitting on an
340  * rpc_wait_queue, this must be called with the queue spinlock held to protect
341  * the wait queue operation.
342  * Note the ordering of rpc_test_and_set_running() and rpc_clear_queued(),
343  * which is needed to ensure that __rpc_execute() doesn't loop (due to the
344  * lockless RPC_IS_QUEUED() test) before we've had a chance to test
345  * the RPC_TASK_RUNNING flag.
346  */
347 static void rpc_make_runnable(struct workqueue_struct *wq,
348                 struct rpc_task *task)
349 {
350         bool need_wakeup = !rpc_test_and_set_running(task);
351
352         rpc_clear_queued(task);
353         if (!need_wakeup)
354                 return;
355         if (RPC_IS_ASYNC(task)) {
356                 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
357                 queue_work(wq, &task->u.tk_work);
358         } else
359                 wake_up_bit(&task->tk_runstate, RPC_TASK_QUEUED);
360 }
361
362 /*
363  * Prepare for sleeping on a wait queue.
364  * By always appending tasks to the list we ensure FIFO behavior.
365  * NB: An RPC task will only receive interrupt-driven events as long
366  * as it's on a wait queue.
367  */
368 static void __rpc_do_sleep_on_priority(struct rpc_wait_queue *q,
369                 struct rpc_task *task,
370                 unsigned char queue_priority)
371 {
372         trace_rpc_task_sleep(task, q);
373
374         __rpc_add_wait_queue(q, task, queue_priority);
375 }
376
377 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q,
378                 struct rpc_task *task,
379                 unsigned char queue_priority)
380 {
381         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
382                 return;
383         __rpc_do_sleep_on_priority(q, task, queue_priority);
384 }
385
386 static void __rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
387                 struct rpc_task *task, unsigned long timeout,
388                 unsigned char queue_priority)
389 {
390         if (WARN_ON_ONCE(RPC_IS_QUEUED(task)))
391                 return;
392         if (time_is_after_jiffies(timeout)) {
393                 __rpc_do_sleep_on_priority(q, task, queue_priority);
394                 __rpc_add_timer(q, task, timeout);
395         } else
396                 task->tk_status = -ETIMEDOUT;
397 }
398
399 static void rpc_set_tk_callback(struct rpc_task *task, rpc_action action)
400 {
401         if (action && !WARN_ON_ONCE(task->tk_callback != NULL))
402                 task->tk_callback = action;
403 }
404
405 static bool rpc_sleep_check_activated(struct rpc_task *task)
406 {
407         /* We shouldn't ever put an inactive task to sleep */
408         if (WARN_ON_ONCE(!RPC_IS_ACTIVATED(task))) {
409                 task->tk_status = -EIO;
410                 rpc_put_task_async(task);
411                 return false;
412         }
413         return true;
414 }
415
416 void rpc_sleep_on_timeout(struct rpc_wait_queue *q, struct rpc_task *task,
417                                 rpc_action action, unsigned long timeout)
418 {
419         if (!rpc_sleep_check_activated(task))
420                 return;
421
422         rpc_set_tk_callback(task, action);
423
424         /*
425          * Protect the queue operations.
426          */
427         spin_lock(&q->lock);
428         __rpc_sleep_on_priority_timeout(q, task, timeout, task->tk_priority);
429         spin_unlock(&q->lock);
430 }
431 EXPORT_SYMBOL_GPL(rpc_sleep_on_timeout);
432
433 void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
434                                 rpc_action action)
435 {
436         if (!rpc_sleep_check_activated(task))
437                 return;
438
439         rpc_set_tk_callback(task, action);
440
441         WARN_ON_ONCE(task->tk_timeout != 0);
442         /*
443          * Protect the queue operations.
444          */
445         spin_lock(&q->lock);
446         __rpc_sleep_on_priority(q, task, task->tk_priority);
447         spin_unlock(&q->lock);
448 }
449 EXPORT_SYMBOL_GPL(rpc_sleep_on);
450
451 void rpc_sleep_on_priority_timeout(struct rpc_wait_queue *q,
452                 struct rpc_task *task, unsigned long timeout, int priority)
453 {
454         if (!rpc_sleep_check_activated(task))
455                 return;
456
457         priority -= RPC_PRIORITY_LOW;
458         /*
459          * Protect the queue operations.
460          */
461         spin_lock(&q->lock);
462         __rpc_sleep_on_priority_timeout(q, task, timeout, priority);
463         spin_unlock(&q->lock);
464 }
465 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority_timeout);
466
467 void rpc_sleep_on_priority(struct rpc_wait_queue *q, struct rpc_task *task,
468                 int priority)
469 {
470         if (!rpc_sleep_check_activated(task))
471                 return;
472
473         WARN_ON_ONCE(task->tk_timeout != 0);
474         priority -= RPC_PRIORITY_LOW;
475         /*
476          * Protect the queue operations.
477          */
478         spin_lock(&q->lock);
479         __rpc_sleep_on_priority(q, task, priority);
480         spin_unlock(&q->lock);
481 }
482 EXPORT_SYMBOL_GPL(rpc_sleep_on_priority);
483
484 /**
485  * __rpc_do_wake_up_task_on_wq - wake up a single rpc_task
486  * @wq: workqueue on which to run task
487  * @queue: wait queue
488  * @task: task to be woken up
489  *
490  * Caller must hold queue->lock, and have cleared the task queued flag.
491  */
492 static void __rpc_do_wake_up_task_on_wq(struct workqueue_struct *wq,
493                 struct rpc_wait_queue *queue,
494                 struct rpc_task *task)
495 {
496         /* Has the task been executed yet? If not, we cannot wake it up! */
497         if (!RPC_IS_ACTIVATED(task)) {
498                 printk(KERN_ERR "RPC: Inactive task (%p) being woken up!\n", task);
499                 return;
500         }
501
502         trace_rpc_task_wakeup(task, queue);
503
504         __rpc_remove_wait_queue(queue, task);
505
506         rpc_make_runnable(wq, task);
507 }
508
509 /*
510  * Wake up a queued task while the queue lock is being held
511  */
512 static struct rpc_task *
513 rpc_wake_up_task_on_wq_queue_action_locked(struct workqueue_struct *wq,
514                 struct rpc_wait_queue *queue, struct rpc_task *task,
515                 bool (*action)(struct rpc_task *, void *), void *data)
516 {
517         if (RPC_IS_QUEUED(task)) {
518                 smp_rmb();
519                 if (task->tk_waitqueue == queue) {
520                         if (action == NULL || action(task, data)) {
521                                 __rpc_do_wake_up_task_on_wq(wq, queue, task);
522                                 return task;
523                         }
524                 }
525         }
526         return NULL;
527 }
528
529 /*
530  * Wake up a queued task while the queue lock is being held
531  */
532 static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue,
533                                           struct rpc_task *task)
534 {
535         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
536                                                    task, NULL, NULL);
537 }
538
539 /*
540  * Wake up a task on a specific queue
541  */
542 void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
543 {
544         if (!RPC_IS_QUEUED(task))
545                 return;
546         spin_lock(&queue->lock);
547         rpc_wake_up_task_queue_locked(queue, task);
548         spin_unlock(&queue->lock);
549 }
550 EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
551
552 static bool rpc_task_action_set_status(struct rpc_task *task, void *status)
553 {
554         task->tk_status = *(int *)status;
555         return true;
556 }
557
558 static void
559 rpc_wake_up_task_queue_set_status_locked(struct rpc_wait_queue *queue,
560                 struct rpc_task *task, int status)
561 {
562         rpc_wake_up_task_on_wq_queue_action_locked(rpciod_workqueue, queue,
563                         task, rpc_task_action_set_status, &status);
564 }
565
566 /**
567  * rpc_wake_up_queued_task_set_status - wake up a task and set task->tk_status
568  * @queue: pointer to rpc_wait_queue
569  * @task: pointer to rpc_task
570  * @status: integer error value
571  *
572  * If @task is queued on @queue, then it is woken up, and @task->tk_status is
573  * set to the value of @status.
574  */
575 void
576 rpc_wake_up_queued_task_set_status(struct rpc_wait_queue *queue,
577                 struct rpc_task *task, int status)
578 {
579         if (!RPC_IS_QUEUED(task))
580                 return;
581         spin_lock(&queue->lock);
582         rpc_wake_up_task_queue_set_status_locked(queue, task, status);
583         spin_unlock(&queue->lock);
584 }
585
586 /*
587  * Wake up the next task on a priority queue.
588  */
589 static struct rpc_task *__rpc_find_next_queued_priority(struct rpc_wait_queue *queue)
590 {
591         struct list_head *q;
592         struct rpc_task *task;
593
594         /*
595          * Service a batch of tasks from a single owner.
596          */
597         q = &queue->tasks[queue->priority];
598         if (!list_empty(q) && --queue->nr) {
599                 task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
600                 goto out;
601         }
602
603         /*
604          * Service the next queue.
605          */
606         do {
607                 if (q == &queue->tasks[0])
608                         q = &queue->tasks[queue->maxpriority];
609                 else
610                         q = q - 1;
611                 if (!list_empty(q)) {
612                         task = list_first_entry(q, struct rpc_task, u.tk_wait.list);
613                         goto new_queue;
614                 }
615         } while (q != &queue->tasks[queue->priority]);
616
617         rpc_reset_waitqueue_priority(queue);
618         return NULL;
619
620 new_queue:
621         rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
622 out:
623         return task;
624 }
625
626 static struct rpc_task *__rpc_find_next_queued(struct rpc_wait_queue *queue)
627 {
628         if (RPC_IS_PRIORITY(queue))
629                 return __rpc_find_next_queued_priority(queue);
630         if (!list_empty(&queue->tasks[0]))
631                 return list_first_entry(&queue->tasks[0], struct rpc_task, u.tk_wait.list);
632         return NULL;
633 }
634
635 /*
636  * Wake up the first task on the wait queue.
637  */
638 struct rpc_task *rpc_wake_up_first_on_wq(struct workqueue_struct *wq,
639                 struct rpc_wait_queue *queue,
640                 bool (*func)(struct rpc_task *, void *), void *data)
641 {
642         struct rpc_task *task = NULL;
643
644         spin_lock(&queue->lock);
645         task = __rpc_find_next_queued(queue);
646         if (task != NULL)
647                 task = rpc_wake_up_task_on_wq_queue_action_locked(wq, queue,
648                                 task, func, data);
649         spin_unlock(&queue->lock);
650
651         return task;
652 }
653
654 /*
655  * Wake up the first task on the wait queue.
656  */
657 struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *queue,
658                 bool (*func)(struct rpc_task *, void *), void *data)
659 {
660         return rpc_wake_up_first_on_wq(rpciod_workqueue, queue, func, data);
661 }
662 EXPORT_SYMBOL_GPL(rpc_wake_up_first);
663
664 static bool rpc_wake_up_next_func(struct rpc_task *task, void *data)
665 {
666         return true;
667 }
668
669 /*
670  * Wake up the next task on the wait queue.
671  */
672 struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *queue)
673 {
674         return rpc_wake_up_first(queue, rpc_wake_up_next_func, NULL);
675 }
676 EXPORT_SYMBOL_GPL(rpc_wake_up_next);
677
678 /**
679  * rpc_wake_up_locked - wake up all rpc_tasks
680  * @queue: rpc_wait_queue on which the tasks are sleeping
681  *
682  */
683 static void rpc_wake_up_locked(struct rpc_wait_queue *queue)
684 {
685         struct rpc_task *task;
686
687         for (;;) {
688                 task = __rpc_find_next_queued(queue);
689                 if (task == NULL)
690                         break;
691                 rpc_wake_up_task_queue_locked(queue, task);
692         }
693 }
694
695 /**
696  * rpc_wake_up - wake up all rpc_tasks
697  * @queue: rpc_wait_queue on which the tasks are sleeping
698  *
699  * Grabs queue->lock
700  */
701 void rpc_wake_up(struct rpc_wait_queue *queue)
702 {
703         spin_lock(&queue->lock);
704         rpc_wake_up_locked(queue);
705         spin_unlock(&queue->lock);
706 }
707 EXPORT_SYMBOL_GPL(rpc_wake_up);
708
709 /**
710  * rpc_wake_up_status_locked - wake up all rpc_tasks and set their status value.
711  * @queue: rpc_wait_queue on which the tasks are sleeping
712  * @status: status value to set
713  */
714 static void rpc_wake_up_status_locked(struct rpc_wait_queue *queue, int status)
715 {
716         struct rpc_task *task;
717
718         for (;;) {
719                 task = __rpc_find_next_queued(queue);
720                 if (task == NULL)
721                         break;
722                 rpc_wake_up_task_queue_set_status_locked(queue, task, status);
723         }
724 }
725
726 /**
727  * rpc_wake_up_status - wake up all rpc_tasks and set their status value.
728  * @queue: rpc_wait_queue on which the tasks are sleeping
729  * @status: status value to set
730  *
731  * Grabs queue->lock
732  */
733 void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
734 {
735         spin_lock(&queue->lock);
736         rpc_wake_up_status_locked(queue, status);
737         spin_unlock(&queue->lock);
738 }
739 EXPORT_SYMBOL_GPL(rpc_wake_up_status);
740
741 static void __rpc_queue_timer_fn(struct work_struct *work)
742 {
743         struct rpc_wait_queue *queue = container_of(work,
744                         struct rpc_wait_queue,
745                         timer_list.dwork.work);
746         struct rpc_task *task, *n;
747         unsigned long expires, now, timeo;
748
749         spin_lock(&queue->lock);
750         expires = now = jiffies;
751         list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
752                 timeo = task->tk_timeout;
753                 if (time_after_eq(now, timeo)) {
754                         trace_rpc_task_timeout(task, task->tk_action);
755                         task->tk_status = -ETIMEDOUT;
756                         rpc_wake_up_task_queue_locked(queue, task);
757                         continue;
758                 }
759                 if (expires == now || time_after(expires, timeo))
760                         expires = timeo;
761         }
762         if (!list_empty(&queue->timer_list.list))
763                 rpc_set_queue_timer(queue, expires);
764         spin_unlock(&queue->lock);
765 }
766
767 static void __rpc_atrun(struct rpc_task *task)
768 {
769         if (task->tk_status == -ETIMEDOUT)
770                 task->tk_status = 0;
771 }
772
773 /*
774  * Run a task at a later time
775  */
776 void rpc_delay(struct rpc_task *task, unsigned long delay)
777 {
778         rpc_sleep_on_timeout(&delay_queue, task, __rpc_atrun, jiffies + delay);
779 }
780 EXPORT_SYMBOL_GPL(rpc_delay);
781
782 /*
783  * Helper to call task->tk_ops->rpc_call_prepare
784  */
785 void rpc_prepare_task(struct rpc_task *task)
786 {
787         task->tk_ops->rpc_call_prepare(task, task->tk_calldata);
788 }
789
790 static void
791 rpc_init_task_statistics(struct rpc_task *task)
792 {
793         /* Initialize retry counters */
794         task->tk_garb_retry = 2;
795         task->tk_cred_retry = 2;
796         task->tk_rebind_retry = 2;
797
798         /* starting timestamp */
799         task->tk_start = ktime_get();
800 }
801
802 static void
803 rpc_reset_task_statistics(struct rpc_task *task)
804 {
805         task->tk_timeouts = 0;
806         task->tk_flags &= ~(RPC_CALL_MAJORSEEN|RPC_TASK_SENT);
807         rpc_init_task_statistics(task);
808 }
809
810 /*
811  * Helper that calls task->tk_ops->rpc_call_done if it exists
812  */
813 void rpc_exit_task(struct rpc_task *task)
814 {
815         trace_rpc_task_end(task, task->tk_action);
816         task->tk_action = NULL;
817         if (task->tk_ops->rpc_count_stats)
818                 task->tk_ops->rpc_count_stats(task, task->tk_calldata);
819         else if (task->tk_client)
820                 rpc_count_iostats(task, task->tk_client->cl_metrics);
821         if (task->tk_ops->rpc_call_done != NULL) {
822                 task->tk_ops->rpc_call_done(task, task->tk_calldata);
823                 if (task->tk_action != NULL) {
824                         /* Always release the RPC slot and buffer memory */
825                         xprt_release(task);
826                         rpc_reset_task_statistics(task);
827                 }
828         }
829 }
830
831 void rpc_signal_task(struct rpc_task *task)
832 {
833         struct rpc_wait_queue *queue;
834
835         if (!RPC_IS_ACTIVATED(task))
836                 return;
837
838         trace_rpc_task_signalled(task, task->tk_action);
839         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
840         smp_mb__after_atomic();
841         queue = READ_ONCE(task->tk_waitqueue);
842         if (queue)
843                 rpc_wake_up_queued_task_set_status(queue, task, -ERESTARTSYS);
844 }
845
846 void rpc_exit(struct rpc_task *task, int status)
847 {
848         task->tk_status = status;
849         task->tk_action = rpc_exit_task;
850         rpc_wake_up_queued_task(task->tk_waitqueue, task);
851 }
852 EXPORT_SYMBOL_GPL(rpc_exit);
853
854 void rpc_release_calldata(const struct rpc_call_ops *ops, void *calldata)
855 {
856         if (ops->rpc_release != NULL)
857                 ops->rpc_release(calldata);
858 }
859
860 /*
861  * This is the RPC `scheduler' (or rather, the finite state machine).
862  */
863 static void __rpc_execute(struct rpc_task *task)
864 {
865         struct rpc_wait_queue *queue;
866         int task_is_async = RPC_IS_ASYNC(task);
867         int status = 0;
868
869         WARN_ON_ONCE(RPC_IS_QUEUED(task));
870         if (RPC_IS_QUEUED(task))
871                 return;
872
873         for (;;) {
874                 void (*do_action)(struct rpc_task *);
875
876                 /*
877                  * Perform the next FSM step or a pending callback.
878                  *
879                  * tk_action may be NULL if the task has been killed.
880                  * In particular, note that rpc_killall_tasks may
881                  * do this at any time, so beware when dereferencing.
882                  */
883                 do_action = task->tk_action;
884                 if (task->tk_callback) {
885                         do_action = task->tk_callback;
886                         task->tk_callback = NULL;
887                 }
888                 if (!do_action)
889                         break;
890                 trace_rpc_task_run_action(task, do_action);
891                 do_action(task);
892
893                 /*
894                  * Lockless check for whether task is sleeping or not.
895                  */
896                 if (!RPC_IS_QUEUED(task))
897                         continue;
898
899                 /*
900                  * Signalled tasks should exit rather than sleep.
901                  */
902                 if (RPC_SIGNALLED(task)) {
903                         task->tk_rpc_status = -ERESTARTSYS;
904                         rpc_exit(task, -ERESTARTSYS);
905                 }
906
907                 /*
908                  * The queue->lock protects against races with
909                  * rpc_make_runnable().
910                  *
911                  * Note that once we clear RPC_TASK_RUNNING on an asynchronous
912                  * rpc_task, rpc_make_runnable() can assign it to a
913                  * different workqueue. We therefore cannot assume that the
914                  * rpc_task pointer can still be safely dereferenced.
915                  */
916                 queue = task->tk_waitqueue;
917                 spin_lock(&queue->lock);
918                 if (!RPC_IS_QUEUED(task)) {
919                         spin_unlock(&queue->lock);
920                         continue;
921                 }
922                 rpc_clear_running(task);
923                 spin_unlock(&queue->lock);
924                 if (task_is_async)
925                         return;
926
927                 /* sync task: sleep here */
928                 trace_rpc_task_sync_sleep(task, task->tk_action);
929                 status = out_of_line_wait_on_bit(&task->tk_runstate,
930                                 RPC_TASK_QUEUED, rpc_wait_bit_killable,
931                                 TASK_KILLABLE);
932                 if (status < 0) {
933                         /*
934                          * When a sync task receives a signal, it exits with
935                          * -ERESTARTSYS. In order to catch any callbacks that
936                          * clean up after sleeping on some queue, we don't
937                          * break the loop here, but go around once more.
938                          */
939                         trace_rpc_task_signalled(task, task->tk_action);
940                         set_bit(RPC_TASK_SIGNALLED, &task->tk_runstate);
941                         task->tk_rpc_status = -ERESTARTSYS;
942                         rpc_exit(task, -ERESTARTSYS);
943                 }
944                 trace_rpc_task_sync_wake(task, task->tk_action);
945         }
946
947         /* Release all resources associated with the task */
948         rpc_release_task(task);
949 }
950
951 /*
952  * User-visible entry point to the scheduler.
953  *
954  * This may be called recursively if e.g. an async NFS task updates
955  * the attributes and finds that dirty pages must be flushed.
956  * NOTE: Upon exit of this function the task is guaranteed to be
957  *       released. In particular note that rpc_release_task() will have
958  *       been called, so your task memory may have been freed.
959  */
960 void rpc_execute(struct rpc_task *task)
961 {
962         bool is_async = RPC_IS_ASYNC(task);
963
964         rpc_set_active(task);
965         rpc_make_runnable(rpciod_workqueue, task);
966         if (!is_async)
967                 __rpc_execute(task);
968 }
969
970 static void rpc_async_schedule(struct work_struct *work)
971 {
972         unsigned int pflags = memalloc_nofs_save();
973
974         __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
975         memalloc_nofs_restore(pflags);
976 }
977
978 /**
979  * rpc_malloc - allocate RPC buffer resources
980  * @task: RPC task
981  *
982  * A single memory region is allocated, which is split between the
983  * RPC call and RPC reply that this task is being used for. When
984  * this RPC is retired, the memory is released by calling rpc_free.
985  *
986  * To prevent rpciod from hanging, this allocator never sleeps,
987  * returning -ENOMEM and suppressing allocation warnings if the request cannot
988  * be serviced immediately. The caller can arrange to sleep in a
989  * way that is safe for rpciod.
990  *
991  * Most requests are 'small' (under 2KiB) and can be serviced from a
992  * mempool, ensuring that NFS reads and writes can always proceed,
993  * and that there is good locality of reference for these buffers.
994  */
995 int rpc_malloc(struct rpc_task *task)
996 {
997         struct rpc_rqst *rqst = task->tk_rqstp;
998         size_t size = rqst->rq_callsize + rqst->rq_rcvsize;
999         struct rpc_buffer *buf;
1000         gfp_t gfp = GFP_NOFS;
1001
1002         if (RPC_IS_SWAPPER(task))
1003                 gfp = __GFP_MEMALLOC | GFP_NOWAIT | __GFP_NOWARN;
1004
1005         size += sizeof(struct rpc_buffer);
1006         if (size <= RPC_BUFFER_MAXSIZE)
1007                 buf = mempool_alloc(rpc_buffer_mempool, gfp);
1008         else
1009                 buf = kmalloc(size, gfp);
1010
1011         if (!buf)
1012                 return -ENOMEM;
1013
1014         buf->len = size;
1015         rqst->rq_buffer = buf->data;
1016         rqst->rq_rbuffer = (char *)rqst->rq_buffer + rqst->rq_callsize;
1017         return 0;
1018 }
1019 EXPORT_SYMBOL_GPL(rpc_malloc);
1020
1021 /**
1022  * rpc_free - free RPC buffer resources allocated via rpc_malloc
1023  * @task: RPC task
1024  *
1025  */
1026 void rpc_free(struct rpc_task *task)
1027 {
1028         void *buffer = task->tk_rqstp->rq_buffer;
1029         size_t size;
1030         struct rpc_buffer *buf;
1031
1032         buf = container_of(buffer, struct rpc_buffer, data);
1033         size = buf->len;
1034
1035         if (size <= RPC_BUFFER_MAXSIZE)
1036                 mempool_free(buf, rpc_buffer_mempool);
1037         else
1038                 kfree(buf);
1039 }
1040 EXPORT_SYMBOL_GPL(rpc_free);
1041
1042 /*
1043  * Creation and deletion of RPC task structures
1044  */
1045 static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
1046 {
1047         memset(task, 0, sizeof(*task));
1048         atomic_set(&task->tk_count, 1);
1049         task->tk_flags  = task_setup_data->flags;
1050         task->tk_ops = task_setup_data->callback_ops;
1051         task->tk_calldata = task_setup_data->callback_data;
1052         INIT_LIST_HEAD(&task->tk_task);
1053
1054         task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
1055         task->tk_owner = current->tgid;
1056
1057         /* Initialize workqueue for async tasks */
1058         task->tk_workqueue = task_setup_data->workqueue;
1059
1060         task->tk_xprt = rpc_task_get_xprt(task_setup_data->rpc_client,
1061                         xprt_get(task_setup_data->rpc_xprt));
1062
1063         task->tk_op_cred = get_rpccred(task_setup_data->rpc_op_cred);
1064
1065         if (task->tk_ops->rpc_call_prepare != NULL)
1066                 task->tk_action = rpc_prepare_task;
1067
1068         rpc_init_task_statistics(task);
1069 }
1070
1071 static struct rpc_task *
1072 rpc_alloc_task(void)
1073 {
1074         return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
1075 }
1076
1077 /*
1078  * Create a new task for the specified client.
1079  */
1080 struct rpc_task *rpc_new_task(const struct rpc_task_setup *setup_data)
1081 {
1082         struct rpc_task *task = setup_data->task;
1083         unsigned short flags = 0;
1084
1085         if (task == NULL) {
1086                 task = rpc_alloc_task();
1087                 flags = RPC_TASK_DYNAMIC;
1088         }
1089
1090         rpc_init_task(task, setup_data);
1091         task->tk_flags |= flags;
1092         return task;
1093 }
1094
1095 /*
1096  * rpc_free_task - release rpc task and perform cleanups
1097  *
1098  * Note that we free up the rpc_task _after_ rpc_release_calldata()
1099  * in order to work around a workqueue dependency issue.
1100  *
1101  * Tejun Heo states:
1102  * "Workqueue currently considers two work items to be the same if they're
1103  * on the same address and won't execute them concurrently - ie. it
1104  * makes a work item which is queued again while being executed wait
1105  * for the previous execution to complete.
1106  *
1107  * If a work function frees the work item, and then waits for an event
1108  * which should be performed by another work item and *that* work item
1109  * recycles the freed work item, it can create a false dependency loop.
1110  * There really is no reliable way to detect this short of verifying
1111  * every memory free."
1112  *
1113  */
1114 static void rpc_free_task(struct rpc_task *task)
1115 {
1116         unsigned short tk_flags = task->tk_flags;
1117
1118         put_rpccred(task->tk_op_cred);
1119         rpc_release_calldata(task->tk_ops, task->tk_calldata);
1120
1121         if (tk_flags & RPC_TASK_DYNAMIC)
1122                 mempool_free(task, rpc_task_mempool);
1123 }
1124
1125 static void rpc_async_release(struct work_struct *work)
1126 {
1127         unsigned int pflags = memalloc_nofs_save();
1128
1129         rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
1130         memalloc_nofs_restore(pflags);
1131 }
1132
1133 static void rpc_release_resources_task(struct rpc_task *task)
1134 {
1135         xprt_release(task);
1136         if (task->tk_msg.rpc_cred) {
1137                 if (!(task->tk_flags & RPC_TASK_CRED_NOREF))
1138                         put_cred(task->tk_msg.rpc_cred);
1139                 task->tk_msg.rpc_cred = NULL;
1140         }
1141         rpc_task_release_client(task);
1142 }
1143
1144 static void rpc_final_put_task(struct rpc_task *task,
1145                 struct workqueue_struct *q)
1146 {
1147         if (q != NULL) {
1148                 INIT_WORK(&task->u.tk_work, rpc_async_release);
1149                 queue_work(q, &task->u.tk_work);
1150         } else
1151                 rpc_free_task(task);
1152 }
1153
1154 static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q)
1155 {
1156         if (atomic_dec_and_test(&task->tk_count)) {
1157                 rpc_release_resources_task(task);
1158                 rpc_final_put_task(task, q);
1159         }
1160 }
1161
1162 void rpc_put_task(struct rpc_task *task)
1163 {
1164         rpc_do_put_task(task, NULL);
1165 }
1166 EXPORT_SYMBOL_GPL(rpc_put_task);
1167
1168 void rpc_put_task_async(struct rpc_task *task)
1169 {
1170         rpc_do_put_task(task, task->tk_workqueue);
1171 }
1172 EXPORT_SYMBOL_GPL(rpc_put_task_async);
1173
1174 static void rpc_release_task(struct rpc_task *task)
1175 {
1176         WARN_ON_ONCE(RPC_IS_QUEUED(task));
1177
1178         rpc_release_resources_task(task);
1179
1180         /*
1181          * Note: at this point we have been removed from rpc_clnt->cl_tasks,
1182          * so it should be safe to use task->tk_count as a test for whether
1183          * or not any other processes still hold references to our rpc_task.
1184          */
1185         if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) {
1186                 /* Wake up anyone who may be waiting for task completion */
1187                 if (!rpc_complete_task(task))
1188                         return;
1189         } else {
1190                 if (!atomic_dec_and_test(&task->tk_count))
1191                         return;
1192         }
1193         rpc_final_put_task(task, task->tk_workqueue);
1194 }
1195
1196 int rpciod_up(void)
1197 {
1198         return try_module_get(THIS_MODULE) ? 0 : -EINVAL;
1199 }
1200
1201 void rpciod_down(void)
1202 {
1203         module_put(THIS_MODULE);
1204 }
1205
1206 /*
1207  * Start up the rpciod workqueue.
1208  */
1209 static int rpciod_start(void)
1210 {
1211         struct workqueue_struct *wq;
1212
1213         /*
1214          * Create the rpciod thread and wait for it to start.
1215          */
1216         wq = alloc_workqueue("rpciod", WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
1217         if (!wq)
1218                 goto out_failed;
1219         rpciod_workqueue = wq;
1220         /* Note: highpri because network receive is latency sensitive */
1221         wq = alloc_workqueue("xprtiod", WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_HIGHPRI, 0);
1222         if (!wq)
1223                 goto free_rpciod;
1224         xprtiod_workqueue = wq;
1225         return 1;
1226 free_rpciod:
1227         wq = rpciod_workqueue;
1228         rpciod_workqueue = NULL;
1229         destroy_workqueue(wq);
1230 out_failed:
1231         return 0;
1232 }
1233
1234 static void rpciod_stop(void)
1235 {
1236         struct workqueue_struct *wq = NULL;
1237
1238         if (rpciod_workqueue == NULL)
1239                 return;
1240
1241         wq = rpciod_workqueue;
1242         rpciod_workqueue = NULL;
1243         destroy_workqueue(wq);
1244         wq = xprtiod_workqueue;
1245         xprtiod_workqueue = NULL;
1246         destroy_workqueue(wq);
1247 }
1248
1249 void
1250 rpc_destroy_mempool(void)
1251 {
1252         rpciod_stop();
1253         mempool_destroy(rpc_buffer_mempool);
1254         mempool_destroy(rpc_task_mempool);
1255         kmem_cache_destroy(rpc_task_slabp);
1256         kmem_cache_destroy(rpc_buffer_slabp);
1257         rpc_destroy_wait_queue(&delay_queue);
1258 }
1259
1260 int
1261 rpc_init_mempool(void)
1262 {
1263         /*
1264          * The following is not strictly a mempool initialisation,
1265          * but there is no harm in doing it here
1266          */
1267         rpc_init_wait_queue(&delay_queue, "delayq");
1268         if (!rpciod_start())
1269                 goto err_nomem;
1270
1271         rpc_task_slabp = kmem_cache_create("rpc_tasks",
1272                                              sizeof(struct rpc_task),
1273                                              0, SLAB_HWCACHE_ALIGN,
1274                                              NULL);
1275         if (!rpc_task_slabp)
1276                 goto err_nomem;
1277         rpc_buffer_slabp = kmem_cache_create("rpc_buffers",
1278                                              RPC_BUFFER_MAXSIZE,
1279                                              0, SLAB_HWCACHE_ALIGN,
1280                                              NULL);
1281         if (!rpc_buffer_slabp)
1282                 goto err_nomem;
1283         rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE,
1284                                                     rpc_task_slabp);
1285         if (!rpc_task_mempool)
1286                 goto err_nomem;
1287         rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE,
1288                                                       rpc_buffer_slabp);
1289         if (!rpc_buffer_mempool)
1290                 goto err_nomem;
1291         return 0;
1292 err_nomem:
1293         rpc_destroy_mempool();
1294         return -ENOMEM;
1295 }