1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/net/sunrpc/xprt.c
4  *
5  *  This is a generic RPC call interface supporting congestion avoidance,
6  *  and asynchronous calls.
7  *
8  *  The interface works like this:
9  *
10  *  -   When a process places a call, it allocates a request slot if
11  *      one is available. Otherwise, it sleeps on the backlog queue
12  *      (xprt_reserve).
13  *  -   Next, the caller puts together the RPC message, stuffs it into
14  *      the request struct, and calls xprt_transmit().
15  *  -   xprt_transmit sends the message and installs the caller on the
16  *      transport's wait list. At the same time, if a reply is expected,
17  *      it installs a timer that is run after the packet's timeout has
18  *      expired.
19  *  -   When a packet arrives, the data_ready handler walks the list of
20  *      pending requests for that transport. If a matching XID is found, the
21  *      caller is woken up, and the timer removed.
22  *  -   When no reply arrives within the timeout interval, the timer is
23  *      fired by the kernel and runs xprt_timer(). It either adjusts the
24  *      timeout values (minor timeout) or wakes up the caller with a status
25  *      of -ETIMEDOUT.
26  *  -   When the caller receives a notification from RPC that a reply arrived,
27  *      it should release the RPC slot, and process the reply.
28  *      If the call timed out, it may choose to retry the operation by
29  *      adjusting the initial timeout value, and simply calling rpc_call
30  *      again.
31  *
32  *  Support for async RPC is done through a set of RPC-specific scheduling
33  *  primitives that 'transparently' work for processes as well as async
34  *  tasks that rely on callbacks.
35  *
36  *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
37  *
38  *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
39  */
40
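/*
 * Illustrative sketch (not part of the original file): the flow described
 * above, as seen by a synchronous caller. rpc_call_sync() reserves a slot,
 * encodes and transmits the message, and sleeps until the reply arrives or
 * the request times out. The client handle, procedure table and the
 * argument/result structures below are hypothetical:
 *
 *	struct rpc_message msg = {
 *		.rpc_proc = &example_procedures[EXAMPLEPROC_READ],
 *		.rpc_argp = &args,
 *		.rpc_resp = &res,
 *	};
 *	int status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT);
 *
 *	if (status == -ETIMEDOUT)
 *		status = rpc_call_sync(clnt, &msg, 0);	// retry, hard this time
 *	return status;
 */
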
41 #include <linux/module.h>
42
43 #include <linux/types.h>
44 #include <linux/interrupt.h>
45 #include <linux/workqueue.h>
46 #include <linux/net.h>
47 #include <linux/ktime.h>
48
49 #include <linux/sunrpc/clnt.h>
50 #include <linux/sunrpc/metrics.h>
51 #include <linux/sunrpc/bc_xprt.h>
52 #include <linux/rcupdate.h>
53 #include <linux/sched/mm.h>
54
55 #include <trace/events/sunrpc.h>
56
57 #include "sunrpc.h"
58
59 /*
60  * Local variables
61  */
62
63 #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
64 # define RPCDBG_FACILITY        RPCDBG_XPRT
65 #endif
66
67 /*
68  * Local functions
69  */
70 static void      xprt_init(struct rpc_xprt *xprt, struct net *net);
71 static __be32   xprt_alloc_xid(struct rpc_xprt *xprt);
72 static void      xprt_destroy(struct rpc_xprt *xprt);
73
74 static DEFINE_SPINLOCK(xprt_list_lock);
75 static LIST_HEAD(xprt_list);
76
77 static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
78 {
79         unsigned long timeout = jiffies + req->rq_timeout;
80
81         if (time_before(timeout, req->rq_majortimeo))
82                 return timeout;
83         return req->rq_majortimeo;
84 }
85
86 /**
87  * xprt_register_transport - register a transport implementation
88  * @transport: transport to register
89  *
90  * If a transport implementation is loaded as a kernel module, it can
91  * call this interface to make itself known to the RPC client.
92  *
93  * Returns:
94  * 0:           transport successfully registered
95  * -EEXIST:     transport already registered
96  * -EINVAL:     transport module being unloaded
97  */
98 int xprt_register_transport(struct xprt_class *transport)
99 {
100         struct xprt_class *t;
101         int result;
102
103         result = -EEXIST;
104         spin_lock(&xprt_list_lock);
105         list_for_each_entry(t, &xprt_list, list) {
106                 /* don't register the same transport class twice */
107                 if (t->ident == transport->ident)
108                         goto out;
109         }
110
111         list_add_tail(&transport->list, &xprt_list);
112         printk(KERN_INFO "RPC: Registered %s transport module.\n",
113                transport->name);
114         result = 0;
115
116 out:
117         spin_unlock(&xprt_list_lock);
118         return result;
119 }
120 EXPORT_SYMBOL_GPL(xprt_register_transport);
121
122 /**
123  * xprt_unregister_transport - unregister a transport implementation
124  * @transport: transport to unregister
125  *
126  * Returns:
127  * 0:           transport successfully unregistered
128  * -ENOENT:     transport never registered
129  */
130 int xprt_unregister_transport(struct xprt_class *transport)
131 {
132         struct xprt_class *t;
133         int result;
134
135         result = 0;
136         spin_lock(&xprt_list_lock);
137         list_for_each_entry(t, &xprt_list, list) {
138                 if (t == transport) {
139                         printk(KERN_INFO
140                                 "RPC: Unregistered %s transport module.\n",
141                                 transport->name);
142                         list_del_init(&transport->list);
143                         goto out;
144                 }
145         }
146         result = -ENOENT;
147
148 out:
149         spin_unlock(&xprt_list_lock);
150         return result;
151 }
152 EXPORT_SYMBOL_GPL(xprt_unregister_transport);
153
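/*
 * Typical usage (sketch only): a transport module declares an xprt_class
 * and registers it at module load. Everything named "example" below is
 * hypothetical; the fields shown are the ones this file consults
 * (->ident for duplicate detection above, ->owner and ->netid[] in the
 * lookup helpers below):
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_EXAMPLE,	// hypothetical ident
 *		.setup	= xs_setup_example,		// hypothetical ctor
 *		.netid	= { "example", "" },
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		xprt_unregister_transport(&example_transport);
 *	}
 */
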
154 static void
155 xprt_class_release(const struct xprt_class *t)
156 {
157         module_put(t->owner);
158 }
159
160 static const struct xprt_class *
161 xprt_class_find_by_ident_locked(int ident)
162 {
163         const struct xprt_class *t;
164
165         list_for_each_entry(t, &xprt_list, list) {
166                 if (t->ident != ident)
167                         continue;
168                 if (!try_module_get(t->owner))
169                         continue;
170                 return t;
171         }
172         return NULL;
173 }
174
175 static const struct xprt_class *
176 xprt_class_find_by_ident(int ident)
177 {
178         const struct xprt_class *t;
179
180         spin_lock(&xprt_list_lock);
181         t = xprt_class_find_by_ident_locked(ident);
182         spin_unlock(&xprt_list_lock);
183         return t;
184 }
185
186 static const struct xprt_class *
187 xprt_class_find_by_netid_locked(const char *netid)
188 {
189         const struct xprt_class *t;
190         unsigned int i;
191
192         list_for_each_entry(t, &xprt_list, list) {
193                 for (i = 0; t->netid[i][0] != '\0'; i++) {
194                         if (strcmp(t->netid[i], netid) != 0)
195                                 continue;
196                         if (!try_module_get(t->owner))
197                                 continue;
198                         return t;
199                 }
200         }
201         return NULL;
202 }
203
204 static const struct xprt_class *
205 xprt_class_find_by_netid(const char *netid)
206 {
207         const struct xprt_class *t;
208
209         spin_lock(&xprt_list_lock);
210         t = xprt_class_find_by_netid_locked(netid);
211         if (!t) {
212                 spin_unlock(&xprt_list_lock);
213                 request_module("rpc%s", netid);
214                 spin_lock(&xprt_list_lock);
215                 t = xprt_class_find_by_netid_locked(netid);
216         }
217         spin_unlock(&xprt_list_lock);
218         return t;
219 }
220
221 /**
222  * xprt_find_transport_ident - convert a netid into a transport identifier
223  * @netid: transport to load
224  *
225  * Returns:
226  * > 0:         transport identifier
227  * -ENOENT:     transport module not available
228  */
229 int xprt_find_transport_ident(const char *netid)
230 {
231         const struct xprt_class *t;
232         int ret;
233
234         t = xprt_class_find_by_netid(netid);
235         if (!t)
236                 return -ENOENT;
237         ret = t->ident;
238         xprt_class_release(t);
239         return ret;
240 }
241 EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
242
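/*
 * Example (sketch): an RPC consumer resolving a mount option's netid to a
 * transport identifier before creating a transport. "tcp" is the
 * conventional netid string for the stream transport; the args structure
 * is hypothetical:
 *
 *	int ident = xprt_find_transport_ident("tcp");
 *	if (ident < 0)
 *		return ident;		// -ENOENT: module not available
 *	xprtargs.ident = ident;
 */
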
243 static void xprt_clear_locked(struct rpc_xprt *xprt)
244 {
245         xprt->snd_task = NULL;
246         if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
247                 smp_mb__before_atomic();
248                 clear_bit(XPRT_LOCKED, &xprt->state);
249                 smp_mb__after_atomic();
250         } else
251                 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
252 }
253
254 /**
255  * xprt_reserve_xprt - serialize write access to transports
256  * @task: task that is requesting access to the transport
257  * @xprt: pointer to the target transport
258  *
259  * This prevents mixing the payload of separate requests, and prevents
260  * transport connects from colliding with writes.  No congestion control
261  * is provided.
262  */
263 int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
264 {
265         struct rpc_rqst *req = task->tk_rqstp;
266
267         if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
268                 if (task == xprt->snd_task)
269                         goto out_locked;
270                 goto out_sleep;
271         }
272         if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
273                 goto out_unlock;
274         xprt->snd_task = task;
275
276 out_locked:
277         trace_xprt_reserve_xprt(xprt, task);
278         return 1;
279
280 out_unlock:
281         xprt_clear_locked(xprt);
282 out_sleep:
283         task->tk_status = -EAGAIN;
284         if (RPC_IS_SOFT(task))
285                 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
286                                 xprt_request_timeout(req));
287         else
288                 rpc_sleep_on(&xprt->sending, task, NULL);
289         return 0;
290 }
291 EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
292
293 static bool
294 xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
295 {
296         return test_bit(XPRT_CWND_WAIT, &xprt->state);
297 }
298
299 static void
300 xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
301 {
302         if (!list_empty(&xprt->xmit_queue)) {
303                 /* Peek at head of queue to see if it can make progress */
304                 if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
305                                         rq_xmit)->rq_cong)
306                         return;
307         }
308         set_bit(XPRT_CWND_WAIT, &xprt->state);
309 }
310
311 static void
312 xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
313 {
314         if (!RPCXPRT_CONGESTED(xprt))
315                 clear_bit(XPRT_CWND_WAIT, &xprt->state);
316 }
317
318 /**
319  * xprt_reserve_xprt_cong - serialize write access to transports
320  * @task: task that is requesting access to the transport
321  *
322  * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
323  * integrated into the decision of whether a request is allowed to be
324  * woken up and given access to the transport.
325  * Note that the lock is only granted if the congestion window has room.
326  */
327 int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
328 {
329         struct rpc_rqst *req = task->tk_rqstp;
330
331         if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
332                 if (task == xprt->snd_task)
333                         goto out_locked;
334                 goto out_sleep;
335         }
336         if (req == NULL) {
337                 xprt->snd_task = task;
338                 goto out_locked;
339         }
340         if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
341                 goto out_unlock;
342         if (!xprt_need_congestion_window_wait(xprt)) {
343                 xprt->snd_task = task;
344                 goto out_locked;
345         }
346 out_unlock:
347         xprt_clear_locked(xprt);
348 out_sleep:
349         task->tk_status = -EAGAIN;
350         if (RPC_IS_SOFT(task))
351                 rpc_sleep_on_timeout(&xprt->sending, task, NULL,
352                                 xprt_request_timeout(req));
353         else
354                 rpc_sleep_on(&xprt->sending, task, NULL);
355         return 0;
356 out_locked:
357         trace_xprt_reserve_cong(xprt, task);
358         return 1;
359 }
360 EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
361
362 static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
363 {
364         int retval;
365
366         if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
367                 return 1;
368         spin_lock(&xprt->transport_lock);
369         retval = xprt->ops->reserve_xprt(xprt, task);
370         spin_unlock(&xprt->transport_lock);
371         return retval;
372 }
373
374 static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
375 {
376         struct rpc_xprt *xprt = data;
377
378         xprt->snd_task = task;
379         return true;
380 }
381
382 static void __xprt_lock_write_next(struct rpc_xprt *xprt)
383 {
384         if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
385                 return;
386         if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
387                 goto out_unlock;
388         if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
389                                 __xprt_lock_write_func, xprt))
390                 return;
391 out_unlock:
392         xprt_clear_locked(xprt);
393 }
394
395 static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
396 {
397         if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
398                 return;
399         if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
400                 goto out_unlock;
401         if (xprt_need_congestion_window_wait(xprt))
402                 goto out_unlock;
403         if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
404                                 __xprt_lock_write_func, xprt))
405                 return;
406 out_unlock:
407         xprt_clear_locked(xprt);
408 }
409
410 /**
411  * xprt_release_xprt - allow other requests to use a transport
412  * @xprt: transport with other tasks potentially waiting
413  * @task: task that is releasing access to the transport
414  *
415  * Note that "task" can be NULL.  No congestion control is provided.
416  */
417 void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
418 {
419         if (xprt->snd_task == task) {
420                 xprt_clear_locked(xprt);
421                 __xprt_lock_write_next(xprt);
422         }
423         trace_xprt_release_xprt(xprt, task);
424 }
425 EXPORT_SYMBOL_GPL(xprt_release_xprt);
426
427 /**
428  * xprt_release_xprt_cong - allow other requests to use a transport
429  * @xprt: transport with other tasks potentially waiting
430  * @task: task that is releasing access to the transport
431  *
432  * Note that "task" can be NULL.  Another task is awoken to use the
433  * transport if the transport's congestion window allows it.
434  */
435 void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
436 {
437         if (xprt->snd_task == task) {
438                 xprt_clear_locked(xprt);
439                 __xprt_lock_write_next_cong(xprt);
440         }
441         trace_xprt_release_cong(xprt, task);
442 }
443 EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);
444
445 static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
446 {
447         if (xprt->snd_task != task)
448                 return;
449         spin_lock(&xprt->transport_lock);
450         xprt->ops->release_xprt(xprt, task);
451         spin_unlock(&xprt->transport_lock);
452 }
453
454 /*
455  * Van Jacobson congestion avoidance. Check if the congestion window
456  * overflowed. Put the task to sleep if this is the case.
457  */
458 static int
459 __xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
460 {
461         if (req->rq_cong)
462                 return 1;
463         trace_xprt_get_cong(xprt, req->rq_task);
464         if (RPCXPRT_CONGESTED(xprt)) {
465                 xprt_set_congestion_window_wait(xprt);
466                 return 0;
467         }
468         req->rq_cong = 1;
469         xprt->cong += RPC_CWNDSCALE;
470         return 1;
471 }
472
473 /*
474  * Adjust the congestion window, and wake up the next task
475  * that has been sleeping due to congestion
476  */
477 static void
478 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
479 {
480         if (!req->rq_cong)
481                 return;
482         req->rq_cong = 0;
483         xprt->cong -= RPC_CWNDSCALE;
484         xprt_test_and_clear_congestion_window_wait(xprt);
485         trace_xprt_put_cong(xprt, req->rq_task);
486         __xprt_lock_write_next_cong(xprt);
487 }
488
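/*
 * Credit accounting above is in units of RPC_CWNDSCALE: a request in
 * flight holds RPC_CWNDSCALE units in xprt->cong, and the transport
 * counts as congested once xprt->cong >= xprt->cwnd (RPCXPRT_CONGESTED).
 * E.g. assuming the usual RPC_CWNDSCALE of 256 and cwnd = 512, two
 * requests can hold credits concurrently; a third call to
 * __xprt_get_cong() fails and puts the transport into congestion-window
 * wait.
 */
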
489 /**
490  * xprt_request_get_cong - Request congestion control credits
491  * @xprt: pointer to transport
492  * @req: pointer to RPC request
493  *
494  * Useful for transports that require congestion control.
495  */
496 bool
497 xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
498 {
499         bool ret = false;
500
501         if (req->rq_cong)
502                 return true;
503         spin_lock(&xprt->transport_lock);
504         ret = __xprt_get_cong(xprt, req) != 0;
505         spin_unlock(&xprt->transport_lock);
506         return ret;
507 }
508 EXPORT_SYMBOL_GPL(xprt_request_get_cong);
509
510 /**
511  * xprt_release_rqst_cong - housekeeping when request is complete
512  * @task: RPC request that recently completed
513  *
514  * Useful for transports that require congestion control.
515  */
516 void xprt_release_rqst_cong(struct rpc_task *task)
517 {
518         struct rpc_rqst *req = task->tk_rqstp;
519
520         __xprt_put_cong(req->rq_xprt, req);
521 }
522 EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);
523
524 static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
525 {
526         if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
527                 __xprt_lock_write_next_cong(xprt);
528 }
529
530 /*
531  * Clear the congestion window wait flag and wake up the next
532  * entry on xprt->sending
533  */
534 static void
535 xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
536 {
537         if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
538                 spin_lock(&xprt->transport_lock);
539                 __xprt_lock_write_next_cong(xprt);
540                 spin_unlock(&xprt->transport_lock);
541         }
542 }
543
544 /**
545  * xprt_adjust_cwnd - adjust transport congestion window
546  * @xprt: pointer to xprt
547  * @task: recently completed RPC request used to adjust window
548  * @result: result code of completed RPC request
549  *
550  * The transport code maintains an estimate of the maximum number of
551  * outstanding RPC requests, using a smoothed version of the congestion
552  * avoidance implemented in 44BSD. This is basically the Van Jacobson
553  * congestion algorithm: If a retransmit occurs, the congestion window is
554  * halved; otherwise, it is incremented by 1/cwnd when
555  *
556  *      -       a reply is received and
557  *      -       a full number of requests are outstanding and
558  *      -       the congestion window hasn't been updated recently.
559  */
560 void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
561 {
562         struct rpc_rqst *req = task->tk_rqstp;
563         unsigned long cwnd = xprt->cwnd;
564
565         if (result >= 0 && cwnd <= xprt->cong) {
566                 /* The (cwnd >> 1) term makes sure
567                  * the result gets rounded properly. */
568                 cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
569                 if (cwnd > RPC_MAXCWND(xprt))
570                         cwnd = RPC_MAXCWND(xprt);
571                 __xprt_lock_write_next_cong(xprt);
572         } else if (result == -ETIMEDOUT) {
573                 cwnd >>= 1;
574                 if (cwnd < RPC_CWNDSCALE)
575                         cwnd = RPC_CWNDSCALE;
576         }
577         dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
578                         xprt->cong, xprt->cwnd, cwnd);
579         xprt->cwnd = cwnd;
580         __xprt_put_cong(xprt, req);
581 }
582 EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
583
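/*
 * Worked example of the window arithmetic above, assuming the usual
 * RPC_CWNDSCALE of 256 (one request slot == 256 window units):
 *
 *	reply received, cwnd = 512, cong == cwnd:
 *		cwnd += (256 * 256 + (512 >> 1)) / 512;	// cwnd = 640
 *	i.e. each reply grows the window by roughly 1/cwnd of a slot
 *	(additive increase), up to RPC_MAXCWND().
 *
 *	request timed out, cwnd = 512:
 *		cwnd >>= 1;				// cwnd = 256
 *	i.e. the window halves (multiplicative decrease), but never drops
 *	below RPC_CWNDSCALE (one slot).
 */
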
584 /**
585  * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
586  * @xprt: transport with waiting tasks
587  * @status: result code to plant in each task before waking it
588  *
589  */
590 void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
591 {
592         if (status < 0)
593                 rpc_wake_up_status(&xprt->pending, status);
594         else
595                 rpc_wake_up(&xprt->pending);
596 }
597 EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
598
599 /**
600  * xprt_wait_for_buffer_space - wait for transport output buffer to clear
601  * @xprt: transport
602  *
603  * Note that we only set the timer for the case of RPC_IS_SOFT(), since
604  * we don't in general want to force a socket disconnection due to
605  * an incomplete RPC call transmission.
606  */
607 void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
608 {
609         set_bit(XPRT_WRITE_SPACE, &xprt->state);
610 }
611 EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
612
613 static bool
614 xprt_clear_write_space_locked(struct rpc_xprt *xprt)
615 {
616         if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
617                 __xprt_lock_write_next(xprt);
618                 dprintk("RPC:       write space: waking waiting task on "
619                                 "xprt %p\n", xprt);
620                 return true;
621         }
622         return false;
623 }
624
625 /**
626  * xprt_write_space - wake the task waiting for transport output buffer space
627  * @xprt: transport with waiting tasks
628  *
629  * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
630  */
631 bool xprt_write_space(struct rpc_xprt *xprt)
632 {
633         bool ret;
634
635         if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
636                 return false;
637         spin_lock(&xprt->transport_lock);
638         ret = xprt_clear_write_space_locked(xprt);
639         spin_unlock(&xprt->transport_lock);
640         return ret;
641 }
642 EXPORT_SYMBOL_GPL(xprt_write_space);
643
644 static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
645 {
646         s64 delta = ktime_to_ns(ktime_get() - abstime);
647         return likely(delta >= 0) ?
648                 jiffies - nsecs_to_jiffies(delta) :
649                 jiffies + nsecs_to_jiffies(-delta);
650 }
651
652 static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
653 {
654         const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
655         unsigned long majortimeo = req->rq_timeout;
656
657         if (to->to_exponential)
658                 majortimeo <<= to->to_retries;
659         else
660                 majortimeo += to->to_increment * to->to_retries;
661         if (majortimeo > to->to_maxval || majortimeo == 0)
662                 majortimeo = to->to_maxval;
663         return majortimeo;
664 }
665
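/*
 * Worked example for xprt_calc_majortimeo(), with hypothetical timeout
 * parameters to_initval = 5s, to_retries = 5, to_maxval = 30s:
 *
 *	linear backoff (to_increment = 5s):
 *		majortimeo = 5s + 5s * 5 = 30s
 *	exponential backoff (to_exponential set, to_retries = 3 instead):
 *		majortimeo = 5s << 3 = 40s, clamped to to_maxval = 30s
 *
 * The result is the total window within which the minor (per-retransmit)
 * timeouts may back off before xprt_adjust_timeout() declares a major
 * timeout.
 */
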
666 static void xprt_reset_majortimeo(struct rpc_rqst *req)
667 {
668         req->rq_majortimeo += xprt_calc_majortimeo(req);
669 }
670
671 static void xprt_reset_minortimeo(struct rpc_rqst *req)
672 {
673         req->rq_minortimeo += req->rq_timeout;
674 }
675
676 static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
677 {
678         unsigned long time_init;
679         struct rpc_xprt *xprt = req->rq_xprt;
680
681         if (likely(xprt && xprt_connected(xprt)))
682                 time_init = jiffies;
683         else
684                 time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
685         req->rq_timeout = task->tk_client->cl_timeout->to_initval;
686         req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
687         req->rq_minortimeo = time_init + req->rq_timeout;
688 }
689
690 /**
691  * xprt_adjust_timeout - adjust timeout values for next retransmit
692  * @req: RPC request containing parameters to use for the adjustment
693  *
694  */
695 int xprt_adjust_timeout(struct rpc_rqst *req)
696 {
697         struct rpc_xprt *xprt = req->rq_xprt;
698         const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
699         int status = 0;
700
701         if (time_before(jiffies, req->rq_minortimeo))
702                 return status;
703         if (time_before(jiffies, req->rq_majortimeo)) {
704                 if (to->to_exponential)
705                         req->rq_timeout <<= 1;
706                 else
707                         req->rq_timeout += to->to_increment;
708                 if (to->to_maxval && req->rq_timeout >= to->to_maxval)
709                         req->rq_timeout = to->to_maxval;
710                 req->rq_retries++;
711         } else {
712                 req->rq_timeout = to->to_initval;
713                 req->rq_retries = 0;
714                 xprt_reset_majortimeo(req);
715                 /* Reset the RTT counters == "slow start" */
716                 spin_lock(&xprt->transport_lock);
717                 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
718                 spin_unlock(&xprt->transport_lock);
719                 status = -ETIMEDOUT;
720         }
721         xprt_reset_minortimeo(req);
722
723         if (req->rq_timeout == 0) {
724                 printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
725                 req->rq_timeout = 5 * HZ;
726         }
727         return status;
728 }
729
730 static void xprt_autoclose(struct work_struct *work)
731 {
732         struct rpc_xprt *xprt =
733                 container_of(work, struct rpc_xprt, task_cleanup);
734         unsigned int pflags = memalloc_nofs_save();
735
736         trace_xprt_disconnect_auto(xprt);
737         clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
738         xprt->ops->close(xprt);
739         xprt_release_write(xprt, NULL);
740         wake_up_bit(&xprt->state, XPRT_LOCKED);
741         memalloc_nofs_restore(pflags);
742 }
743
744 /**
745  * xprt_disconnect_done - mark a transport as disconnected
746  * @xprt: transport to flag for disconnect
747  *
748  */
749 void xprt_disconnect_done(struct rpc_xprt *xprt)
750 {
751         trace_xprt_disconnect_done(xprt);
752         spin_lock(&xprt->transport_lock);
753         xprt_clear_connected(xprt);
754         xprt_clear_write_space_locked(xprt);
755         xprt_clear_congestion_window_wait_locked(xprt);
756         xprt_wake_pending_tasks(xprt, -ENOTCONN);
757         spin_unlock(&xprt->transport_lock);
758 }
759 EXPORT_SYMBOL_GPL(xprt_disconnect_done);
760
761 /**
762  * xprt_force_disconnect - force a transport to disconnect
763  * @xprt: transport to disconnect
764  *
765  */
766 void xprt_force_disconnect(struct rpc_xprt *xprt)
767 {
768         trace_xprt_disconnect_force(xprt);
769
770         /* Don't race with the test_bit() in xprt_clear_locked() */
771         spin_lock(&xprt->transport_lock);
772         set_bit(XPRT_CLOSE_WAIT, &xprt->state);
773         /* Try to schedule an autoclose RPC call */
774         if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
775                 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
776         else if (xprt->snd_task)
777                 rpc_wake_up_queued_task_set_status(&xprt->pending,
778                                 xprt->snd_task, -ENOTCONN);
779         spin_unlock(&xprt->transport_lock);
780 }
781 EXPORT_SYMBOL_GPL(xprt_force_disconnect);
782
783 static unsigned int
784 xprt_connect_cookie(struct rpc_xprt *xprt)
785 {
786         return READ_ONCE(xprt->connect_cookie);
787 }
788
789 static bool
790 xprt_request_retransmit_after_disconnect(struct rpc_task *task)
791 {
792         struct rpc_rqst *req = task->tk_rqstp;
793         struct rpc_xprt *xprt = req->rq_xprt;
794
795         return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
796                 !xprt_connected(xprt);
797 }
798
799 /**
800  * xprt_conditional_disconnect - force a transport to disconnect
801  * @xprt: transport to disconnect
802  * @cookie: 'connection cookie'
803  *
804  * This attempts to break the connection if and only if 'cookie' matches
805  * the current transport 'connection cookie'. It ensures that we don't
806  * try to break the connection more than once when we need to retransmit
807  * a batch of RPC requests.
808  *
809  */
810 void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
811 {
812         /* Don't race with the test_bit() in xprt_clear_locked() */
813         spin_lock(&xprt->transport_lock);
814         if (cookie != xprt->connect_cookie)
815                 goto out;
816         if (test_bit(XPRT_CLOSING, &xprt->state))
817                 goto out;
818         set_bit(XPRT_CLOSE_WAIT, &xprt->state);
819         /* Try to schedule an autoclose RPC call */
820         if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
821                 queue_work(xprtiod_workqueue, &xprt->task_cleanup);
822         xprt_wake_pending_tasks(xprt, -EAGAIN);
823 out:
824         spin_unlock(&xprt->transport_lock);
825 }
826
827 static bool
828 xprt_has_timer(const struct rpc_xprt *xprt)
829 {
830         return xprt->idle_timeout != 0;
831 }
832
833 static void
834 xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
835         __must_hold(&xprt->transport_lock)
836 {
837         xprt->last_used = jiffies;
838         if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
839                 mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
840 }
841
842 static void
843 xprt_init_autodisconnect(struct timer_list *t)
844 {
845         struct rpc_xprt *xprt = from_timer(xprt, t, timer);
846
847         if (!RB_EMPTY_ROOT(&xprt->recv_queue))
848                 return;
849         /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
850         xprt->last_used = jiffies;
851         if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
852                 return;
853         queue_work(xprtiod_workqueue, &xprt->task_cleanup);
854 }
855
856 bool xprt_lock_connect(struct rpc_xprt *xprt,
857                 struct rpc_task *task,
858                 void *cookie)
859 {
860         bool ret = false;
861
862         spin_lock(&xprt->transport_lock);
863         if (!test_bit(XPRT_LOCKED, &xprt->state))
864                 goto out;
865         if (xprt->snd_task != task)
866                 goto out;
867         xprt->snd_task = cookie;
868         ret = true;
869 out:
870         spin_unlock(&xprt->transport_lock);
871         return ret;
872 }
873
874 void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
875 {
876         spin_lock(&xprt->transport_lock);
877         if (xprt->snd_task != cookie)
878                 goto out;
879         if (!test_bit(XPRT_LOCKED, &xprt->state))
880                 goto out;
881         xprt->snd_task = NULL;
882         xprt->ops->release_xprt(xprt, NULL);
883         xprt_schedule_autodisconnect(xprt);
884 out:
885         spin_unlock(&xprt->transport_lock);
886         wake_up_bit(&xprt->state, XPRT_LOCKED);
887 }
888
889 /**
890  * xprt_connect - schedule a transport connect operation
891  * @task: RPC task that is requesting the connect
892  *
893  */
894 void xprt_connect(struct rpc_task *task)
895 {
896         struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;
897
898         trace_xprt_connect(xprt);
899
900         if (!xprt_bound(xprt)) {
901                 task->tk_status = -EAGAIN;
902                 return;
903         }
904         if (!xprt_lock_write(xprt, task))
905                 return;
906
907         if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
908                 trace_xprt_disconnect_cleanup(xprt);
909                 xprt->ops->close(xprt);
910         }
911
912         if (!xprt_connected(xprt)) {
913                 task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
914                 rpc_sleep_on_timeout(&xprt->pending, task, NULL,
915                                 xprt_request_timeout(task->tk_rqstp));
916
917                 if (test_bit(XPRT_CLOSING, &xprt->state))
918                         return;
919                 if (xprt_test_and_set_connecting(xprt))
920                         return;
921                 /* Race breaker */
922                 if (!xprt_connected(xprt)) {
923                         xprt->stat.connect_start = jiffies;
924                         xprt->ops->connect(xprt, task);
925                 } else {
926                         xprt_clear_connecting(xprt);
927                         task->tk_status = 0;
928                         rpc_wake_up_queued_task(&xprt->pending, task);
929                 }
930         }
931         xprt_release_write(xprt, task);
932 }
933
934 /**
935  * xprt_reconnect_delay - compute the wait before scheduling a connect
936  * @xprt: transport instance
937  *
938  */
939 unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
940 {
941         unsigned long start, now = jiffies;
942
943         start = xprt->stat.connect_start + xprt->reestablish_timeout;
944         if (time_after(start, now))
945                 return start - now;
946         return 0;
947 }
948 EXPORT_SYMBOL_GPL(xprt_reconnect_delay);
949
950 /**
951  * xprt_reconnect_backoff - compute the new re-establish timeout
952  * @xprt: transport instance
953  * @init_to: initial reestablish timeout
954  *
955  */
956 void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
957 {
958         xprt->reestablish_timeout <<= 1;
959         if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
960                 xprt->reestablish_timeout = xprt->max_reconnect_timeout;
961         if (xprt->reestablish_timeout < init_to)
962                 xprt->reestablish_timeout = init_to;
963 }
964 EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
965
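/*
 * Together, xprt_reconnect_delay() and xprt_reconnect_backoff() implement
 * a capped exponential reconnect backoff. E.g. (illustrative values) with
 * init_to = 3s and max_reconnect_timeout = 300s, successive connection
 * failures yield reestablish_timeout = 3s, 6s, 12s, ... capped at 300s,
 * and xprt_reconnect_delay() converts connect_start + reestablish_timeout
 * into a relative wait from the current time.
 */
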
966 enum xprt_xid_rb_cmp {
967         XID_RB_EQUAL,
968         XID_RB_LEFT,
969         XID_RB_RIGHT,
970 };
971 static enum xprt_xid_rb_cmp
972 xprt_xid_cmp(__be32 xid1, __be32 xid2)
973 {
974         if (xid1 == xid2)
975                 return XID_RB_EQUAL;
976         if ((__force u32)xid1 < (__force u32)xid2)
977                 return XID_RB_LEFT;
978         return XID_RB_RIGHT;
979 }
980
981 static struct rpc_rqst *
982 xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
983 {
984         struct rb_node *n = xprt->recv_queue.rb_node;
985         struct rpc_rqst *req;
986
987         while (n != NULL) {
988                 req = rb_entry(n, struct rpc_rqst, rq_recv);
989                 switch (xprt_xid_cmp(xid, req->rq_xid)) {
990                 case XID_RB_LEFT:
991                         n = n->rb_left;
992                         break;
993                 case XID_RB_RIGHT:
994                         n = n->rb_right;
995                         break;
996                 case XID_RB_EQUAL:
997                         return req;
998                 }
999         }
1000         return NULL;
1001 }
1002
1003 static void
1004 xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
1005 {
1006         struct rb_node **p = &xprt->recv_queue.rb_node;
1007         struct rb_node *n = NULL;
1008         struct rpc_rqst *req;
1009
1010         while (*p != NULL) {
1011                 n = *p;
1012                 req = rb_entry(n, struct rpc_rqst, rq_recv);
1013                 switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
1014                 case XID_RB_LEFT:
1015                         p = &n->rb_left;
1016                         break;
1017                 case XID_RB_RIGHT:
1018                         p = &n->rb_right;
1019                         break;
1020                 case XID_RB_EQUAL:
1021                         WARN_ON_ONCE(new != req);
1022                         return;
1023                 }
1024         }
1025         rb_link_node(&new->rq_recv, n, p);
1026         rb_insert_color(&new->rq_recv, &xprt->recv_queue);
1027 }
1028
1029 static void
1030 xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
1031 {
1032         rb_erase(&req->rq_recv, &xprt->recv_queue);
1033 }
1034
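/*
 * The receive queue is kept as an rbtree keyed by XID rather than a plain
 * list, so that reply matching in xprt_lookup_rqst() below stays O(log n)
 * even when many requests are outstanding on the transport.
 */
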
1035 /**
1036  * xprt_lookup_rqst - find an RPC request corresponding to an XID
1037  * @xprt: transport on which the original request was transmitted
1038  * @xid: RPC XID of incoming reply
1039  *
1040  * Caller holds xprt->queue_lock.
1041  */
1042 struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
1043 {
1044         struct rpc_rqst *entry;
1045
1046         entry = xprt_request_rb_find(xprt, xid);
1047         if (entry != NULL) {
1048                 trace_xprt_lookup_rqst(xprt, xid, 0);
1049                 entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
1050                 return entry;
1051         }
1052
1053         dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
1054                         ntohl(xid));
1055         trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
1056         xprt->stat.bad_xids++;
1057         return NULL;
1058 }
1059 EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
1060
1061 static bool
1062 xprt_is_pinned_rqst(struct rpc_rqst *req)
1063 {
1064         return atomic_read(&req->rq_pin) != 0;
1065 }
1066
1067 /**
1068  * xprt_pin_rqst - Pin a request on the transport receive list
1069  * @req: Request to pin
1070  *
1071  * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
1072  * so should be holding xprt->queue_lock.
1073  */
1074 void xprt_pin_rqst(struct rpc_rqst *req)
1075 {
1076         atomic_inc(&req->rq_pin);
1077 }
1078 EXPORT_SYMBOL_GPL(xprt_pin_rqst);
1079
1080 /**
1081  * xprt_unpin_rqst - Unpin a request on the transport receive list
1082  * @req: Request to unpin
1083  *
1084  * Caller should be holding xprt->queue_lock.
1085  */
1086 void xprt_unpin_rqst(struct rpc_rqst *req)
1087 {
1088         if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
1089                 atomic_dec(&req->rq_pin);
1090                 return;
1091         }
1092         if (atomic_dec_and_test(&req->rq_pin))
1093                 wake_up_var(&req->rq_pin);
1094 }
1095 EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
1096
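/*
 * Sketch of how a transport's data_ready path is expected to combine the
 * helpers above (compare the socket transports; error handling and the
 * reply-buffer copy are elided):
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req) {
 *		xprt_pin_rqst(req);
 *		spin_unlock(&xprt->queue_lock);
 *		// copy the reply into req->rq_private_buf
 *		spin_lock(&xprt->queue_lock);
 *		xprt_complete_rqst(req->rq_task, copied);
 *		xprt_unpin_rqst(req);
 *	}
 *	spin_unlock(&xprt->queue_lock);
 */
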
1097 static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
1098 {
1099         wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
1100 }
1101
1102 static bool
1103 xprt_request_data_received(struct rpc_task *task)
1104 {
1105         return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1106                 READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
1107 }
1108
1109 static bool
1110 xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
1111 {
1112         return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
1113                 READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
1114 }
1115
1116 /**
1117  * xprt_request_enqueue_receive - Add a request to the receive queue
1118  * @task: RPC task
1119  *
1120  */
1121 void
1122 xprt_request_enqueue_receive(struct rpc_task *task)
1123 {
1124         struct rpc_rqst *req = task->tk_rqstp;
1125         struct rpc_xprt *xprt = req->rq_xprt;
1126
1127         if (!xprt_request_need_enqueue_receive(task, req))
1128                 return;
1129
1130         xprt_request_prepare(task->tk_rqstp);
1131         spin_lock(&xprt->queue_lock);
1132
1133         /* Update the softirq receive buffer */
1134         memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
1135                         sizeof(req->rq_private_buf));
1136
1137         /* Add request to the receive list */
1138         xprt_request_rb_insert(xprt, req);
1139         set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
1140         spin_unlock(&xprt->queue_lock);
1141
1142         /* Turn off autodisconnect */
1143         del_singleshot_timer_sync(&xprt->timer);
1144 }
1145
1146 /**
1147  * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
1148  * @task: RPC task
1149  *
1150  * Caller must hold xprt->queue_lock.
1151  */
1152 static void
1153 xprt_request_dequeue_receive_locked(struct rpc_task *task)
1154 {
1155         struct rpc_rqst *req = task->tk_rqstp;
1156
1157         if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1158                 xprt_request_rb_remove(req->rq_xprt, req);
1159 }
1160
1161 /**
1162  * xprt_update_rtt - Update RPC RTT statistics
1163  * @task: RPC request that recently completed
1164  *
1165  * Caller holds xprt->queue_lock.
1166  */
1167 void xprt_update_rtt(struct rpc_task *task)
1168 {
1169         struct rpc_rqst *req = task->tk_rqstp;
1170         struct rpc_rtt *rtt = task->tk_client->cl_rtt;
1171         unsigned int timer = task->tk_msg.rpc_proc->p_timer;
1172         long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));
1173
1174         if (timer) {
1175                 if (req->rq_ntrans == 1)
1176                         rpc_update_rtt(rtt, timer, m);
1177                 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
1178         }
1179 }
1180 EXPORT_SYMBOL_GPL(xprt_update_rtt);
1181
1182 /**
1183  * xprt_complete_rqst - called when reply processing is complete
1184  * @task: RPC request that recently completed
1185  * @copied: actual number of bytes received from the transport
1186  *
1187  * Caller holds xprt->queue_lock.
1188  */
1189 void xprt_complete_rqst(struct rpc_task *task, int copied)
1190 {
1191         struct rpc_rqst *req = task->tk_rqstp;
1192         struct rpc_xprt *xprt = req->rq_xprt;
1193
1194         xprt->stat.recvs++;
1195
1196         req->rq_private_buf.len = copied;
1197         /* Ensure all writes are done before we update */
1198         /* req->rq_reply_bytes_recvd */
1199         smp_wmb();
1200         req->rq_reply_bytes_recvd = copied;
1201         xprt_request_dequeue_receive_locked(task);
1202         rpc_wake_up_queued_task(&xprt->pending, task);
1203 }
1204 EXPORT_SYMBOL_GPL(xprt_complete_rqst);
1205
1206 static void xprt_timer(struct rpc_task *task)
1207 {
1208         struct rpc_rqst *req = task->tk_rqstp;
1209         struct rpc_xprt *xprt = req->rq_xprt;
1210
1211         if (task->tk_status != -ETIMEDOUT)
1212                 return;
1213
1214         trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
1215         if (!req->rq_reply_bytes_recvd) {
1216                 if (xprt->ops->timer)
1217                         xprt->ops->timer(xprt, task);
1218         } else
1219                 task->tk_status = 0;
1220 }
1221
1222 /**
1223  * xprt_wait_for_reply_request_def - wait for reply
1224  * @task: pointer to rpc_task
1225  *
1226  * Set a request's retransmit timeout based on the transport's
1227  * default timeout parameters, and put the task to sleep on the
1228  * pending queue.  Used by transports that don't adjust the
1229  * retransmit timeout based on round-trip time estimation.
1230  */
1231 void xprt_wait_for_reply_request_def(struct rpc_task *task)
1232 {
1233         struct rpc_rqst *req = task->tk_rqstp;
1234
1235         rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1236                         xprt_request_timeout(req));
1237 }
1238 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);
1239
1240 /**
1241  * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
1242  * @task: pointer to rpc_task
1243  *
1244  * Set a request's retransmit timeout using the RTT estimator,
1245  * and put the task to sleep on the pending queue.
1246  */
1247 void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
1248 {
1249         int timer = task->tk_msg.rpc_proc->p_timer;
1250         struct rpc_clnt *clnt = task->tk_client;
1251         struct rpc_rtt *rtt = clnt->cl_rtt;
1252         struct rpc_rqst *req = task->tk_rqstp;
1253         unsigned long max_timeout = clnt->cl_timeout->to_maxval;
1254         unsigned long timeout;
1255
1256         timeout = rpc_calc_rto(rtt, timer);
1257         timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
1258         if (timeout > max_timeout || timeout == 0)
1259                 timeout = max_timeout;
1260         rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
1261                         jiffies + timeout);
1262 }
1263 EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
1264
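/*
 * Example for the RTT-based timeout above: if rpc_calc_rto() yields an
 * estimated RTO of 2 seconds and the request is on its second
 * retransmission (rq_retries = 2, rpc_ntimeo() = 0), then
 *	timeout = 2s << (0 + 2) = 8s,
 * clamped to the client's to_maxval.
 */
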
1265 /**
1266  * xprt_request_wait_receive - wait for the reply to an RPC request
1267  * @task: RPC task about to send a request
1268  *
1269  */
1270 void xprt_request_wait_receive(struct rpc_task *task)
1271 {
1272         struct rpc_rqst *req = task->tk_rqstp;
1273         struct rpc_xprt *xprt = req->rq_xprt;
1274
1275         if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
1276                 return;
1277         /*
1278          * Sleep on the pending queue if we're expecting a reply.
1279          * The spinlock ensures atomicity between the test of
1280          * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
1281          */
1282         spin_lock(&xprt->queue_lock);
1283         if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
1284                 xprt->ops->wait_for_reply_request(task);
1285                 /*
1286                  * Send an extra queue wakeup call if the
1287                  * connection was dropped in case the call to
1288                  * rpc_sleep_on() raced.
1289                  */
1290                 if (xprt_request_retransmit_after_disconnect(task))
1291                         rpc_wake_up_queued_task_set_status(&xprt->pending,
1292                                         task, -ENOTCONN);
1293         }
1294         spin_unlock(&xprt->queue_lock);
1295 }
1296
1297 static bool
1298 xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
1299 {
1300         return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1301 }
1302
1303 /**
1304  * xprt_request_enqueue_transmit - queue a task for transmission
1305  * @task: pointer to rpc_task
1306  *
1307  * Add a task to the transmission queue.
1308  */
1309 void
1310 xprt_request_enqueue_transmit(struct rpc_task *task)
1311 {
1312         struct rpc_rqst *pos, *req = task->tk_rqstp;
1313         struct rpc_xprt *xprt = req->rq_xprt;
1314
1315         if (xprt_request_need_enqueue_transmit(task, req)) {
1316                 req->rq_bytes_sent = 0;
1317                 spin_lock(&xprt->queue_lock);
1318                 /*
1319                  * Requests that carry congestion control credits are added
1320                  * to the head of the list to avoid starvation issues.
1321                  */
1322                 if (req->rq_cong) {
1323                         xprt_clear_congestion_window_wait(xprt);
1324                         list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1325                                 if (pos->rq_cong)
1326                                         continue;
1327                                 /* Note: req is added _before_ pos */
1328                                 list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1329                                 INIT_LIST_HEAD(&req->rq_xmit2);
1330                                 goto out;
1331                         }
1332                 } else if (RPC_IS_SWAPPER(task)) {
1333                         list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1334                                 if (pos->rq_cong || pos->rq_bytes_sent)
1335                                         continue;
1336                                 if (RPC_IS_SWAPPER(pos->rq_task))
1337                                         continue;
1338                                 /* Note: req is added _before_ pos */
1339                                 list_add_tail(&req->rq_xmit, &pos->rq_xmit);
1340                                 INIT_LIST_HEAD(&req->rq_xmit2);
1341                                 goto out;
1342                         }
1343                 } else if (!req->rq_seqno) {
1344                         list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
1345                                 if (pos->rq_task->tk_owner != task->tk_owner)
1346                                         continue;
1347                                 list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
1348                                 INIT_LIST_HEAD(&req->rq_xmit);
1349                                 goto out;
1350                         }
1351                 }
1352                 list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
1353                 INIT_LIST_HEAD(&req->rq_xmit2);
1354 out:
1355                 set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
1356                 spin_unlock(&xprt->queue_lock);
1357         }
1358 }
1359
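/*
 * Queue-ordering example for the above: if the xmit_queue currently holds
 * [C1, C2, P1, P2] (C* carrying congestion credits, P* plain) and a new
 * credited request C3 is enqueued, it is inserted ahead of P1, giving
 * [C1, C2, C3, P1, P2]; a plain request is simply appended. Requests with
 * rq_seqno == 0 instead chain onto an earlier request from the same
 * tk_owner via rq_xmit2, so they are transmitted back-to-back.
 */
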
1360 /**
1361  * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1362  * @task: pointer to rpc_task
1363  *
1364  * Remove a task from the transmission queue
1365  * Caller must hold xprt->queue_lock
1366  */
1367 static void
1368 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1369 {
1370         struct rpc_rqst *req = task->tk_rqstp;
1371
1372         if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1373                 return;
1374         if (!list_empty(&req->rq_xmit)) {
1375                 list_del(&req->rq_xmit);
1376                 if (!list_empty(&req->rq_xmit2)) {
1377                         struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1378                                         struct rpc_rqst, rq_xmit2);
1379                         list_del(&req->rq_xmit2);
1380                         list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1381                 }
1382         } else
1383                 list_del(&req->rq_xmit2);
1384 }
1385
1386 /**
1387  * xprt_request_dequeue_transmit - remove a task from the transmission queue
1388  * @task: pointer to rpc_task
1389  *
1390  * Remove a task from the transmission queue
1391  */
1392 static void
1393 xprt_request_dequeue_transmit(struct rpc_task *task)
1394 {
1395         struct rpc_rqst *req = task->tk_rqstp;
1396         struct rpc_xprt *xprt = req->rq_xprt;
1397
1398         spin_lock(&xprt->queue_lock);
1399         xprt_request_dequeue_transmit_locked(task);
1400         spin_unlock(&xprt->queue_lock);
1401 }
1402
1403 /**
1404  * xprt_request_dequeue_xprt - remove a task from the transmit and receive queues
1405  * @task: pointer to rpc_task
1406  *
1407  * Remove a task from the transmit and receive queues, and ensure that
1408  * it is not pinned by the receive work item.
1409  */
1410 void
1411 xprt_request_dequeue_xprt(struct rpc_task *task)
1412 {
1413         struct rpc_rqst *req = task->tk_rqstp;
1414         struct rpc_xprt *xprt = req->rq_xprt;
1415
1416         if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1417             test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1418             xprt_is_pinned_rqst(req)) {
1419                 spin_lock(&xprt->queue_lock);
1420                 xprt_request_dequeue_transmit_locked(task);
1421                 xprt_request_dequeue_receive_locked(task);
1422                 while (xprt_is_pinned_rqst(req)) {
1423                         set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1424                         spin_unlock(&xprt->queue_lock);
1425                         xprt_wait_on_pinned_rqst(req);
1426                         spin_lock(&xprt->queue_lock);
1427                         clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1428                 }
1429                 spin_unlock(&xprt->queue_lock);
1430         }
1431 }
1432
1433 /**
1434  * xprt_request_prepare - prepare an encoded request for transport
1435  * @req: pointer to rpc_rqst
1436  *
1437  * Calls into the transport layer to do whatever is needed to prepare
1438  * the request for transmission or receive.
1439  */
1440 void
1441 xprt_request_prepare(struct rpc_rqst *req)
1442 {
1443         struct rpc_xprt *xprt = req->rq_xprt;
1444
1445         if (xprt->ops->prepare_request)
1446                 xprt->ops->prepare_request(req);
1447 }
1448
1449 /**
1450  * xprt_request_need_retransmit - Test if a task needs retransmission
1451  * @task: pointer to rpc_task
1452  *
1453  * Test for whether a connection breakage requires the task to retransmit
1454  */
1455 bool
1456 xprt_request_need_retransmit(struct rpc_task *task)
1457 {
1458         return xprt_request_retransmit_after_disconnect(task);
1459 }
1460
1461 /**
1462  * xprt_prepare_transmit - reserve the transport before sending a request
1463  * @task: RPC task about to send a request
1464  *
1465  */
1466 bool xprt_prepare_transmit(struct rpc_task *task)
1467 {
1468         struct rpc_rqst *req = task->tk_rqstp;
1469         struct rpc_xprt *xprt = req->rq_xprt;
1470
1471         if (!xprt_lock_write(xprt, task)) {
1472                 trace_xprt_transmit_queued(xprt, task);
1473
1474                 /* Race breaker: another task may have transmitted our request */
1475                 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1476                         rpc_wake_up_queued_task_set_status(&xprt->sending,
1477                                         task, 0);
1478                 return false;
1479
1480         }
1481         return true;
1482 }
1483
1484 void xprt_end_transmit(struct rpc_task *task)
1485 {
1486         xprt_release_write(task->tk_rqstp->rq_xprt, task);
1487 }
1488
1489 /**
1490  * xprt_request_transmit - send an RPC request on a transport
1491  * @req: pointer to request to transmit
1492  * @snd_task: RPC task that owns the transport lock
1493  *
1494  * This performs the transmission of a single request.
1495  * Note that if the request is not the same as snd_task, then it
1496  * does need to be pinned.
1497  * Returns '0' on success.
1498  */
1499 static int
1500 xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1501 {
1502         struct rpc_xprt *xprt = req->rq_xprt;
1503         struct rpc_task *task = req->rq_task;
1504         unsigned int connect_cookie;
1505         int is_retrans = RPC_WAS_SENT(task);
1506         int status;
1507
1508         if (!req->rq_bytes_sent) {
1509                 if (xprt_request_data_received(task)) {
1510                         status = 0;
1511                         goto out_dequeue;
1512                 }
1513                 /* Verify that our message lies in the RPCSEC_GSS window */
1514                 if (rpcauth_xmit_need_reencode(task)) {
1515                         status = -EBADMSG;
1516                         goto out_dequeue;
1517                 }
1518                 if (RPC_SIGNALLED(task)) {
1519                         status = -ERESTARTSYS;
1520                         goto out_dequeue;
1521                 }
1522         }
1523
1524         /*
1525          * Update req->rq_ntrans before transmitting to avoid races with
1526          * xprt_update_rtt(), which needs to know that it is recording a
1527          * reply to the first transmission.
1528          */
1529         req->rq_ntrans++;
1530
1531         trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1532         connect_cookie = xprt->connect_cookie;
1533         status = xprt->ops->send_request(req);
1534         if (status != 0) {
1535                 req->rq_ntrans--;
1536                 trace_xprt_transmit(req, status);
1537                 return status;
1538         }
1539
1540         if (is_retrans)
1541                 task->tk_client->cl_stats->rpcretrans++;
1542
1543         xprt_inject_disconnect(xprt);
1544
1545         task->tk_flags |= RPC_TASK_SENT;
1546         spin_lock(&xprt->transport_lock);
1547
1548         xprt->stat.sends++;
1549         xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1550         xprt->stat.bklog_u += xprt->backlog.qlen;
1551         xprt->stat.sending_u += xprt->sending.qlen;
1552         xprt->stat.pending_u += xprt->pending.qlen;
1553         spin_unlock(&xprt->transport_lock);
1554
1555         req->rq_connect_cookie = connect_cookie;
1556 out_dequeue:
1557         trace_xprt_transmit(req, status);
1558         xprt_request_dequeue_transmit(task);
1559         rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1560         return status;
1561 }

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
        struct rpc_rqst *next, *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;
        int counter, status;

        spin_lock(&xprt->queue_lock);
        counter = 0;
        while (!list_empty(&xprt->xmit_queue)) {
                if (++counter == 20)
                        break;
                next = list_first_entry(&xprt->xmit_queue,
                                struct rpc_rqst, rq_xmit);
                xprt_pin_rqst(next);
                spin_unlock(&xprt->queue_lock);
                status = xprt_request_transmit(next, task);
                if (status == -EBADMSG && next != req)
                        status = 0;
                spin_lock(&xprt->queue_lock);
                xprt_unpin_rqst(next);
                if (status == 0) {
                        if (!xprt_request_data_received(task) ||
                            test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
                                continue;
                } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
                        task->tk_status = status;
                break;
        }
        spin_unlock(&xprt->queue_lock);
}
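
/*
 * A userspace sketch (pthreads; demo_* names hypothetical) of the
 * pin/unlock/work/relock pattern in the loop above: the head entry is
 * pinned so it cannot be freed, the queue lock is dropped while the
 * entry is worked on, and the list head is re-read after relocking.
 * The bounded counter mirrors the "++counter == 20" escape above, which
 * keeps one task from monopolising the queue.
 *
 *      #include <pthread.h>
 *      #include <stddef.h>
 *
 *      struct demo_entry {
 *              struct demo_entry *next;
 *              int pins;               // nonzero: entry must not be freed
 *      };
 *
 *      static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
 *      static struct demo_entry *demo_head;
 *
 *      static void demo_drain(int (*work)(struct demo_entry *))
 *      {
 *              int counter = 0;
 *
 *              pthread_mutex_lock(&demo_lock);
 *              while (demo_head != NULL && ++counter < 20) {
 *                      struct demo_entry *e = demo_head;
 *
 *                      e->pins++;              // pin before dropping the lock
 *                      pthread_mutex_unlock(&demo_lock);
 *                      work(e);                // expected to dequeue e on success
 *                      pthread_mutex_lock(&demo_lock);
 *                      e->pins--;              // unpin; loop re-reads demo_head
 *              }
 *              pthread_mutex_unlock(&demo_lock);
 *      }
 */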

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
        set_bit(XPRT_CONGESTED, &xprt->state);
        rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
        if (rpc_wake_up_next(&xprt->backlog) == NULL)
                clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
        bool ret = false;

        if (!test_bit(XPRT_CONGESTED, &xprt->state))
                goto out;
        spin_lock(&xprt->reserve_lock);
        if (test_bit(XPRT_CONGESTED, &xprt->state)) {
                rpc_sleep_on(&xprt->backlog, task, NULL);
                ret = true;
        }
        spin_unlock(&xprt->reserve_lock);
out:
        return ret;
}
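
/*
 * The lockless test_bit() above is only a fast path; the flag is tested
 * again under reserve_lock before the task is put to sleep, which closes
 * the race with xprt_wake_up_backlog() clearing the bit.  A userspace
 * sketch of the same check/lock/recheck shape (demo_* names
 * hypothetical):
 *
 *      #include <pthread.h>
 *      #include <stdatomic.h>
 *      #include <stdbool.h>
 *
 *      static atomic_bool demo_congested;
 *      static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *      static bool demo_throttle(void (*sleep_on_backlog)(void))
 *      {
 *              bool ret = false;
 *
 *              if (!atomic_load(&demo_congested))
 *                      return false;                   // cheap, lock-free exit
 *              pthread_mutex_lock(&demo_lock);
 *              if (atomic_load(&demo_congested)) {     // recheck under the lock
 *                      sleep_on_backlog();
 *                      ret = true;
 *              }
 *              pthread_mutex_unlock(&demo_lock);
 *              return ret;
 *      }
 */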

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
        struct rpc_rqst *req = ERR_PTR(-EAGAIN);

        if (xprt->num_reqs >= xprt->max_reqs)
                goto out;
        ++xprt->num_reqs;
        spin_unlock(&xprt->reserve_lock);
        req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
        spin_lock(&xprt->reserve_lock);
        if (req != NULL)
                goto out;
        --xprt->num_reqs;
        req = ERR_PTR(-ENOMEM);
out:
        return req;
}
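
/*
 * Note the shape above: the slot count is reserved while reserve_lock is
 * held, the lock is dropped across the (possibly sleeping) kzalloc(),
 * and the reservation is rolled back if the allocation fails.  A
 * self-contained userspace sketch of that reserve/drop/rollback idiom
 * (demo_* names hypothetical):
 *
 *      #include <pthread.h>
 *      #include <stdlib.h>
 *
 *      static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
 *      static unsigned int demo_num, demo_max = 16;
 *
 *      // Called with demo_lock held; returns with demo_lock held.
 *      static void *demo_alloc_slot(size_t size)
 *      {
 *              void *p;
 *
 *              if (demo_num >= demo_max)
 *                      return NULL;            // pool exhausted (-EAGAIN above)
 *              ++demo_num;                     // reserve the slot first
 *              pthread_mutex_unlock(&demo_lock);
 *              p = calloc(1, size);            // may block; lock not held
 *              pthread_mutex_lock(&demo_lock);
 *              if (p == NULL)
 *                      --demo_num;             // roll the reservation back
 *              return p;
 *      }
 */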

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (xprt->num_reqs > xprt->min_reqs) {
                --xprt->num_reqs;
                kfree(req);
                return true;
        }
        return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req;

        spin_lock(&xprt->reserve_lock);
        if (!list_empty(&xprt->free)) {
                req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
                goto out_init_req;
        }
        req = xprt_dynamic_alloc_slot(xprt);
        if (!IS_ERR(req))
                goto out_init_req;
        switch (PTR_ERR(req)) {
        case -ENOMEM:
                dprintk("RPC:       dynamic allocation of request slot "
                                "failed! Retrying\n");
                task->tk_status = -ENOMEM;
                break;
        case -EAGAIN:
                xprt_add_backlog(xprt, task);
                dprintk("RPC:       waiting for request slot\n");
                fallthrough;
        default:
                task->tk_status = -EAGAIN;
        }
        spin_unlock(&xprt->reserve_lock);
        return;
out_init_req:
        xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
                                     xprt->num_reqs);
        spin_unlock(&xprt->reserve_lock);

        task->tk_status = 0;
        task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        spin_lock(&xprt->reserve_lock);
        if (!xprt_dynamic_free_slot(xprt, req)) {
                memset(req, 0, sizeof(*req));   /* mark unused */
                list_add(&req->rq_list, &xprt->free);
        }
        xprt_wake_up_backlog(xprt);
        spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
        struct rpc_rqst *req;

        while (!list_empty(&xprt->free)) {
                req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
                kfree(req);
        }
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
                unsigned int num_prealloc,
                unsigned int max_alloc)
{
        struct rpc_xprt *xprt;
        struct rpc_rqst *req;
        int i;

        xprt = kzalloc(size, GFP_KERNEL);
        if (xprt == NULL)
                goto out;

        xprt_init(xprt, net);

        for (i = 0; i < num_prealloc; i++) {
                req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
                if (!req)
                        goto out_free;
                list_add(&req->rq_list, &xprt->free);
        }
        if (max_alloc > num_prealloc)
                xprt->max_reqs = max_alloc;
        else
                xprt->max_reqs = num_prealloc;
        xprt->min_reqs = num_prealloc;
        xprt->num_reqs = num_prealloc;

        return xprt;

out_free:
        xprt_free(xprt);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
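
/*
 * Slot accounting invariant: min_reqs <= num_reqs <= max_reqs, where
 * min_reqs slots are preallocated and kept for the transport's lifetime
 * while the pool may grow on demand up to max_reqs.  Freeing a slot
 * either destroys it (while above the floor, see xprt_dynamic_free_slot())
 * or recycles it onto the free list.  A compact sketch of the same
 * policy (demo_* names hypothetical):
 *
 *      #include <stdlib.h>
 *      #include <string.h>
 *
 *      struct demo_pool {
 *              unsigned int num, min, max;
 *              size_t slot_size;
 *      };
 *
 *      static void demo_pool_init(struct demo_pool *p, size_t slot_size,
 *                                 unsigned int prealloc, unsigned int max)
 *      {
 *              p->slot_size = slot_size;
 *              p->min = p->num = prealloc;
 *              p->max = (max > prealloc) ? max : prealloc;
 *      }
 *
 *      // Returns 1 if the slot was destroyed, 0 if it should be recycled.
 *      static int demo_pool_put(struct demo_pool *p, void *slot)
 *      {
 *              if (p->num > p->min) {
 *                      --p->num;               // shrink back toward the floor
 *                      free(slot);
 *                      return 1;
 *              }
 *              memset(slot, 0, p->slot_size);  // mark unused, keep for reuse
 *              return 0;
 *      }
 */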

void xprt_free(struct rpc_xprt *xprt)
{
        put_net(xprt->xprt_net);
        xprt_free_all_slots(xprt);
        kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
        req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
        __be32 xid;

        spin_lock(&xprt->reserve_lock);
        xid = (__force __be32)xprt->xid++;
        spin_unlock(&xprt->reserve_lock);
        return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
        xprt->xid = prandom_u32();
}
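
/*
 * XIDs start at a random value and then simply increment under
 * reserve_lock, so a transport never hands out a duplicate XID until
 * the 32-bit counter wraps.  A self-contained sketch of the same scheme
 * using a C11 atomic in place of the lock (demo_* names hypothetical):
 *
 *      #include <stdatomic.h>
 *      #include <stdint.h>
 *      #include <stdlib.h>
 *      #include <time.h>
 *
 *      static _Atomic uint32_t demo_xid;
 *
 *      static void demo_xid_init(void)
 *      {
 *              srandom((unsigned int)time(NULL));
 *              atomic_store(&demo_xid, (uint32_t)random());    // random start
 *      }
 *
 *      static uint32_t demo_xid_alloc(void)
 *      {
 *              // fetch-and-increment: unique until 2^32 allocations wrap
 *              return atomic_fetch_add(&demo_xid, 1);
 *      }
 */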

static void
xprt_request_init(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
        req->rq_xid     = xprt_alloc_xid(xprt);
        xprt_init_connect_cookie(req, xprt);
        req->rq_snd_buf.len = 0;
        req->rq_snd_buf.buflen = 0;
        req->rq_rcv_buf.len = 0;
        req->rq_rcv_buf.buflen = 0;
        req->rq_snd_buf.bvec = NULL;
        req->rq_rcv_buf.bvec = NULL;
        req->rq_release_snd_buf = NULL;
        xprt_init_majortimeo(task, req);

        trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
        xprt->ops->alloc_slot(xprt, task);
        if (task->tk_rqstp != NULL)
                xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = 0;
        if (task->tk_rqstp != NULL)
                return;

        task->tk_status = -EAGAIN;
        if (!xprt_throttle_congested(xprt, task))
                xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = 0;
        if (task->tk_rqstp != NULL)
                return;

        task->tk_status = -EAGAIN;
        xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
        struct rpc_xprt *xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        if (req == NULL) {
                if (task->tk_client) {
                        xprt = task->tk_xprt;
                        xprt_release_write(xprt, task);
                }
                return;
        }

        xprt = req->rq_xprt;
        xprt_request_dequeue_xprt(task);
        spin_lock(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        if (xprt->ops->release_request)
                xprt->ops->release_request(task);
        xprt_schedule_autodisconnect(xprt);
        spin_unlock(&xprt->transport_lock);
        if (req->rq_buffer)
                xprt->ops->buf_free(task);
        xprt_inject_disconnect(xprt);
        xdr_free_bvec(&req->rq_rcv_buf);
        xdr_free_bvec(&req->rq_snd_buf);
        if (req->rq_cred != NULL)
                put_rpccred(req->rq_cred);
        task->tk_rqstp = NULL;
        if (req->rq_release_snd_buf)
                req->rq_release_snd_buf(req);

        if (likely(!bc_prealloc(req)))
                xprt->ops->free_slot(xprt, req);
        else
                xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
        struct xdr_buf *xbufp = &req->rq_snd_buf;

        task->tk_rqstp = req;
        req->rq_task = task;
        xprt_init_connect_cookie(req, req->rq_xprt);
        /*
         * Set up the xdr_buf length.
         * This also indicates that the buffer is XDR encoded already.
         */
        xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
                xbufp->tail[0].iov_len;
}
#endif
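
/*
 * An xdr_buf carries its payload in up to three regions: a head iovec,
 * a run of page-cache pages, and a tail iovec; the total length is
 * always the sum of the three, as materialised above.  A schematic
 * userspace analogue (demo_* names hypothetical):
 *
 *      #include <stddef.h>
 *
 *      struct demo_kvec { void *base; size_t iov_len; };
 *
 *      struct demo_xdr_buf {
 *              struct demo_kvec head[1];       // inline header bytes
 *              size_t page_len;                // bytes held in pages
 *              struct demo_kvec tail[1];       // inline trailer bytes
 *              size_t len;                     // total of all three regions
 *      };
 *
 *      static void demo_set_len(struct demo_xdr_buf *b)
 *      {
 *              b->len = b->head[0].iov_len + b->page_len +
 *                       b->tail[0].iov_len;
 *      }
 */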

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
        kref_init(&xprt->kref);

        spin_lock_init(&xprt->transport_lock);
        spin_lock_init(&xprt->reserve_lock);
        spin_lock_init(&xprt->queue_lock);

        INIT_LIST_HEAD(&xprt->free);
        xprt->recv_queue = RB_ROOT;
        INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        spin_lock_init(&xprt->bc_pa_lock);
        INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
        INIT_LIST_HEAD(&xprt->xprt_switch);

        xprt->last_used = jiffies;
        xprt->cwnd = RPC_INITCWND;
        xprt->bind_index = 0;

        rpc_init_wait_queue(&xprt->binding, "xprt_binding");
        rpc_init_wait_queue(&xprt->pending, "xprt_pending");
        rpc_init_wait_queue(&xprt->sending, "xprt_sending");
        rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

        xprt_init_xid(xprt);

        xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
        struct rpc_xprt *xprt;
        const struct xprt_class *t;

        t = xprt_class_find_by_ident(args->ident);
        if (!t) {
                dprintk("RPC: transport (%d) not supported\n", args->ident);
                return ERR_PTR(-EIO);
        }

        xprt = t->setup(args);
        xprt_class_release(t);

        if (IS_ERR(xprt))
                goto out;
        if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
                xprt->idle_timeout = 0;
        INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
        if (xprt_has_timer(xprt))
                timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
        else
                timer_setup(&xprt->timer, NULL, 0);

        if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
                xprt_destroy(xprt);
                return ERR_PTR(-EINVAL);
        }
        xprt->servername = kstrdup(args->servername, GFP_KERNEL);
        if (xprt->servername == NULL) {
                xprt_destroy(xprt);
                return ERR_PTR(-ENOMEM);
        }

        rpc_xprt_debugfs_register(xprt);

        trace_xprt_create(xprt);
out:
        return xprt;
}
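
/*
 * Creation follows a find/setup/validate shape: look the transport class
 * up by ident, let its ->setup() build the transport, then validate the
 * remaining arguments and unwind completely (xprt_destroy()) on any late
 * failure.  A condensed sketch of that unwind-on-late-failure pattern
 * (demo_* names hypothetical):
 *
 *      #include <stddef.h>
 *      #include <string.h>
 *
 *      struct demo_xprt;
 *      struct demo_class {
 *              struct demo_xprt *(*setup)(void);
 *              void (*destroy)(struct demo_xprt *);
 *      };
 *
 *      static struct demo_xprt *demo_create(const struct demo_class *c,
 *                                           const char *name, size_t maxlen)
 *      {
 *              struct demo_xprt *x = c->setup();
 *
 *              if (x == NULL)
 *                      return NULL;
 *              if (strlen(name) > maxlen) {
 *                      c->destroy(x);  // late validation failure: undo setup
 *                      return NULL;
 *              }
 *              return x;
 *      }
 */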

static void xprt_destroy_cb(struct work_struct *work)
{
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);

        trace_xprt_destroy(xprt);

        rpc_xprt_debugfs_unregister(xprt);
        rpc_destroy_wait_queue(&xprt->binding);
        rpc_destroy_wait_queue(&xprt->pending);
        rpc_destroy_wait_queue(&xprt->sending);
        rpc_destroy_wait_queue(&xprt->backlog);
        kfree(xprt->servername);
        /*
         * Destroy any existing back channel
         */
        xprt_destroy_backchannel(xprt, UINT_MAX);

        /*
         * Tear down transport state and free the rpc_xprt
         */
        xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
        /*
         * Exclude transport connect/disconnect handlers and autoclose
         */
        wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

        del_timer_sync(&xprt->timer);

        /*
         * Destroy sockets etc from the system workqueue so they can
         * safely flush receive work running on rpciod.
         */
        INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
        schedule_work(&xprt->task_cleanup);
}
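
/*
 * Deferring the final teardown matters here: destroying from the system
 * workqueue lets socket shutdown flush receive work still running on
 * rpciod without deadlocking, and the task_cleanup work item is simply
 * re-pointed from xprt_autoclose to xprt_destroy_cb.  A userspace sketch
 * of handing destruction off to another context (demo_* names
 * hypothetical; a detached thread stands in for the workqueue):
 *
 *      #include <pthread.h>
 *
 *      static void *demo_destroy_cb(void *obj)
 *      {
 *              // runs in a context that may sleep and flush other workers
 *              return NULL;
 *      }
 *
 *      static void demo_destroy(void *obj)
 *      {
 *              pthread_t t;
 *
 *              pthread_create(&t, NULL, demo_destroy_cb, obj);
 *              pthread_detach(t);      // caller returns; teardown is async
 *      }
 */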

static void xprt_destroy_kref(struct kref *kref)
{
        xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
        if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
                return xprt;
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
        if (xprt != NULL)
                kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
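
/*
 * xprt_get()/xprt_put() are the usual "get unless already zero" pattern:
 * a lookup can race with the final put, so the get must fail once the
 * count has reached zero instead of resurrecting a dying object.  A C11
 * sketch of that rule (demo_* names hypothetical):
 *
 *      #include <stdatomic.h>
 *      #include <stdbool.h>
 *
 *      static bool demo_get_unless_zero(_Atomic unsigned int *ref)
 *      {
 *              unsigned int v = atomic_load(ref);
 *
 *              while (v != 0) {
 *                      // on failure, the CAS reloads v and we retry
 *                      if (atomic_compare_exchange_weak(ref, &v, v + 1))
 *                              return true;    // reference taken
 *              }
 *              return false;   // object is already being destroyed
 *      }
 *
 *      static void demo_put(_Atomic unsigned int *ref,
 *                           void (*release)(void))
 *      {
 *              if (atomic_fetch_sub(ref, 1) == 1)
 *                      release();      // dropped the last reference
 *      }
 */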