// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -   When a process places a call, it allocates a request slot if
 *      one is available. Otherwise, it sleeps on the backlog queue
 *      (xprt_reserve).
 *  -   Next, the caller puts together the RPC message, stuffs it into
 *      the request struct, and calls xprt_transmit().
 *  -   xprt_transmit sends the message and installs the caller on the
 *      transport's wait list. At the same time, if a reply is expected,
 *      it installs a timer that is run after the packet's timeout has
 *      expired.
 *  -   When a packet arrives, the data_ready handler walks the list of
 *      pending requests for that transport. If a matching XID is found, the
 *      caller is woken up, and the timer removed.
 *  -   When no reply arrives within the timeout interval, the timer is
 *      fired by the kernel and runs xprt_timer(). It either adjusts the
 *      timeout values (minor timeout) or wakes up the caller with a status
 *      of -ETIMEDOUT.
 *  -   When the caller receives a notification from RPC that a reply arrived,
 *      it should release the RPC slot, and process the reply.
 *      If the call timed out, it may choose to retry the operation by
 *      adjusting the initial timeout value, and simply calling rpc_call
 *      again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void      xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32   xprt_alloc_xid(struct rpc_xprt *xprt);
static void      xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
        unsigned long timeout = jiffies + req->rq_timeout;

        if (time_before(timeout, req->rq_majortimeo))
                return timeout;
        return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:           transport successfully registered
 * -EEXIST:     transport already registered
 * -EINVAL:     transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = -EEXIST;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                /* don't register the same transport class twice */
                if (t->ident == transport->ident)
                        goto out;
        }

        list_add_tail(&transport->list, &xprt_list);
        printk(KERN_INFO "RPC: Registered %s transport module.\n",
               transport->name);
        result = 0;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
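
/*
 * Example (illustrative sketch, not part of this file): a transport
 * module typically registers its struct xprt_class from module_init()
 * and unregisters it again on module exit. The identifiers below are
 * hypothetical.
 *
 *      static struct xprt_class my_transport = {
 *              .list   = LIST_HEAD_INIT(my_transport.list),
 *              .name   = "mytransport",
 *              .owner  = THIS_MODULE,
 *              .ident  = MY_TRANSPORT_IDENT,
 *              .setup  = my_xprt_setup,
 *      };
 *
 *      static int __init my_transport_init(void)
 *      {
 *              return xprt_register_transport(&my_transport);
 *      }
 *
 *      static void __exit my_transport_exit(void)
 *      {
 *              xprt_unregister_transport(&my_transport);
 *      }
 *
 * See xprtsock.c for the in-tree socket transports that use this API.
 */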

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:           transport successfully unregistered
 * -ENOENT:     transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
        struct xprt_class *t;
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (t == transport) {
                        printk(KERN_INFO
                                "RPC: Unregistered %s transport module.\n",
                                transport->name);
                        list_del_init(&transport->list);
                        goto out;
                }
        }
        result = -ENOENT;

out:
        spin_unlock(&xprt_list_lock);
        return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:           transport successfully loaded
 * -ENOENT:     transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
        struct xprt_class *t;
        int result;

        result = 0;
        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (strcmp(t->name, transport_name) == 0) {
                        spin_unlock(&xprt_list_lock);
                        goto out;
                }
        }
        spin_unlock(&xprt_list_lock);
        result = request_module("xprt%s", transport_name);
out:
        return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
        xprt->snd_task = NULL;
        if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
                smp_mb__before_atomic();
                clear_bit(XPRT_LOCKED, &xprt->state);
                smp_mb__after_atomic();
        } else
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes.  No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        goto out_locked;
                goto out_sleep;
        }
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        xprt->snd_task = task;

out_locked:
        trace_xprt_reserve_xprt(xprt, task);
        return 1;

out_unlock:
        xprt_clear_locked(xprt);
out_sleep:
        task->tk_status = -EAGAIN;
        if (RPC_IS_SOFT(task))
                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
                                xprt_request_timeout(req));
        else
                rpc_sleep_on(&xprt->sending, task, NULL);
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
        return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
        if (!list_empty(&xprt->xmit_queue)) {
                /* Peek at head of queue to see if it can make progress */
                if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
                                        rq_xmit)->rq_cong)
                        return;
        }
        set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
        if (!RPCXPRT_CONGESTED(xprt))
                clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
                if (task == xprt->snd_task)
                        goto out_locked;
                goto out_sleep;
        }
        if (req == NULL) {
                xprt->snd_task = task;
                goto out_locked;
        }
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        if (!xprt_need_congestion_window_wait(xprt)) {
                xprt->snd_task = task;
                goto out_locked;
        }
out_unlock:
        xprt_clear_locked(xprt);
out_sleep:
        task->tk_status = -EAGAIN;
        if (RPC_IS_SOFT(task))
                rpc_sleep_on_timeout(&xprt->sending, task, NULL,
                                xprt_request_timeout(req));
        else
                rpc_sleep_on(&xprt->sending, task, NULL);
        return 0;
out_locked:
        trace_xprt_reserve_cong(xprt, task);
        return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        int retval;

        if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
                return 1;
        spin_lock(&xprt->transport_lock);
        retval = xprt->ops->reserve_xprt(xprt, task);
        spin_unlock(&xprt->transport_lock);
        return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
        struct rpc_xprt *xprt = data;

        xprt->snd_task = task;
        return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
                                __xprt_lock_write_func, xprt))
                return;
out_unlock:
        xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
                goto out_unlock;
        if (xprt_need_congestion_window_wait(xprt))
                goto out_unlock;
        if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
                                __xprt_lock_write_func, xprt))
                return;
out_unlock:
        xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next(xprt);
        }
        trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL.  Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task == task) {
                xprt_clear_locked(xprt);
                __xprt_lock_write_next_cong(xprt);
        }
        trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
        if (xprt->snd_task != task)
                return;
        spin_lock(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (req->rq_cong)
                return 1;
        trace_xprt_get_cong(xprt, req->rq_task);
        if (RPCXPRT_CONGESTED(xprt)) {
                xprt_set_congestion_window_wait(xprt);
                return 0;
        }
        req->rq_cong = 1;
        xprt->cong += RPC_CWNDSCALE;
        return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (!req->rq_cong)
                return;
        req->rq_cong = 0;
        xprt->cong -= RPC_CWNDSCALE;
        xprt_test_and_clear_congestion_window_wait(xprt);
        trace_xprt_put_cong(xprt, req->rq_task);
        __xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        bool ret = false;

        if (req->rq_cong)
                return true;
        spin_lock(&xprt->transport_lock);
        ret = __xprt_get_cong(xprt, req) != 0;
        spin_unlock(&xprt->transport_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        __xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
        if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
                __xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
        if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
                spin_lock(&xprt->transport_lock);
                __xprt_lock_write_next_cong(xprt);
                spin_unlock(&xprt->transport_lock);
        }
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *      -       a reply is received and
 *      -       a full number of requests are outstanding and
 *      -       the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long cwnd = xprt->cwnd;

        if (result >= 0 && cwnd <= xprt->cong) {
                /* The (cwnd >> 1) term makes sure
                 * the result gets rounded properly. */
                cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
                if (cwnd > RPC_MAXCWND(xprt))
                        cwnd = RPC_MAXCWND(xprt);
                __xprt_lock_write_next_cong(xprt);
        } else if (result == -ETIMEDOUT) {
                cwnd >>= 1;
                if (cwnd < RPC_CWNDSCALE)
                        cwnd = RPC_CWNDSCALE;
        }
        dprintk("RPC:       cong %ld, cwnd was %ld, now %ld\n",
                        xprt->cong, xprt->cwnd, cwnd);
        xprt->cwnd = cwnd;
        __xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
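
/*
 * Worked example (illustrative, assuming RPC_CWNDSCALE is 256 as in
 * include/linux/sunrpc/xprt.h): with cwnd = 512 (two credits), a good
 * reply grows the window by (256 * 256 + 256) / 512 = 128, i.e. roughly
 * RPC_CWNDSCALE/cwnd per reply, while an -ETIMEDOUT halves it to 256.
 * The window therefore never drops below one credit (RPC_CWNDSCALE) and
 * never exceeds RPC_MAXCWND(xprt).
 */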

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
        if (status < 0)
                rpc_wake_up_status(&xprt->pending, status);
        else
                rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
        set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
        if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
                __xprt_lock_write_next(xprt);
                dprintk("RPC:       write space: waking waiting task on "
                                "xprt %p\n", xprt);
                return true;
        }
        return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
        bool ret;

        if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
                return false;
        spin_lock(&xprt->transport_lock);
        ret = xprt_clear_write_space_locked(xprt);
        spin_unlock(&xprt->transport_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
        s64 delta = ktime_to_ns(ktime_get() - abstime);
        return likely(delta >= 0) ?
                jiffies - nsecs_to_jiffies(delta) :
                jiffies + nsecs_to_jiffies(-delta);
}

static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
        unsigned long majortimeo = req->rq_timeout;

        if (to->to_exponential)
                majortimeo <<= to->to_retries;
        else
                majortimeo += to->to_increment * to->to_retries;
        if (majortimeo > to->to_maxval || majortimeo == 0)
                majortimeo = to->to_maxval;
        return majortimeo;
}
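
/*
 * Worked example (hypothetical numbers): for rq_timeout = 10 s with
 * to_retries = 3 and linear backoff (to_increment = 10 s), the major
 * timeout falls 10 + 10 * 3 = 40 s after the first transmission; with
 * exponential backoff it would be 10 << 3 = 80 s. Either result is
 * clamped to to_maxval.
 */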

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
        req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
        unsigned long time_init;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (likely(xprt && xprt_connected(xprt)))
                time_init = jiffies;
        else
                time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
        req->rq_timeout = task->tk_client->cl_timeout->to_initval;
        req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;
        const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
        int status = 0;

        if (time_before(jiffies, req->rq_majortimeo)) {
                if (to->to_exponential)
                        req->rq_timeout <<= 1;
                else
                        req->rq_timeout += to->to_increment;
                if (to->to_maxval && req->rq_timeout >= to->to_maxval)
                        req->rq_timeout = to->to_maxval;
                req->rq_retries++;
        } else {
                req->rq_timeout = to->to_initval;
                req->rq_retries = 0;
                xprt_reset_majortimeo(req);
                /* Reset the RTT counters == "slow start" */
                spin_lock(&xprt->transport_lock);
                rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
                spin_unlock(&xprt->transport_lock);
                status = -ETIMEDOUT;
        }

        if (req->rq_timeout == 0) {
                printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
                req->rq_timeout = 5 * HZ;
        }
        return status;
}

static void xprt_autoclose(struct work_struct *work)
{
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);
        unsigned int pflags = memalloc_nofs_save();

        trace_xprt_disconnect_auto(xprt);
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        xprt->ops->close(xprt);
        xprt_release_write(xprt, NULL);
        wake_up_bit(&xprt->state, XPRT_LOCKED);
        memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
        trace_xprt_disconnect_done(xprt);
        spin_lock(&xprt->transport_lock);
        xprt_clear_connected(xprt);
        xprt_clear_write_space_locked(xprt);
        xprt_clear_congestion_window_wait_locked(xprt);
        xprt_wake_pending_tasks(xprt, -ENOTCONN);
        spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
        trace_xprt_disconnect_force(xprt);

        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock(&xprt->transport_lock);
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
        else if (xprt->snd_task)
                rpc_wake_up_queued_task_set_status(&xprt->pending,
                                xprt->snd_task, -ENOTCONN);
        spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
        return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
                !xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
        /* Don't race with the test_bit() in xprt_clear_locked() */
        spin_lock(&xprt->transport_lock);
        if (cookie != xprt->connect_cookie)
                goto out;
        if (test_bit(XPRT_CLOSING, &xprt->state))
                goto out;
        set_bit(XPRT_CLOSE_WAIT, &xprt->state);
        /* Try to schedule an autoclose RPC call */
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
                queue_work(xprtiod_workqueue, &xprt->task_cleanup);
        xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
        spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
        return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
        __must_hold(&xprt->transport_lock)
{
        xprt->last_used = jiffies;
        if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
                mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
        struct rpc_xprt *xprt = from_timer(xprt, t, timer);

        if (!RB_EMPTY_ROOT(&xprt->recv_queue))
                return;
        /* Reset xprt->last_used to avoid connect/autodisconnect cycling */
        xprt->last_used = jiffies;
        if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
                return;
        queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
                struct rpc_task *task,
                void *cookie)
{
        bool ret = false;

        spin_lock(&xprt->transport_lock);
        if (!test_bit(XPRT_LOCKED, &xprt->state))
                goto out;
        if (xprt->snd_task != task)
                goto out;
        xprt->snd_task = cookie;
        ret = true;
out:
        spin_unlock(&xprt->transport_lock);
        return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
        spin_lock(&xprt->transport_lock);
        if (xprt->snd_task != cookie)
                goto out;
        if (!test_bit(XPRT_LOCKED, &xprt->state))
                goto out;
        xprt->snd_task = NULL;
        xprt->ops->release_xprt(xprt, NULL);
        xprt_schedule_autodisconnect(xprt);
out:
        spin_unlock(&xprt->transport_lock);
        wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

        dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
                        xprt, (xprt_connected(xprt) ? "is" : "is not"));

        if (!xprt_bound(xprt)) {
                task->tk_status = -EAGAIN;
                return;
        }
        if (!xprt_lock_write(xprt, task))
                return;

        if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
                trace_xprt_disconnect_cleanup(xprt);
                xprt->ops->close(xprt);
        }

        if (!xprt_connected(xprt)) {
                task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
                rpc_sleep_on_timeout(&xprt->pending, task, NULL,
                                xprt_request_timeout(task->tk_rqstp));

                if (test_bit(XPRT_CLOSING, &xprt->state))
                        return;
                if (xprt_test_and_set_connecting(xprt))
                        return;
                /* Race breaker */
                if (!xprt_connected(xprt)) {
                        xprt->stat.connect_start = jiffies;
                        xprt->ops->connect(xprt, task);
                } else {
                        xprt_clear_connecting(xprt);
                        task->tk_status = 0;
                        rpc_wake_up_queued_task(&xprt->pending, task);
                }
        }
        xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
        unsigned long start, now = jiffies;

        start = xprt->stat.connect_start + xprt->reestablish_timeout;
        if (time_after(start, now))
                return start - now;
        return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
        xprt->reestablish_timeout <<= 1;
        if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
                xprt->reestablish_timeout = xprt->max_reconnect_timeout;
        if (xprt->reestablish_timeout < init_to)
                xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
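
/*
 * Example (hypothetical values): starting from reestablish_timeout = 3 s
 * with max_reconnect_timeout = 30 s, successive calls yield 6, 12, 24,
 * 30, 30, ... seconds. @init_to only acts as a floor when the stored
 * timeout has dropped below the initial value.
 */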

enum xprt_xid_rb_cmp {
        XID_RB_EQUAL,
        XID_RB_LEFT,
        XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
        if (xid1 == xid2)
                return XID_RB_EQUAL;
        if ((__force u32)xid1 < (__force u32)xid2)
                return XID_RB_LEFT;
        return XID_RB_RIGHT;
}
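
/*
 * Note: the receive queue below is ordered by the raw 32-bit value of
 * the XID after a __force cast. The ordering carries no protocol
 * meaning; it only needs to be consistent so that lookups in
 * xprt_request_rb_find() run in O(log n) rather than scanning a
 * linear list.
 */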

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
        struct rb_node *n = xprt->recv_queue.rb_node;
        struct rpc_rqst *req;

        while (n != NULL) {
                req = rb_entry(n, struct rpc_rqst, rq_recv);
                switch (xprt_xid_cmp(xid, req->rq_xid)) {
                case XID_RB_LEFT:
                        n = n->rb_left;
                        break;
                case XID_RB_RIGHT:
                        n = n->rb_right;
                        break;
                case XID_RB_EQUAL:
                        return req;
                }
        }
        return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
        struct rb_node **p = &xprt->recv_queue.rb_node;
        struct rb_node *n = NULL;
        struct rpc_rqst *req;

        while (*p != NULL) {
                n = *p;
                req = rb_entry(n, struct rpc_rqst, rq_recv);
                switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
                case XID_RB_LEFT:
                        p = &n->rb_left;
                        break;
                case XID_RB_RIGHT:
                        p = &n->rb_right;
                        break;
                case XID_RB_EQUAL:
                        WARN_ON_ONCE(new != req);
                        return;
                }
        }
        rb_link_node(&new->rq_recv, n, p);
        rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
        struct rpc_rqst *entry;

        entry = xprt_request_rb_find(xprt, xid);
        if (entry != NULL) {
                trace_xprt_lookup_rqst(xprt, xid, 0);
                entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
                return entry;
        }

        dprintk("RPC:       xprt_lookup_rqst did not find xid %08x\n",
                        ntohl(xid));
        trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
        xprt->stat.bad_xids++;
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
        return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
        atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
        if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
                atomic_dec(&req->rq_pin);
                return;
        }
        if (atomic_dec_and_test(&req->rq_pin))
                wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
        wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
        return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
                READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
        return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
                READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (!xprt_request_need_enqueue_receive(task, req))
                return;

        xprt_request_prepare(task->tk_rqstp);
        spin_lock(&xprt->queue_lock);

        /* Update the softirq receive buffer */
        memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                        sizeof(req->rq_private_buf));

        /* Add request to the receive list */
        xprt_request_rb_insert(xprt, req);
        set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
        spin_unlock(&xprt->queue_lock);

        /* Turn off autodisconnect */
        del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
                xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_rtt *rtt = task->tk_client->cl_rtt;
        unsigned int timer = task->tk_msg.rpc_proc->p_timer;
        long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

        if (timer) {
                if (req->rq_ntrans == 1)
                        rpc_update_rtt(rtt, timer, m);
                rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
        }
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);
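
/*
 * Note: the RTT sample above is taken only when rq_ntrans == 1, i.e.
 * when the reply matches a request that was never retransmitted. This
 * is Karn's algorithm: samples from retransmitted requests are
 * ambiguous (the reply may belong to any of the transmissions) and
 * would skew the estimator.
 */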

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

        xprt->stat.recvs++;

        req->rq_private_buf.len = copied;
        /* Ensure all writes are done before we update
         * req->rq_reply_bytes_recvd */
        smp_wmb();
        req->rq_reply_bytes_recvd = copied;
        xprt_request_dequeue_receive_locked(task);
        rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (task->tk_status != -ETIMEDOUT)
                return;

        trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
        if (!req->rq_reply_bytes_recvd) {
                if (xprt->ops->timer)
                        xprt->ops->timer(xprt, task);
        } else
                task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters, and put the task to sleep on the
 * pending queue. Used by transports that don't adjust the
 * retransmit timeout based on round-trip time estimation.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;

        rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
                        xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
        int timer = task->tk_msg.rpc_proc->p_timer;
        struct rpc_clnt *clnt = task->tk_client;
        struct rpc_rtt *rtt = clnt->cl_rtt;
        struct rpc_rqst *req = task->tk_rqstp;
        unsigned long max_timeout = clnt->cl_timeout->to_maxval;
        unsigned long timeout;

        timeout = rpc_calc_rto(rtt, timer);
        timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
        if (timeout > max_timeout || timeout == 0)
                timeout = max_timeout;
        rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
                        jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
        struct rpc_rqst *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
                return;
        /*
         * Sleep on the pending queue if we're expecting a reply.
         * The spinlock ensures atomicity between the test of
         * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
         */
        spin_lock(&xprt->queue_lock);
        if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
                xprt->ops->wait_for_reply_request(task);
                /*
                 * Send an extra queue wakeup call if the
                 * connection was dropped in case the call to
                 * rpc_sleep_on() raced.
                 */
                if (xprt_request_retransmit_after_disconnect(task))
                        rpc_wake_up_queued_task_set_status(&xprt->pending,
                                        task, -ENOTCONN);
        }
        spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
        return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
        struct rpc_rqst *pos, *req = task->tk_rqstp;
        struct rpc_xprt *xprt = req->rq_xprt;

        if (xprt_request_need_enqueue_transmit(task, req)) {
                req->rq_bytes_sent = 0;
                spin_lock(&xprt->queue_lock);
                /*
                 * Requests that carry congestion control credits are added
                 * to the head of the list to avoid starvation issues.
                 */
                if (req->rq_cong) {
                        xprt_clear_congestion_window_wait(xprt);
                        list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
                                if (pos->rq_cong)
                                        continue;
                                /* Note: req is added _before_ pos */
                                list_add_tail(&req->rq_xmit, &pos->rq_xmit);
                                INIT_LIST_HEAD(&req->rq_xmit2);
                                trace_xprt_enq_xmit(task, 1);
                                goto out;
                        }
                } else if (RPC_IS_SWAPPER(task)) {
                        list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
                                if (pos->rq_cong || pos->rq_bytes_sent)
                                        continue;
                                if (RPC_IS_SWAPPER(pos->rq_task))
                                        continue;
                                /* Note: req is added _before_ pos */
                                list_add_tail(&req->rq_xmit, &pos->rq_xmit);
                                INIT_LIST_HEAD(&req->rq_xmit2);
                                trace_xprt_enq_xmit(task, 2);
                                goto out;
                        }
                } else if (!req->rq_seqno) {
                        list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
                                if (pos->rq_task->tk_owner != task->tk_owner)
                                        continue;
                                list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
                                INIT_LIST_HEAD(&req->rq_xmit);
                                trace_xprt_enq_xmit(task, 3);
                                goto out;
                        }
                }
                list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
                INIT_LIST_HEAD(&req->rq_xmit2);
                trace_xprt_enq_xmit(task, 4);
out:
                set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
                spin_unlock(&xprt->queue_lock);
        }
}
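
/*
 * Queue ordering summary (descriptive note): requests holding congestion
 * credits go ahead of those that hold none, swapper tasks go ahead of
 * unsent ordinary requests, and requests without an rq_seqno that share
 * a tk_owner are chained on rq_xmit2 behind the first request for that
 * owner so they can be transmitted back to back.
 */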
1296
1297 /**
1298  * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
1299  * @task: pointer to rpc_task
1300  *
1301  * Remove a task from the transmission queue
1302  * Caller must hold xprt->queue_lock
1303  */
1304 static void
1305 xprt_request_dequeue_transmit_locked(struct rpc_task *task)
1306 {
1307         struct rpc_rqst *req = task->tk_rqstp;
1308
1309         if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1310                 return;
1311         if (!list_empty(&req->rq_xmit)) {
1312                 list_del(&req->rq_xmit);
1313                 if (!list_empty(&req->rq_xmit2)) {
1314                         struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
1315                                         struct rpc_rqst, rq_xmit2);
1316                         list_del(&req->rq_xmit2);
1317                         list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
1318                 }
1319         } else
1320                 list_del(&req->rq_xmit2);
1321 }
1322
1323 /**
1324  * xprt_request_dequeue_transmit - remove a task from the transmission queue
1325  * @task: pointer to rpc_task
1326  *
1327  * Remove a task from the transmission queue
1328  */
1329 static void
1330 xprt_request_dequeue_transmit(struct rpc_task *task)
1331 {
1332         struct rpc_rqst *req = task->tk_rqstp;
1333         struct rpc_xprt *xprt = req->rq_xprt;
1334
1335         spin_lock(&xprt->queue_lock);
1336         xprt_request_dequeue_transmit_locked(task);
1337         spin_unlock(&xprt->queue_lock);
1338 }
1339
1340 /**
1341  * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
1342  * @task: pointer to rpc_task
1343  *
1344  * Remove a task from the transmit and receive queues, and ensure that
1345  * it is not pinned by the receive work item.
1346  */
1347 void
1348 xprt_request_dequeue_xprt(struct rpc_task *task)
1349 {
1350         struct rpc_rqst *req = task->tk_rqstp;
1351         struct rpc_xprt *xprt = req->rq_xprt;
1352
1353         if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
1354             test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
1355             xprt_is_pinned_rqst(req)) {
1356                 spin_lock(&xprt->queue_lock);
1357                 xprt_request_dequeue_transmit_locked(task);
1358                 xprt_request_dequeue_receive_locked(task);
1359                 while (xprt_is_pinned_rqst(req)) {
1360                         set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1361                         spin_unlock(&xprt->queue_lock);
1362                         xprt_wait_on_pinned_rqst(req);
1363                         spin_lock(&xprt->queue_lock);
1364                         clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
1365                 }
1366                 spin_unlock(&xprt->queue_lock);
1367         }
1368 }
1369
1370 /**
1371  * xprt_request_prepare - prepare an encoded request for transport
1372  * @req: pointer to rpc_rqst
1373  *
1374  * Calls into the transport layer to do whatever is needed to prepare
1375  * the request for transmission or receive.
1376  */
1377 void
1378 xprt_request_prepare(struct rpc_rqst *req)
1379 {
1380         struct rpc_xprt *xprt = req->rq_xprt;
1381
1382         if (xprt->ops->prepare_request)
1383                 xprt->ops->prepare_request(req);
1384 }
1385
1386 /**
1387  * xprt_request_need_retransmit - Test if a task needs retransmission
1388  * @task: pointer to rpc_task
1389  *
1390  * Test for whether a connection breakage requires the task to retransmit
1391  */
1392 bool
1393 xprt_request_need_retransmit(struct rpc_task *task)
1394 {
1395         return xprt_request_retransmit_after_disconnect(task);
1396 }
1397
1398 /**
1399  * xprt_prepare_transmit - reserve the transport before sending a request
1400  * @task: RPC task about to send a request
1401  *
1402  */
1403 bool xprt_prepare_transmit(struct rpc_task *task)
1404 {
1405         struct rpc_rqst *req = task->tk_rqstp;
1406         struct rpc_xprt *xprt = req->rq_xprt;
1407
1408         dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);
1409
1410         if (!xprt_lock_write(xprt, task)) {
1411                 /* Race breaker: someone may have transmitted us */
1412                 if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1413                         rpc_wake_up_queued_task_set_status(&xprt->sending,
1414                                         task, 0);
1415                 return false;
1416
1417         }
1418         return true;
1419 }
1420
1421 void xprt_end_transmit(struct rpc_task *task)
1422 {
1423         xprt_release_write(task->tk_rqstp->rq_xprt, task);
1424 }
1425
1426 /**
1427  * xprt_request_transmit - send an RPC request on a transport
1428  * @req: pointer to request to transmit
1429  * @snd_task: RPC task that owns the transport lock
1430  *
1431  * This performs the transmission of a single request.
1432  * Note that if the request is not the same as snd_task, then it
1433  * does need to be pinned.
1434  * Returns '0' on success.
1435  */
1436 static int
1437 xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
1438 {
1439         struct rpc_xprt *xprt = req->rq_xprt;
1440         struct rpc_task *task = req->rq_task;
1441         unsigned int connect_cookie;
1442         int is_retrans = RPC_WAS_SENT(task);
1443         int status;
1444
1445         if (!req->rq_bytes_sent) {
1446                 if (xprt_request_data_received(task)) {
1447                         status = 0;
1448                         goto out_dequeue;
1449                 }
1450                 /* Verify that our message lies in the RPCSEC_GSS window */
1451                 if (rpcauth_xmit_need_reencode(task)) {
1452                         status = -EBADMSG;
1453                         goto out_dequeue;
1454                 }
1455                 if (RPC_SIGNALLED(task)) {
1456                         status = -ERESTARTSYS;
1457                         goto out_dequeue;
1458                 }
1459         }
1460
1461         /*
1462          * Update req->rq_ntrans before transmitting to avoid races with
1463          * xprt_update_rtt(), which needs to know that it is recording a
1464          * reply to the first transmission.
1465          */
1466         req->rq_ntrans++;
1467
1468         trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
1469         connect_cookie = xprt->connect_cookie;
1470         status = xprt->ops->send_request(req);
1471         if (status != 0) {
1472                 req->rq_ntrans--;
1473                 trace_xprt_transmit(req, status);
1474                 return status;
1475         }
1476
1477         if (is_retrans)
1478                 task->tk_client->cl_stats->rpcretrans++;
1479
1480         xprt_inject_disconnect(xprt);
1481
1482         task->tk_flags |= RPC_TASK_SENT;
1483         spin_lock(&xprt->transport_lock);
1484
1485         xprt->stat.sends++;
1486         xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
1487         xprt->stat.bklog_u += xprt->backlog.qlen;
1488         xprt->stat.sending_u += xprt->sending.qlen;
1489         xprt->stat.pending_u += xprt->pending.qlen;
1490         spin_unlock(&xprt->transport_lock);
1491
1492         req->rq_connect_cookie = connect_cookie;
1493 out_dequeue:
1494         trace_xprt_transmit(req, status);
1495         xprt_request_dequeue_transmit(task);
1496         rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
1497         return status;
1498 }
1499
1500 /**
1501  * xprt_transmit - send an RPC request on a transport
1502  * @task: controlling RPC task
1503  *
1504  * Attempts to drain the transmit queue. On exit, either the transport
1505  * signalled an error that needs to be handled before transmission can
1506  * resume, or @task finished transmitting, and detected that it already
1507  * received a reply.
1508  */
1509 void
1510 xprt_transmit(struct rpc_task *task)
1511 {
1512         struct rpc_rqst *next, *req = task->tk_rqstp;
1513         struct rpc_xprt *xprt = req->rq_xprt;
1514         int status;
1515
1516         spin_lock(&xprt->queue_lock);
1517         while (!list_empty(&xprt->xmit_queue)) {
1518                 next = list_first_entry(&xprt->xmit_queue,
1519                                 struct rpc_rqst, rq_xmit);
1520                 xprt_pin_rqst(next);
1521                 spin_unlock(&xprt->queue_lock);
1522                 status = xprt_request_transmit(next, task);
1523                 if (status == -EBADMSG && next != req)
1524                         status = 0;
1525                 cond_resched();
1526                 spin_lock(&xprt->queue_lock);
1527                 xprt_unpin_rqst(next);
1528                 if (status == 0) {
1529                         if (!xprt_request_data_received(task) ||
1530                             test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1531                                 continue;
1532                 } else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
1533                         task->tk_status = status;
1534                 break;
1535         }
1536         spin_unlock(&xprt->queue_lock);
1537 }

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
        set_bit(XPRT_CONGESTED, &xprt->state);
        rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
        if (rpc_wake_up_next(&xprt->backlog) == NULL)
                clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
        bool ret = false;

        if (!test_bit(XPRT_CONGESTED, &xprt->state))
                goto out;
        spin_lock(&xprt->reserve_lock);
        if (test_bit(XPRT_CONGESTED, &xprt->state)) {
                rpc_sleep_on(&xprt->backlog, task, NULL);
                ret = true;
        }
        spin_unlock(&xprt->reserve_lock);
out:
        return ret;
}
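
/*
 * Illustrative note (not part of the upstream file): the unlocked test
 * above is only a fast path.  The bit is re-tested under
 * xprt->reserve_lock because xprt_wake_up_backlog() clears
 * XPRT_CONGESTED while holding that same lock; without the re-test
 * this interleaving would lose a task:
 *
 *      reserving task                  freeing task
 *      --------------                  ------------
 *      test_bit() -> true
 *                                      spin_lock(reserve_lock)
 *                                      wake next (backlog now empty)
 *                                      clear XPRT_CONGESTED
 *                                      spin_unlock(reserve_lock)
 *      rpc_sleep_on(backlog)   <- sleeps with no waker left
 */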

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
        struct rpc_rqst *req = ERR_PTR(-EAGAIN);

        if (xprt->num_reqs >= xprt->max_reqs)
                goto out;
        ++xprt->num_reqs;
        spin_unlock(&xprt->reserve_lock);
        req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
        spin_lock(&xprt->reserve_lock);
        if (req != NULL)
                goto out;
        --xprt->num_reqs;
        req = ERR_PTR(-ENOMEM);
out:
        return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        if (xprt->num_reqs > xprt->min_reqs) {
                --xprt->num_reqs;
                kfree(req);
                return true;
        }
        return false;
}
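
/*
 * Illustrative sketch (not part of the upstream file): the two helpers
 * above implement an elastic slot pool with the invariant
 * min_reqs <= num_reqs <= max_reqs.  Allocation grows the pool up to
 * max_reqs, freeing shrinks it back down to min_reqs, and the
 * preallocated floor is recycled through xprt->free instead of being
 * kfree()d.  A hypothetical assertion of that invariant (caller holds
 * xprt->reserve_lock):
 */
static void example_check_slot_invariant(const struct rpc_xprt *xprt)
{
        WARN_ON_ONCE(xprt->num_reqs < xprt->min_reqs);
        WARN_ON_ONCE(xprt->num_reqs > xprt->max_reqs);
}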

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
        struct rpc_rqst *req;

        spin_lock(&xprt->reserve_lock);
        if (!list_empty(&xprt->free)) {
                req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
                goto out_init_req;
        }
        req = xprt_dynamic_alloc_slot(xprt);
        if (!IS_ERR(req))
                goto out_init_req;
        switch (PTR_ERR(req)) {
        case -ENOMEM:
                dprintk("RPC:       dynamic allocation of request slot "
                                "failed! Retrying\n");
                task->tk_status = -ENOMEM;
                break;
        case -EAGAIN:
                xprt_add_backlog(xprt, task);
                dprintk("RPC:       waiting for request slot\n");
                /* fall through */
        default:
                task->tk_status = -EAGAIN;
        }
        spin_unlock(&xprt->reserve_lock);
        return;
out_init_req:
        xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
                                     xprt->num_reqs);
        spin_unlock(&xprt->reserve_lock);

        task->tk_status = 0;
        task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);
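
/*
 * Illustrative sketch (not part of the upstream file): xprt_alloc_slot
 * and xprt_free_slot are exported so transport implementations can use
 * them directly as their slot-management callbacks.  A hypothetical
 * transport's ops table might wire them up like this (all other
 * callbacks elided):
 */
static const struct rpc_xprt_ops example_xprt_ops = {
        .alloc_slot     = xprt_alloc_slot,
        .free_slot      = xprt_free_slot,
        /* ... .connect, .send_request, .destroy, etc. ... */
};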

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
        spin_lock(&xprt->reserve_lock);
        if (!xprt_dynamic_free_slot(xprt, req)) {
                memset(req, 0, sizeof(*req));   /* mark unused */
                list_add(&req->rq_list, &xprt->free);
        }
        xprt_wake_up_backlog(xprt);
        spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
        struct rpc_rqst *req;

        while (!list_empty(&xprt->free)) {
                req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
                kfree(req);
        }
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
                unsigned int num_prealloc,
                unsigned int max_alloc)
{
        struct rpc_xprt *xprt;
        struct rpc_rqst *req;
        int i;

        xprt = kzalloc(size, GFP_KERNEL);
        if (xprt == NULL)
                goto out;

        xprt_init(xprt, net);

        for (i = 0; i < num_prealloc; i++) {
                req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
                if (!req)
                        goto out_free;
                list_add(&req->rq_list, &xprt->free);
        }
        if (max_alloc > num_prealloc)
                xprt->max_reqs = max_alloc;
        else
                xprt->max_reqs = num_prealloc;
        xprt->min_reqs = num_prealloc;
        xprt->num_reqs = num_prealloc;

        return xprt;

out_free:
        xprt_free(xprt);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
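
/*
 * Illustrative sketch (not part of the upstream file): a transport
 * setup routine typically embeds struct rpc_xprt at the start of a
 * larger private structure and passes the full size to xprt_alloc(),
 * so the slot pool and the private state are allocated together.  The
 * "example_xprt" type and the slot counts below are hypothetical.
 */
struct example_xprt {
        struct rpc_xprt xprt;           /* must come first for container_of */
        /* transport-private state would follow */
};

static struct rpc_xprt *example_setup(struct net *net)
{
        struct rpc_xprt *xprt;

        /* 16 preallocated slots, growable to 128 under load */
        xprt = xprt_alloc(net, sizeof(struct example_xprt), 16, 128);
        if (!xprt)
                return ERR_PTR(-ENOMEM);
        return xprt;
}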

void xprt_free(struct rpc_xprt *xprt)
{
        put_net(xprt->xprt_net);
        xprt_free_all_slots(xprt);
        kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
        req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
        __be32 xid;

        spin_lock(&xprt->reserve_lock);
        xid = (__force __be32)xprt->xid++;
        spin_unlock(&xprt->reserve_lock);
        return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
        xprt->xid = prandom_u32();
}
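
/*
 * Illustrative note (not part of the upstream file): XIDs start at a
 * random value per transport and then simply increment, so concurrent
 * requests on one connection always carry distinct XIDs while the
 * random origin makes them harder for off-path attackers to guess.
 * A hypothetical receive path matches a reply to its request by XID:
 */
static struct rpc_rqst *example_match_reply(struct rpc_xprt *xprt,
                                            __be32 xid)
{
        /* xprt_lookup_rqst() walks xprt->recv_queue; the caller must
         * hold xprt->queue_lock. */
        return xprt_lookup_rqst(xprt, xid);
}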

static void
xprt_request_init(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        req->rq_task    = task;
        req->rq_xprt    = xprt;
        req->rq_buffer  = NULL;
        req->rq_xid     = xprt_alloc_xid(xprt);
        xprt_init_connect_cookie(req, xprt);
        req->rq_snd_buf.len = 0;
        req->rq_snd_buf.buflen = 0;
        req->rq_rcv_buf.len = 0;
        req->rq_rcv_buf.buflen = 0;
        req->rq_snd_buf.bvec = NULL;
        req->rq_rcv_buf.bvec = NULL;
        req->rq_release_snd_buf = NULL;
        xprt_init_majortimeo(task, req);
        dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
                        req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
        xprt->ops->alloc_slot(xprt, task);
        if (task->tk_rqstp != NULL)
                xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = 0;
        if (task->tk_rqstp != NULL)
                return;

        task->tk_status = -EAGAIN;
        if (!xprt_throttle_congested(xprt, task))
                xprt_do_reserve(xprt, task);
}
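
/*
 * Illustrative sketch (not part of the upstream file): callers treat
 * xprt_reserve() as one step of a state machine.  On return, either
 * task->tk_rqstp is set (a slot was obtained), or task->tk_status
 * carries -EAGAIN/-ENOMEM and the reservation must be retried.  A
 * hypothetical caller step (the real one lives in the rpc_clnt state
 * machine):
 */
static void example_reserve_step(struct rpc_task *task)
{
        xprt_reserve(task);
        if (task->tk_rqstp != NULL)
                return;         /* got a slot; advance to the next state */
        if (task->tk_status == -EAGAIN)
                return;         /* queued on the backlog (or retried);
                                 * the task is woken to reserve again */
        /* other errors are reported through task->tk_status */
}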

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference from xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
        struct rpc_xprt *xprt = task->tk_xprt;

        task->tk_status = 0;
        if (task->tk_rqstp != NULL)
                return;

        task->tk_status = -EAGAIN;
        xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
        struct rpc_xprt *xprt;
        struct rpc_rqst *req = task->tk_rqstp;

        if (req == NULL) {
                if (task->tk_client) {
                        xprt = task->tk_xprt;
                        xprt_release_write(xprt, task);
                }
                return;
        }

        xprt = req->rq_xprt;
        xprt_request_dequeue_xprt(task);
        spin_lock(&xprt->transport_lock);
        xprt->ops->release_xprt(xprt, task);
        if (xprt->ops->release_request)
                xprt->ops->release_request(task);
        xprt_schedule_autodisconnect(xprt);
        spin_unlock(&xprt->transport_lock);
        if (req->rq_buffer)
                xprt->ops->buf_free(task);
        xprt_inject_disconnect(xprt);
        xdr_free_bvec(&req->rq_rcv_buf);
        xdr_free_bvec(&req->rq_snd_buf);
        if (req->rq_cred != NULL)
                put_rpccred(req->rq_cred);
        task->tk_rqstp = NULL;
        if (req->rq_release_snd_buf)
                req->rq_release_snd_buf(req);

        dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
        if (likely(!bc_prealloc(req)))
                xprt->ops->free_slot(xprt, req);
        else
                xprt_free_bc_request(req);
}
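
/*
 * Illustrative note (not part of the upstream file): the teardown
 * order in xprt_release() matters.  The request is first dequeued from
 * the transmit and receive queues, then the transport write lock and
 * any congestion credit are released under transport_lock, and only
 * after that are the send/receive buffers and credential dropped and
 * the slot returned -- so no other CPU can still find the rpc_rqst
 * once its memory is recycled through xprt->free.
 */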

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
        struct xdr_buf *xbufp = &req->rq_snd_buf;

        task->tk_rqstp = req;
        req->rq_task = task;
        xprt_init_connect_cookie(req, req->rq_xprt);
        /*
         * Set up the xdr_buf length.
         * This also indicates that the buffer is XDR encoded already.
         */
        xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
                xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
        kref_init(&xprt->kref);

        spin_lock_init(&xprt->transport_lock);
        spin_lock_init(&xprt->reserve_lock);
        spin_lock_init(&xprt->queue_lock);

        INIT_LIST_HEAD(&xprt->free);
        xprt->recv_queue = RB_ROOT;
        INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
        spin_lock_init(&xprt->bc_pa_lock);
        INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
        INIT_LIST_HEAD(&xprt->xprt_switch);

        xprt->last_used = jiffies;
        xprt->cwnd = RPC_INITCWND;
        xprt->bind_index = 0;

        rpc_init_wait_queue(&xprt->binding, "xprt_binding");
        rpc_init_wait_queue(&xprt->pending, "xprt_pending");
        rpc_init_wait_queue(&xprt->sending, "xprt_sending");
        rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

        xprt_init_xid(xprt);

        xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
        struct rpc_xprt *xprt;
        struct xprt_class *t;

        spin_lock(&xprt_list_lock);
        list_for_each_entry(t, &xprt_list, list) {
                if (t->ident == args->ident) {
                        spin_unlock(&xprt_list_lock);
                        goto found;
                }
        }
        spin_unlock(&xprt_list_lock);
        dprintk("RPC: transport (%d) not supported\n", args->ident);
        return ERR_PTR(-EIO);

found:
        xprt = t->setup(args);
        if (IS_ERR(xprt))
                goto out;
        if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
                xprt->idle_timeout = 0;
        INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
        if (xprt_has_timer(xprt))
                timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
        else
                timer_setup(&xprt->timer, NULL, 0);

        if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
                xprt_destroy(xprt);
                return ERR_PTR(-EINVAL);
        }
        xprt->servername = kstrdup(args->servername, GFP_KERNEL);
        if (xprt->servername == NULL) {
                xprt_destroy(xprt);
                return ERR_PTR(-ENOMEM);
        }

        rpc_xprt_debugfs_register(xprt);

        trace_xprt_create(xprt);
out:
        return xprt;
}
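
/*
 * Illustrative sketch (not part of the upstream file): a hypothetical
 * caller fills in struct xprt_create and lets the registered transport
 * class do the heavy lifting.  The server name and address parameters
 * here are made up; real callers pass the server's actual sockaddr.
 */
static struct rpc_xprt *example_create_tcp_xprt(struct net *net,
                                                struct sockaddr *sap,
                                                size_t salen)
{
        struct xprt_create args = {
                .ident          = XPRT_TRANSPORT_TCP,
                .net            = net,
                .dstaddr        = sap,
                .addrlen        = salen,
                .servername     = "server.example.com",
                .flags          = XPRT_CREATE_NO_IDLE_TIMEOUT,
        };

        return xprt_create_transport(&args);   /* rpc_xprt or ERR_PTR() */
}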

static void xprt_destroy_cb(struct work_struct *work)
{
        struct rpc_xprt *xprt =
                container_of(work, struct rpc_xprt, task_cleanup);

        trace_xprt_destroy(xprt);

        rpc_xprt_debugfs_unregister(xprt);
        rpc_destroy_wait_queue(&xprt->binding);
        rpc_destroy_wait_queue(&xprt->pending);
        rpc_destroy_wait_queue(&xprt->sending);
        rpc_destroy_wait_queue(&xprt->backlog);
        kfree(xprt->servername);
        /*
         * Destroy any existing back channel
         */
        xprt_destroy_backchannel(xprt, UINT_MAX);

        /*
         * Tear down transport state and free the rpc_xprt
         */
        xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
        /*
         * Exclude transport connect/disconnect handlers and autoclose
         */
        wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

        del_timer_sync(&xprt->timer);

        /*
         * Destroy sockets etc from the system workqueue so they can
         * safely flush receive work running on rpciod.
         */
        INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
        schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
        xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
        if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
                return xprt;
        return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
        if (xprt != NULL)
                kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
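
/*
 * Illustrative sketch (not part of the upstream file): xprt_get() uses
 * kref_get_unless_zero() so that a lookup racing with the final
 * xprt_put() safely observes NULL instead of reviving a dying
 * transport.  The typical pairing, with a hypothetical RCU-protected
 * pointer:
 */
static struct rpc_xprt *example_take_ref(struct rpc_xprt __rcu **slot)
{
        struct rpc_xprt *xprt;

        rcu_read_lock();
        /* May return NULL if the last reference is already gone. */
        xprt = xprt_get(rcu_dereference(*slot));
        rcu_read_unlock();
        return xprt;    /* caller must balance with xprt_put() */
}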