// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/net/sunrpc/xprt.c
 *
 *  This is a generic RPC call interface supporting congestion avoidance,
 *  and asynchronous calls.
 *
 *  The interface works like this:
 *
 *  -	When a process places a call, it allocates a request slot if
 *	one is available. Otherwise, it sleeps on the backlog queue
 *	(xprt_reserve).
 *  -	Next, the caller puts together the RPC message, stuffs it into
 *	the request struct, and calls xprt_transmit().
 *  -	xprt_transmit sends the message and installs the caller on the
 *	transport's wait list. At the same time, if a reply is expected,
 *	it installs a timer that is run after the packet's timeout has
 *	expired.
 *  -	When a packet arrives, the data_ready handler walks the list of
 *	pending requests for that transport. If a matching XID is found, the
 *	caller is woken up, and the timer removed.
 *  -	When no reply arrives within the timeout interval, the timer is
 *	fired by the kernel and runs xprt_timer(). It either adjusts the
 *	timeout values (minor timeout) or wakes up the caller with a status
 *	of -ETIMEDOUT.
 *  -	When the caller receives a notification from RPC that a reply arrived,
 *	it should release the RPC slot, and process the reply.
 *	If the call timed out, it may choose to retry the operation by
 *	adjusting the initial timeout value, and simply calling rpc_call
 *	again.
 *
 *  Support for async RPC is done through a set of RPC-specific scheduling
 *  primitives that `transparently' work for processes as well as async
 *  tasks that rely on callbacks.
 *
 *  Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 *  Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
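
/*
 * Rough sketch (illustrative only; the rpc_task state machine in
 * clnt.c and sched.c drives the real sequencing) of how the steps
 * above map onto the entry points in this file:
 *
 *	xprt_reserve(task);		   // slot, or sleep on the backlog
 *	xprt_prepare_transmit(task);	   // take the transport write lock
 *	xprt_request_enqueue_receive(task);
 *	xprt_request_enqueue_transmit(task);
 *	xprt_transmit(task);		   // drain the transmit queue
 *	xprt_request_wait_receive(task);   // sleep until the reply arrives
 *	xprt_release(task);		   // free the slot, adjust cwnd
 */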

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

/*
 * Local variables
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);
static void	xprt_request_init(struct rpc_task *task);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);
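
/*
 * Compute the absolute expiry of the next retransmit wake-up for @req:
 * one full rq_timeout from now, capped at the request's major timeout.
 */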
static unsigned long xprt_request_timeout(const struct rpc_rqst *req)
{
	unsigned long timeout = jiffies + req->rq_timeout;

	if (time_before(timeout, req->rq_majortimeo))
		return timeout;
	return req->rq_majortimeo;
}

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);
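
/*
 * A transport module typically registers itself at module load time and
 * unregisters on exit. Illustrative sketch only (the "example" names are
 * hypothetical; see xprtsock.c for a real registration):
 *
 *	static struct xprt_class example_transport = {
 *		.list	= LIST_HEAD_INIT(example_transport.list),
 *		.name	= "example",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_TCP,
 *		.setup	= example_setup,
 *		.netid	= { "example", "" },
 *	};
 *
 *	static int __init example_module_init(void)
 *	{
 *		return xprt_register_transport(&example_transport);
 *	}
 */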

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

static void
xprt_class_release(const struct xprt_class *t)
{
	module_put(t->owner);
}

static const struct xprt_class *
xprt_class_find_by_ident_locked(int ident)
{
	const struct xprt_class *t;

	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident != ident)
			continue;
		if (!try_module_get(t->owner))
			continue;
		return t;
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_ident(int ident)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_ident_locked(ident);
	spin_unlock(&xprt_list_lock);
	return t;
}

static const struct xprt_class *
xprt_class_find_by_netid_locked(const char *netid)
{
	const struct xprt_class *t;
	unsigned int i;

	list_for_each_entry(t, &xprt_list, list) {
		for (i = 0; t->netid[i][0] != '\0'; i++) {
			if (strcmp(t->netid[i], netid) != 0)
				continue;
			if (!try_module_get(t->owner))
				return NULL;
			return t;
		}
	}
	return NULL;
}

static const struct xprt_class *
xprt_class_find_by_netid(const char *netid)
{
	const struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	t = xprt_class_find_by_netid_locked(netid);
	if (!t) {
		spin_unlock(&xprt_list_lock);
		request_module("rpc%s", netid);
		spin_lock(&xprt_list_lock);
		t = xprt_class_find_by_netid_locked(netid);
	}
	spin_unlock(&xprt_list_lock);
	return t;
}

/**
 * xprt_find_transport_ident - convert a netid into a transport identifier
 * @netid: transport to load
 *
 * Returns:
 * > 0:		transport identifier
 * -ENOENT:	transport module not available
 */
int xprt_find_transport_ident(const char *netid)
{
	const struct xprt_class *t;
	int ret;

	t = xprt_class_find_by_netid(netid);
	if (!t)
		return -ENOENT;
	ret = t->ident;
	xprt_class_release(t);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_find_transport_ident);
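
/*
 * Drop the transport send lock. If a close is pending, leave XPRT_LOCKED
 * set and hand the transport to the autoclose worker instead, so the
 * teardown runs while the lock is still held.
 */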
static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

out_locked:
	trace_xprt_reserve_xprt(xprt, task);
	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
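
/*
 * XPRT_CWND_WAIT gates transmission on congestion-controlled transports
 * (e.g. RPC over UDP): while it is set, new requests wait for congestion
 * window credit instead of being handed the transport lock.
 */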
static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			goto out_locked;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		goto out_locked;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		goto out_locked;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				xprt_request_timeout(req));
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
out_locked:
	trace_xprt_reserve_cong(xprt, task);
	return 1;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
	trace_xprt_release_xprt(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
	trace_xprt_release_cong(xprt, task);
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	trace_xprt_get_cong(xprt, req->rq_task);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	trace_xprt_put_cong(xprt, req->rq_task);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

static void xprt_clear_congestion_window_wait_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state))
		__xprt_lock_write_next_cong(xprt);
}

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	-	a reply is received and
 *	-	a full number of requests are outstanding and
 *	-	the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
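
/*
 * Worked example of the additive increase above, assuming RPC_CWNDSCALE
 * is (1 << 8) as defined in xprt.h: at cwnd = 512 (two credits), one
 * timely reply adds (256 * 256 + 256) / 512 = 128, i.e. half a credit,
 * so a window of N credits needs about N replies to grow by one slot.
 * A major timeout instead halves cwnd, never below one credit.
 */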

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static unsigned long xprt_abs_ktime_to_jiffies(ktime_t abstime)
{
	s64 delta = ktime_to_ns(ktime_get() - abstime);
	return likely(delta >= 0) ?
		jiffies - nsecs_to_jiffies(delta) :
		jiffies + nsecs_to_jiffies(-delta);
}
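
/*
 * Total (major) timeout for a request, derived from the client's
 * rpc_timeout template. For example, with rq_timeout = 5 * HZ and
 * to_retries = 3, exponential backoff yields 5s << 3 = 40s, while
 * linear backoff with to_increment = 5 * HZ yields 5s + 3 * 5s = 20s;
 * either result is clamped to to_maxval.
 */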
static unsigned long xprt_calc_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	unsigned long majortimeo = req->rq_timeout;

	if (to->to_exponential)
		majortimeo <<= to->to_retries;
	else
		majortimeo += to->to_increment * to->to_retries;
	if (majortimeo > to->to_maxval || majortimeo == 0)
		majortimeo = to->to_maxval;
	return majortimeo;
}

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	req->rq_majortimeo += xprt_calc_majortimeo(req);
}

static void xprt_reset_minortimeo(struct rpc_rqst *req)
{
	req->rq_minortimeo += req->rq_timeout;
}

static void xprt_init_majortimeo(struct rpc_task *task, struct rpc_rqst *req)
{
	unsigned long time_init;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (likely(xprt && xprt_connected(xprt)))
		time_init = jiffies;
	else
		time_init = xprt_abs_ktime_to_jiffies(task->tk_start);
	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_majortimeo = time_init + xprt_calc_majortimeo(req);
	req->rq_minortimeo = time_init + req->rq_timeout;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (time_before(jiffies, req->rq_minortimeo))
			return status;
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}
	xprt_reset_minortimeo(req);

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	trace_xprt_disconnect_auto(xprt);
	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_done(xprt);
	spin_lock(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_clear_congestion_window_wait_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	trace_xprt_disconnect_force(xprt);

	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
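
/*
 * The transport bumps xprt->connect_cookie each time its connection
 * state changes, so comparing a request's rq_connect_cookie with the
 * current value tells us whether the request was sent on the present
 * connection and therefore whether it needs to be retransmitted.
 */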
static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock(&xprt->transport_lock);
}

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	xprt->last_used = jiffies;
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		return;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	trace_xprt_connect(xprt);

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		trace_xprt_disconnect_cleanup(xprt);
		xprt->ops->close(xprt);
	}

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				xprt_request_timeout(task->tk_rqstp));

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

/**
 * xprt_reconnect_delay - compute the wait before scheduling a connect
 * @xprt: transport instance
 *
 */
unsigned long xprt_reconnect_delay(const struct rpc_xprt *xprt)
{
	unsigned long start, now = jiffies;

	start = xprt->stat.connect_start + xprt->reestablish_timeout;
	if (time_after(start, now))
		return start - now;
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_delay);

/**
 * xprt_reconnect_backoff - compute the new re-establish timeout
 * @xprt: transport instance
 * @init_to: initial reestablish timeout
 *
 */
void xprt_reconnect_backoff(struct rpc_xprt *xprt, unsigned long init_to)
{
	xprt->reestablish_timeout <<= 1;
	if (xprt->reestablish_timeout > xprt->max_reconnect_timeout)
		xprt->reestablish_timeout = xprt->max_reconnect_timeout;
	if (xprt->reestablish_timeout < init_to)
		xprt->reestablish_timeout = init_to;
}
EXPORT_SYMBOL_GPL(xprt_reconnect_backoff);
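
/*
 * Requests awaiting a reply are kept in an rb-tree indexed by XID, so
 * the data_ready path can match an incoming reply in O(log n) time
 * instead of walking a flat list of pending requests.
 */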
enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
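
/*
 * Pinning keeps a request from being released while the receive path
 * has dropped xprt->queue_lock to copy data into it. xprt_unpin_rqst()
 * wakes anyone sleeping in xprt_wait_on_pinned_rqst() once the pin
 * count drops to zero and RPC_TASK_MSG_PIN_WAIT is set.
 */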
static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding xprt->queue_lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding xprt->queue_lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;

	xprt_request_prepare(task->tk_rqstp);
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			xprt_request_timeout(req));
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
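
/*
 * Example of the RTT-based backoff above: if rpc_calc_rto() currently
 * estimates a 200ms RTO and this request has already been retried
 * twice (rq_retries == 2, no stored backoff from rpc_ntimeo()), the
 * task sleeps for 200ms << 2 = 800ms, clamped to the client's
 * to_maxval.
 */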

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
out:
		atomic_long_inc(&xprt->xmit_queuelen);
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}
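
/*
 * To summarize the queueing policy above: requests holding congestion
 * credits go to the head of the queue, swap-out requests go ahead of
 * other requests that have not yet started transmitting, and requests
 * without an rq_seqno chain themselves (via rq_xmit2) onto an earlier
 * request from the same tk_owner so they can be sent back to back.
 */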

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
	atomic_long_dec(&req->rq_xprt->xmit_queuelen);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_dequeue_xprt - remove a task from the transmit+receive queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmit and receive queues, and ensure that
 * it is not pinned by the receive work item.
 */
void
xprt_request_dequeue_xprt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	xprt_inject_disconnect(xprt);
	xprt_release_write(xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	trace_rpc_xdr_sendto(task, &req->rq_snd_buf);
	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans) {
		task->tk_client->cl_stats->rpcretrans++;
		trace_xprt_retransmit(req);
	}

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int counter, status;

	spin_lock(&xprt->queue_lock);
	counter = 0;
	while (!list_empty(&xprt->xmit_queue)) {
		if (++counter == 20)
			break;
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_complete_request_init(struct rpc_task *task)
{
	if (task->tk_rqstp)
		xprt_request_init(task);
}

void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, xprt_complete_request_init);
}
EXPORT_SYMBOL_GPL(xprt_add_backlog);

static bool __xprt_set_rq(struct rpc_task *task, void *data)
{
	struct rpc_rqst *req = data;

	if (task->tk_rqstp == NULL) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		task->tk_rqstp = req;
		return true;
	}
	return false;
}

bool xprt_wake_up_backlog(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (rpc_wake_up_first(&xprt->backlog, __xprt_set_rq, req) == NULL) {
		clear_bit(XPRT_CONGESTED, &xprt->state);
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(xprt_wake_up_backlog);

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		xprt_add_backlog(xprt, task);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	++xprt->num_reqs;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	--xprt->num_reqs;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		fallthrough;
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_wake_up_backlog(xprt, req) &&
	    !xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);
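
/*
 * Slot pool life cycle: xprt_alloc() preallocates min_reqs slots on the
 * free list, xprt_dynamic_alloc_slot() grows the pool on demand up to
 * max_reqs, and xprt_free_slot() offers a freed slot to any task
 * sleeping on the backlog before returning it to the free list.
 */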
static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;
	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}
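
/*
 * XIDs are sequential from a random starting point (set below), which
 * keeps them cheap to generate under xprt->reserve_lock while making
 * collisions with XIDs from a previous incarnation of the client
 * unlikely.
 */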
static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_init_majortimeo(task, req);

	trace_xprt_reserve(req);
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	xprt_request_dequeue_xprt(task);
	spin_lock(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt_schedule_autodisconnect(xprt);
	spin_unlock(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	task->tk_rqstp = NULL;
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	const struct xprt_class *t;

	t = xprt_class_find_by_ident(args->ident);
	if (!t) {
		dprintk("RPC: transport (%d) not supported\n", args->ident);
		return ERR_PTR(-EIO);
	}

	xprt = t->setup(args);
	xprt_class_release(t);

	if (IS_ERR(xprt))
		goto out;
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer, xprt_init_autodisconnect, 0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	trace_xprt_create(xprt);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	trace_xprt_destroy(xprt);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Destroy any existing back channel
	 */
	xprt_destroy_backchannel(xprt, UINT_MAX);

	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);