/*
 * linux/net/sunrpc/xprt.c
 *
 * This is a generic RPC call interface supporting congestion avoidance,
 * and asynchronous calls.
 *
 * The interface works like this:
 *
 * - When a process places a call, it allocates a request slot if
 *   one is available. Otherwise, it sleeps on the backlog queue
 *   (xprt_reserve).
 * - Next, the caller puts together the RPC message, stuffs it into
 *   the request struct, and calls xprt_transmit().
 * - xprt_transmit sends the message and installs the caller on the
 *   transport's wait list. At the same time, if a reply is expected,
 *   it installs a timer that is run after the packet's timeout has
 *   expired.
 * - When a packet arrives, the data_ready handler walks the list of
 *   pending requests for that transport. If a matching XID is found, the
 *   caller is woken up, and the timer removed.
 * - When no reply arrives within the timeout interval, the timer is
 *   fired by the kernel and runs xprt_timer(). It either adjusts the
 *   timeout values (minor timeout) or wakes up the caller with a status
 *   of -ETIMEDOUT.
 * - When the caller receives a notification from RPC that a reply arrived,
 *   it should release the RPC slot, and process the reply.
 *   If the call timed out, it may choose to retry the operation by
 *   adjusting the initial timeout value, and simply calling rpc_call
 *   again.
 *
 * Support for async RPC is done through a set of RPC-specific scheduling
 * primitives that `transparently' work for processes as well as async
 * tasks that rely on callbacks.
 *
 * Copyright (C) 1995-1997, Olaf Kirch <okir@monad.swb.de>
 *
 * Transport switch API copyright (C) 2005, Chuck Lever <cel@netapp.com>
 */
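
/*
 * Illustrative call sequence for the interface described above, as a
 * synchronous caller would drive it (a sketch, not a verbatim code path;
 * the encode step stands in for the client's XDR machinery):
 *
 *	xprt_reserve(task);		 // allocate a slot or sleep on backlog
 *	<encode RPC message into task->tk_rqstp>
 *	xprt_transmit(task);		 // send, queue for the reply
 *	xprt_request_wait_receive(task); // sleep until reply or timeout
 *	xprt_release(task);		 // release the slot
 */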

#include <linux/module.h>

#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/net.h>
#include <linux/ktime.h>

#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/metrics.h>
#include <linux/sunrpc/bc_xprt.h>
#include <linux/rcupdate.h>
#include <linux/sched/mm.h>

#include <trace/events/sunrpc.h>

#include "sunrpc.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_XPRT
#endif

/*
 * Local functions
 */
static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
static void	xprt_destroy(struct rpc_xprt *xprt);

static DEFINE_SPINLOCK(xprt_list_lock);
static LIST_HEAD(xprt_list);

/**
 * xprt_register_transport - register a transport implementation
 * @transport: transport to register
 *
 * If a transport implementation is loaded as a kernel module, it can
 * call this interface to make itself known to the RPC client.
 *
 * Returns:
 * 0:		transport successfully registered
 * -EEXIST:	transport already registered
 * -EINVAL:	transport module being unloaded
 */
int xprt_register_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = -EEXIST;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		/* don't register the same transport class twice */
		if (t->ident == transport->ident)
			goto out;
	}

	list_add_tail(&transport->list, &xprt_list);
	printk(KERN_INFO "RPC: Registered %s transport module.\n",
	       transport->name);
	result = 0;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_register_transport);

/**
 * xprt_unregister_transport - unregister a transport implementation
 * @transport: transport to unregister
 *
 * Returns:
 * 0:		transport successfully unregistered
 * -ENOENT:	transport never registered
 */
int xprt_unregister_transport(struct xprt_class *transport)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t == transport) {
			printk(KERN_INFO
				"RPC: Unregistered %s transport module.\n",
				transport->name);
			list_del_init(&transport->list);
			goto out;
		}
	}
	result = -ENOENT;

out:
	spin_unlock(&xprt_list_lock);
	return result;
}
EXPORT_SYMBOL_GPL(xprt_unregister_transport);

/**
 * xprt_load_transport - load a transport implementation
 * @transport_name: transport to load
 *
 * Returns:
 * 0:		transport successfully loaded
 * -ENOENT:	transport module not available
 */
int xprt_load_transport(const char *transport_name)
{
	struct xprt_class *t;
	int result;

	result = 0;
	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (strcmp(t->name, transport_name) == 0) {
			spin_unlock(&xprt_list_lock);
			goto out;
		}
	}
	spin_unlock(&xprt_list_lock);
	result = request_module("xprt%s", transport_name);
out:
	return result;
}
EXPORT_SYMBOL_GPL(xprt_load_transport);
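
/*
 * Example: how a transport module makes itself known with the calls
 * above. The "foo" identifiers are hypothetical; xprtsock.c and
 * xprtrdma/ contain the real registrations.
 *
 *	static struct xprt_class xprt_foo_class = {
 *		.list	= LIST_HEAD_INIT(xprt_foo_class.list),
 *		.name	= "foo",
 *		.owner	= THIS_MODULE,
 *		.ident	= XPRT_TRANSPORT_FOO,
 *		.setup	= xs_setup_foo,
 *	};
 *
 *	static int __init init_foo_xprt(void)
 *	{
 *		return xprt_register_transport(&xprt_foo_class);
 *	}
 *
 *	static void __exit exit_foo_xprt(void)
 *	{
 *		xprt_unregister_transport(&xprt_foo_class);
 *	}
 */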

static void xprt_clear_locked(struct rpc_xprt *xprt)
{
	xprt->snd_task = NULL;
	if (!test_bit(XPRT_CLOSE_WAIT, &xprt->state)) {
		smp_mb__before_atomic();
		clear_bit(XPRT_LOCKED, &xprt->state);
		smp_mb__after_atomic();
	} else
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
}

/**
 * xprt_reserve_xprt - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * This prevents mixing the payload of separate requests, and prevents
 * transport connects from colliding with writes. No congestion control
 * is provided.
 */
int xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	xprt->snd_task = task;

	return 1;

out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n",
			task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				jiffies + req->rq_timeout);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
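
/*
 * xprt_reserve_xprt() is normally reached through a transport's method
 * table rather than called directly; stream transports pair it with
 * xprt_release_xprt(), while datagram transports use the _cong variants
 * below. A hypothetical excerpt:
 *
 *	static const struct rpc_xprt_ops foo_ops = {
 *		.reserve_xprt	= xprt_reserve_xprt,
 *		.release_xprt	= xprt_release_xprt,
 *		...
 *	};
 */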

static bool
xprt_need_congestion_window_wait(struct rpc_xprt *xprt)
{
	return test_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_set_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!list_empty(&xprt->xmit_queue)) {
		/* Peek at head of queue to see if it can make progress */
		if (list_first_entry(&xprt->xmit_queue, struct rpc_rqst,
					rq_xmit)->rq_cong)
			return;
	}
	set_bit(XPRT_CWND_WAIT, &xprt->state);
}

static void
xprt_test_and_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (!RPCXPRT_CONGESTED(xprt))
		clear_bit(XPRT_CWND_WAIT, &xprt->state);
}

/**
 * xprt_reserve_xprt_cong - serialize write access to transports
 * @task: task that is requesting access to the transport
 * @xprt: pointer to the target transport
 *
 * Same as xprt_reserve_xprt, but Van Jacobson congestion control is
 * integrated into the decision of whether a request is allowed to be
 * woken up and given access to the transport.
 * Note that the lock is only granted if we know there are free slots.
 */
int xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_set_bit(XPRT_LOCKED, &xprt->state)) {
		if (task == xprt->snd_task)
			return 1;
		goto out_sleep;
	}
	if (req == NULL) {
		xprt->snd_task = task;
		return 1;
	}
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (!xprt_need_congestion_window_wait(xprt)) {
		xprt->snd_task = task;
		return 1;
	}
out_unlock:
	xprt_clear_locked(xprt);
out_sleep:
	dprintk("RPC: %5u failed to lock transport %p\n", task->tk_pid, xprt);
	task->tk_status = -EAGAIN;
	if (RPC_IS_SOFT(task))
		rpc_sleep_on_timeout(&xprt->sending, task, NULL,
				jiffies + req->rq_timeout);
	else
		rpc_sleep_on(&xprt->sending, task, NULL);
	return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);

static inline int xprt_lock_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	int retval;

	if (test_bit(XPRT_LOCKED, &xprt->state) && xprt->snd_task == task)
		return 1;
	spin_lock_bh(&xprt->transport_lock);
	retval = xprt->ops->reserve_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
	return retval;
}

static bool __xprt_lock_write_func(struct rpc_task *task, void *data)
{
	struct rpc_xprt *xprt = data;

	xprt->snd_task = task;
	return true;
}

static void __xprt_lock_write_next(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

static void __xprt_lock_write_next_cong(struct rpc_xprt *xprt)
{
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		return;
	if (test_bit(XPRT_WRITE_SPACE, &xprt->state))
		goto out_unlock;
	if (xprt_need_congestion_window_wait(xprt))
		goto out_unlock;
	if (rpc_wake_up_first_on_wq(xprtiod_workqueue, &xprt->sending,
				__xprt_lock_write_func, xprt))
		return;
out_unlock:
	xprt_clear_locked(xprt);
}

/**
 * xprt_release_xprt - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. No congestion control is provided.
 */
void xprt_release_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt);

/**
 * xprt_release_xprt_cong - allow other requests to use a transport
 * @xprt: transport with other tasks potentially waiting
 * @task: task that is releasing access to the transport
 *
 * Note that "task" can be NULL. Another task is awoken to use the
 * transport if the transport's congestion window allows it.
 */
void xprt_release_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task == task) {
		xprt_clear_locked(xprt);
		__xprt_lock_write_next_cong(xprt);
	}
}
EXPORT_SYMBOL_GPL(xprt_release_xprt_cong);

static inline void xprt_release_write(struct rpc_xprt *xprt, struct rpc_task *task)
{
	if (xprt->snd_task != task)
		return;
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	spin_unlock_bh(&xprt->transport_lock);
}

/*
 * Van Jacobson congestion avoidance. Check if the congestion window
 * overflowed. Put the task to sleep if this is the case.
 */
static int
__xprt_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (req->rq_cong)
		return 1;
	dprintk("RPC: %5u xprt_cwnd_limited cong = %lu cwnd = %lu\n",
			req->rq_task->tk_pid, xprt->cong, xprt->cwnd);
	if (RPCXPRT_CONGESTED(xprt)) {
		xprt_set_congestion_window_wait(xprt);
		return 0;
	}
	req->rq_cong = 1;
	xprt->cong += RPC_CWNDSCALE;
	return 1;
}

/*
 * Adjust the congestion window, and wake up the next task
 * that has been sleeping due to congestion
 */
static void
__xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (!req->rq_cong)
		return;
	req->rq_cong = 0;
	xprt->cong -= RPC_CWNDSCALE;
	xprt_test_and_clear_congestion_window_wait(xprt);
	__xprt_lock_write_next_cong(xprt);
}

/**
 * xprt_request_get_cong - Request congestion control credits
 * @xprt: pointer to transport
 * @req: pointer to RPC request
 *
 * Useful for transports that require congestion control.
 */
bool
xprt_request_get_cong(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	bool ret = false;

	if (req->rq_cong)
		return true;
	spin_lock_bh(&xprt->transport_lock);
	ret = __xprt_get_cong(xprt, req) != 0;
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_request_get_cong);

/**
 * xprt_release_rqst_cong - housekeeping when request is complete
 * @task: RPC request that recently completed
 *
 * Useful for transports that require congestion control.
 */
void xprt_release_rqst_cong(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	__xprt_put_cong(req->rq_xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_release_rqst_cong);

/*
 * Clear the congestion window wait flag and wake up the next
 * entry on xprt->sending
 */
static void
xprt_clear_congestion_window_wait(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_CWND_WAIT, &xprt->state)) {
		spin_lock_bh(&xprt->transport_lock);
		__xprt_lock_write_next_cong(xprt);
		spin_unlock_bh(&xprt->transport_lock);
	}
}

/**
 * xprt_adjust_cwnd - adjust transport congestion window
 * @xprt: pointer to xprt
 * @task: recently completed RPC request used to adjust window
 * @result: result code of completed RPC request
 *
 * The transport code maintains an estimate on the maximum number of out-
 * standing RPC requests, using a smoothed version of the congestion
 * avoidance implemented in 44BSD. This is basically the Van Jacobson
 * congestion algorithm: If a retransmit occurs, the congestion window is
 * halved; otherwise, it is incremented by 1/cwnd when
 *
 *	- a reply is received and
 *	- a full number of requests are outstanding and
 *	- the congestion window hasn't been updated recently.
 */
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result)
{
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long cwnd = xprt->cwnd;

	if (result >= 0 && cwnd <= xprt->cong) {
		/* The (cwnd >> 1) term makes sure
		 * the result gets rounded properly. */
		cwnd += (RPC_CWNDSCALE * RPC_CWNDSCALE + (cwnd >> 1)) / cwnd;
		if (cwnd > RPC_MAXCWND(xprt))
			cwnd = RPC_MAXCWND(xprt);
		__xprt_lock_write_next_cong(xprt);
	} else if (result == -ETIMEDOUT) {
		cwnd >>= 1;
		if (cwnd < RPC_CWNDSCALE)
			cwnd = RPC_CWNDSCALE;
	}
	dprintk("RPC: cong %ld, cwnd was %ld, now %ld\n",
			xprt->cong, xprt->cwnd, cwnd);
	xprt->cwnd = cwnd;
	__xprt_put_cong(xprt, req);
}
EXPORT_SYMBOL_GPL(xprt_adjust_cwnd);
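
/*
 * Worked example of the additive increase above, assuming the usual
 * RPC_CWNDSCALE of 256: with cwnd = 1024 (four requests' worth of
 * credit), a successful reply adds
 *
 *	(256 * 256 + 512) / 1024 = 64
 *
 * so four such replies grow the window by one full request. A timeout
 * instead halves cwnd, never dropping below one RPC_CWNDSCALE.
 */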

/**
 * xprt_wake_pending_tasks - wake all tasks on a transport's pending queue
 * @xprt: transport with waiting tasks
 * @status: result code to plant in each task before waking it
 *
 */
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status)
{
	if (status < 0)
		rpc_wake_up_status(&xprt->pending, status);
	else
		rpc_wake_up(&xprt->pending);
}
EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);

/**
 * xprt_wait_for_buffer_space - wait for transport output buffer to clear
 * @xprt: transport
 *
 * Note that we only set the timer for the case of RPC_IS_SOFT(), since
 * we don't in general want to force a socket disconnection due to
 * an incomplete RPC call transmission.
 */
void xprt_wait_for_buffer_space(struct rpc_xprt *xprt)
{
	set_bit(XPRT_WRITE_SPACE, &xprt->state);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);

static bool
xprt_clear_write_space_locked(struct rpc_xprt *xprt)
{
	if (test_and_clear_bit(XPRT_WRITE_SPACE, &xprt->state)) {
		__xprt_lock_write_next(xprt);
		dprintk("RPC: write space: waking waiting task on "
				"xprt %p\n", xprt);
		return true;
	}
	return false;
}

/**
 * xprt_write_space - wake the task waiting for transport output buffer space
 * @xprt: transport with waiting tasks
 *
 * Can be called in a soft IRQ context, so xprt_write_space never sleeps.
 */
bool xprt_write_space(struct rpc_xprt *xprt)
{
	bool ret;

	if (!test_bit(XPRT_WRITE_SPACE, &xprt->state))
		return false;
	spin_lock_bh(&xprt->transport_lock);
	ret = xprt_clear_write_space_locked(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(xprt_write_space);

static void xprt_reset_majortimeo(struct rpc_rqst *req)
{
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;

	req->rq_majortimeo = req->rq_timeout;
	if (to->to_exponential)
		req->rq_majortimeo <<= to->to_retries;
	else
		req->rq_majortimeo += to->to_increment * to->to_retries;
	if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0)
		req->rq_majortimeo = to->to_maxval;
	req->rq_majortimeo += jiffies;
}

/**
 * xprt_adjust_timeout - adjust timeout values for next retransmit
 * @req: RPC request containing parameters to use for the adjustment
 *
 */
int xprt_adjust_timeout(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout;
	int status = 0;

	if (time_before(jiffies, req->rq_majortimeo)) {
		if (to->to_exponential)
			req->rq_timeout <<= 1;
		else
			req->rq_timeout += to->to_increment;
		if (to->to_maxval && req->rq_timeout >= to->to_maxval)
			req->rq_timeout = to->to_maxval;
		req->rq_retries++;
	} else {
		req->rq_timeout = to->to_initval;
		req->rq_retries = 0;
		xprt_reset_majortimeo(req);
		/* Reset the RTT counters == "slow start" */
		spin_lock_bh(&xprt->transport_lock);
		rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval);
		spin_unlock_bh(&xprt->transport_lock);
		status = -ETIMEDOUT;
	}

	if (req->rq_timeout == 0) {
		printk(KERN_WARNING "xprt_adjust_timeout: rq_timeout = 0!\n");
		req->rq_timeout = 5 * HZ;
	}
	return status;
}
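
/*
 * Example of the backoff above for an exponential timeout
 * (to_exponential set, to_initval = 60 * HZ, to_retries = 2, typical
 * of TCP mounts): rq_timeout doubles on each minor timeout (60s, 120s,
 * 240s, capped at to_maxval), while rq_majortimeo covers the whole
 * series (60s << 2 = 240s past the last reset). Once jiffies passes
 * rq_majortimeo, the major-timeout branch returns -ETIMEDOUT and the
 * next retransmission starts over from to_initval in "slow start".
 */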

static void xprt_autoclose(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);
	unsigned int pflags = memalloc_nofs_save();

	clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
	xprt->ops->close(xprt);
	xprt_release_write(xprt, NULL);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
	memalloc_nofs_restore(pflags);
}

/**
 * xprt_disconnect_done - mark a transport as disconnected
 * @xprt: transport to flag for disconnect
 *
 */
void xprt_disconnect_done(struct rpc_xprt *xprt)
{
	dprintk("RPC: disconnected transport %p\n", xprt);
	spin_lock_bh(&xprt->transport_lock);
	xprt_clear_connected(xprt);
	xprt_clear_write_space_locked(xprt);
	xprt_wake_pending_tasks(xprt, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_disconnect_done);

/**
 * xprt_force_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 *
 */
void xprt_force_disconnect(struct rpc_xprt *xprt)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	else if (xprt->snd_task)
		rpc_wake_up_queued_task_set_status(&xprt->pending,
				xprt->snd_task, -ENOTCONN);
	spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);

static unsigned int
xprt_connect_cookie(struct rpc_xprt *xprt)
{
	return READ_ONCE(xprt->connect_cookie);
}

static bool
xprt_request_retransmit_after_disconnect(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	return req->rq_connect_cookie != xprt_connect_cookie(xprt) ||
		!xprt_connected(xprt);
}

/**
 * xprt_conditional_disconnect - force a transport to disconnect
 * @xprt: transport to disconnect
 * @cookie: 'connection cookie'
 *
 * This attempts to break the connection if and only if 'cookie' matches
 * the current transport 'connection cookie'. It ensures that we don't
 * try to break the connection more than once when we need to retransmit
 * a batch of RPC requests.
 *
 */
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
	/* Don't race with the test_bit() in xprt_clear_locked() */
	spin_lock_bh(&xprt->transport_lock);
	if (cookie != xprt->connect_cookie)
		goto out;
	if (test_bit(XPRT_CLOSING, &xprt->state))
		goto out;
	set_bit(XPRT_CLOSE_WAIT, &xprt->state);
	/* Try to schedule an autoclose RPC call */
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
		queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	xprt_wake_pending_tasks(xprt, -EAGAIN);
out:
	spin_unlock_bh(&xprt->transport_lock);
}
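
/*
 * Sketch of the cookie handshake described above: a caller wanting to
 * retransmit a batch snapshots the cookie that was current when its
 * request was sent, so only the first of N concurrent requests
 * actually breaks the connection:
 *
 *	unsigned int cookie = req->rq_connect_cookie;
 *	...
 *	xprt_conditional_disconnect(xprt, cookie);
 *	// a no-op if another request already bumped connect_cookie
 */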

static bool
xprt_has_timer(const struct rpc_xprt *xprt)
{
	return xprt->idle_timeout != 0;
}

static void
xprt_schedule_autodisconnect(struct rpc_xprt *xprt)
	__must_hold(&xprt->transport_lock)
{
	if (RB_EMPTY_ROOT(&xprt->recv_queue) && xprt_has_timer(xprt))
		mod_timer(&xprt->timer, xprt->last_used + xprt->idle_timeout);
}

static void
xprt_init_autodisconnect(struct timer_list *t)
{
	struct rpc_xprt *xprt = from_timer(xprt, t, timer);

	spin_lock(&xprt->transport_lock);
	if (!RB_EMPTY_ROOT(&xprt->recv_queue))
		goto out_abort;
	/* Reset xprt->last_used to avoid connect/autodisconnect cycling */
	xprt->last_used = jiffies;
	if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
		goto out_abort;
	spin_unlock(&xprt->transport_lock);
	queue_work(xprtiod_workqueue, &xprt->task_cleanup);
	return;
out_abort:
	spin_unlock(&xprt->transport_lock);
}

bool xprt_lock_connect(struct rpc_xprt *xprt,
		struct rpc_task *task,
		void *cookie)
{
	bool ret = false;

	spin_lock_bh(&xprt->transport_lock);
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	if (xprt->snd_task != task)
		goto out;
	xprt->snd_task = cookie;
	ret = true;
out:
	spin_unlock_bh(&xprt->transport_lock);
	return ret;
}

void xprt_unlock_connect(struct rpc_xprt *xprt, void *cookie)
{
	spin_lock_bh(&xprt->transport_lock);
	if (xprt->snd_task != cookie)
		goto out;
	if (!test_bit(XPRT_LOCKED, &xprt->state))
		goto out;
	xprt->snd_task = NULL;
	xprt->ops->release_xprt(xprt, NULL);
	xprt_schedule_autodisconnect(xprt);
out:
	spin_unlock_bh(&xprt->transport_lock);
	wake_up_bit(&xprt->state, XPRT_LOCKED);
}

/**
 * xprt_connect - schedule a transport connect operation
 * @task: RPC task that is requesting the connect
 *
 */
void xprt_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u xprt_connect xprt %p %s connected\n", task->tk_pid,
			xprt, (xprt_connected(xprt) ? "is" : "is not"));

	if (!xprt_bound(xprt)) {
		task->tk_status = -EAGAIN;
		return;
	}
	if (!xprt_lock_write(xprt, task))
		return;

	if (test_and_clear_bit(XPRT_CLOSE_WAIT, &xprt->state))
		xprt->ops->close(xprt);

	if (!xprt_connected(xprt)) {
		task->tk_rqstp->rq_connect_cookie = xprt->connect_cookie;
		rpc_sleep_on_timeout(&xprt->pending, task, NULL,
				jiffies + task->tk_rqstp->rq_timeout);

		if (test_bit(XPRT_CLOSING, &xprt->state))
			return;
		if (xprt_test_and_set_connecting(xprt))
			return;
		/* Race breaker */
		if (!xprt_connected(xprt)) {
			xprt->stat.connect_start = jiffies;
			xprt->ops->connect(xprt, task);
		} else {
			xprt_clear_connecting(xprt);
			task->tk_status = 0;
			rpc_wake_up_queued_task(&xprt->pending, task);
		}
	}
	xprt_release_write(xprt, task);
}

enum xprt_xid_rb_cmp {
	XID_RB_EQUAL,
	XID_RB_LEFT,
	XID_RB_RIGHT,
};
static enum xprt_xid_rb_cmp
xprt_xid_cmp(__be32 xid1, __be32 xid2)
{
	if (xid1 == xid2)
		return XID_RB_EQUAL;
	if ((__force u32)xid1 < (__force u32)xid2)
		return XID_RB_LEFT;
	return XID_RB_RIGHT;
}

static struct rpc_rqst *
xprt_request_rb_find(struct rpc_xprt *xprt, __be32 xid)
{
	struct rb_node *n = xprt->recv_queue.rb_node;
	struct rpc_rqst *req;

	while (n != NULL) {
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(xid, req->rq_xid)) {
		case XID_RB_LEFT:
			n = n->rb_left;
			break;
		case XID_RB_RIGHT:
			n = n->rb_right;
			break;
		case XID_RB_EQUAL:
			return req;
		}
	}
	return NULL;
}

static void
xprt_request_rb_insert(struct rpc_xprt *xprt, struct rpc_rqst *new)
{
	struct rb_node **p = &xprt->recv_queue.rb_node;
	struct rb_node *n = NULL;
	struct rpc_rqst *req;

	while (*p != NULL) {
		n = *p;
		req = rb_entry(n, struct rpc_rqst, rq_recv);
		switch (xprt_xid_cmp(new->rq_xid, req->rq_xid)) {
		case XID_RB_LEFT:
			p = &n->rb_left;
			break;
		case XID_RB_RIGHT:
			p = &n->rb_right;
			break;
		case XID_RB_EQUAL:
			WARN_ON_ONCE(new != req);
			return;
		}
	}
	rb_link_node(&new->rq_recv, n, p);
	rb_insert_color(&new->rq_recv, &xprt->recv_queue);
}

static void
xprt_request_rb_remove(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	rb_erase(&req->rq_recv, &xprt->recv_queue);
}

/**
 * xprt_lookup_rqst - find an RPC request corresponding to an XID
 * @xprt: transport on which the original request was transmitted
 * @xid: RPC XID of incoming reply
 *
 * Caller holds xprt->queue_lock.
 */
struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
{
	struct rpc_rqst *entry;

	entry = xprt_request_rb_find(xprt, xid);
	if (entry != NULL) {
		trace_xprt_lookup_rqst(xprt, xid, 0);
		entry->rq_rtt = ktime_sub(ktime_get(), entry->rq_xtime);
		return entry;
	}

	dprintk("RPC: xprt_lookup_rqst did not find xid %08x\n",
			ntohl(xid));
	trace_xprt_lookup_rqst(xprt, xid, -ENOENT);
	xprt->stat.bad_xids++;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);

static bool
xprt_is_pinned_rqst(struct rpc_rqst *req)
{
	return atomic_read(&req->rq_pin) != 0;
}

/**
 * xprt_pin_rqst - Pin a request on the transport receive list
 * @req: Request to pin
 *
 * Caller must ensure this is atomic with the call to xprt_lookup_rqst()
 * so should be holding the xprt receive lock.
 */
void xprt_pin_rqst(struct rpc_rqst *req)
{
	atomic_inc(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);

/**
 * xprt_unpin_rqst - Unpin a request on the transport receive list
 * @req: Request to unpin
 *
 * Caller should be holding the xprt receive lock.
 */
void xprt_unpin_rqst(struct rpc_rqst *req)
{
	if (!test_bit(RPC_TASK_MSG_PIN_WAIT, &req->rq_task->tk_runstate)) {
		atomic_dec(&req->rq_pin);
		return;
	}
	if (atomic_dec_and_test(&req->rq_pin))
		wake_up_var(&req->rq_pin);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
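
/*
 * Sketch of the lookup/pin pattern the comments above imply for a
 * transport receive path (compare the UDP receive code in xprtsock.c);
 * the copy step stands in for transport-specific data movement:
 *
 *	spin_lock(&xprt->queue_lock);
 *	req = xprt_lookup_rqst(xprt, xid);
 *	if (req) {
 *		xprt_pin_rqst(req);	// keeps req valid across unlock
 *		spin_unlock(&xprt->queue_lock);
 *		<copy reply into req->rq_private_buf>
 *		spin_lock(&xprt->queue_lock);
 *		xprt_complete_rqst(req->rq_task, copied);
 *		xprt_unpin_rqst(req);
 *	}
 *	spin_unlock(&xprt->queue_lock);
 */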

static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
{
	wait_var_event(&req->rq_pin, !xprt_is_pinned_rqst(req));
}

static bool
xprt_request_data_received(struct rpc_task *task)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) != 0;
}

static bool
xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) &&
		READ_ONCE(task->tk_rqstp->rq_reply_bytes_recvd) == 0;
}

/**
 * xprt_request_enqueue_receive - Add a request to the receive queue
 * @task: RPC task
 *
 */
void
xprt_request_enqueue_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!xprt_request_need_enqueue_receive(task, req))
		return;
	spin_lock(&xprt->queue_lock);

	/* Update the softirq receive buffer */
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
			sizeof(req->rq_private_buf));

	/* Add request to the receive list */
	xprt_request_rb_insert(xprt, req);
	set_bit(RPC_TASK_NEED_RECV, &task->tk_runstate);
	spin_unlock(&xprt->queue_lock);

	xprt_reset_majortimeo(req);
	/* Turn off autodisconnect */
	del_singleshot_timer_sync(&xprt->timer);
}

/**
 * xprt_request_dequeue_receive_locked - Remove a request from the receive queue
 * @task: RPC task
 *
 * Caller must hold xprt->queue_lock.
 */
static void
xprt_request_dequeue_receive_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (test_and_clear_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		xprt_request_rb_remove(req->rq_xprt, req);
}

/**
 * xprt_update_rtt - Update RPC RTT statistics
 * @task: RPC request that recently completed
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_update_rtt(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_rtt *rtt = task->tk_client->cl_rtt;
	unsigned int timer = task->tk_msg.rpc_proc->p_timer;
	long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt));

	if (timer) {
		if (req->rq_ntrans == 1)
			rpc_update_rtt(rtt, timer, m);
		rpc_set_timeo(rtt, timer, req->rq_ntrans - 1);
	}
}
EXPORT_SYMBOL_GPL(xprt_update_rtt);

/**
 * xprt_complete_rqst - called when reply processing is complete
 * @task: RPC request that recently completed
 * @copied: actual number of bytes received from the transport
 *
 * Caller holds xprt->queue_lock.
 */
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
			task->tk_pid, ntohl(req->rq_xid), copied);
	trace_xprt_complete_rqst(xprt, req->rq_xid, copied);

	xprt->stat.recvs++;

	req->rq_private_buf.len = copied;
	/* Ensure all writes are done before we update */
	/* req->rq_reply_bytes_recvd */
	smp_wmb();
	req->rq_reply_bytes_recvd = copied;
	xprt_request_dequeue_receive_locked(task);
	rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);

static void xprt_timer(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (task->tk_status != -ETIMEDOUT)
		return;

	trace_xprt_timer(xprt, req->rq_xid, task->tk_status);
	if (!req->rq_reply_bytes_recvd) {
		if (xprt->ops->timer)
			xprt->ops->timer(xprt, task);
	} else
		task->tk_status = 0;
}

/**
 * xprt_wait_for_reply_request_def - wait for reply
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout based on the transport's
 * default timeout parameters. Used by transports that don't adjust
 * the retransmit timeout based on round-trip time estimation,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_def(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + req->rq_timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_def);

/**
 * xprt_wait_for_reply_request_rtt - wait for reply using RTT estimator
 * @task: pointer to rpc_task
 *
 * Set a request's retransmit timeout using the RTT estimator,
 * and put the task to sleep on the pending queue.
 */
void xprt_wait_for_reply_request_rtt(struct rpc_task *task)
{
	int timer = task->tk_msg.rpc_proc->p_timer;
	struct rpc_clnt *clnt = task->tk_client;
	struct rpc_rtt *rtt = clnt->cl_rtt;
	struct rpc_rqst *req = task->tk_rqstp;
	unsigned long max_timeout = clnt->cl_timeout->to_maxval;
	unsigned long timeout;

	timeout = rpc_calc_rto(rtt, timer);
	timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries;
	if (timeout > max_timeout || timeout == 0)
		timeout = max_timeout;
	rpc_sleep_on_timeout(&req->rq_xprt->pending, task, xprt_timer,
			jiffies + timeout);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_reply_request_rtt);
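
/*
 * Numeric example for the estimator above: rpc_calc_rto() returns a
 * smoothed RTO, which is then doubled once for each prior minor
 * timeout recorded for this procedure class (rpc_ntimeo()) and once
 * for each retransmission of this request. An estimate of HZ/4 with
 * rq_retries == 2 therefore sleeps (HZ/4) << 2 = 1 second, clamped to
 * the client's to_maxval.
 */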

/**
 * xprt_request_wait_receive - wait for the reply to an RPC request
 * @task: RPC task about to send a request
 *
 */
void xprt_request_wait_receive(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (!test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate))
		return;
	/*
	 * Sleep on the pending queue if we're expecting a reply.
	 * The spinlock ensures atomicity between the test of
	 * req->rq_reply_bytes_recvd, and the call to rpc_sleep_on().
	 */
	spin_lock(&xprt->queue_lock);
	if (test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate)) {
		xprt->ops->wait_for_reply_request(task);
		/*
		 * Send an extra queue wakeup call if the
		 * connection was dropped in case the call to
		 * rpc_sleep_on() raced.
		 */
		if (xprt_request_retransmit_after_disconnect(task))
			rpc_wake_up_queued_task_set_status(&xprt->pending,
					task, -ENOTCONN);
	}
	spin_unlock(&xprt->queue_lock);
}

static bool
xprt_request_need_enqueue_transmit(struct rpc_task *task, struct rpc_rqst *req)
{
	return !test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
}

/**
 * xprt_request_enqueue_transmit - queue a task for transmission
 * @task: pointer to rpc_task
 *
 * Add a task to the transmission queue.
 */
void
xprt_request_enqueue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *pos, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt_request_need_enqueue_transmit(task, req)) {
		req->rq_bytes_sent = 0;
		spin_lock(&xprt->queue_lock);
		/*
		 * Requests that carry congestion control credits are added
		 * to the head of the list to avoid starvation issues.
		 */
		if (req->rq_cong) {
			xprt_clear_congestion_window_wait(xprt);
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong)
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 1);
				goto out;
			}
		} else if (RPC_IS_SWAPPER(task)) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_cong || pos->rq_bytes_sent)
					continue;
				if (RPC_IS_SWAPPER(pos->rq_task))
					continue;
				/* Note: req is added _before_ pos */
				list_add_tail(&req->rq_xmit, &pos->rq_xmit);
				INIT_LIST_HEAD(&req->rq_xmit2);
				trace_xprt_enq_xmit(task, 2);
				goto out;
			}
		} else if (!req->rq_seqno) {
			list_for_each_entry(pos, &xprt->xmit_queue, rq_xmit) {
				if (pos->rq_task->tk_owner != task->tk_owner)
					continue;
				list_add_tail(&req->rq_xmit2, &pos->rq_xmit2);
				INIT_LIST_HEAD(&req->rq_xmit);
				trace_xprt_enq_xmit(task, 3);
				goto out;
			}
		}
		list_add_tail(&req->rq_xmit, &xprt->xmit_queue);
		INIT_LIST_HEAD(&req->rq_xmit2);
		trace_xprt_enq_xmit(task, 4);
out:
		set_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate);
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_request_dequeue_transmit_locked - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 * Caller must hold xprt->queue_lock
 */
static void
xprt_request_dequeue_transmit_locked(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
		return;
	if (!list_empty(&req->rq_xmit)) {
		list_del(&req->rq_xmit);
		if (!list_empty(&req->rq_xmit2)) {
			struct rpc_rqst *next = list_first_entry(&req->rq_xmit2,
					struct rpc_rqst, rq_xmit2);
			list_del(&req->rq_xmit2);
			list_add_tail(&next->rq_xmit, &next->rq_xprt->xmit_queue);
		}
	} else
		list_del(&req->rq_xmit2);
}

/**
 * xprt_request_dequeue_transmit - remove a task from the transmission queue
 * @task: pointer to rpc_task
 *
 * Remove a task from the transmission queue
 */
static void
xprt_request_dequeue_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	spin_lock(&xprt->queue_lock);
	xprt_request_dequeue_transmit_locked(task);
	spin_unlock(&xprt->queue_lock);
}

/**
 * xprt_request_prepare - prepare an encoded request for transport
 * @req: pointer to rpc_rqst
 *
 * Calls into the transport layer to do whatever is needed to prepare
 * the request for transmission or receive.
 */
void
xprt_request_prepare(struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (xprt->ops->prepare_request)
		xprt->ops->prepare_request(req);
}

/**
 * xprt_request_need_retransmit - Test if a task needs retransmission
 * @task: pointer to rpc_task
 *
 * Test for whether a connection breakage requires the task to retransmit
 */
bool
xprt_request_need_retransmit(struct rpc_task *task)
{
	return xprt_request_retransmit_after_disconnect(task);
}

/**
 * xprt_prepare_transmit - reserve the transport before sending a request
 * @task: RPC task about to send a request
 *
 */
bool xprt_prepare_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;

	dprintk("RPC: %5u xprt_prepare_transmit\n", task->tk_pid);

	if (!xprt_lock_write(xprt, task)) {
		/* Race breaker: someone may have transmitted us */
		if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			rpc_wake_up_queued_task_set_status(&xprt->sending,
					task, 0);
		return false;
	}
	return true;
}

void xprt_end_transmit(struct rpc_task *task)
{
	xprt_release_write(task->tk_rqstp->rq_xprt, task);
}

/**
 * xprt_request_transmit - send an RPC request on a transport
 * @req: pointer to request to transmit
 * @snd_task: RPC task that owns the transport lock
 *
 * This performs the transmission of a single request.
 * Note that if the request is not the same as snd_task, then it
 * does need to be pinned.
 * Returns '0' on success.
 */
static int
xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task)
{
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_task *task = req->rq_task;
	unsigned int connect_cookie;
	int is_retrans = RPC_WAS_SENT(task);
	int status;

	if (!req->rq_bytes_sent) {
		if (xprt_request_data_received(task)) {
			status = 0;
			goto out_dequeue;
		}
		/* Verify that our message lies in the RPCSEC_GSS window */
		if (rpcauth_xmit_need_reencode(task)) {
			status = -EBADMSG;
			goto out_dequeue;
		}
		if (task->tk_ops->rpc_call_prepare_transmit) {
			task->tk_ops->rpc_call_prepare_transmit(task,
					task->tk_calldata);
			status = task->tk_status;
			if (status < 0)
				goto out_dequeue;
		}
		if (RPC_SIGNALLED(task)) {
			status = -ERESTARTSYS;
			goto out_dequeue;
		}
	}

	/*
	 * Update req->rq_ntrans before transmitting to avoid races with
	 * xprt_update_rtt(), which needs to know that it is recording a
	 * reply to the first transmission.
	 */
	req->rq_ntrans++;

	connect_cookie = xprt->connect_cookie;
	status = xprt->ops->send_request(req);
	if (status != 0) {
		req->rq_ntrans--;
		trace_xprt_transmit(req, status);
		return status;
	}

	if (is_retrans)
		task->tk_client->cl_stats->rpcretrans++;

	xprt_inject_disconnect(xprt);

	task->tk_flags |= RPC_TASK_SENT;
	spin_lock_bh(&xprt->transport_lock);

	xprt->stat.sends++;
	xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
	xprt->stat.bklog_u += xprt->backlog.qlen;
	xprt->stat.sending_u += xprt->sending.qlen;
	xprt->stat.pending_u += xprt->pending.qlen;
	spin_unlock_bh(&xprt->transport_lock);

	req->rq_connect_cookie = connect_cookie;
out_dequeue:
	trace_xprt_transmit(req, status);
	xprt_request_dequeue_transmit(task);
	rpc_wake_up_queued_task_set_status(&xprt->sending, task, status);
	return status;
}

/**
 * xprt_transmit - send an RPC request on a transport
 * @task: controlling RPC task
 *
 * Attempts to drain the transmit queue. On exit, either the transport
 * signalled an error that needs to be handled before transmission can
 * resume, or @task finished transmitting, and detected that it already
 * received a reply.
 */
void
xprt_transmit(struct rpc_task *task)
{
	struct rpc_rqst *next, *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	int status;

	spin_lock(&xprt->queue_lock);
	while (!list_empty(&xprt->xmit_queue)) {
		next = list_first_entry(&xprt->xmit_queue,
				struct rpc_rqst, rq_xmit);
		xprt_pin_rqst(next);
		spin_unlock(&xprt->queue_lock);
		status = xprt_request_transmit(next, task);
		if (status == -EBADMSG && next != req)
			status = 0;
		cond_resched();
		spin_lock(&xprt->queue_lock);
		xprt_unpin_rqst(next);
		if (status == 0) {
			if (!xprt_request_data_received(task) ||
			    test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
				continue;
		} else if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
			task->tk_status = status;
		break;
	}
	spin_unlock(&xprt->queue_lock);
}

static void xprt_add_backlog(struct rpc_xprt *xprt, struct rpc_task *task)
{
	set_bit(XPRT_CONGESTED, &xprt->state);
	rpc_sleep_on(&xprt->backlog, task, NULL);
}

static void xprt_wake_up_backlog(struct rpc_xprt *xprt)
{
	if (rpc_wake_up_next(&xprt->backlog) == NULL)
		clear_bit(XPRT_CONGESTED, &xprt->state);
}

static bool xprt_throttle_congested(struct rpc_xprt *xprt, struct rpc_task *task)
{
	bool ret = false;

	if (!test_bit(XPRT_CONGESTED, &xprt->state))
		goto out;
	spin_lock(&xprt->reserve_lock);
	if (test_bit(XPRT_CONGESTED, &xprt->state)) {
		rpc_sleep_on(&xprt->backlog, task, NULL);
		ret = true;
	}
	spin_unlock(&xprt->reserve_lock);
out:
	return ret;
}

static struct rpc_rqst *xprt_dynamic_alloc_slot(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req = ERR_PTR(-EAGAIN);

	if (xprt->num_reqs >= xprt->max_reqs)
		goto out;
	xprt->num_reqs++;
	spin_unlock(&xprt->reserve_lock);
	req = kzalloc(sizeof(struct rpc_rqst), GFP_NOFS);
	spin_lock(&xprt->reserve_lock);
	if (req != NULL)
		goto out;
	xprt->num_reqs--;
	req = ERR_PTR(-ENOMEM);
out:
	return req;
}

static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	if (xprt->num_reqs > xprt->min_reqs) {
		--xprt->num_reqs;
		kfree(req);
		return true;
	}
	return false;
}

void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
{
	struct rpc_rqst *req;

	spin_lock(&xprt->reserve_lock);
	if (!list_empty(&xprt->free)) {
		req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		goto out_init_req;
	}
	req = xprt_dynamic_alloc_slot(xprt);
	if (!IS_ERR(req))
		goto out_init_req;
	switch (PTR_ERR(req)) {
	case -ENOMEM:
		dprintk("RPC: dynamic allocation of request slot "
				"failed! Retrying\n");
		task->tk_status = -ENOMEM;
		break;
	case -EAGAIN:
		xprt_add_backlog(xprt, task);
		dprintk("RPC: waiting for request slot\n");
		/* fall through */
	default:
		task->tk_status = -EAGAIN;
	}
	spin_unlock(&xprt->reserve_lock);
	return;
out_init_req:
	xprt->stat.max_slots = max_t(unsigned int, xprt->stat.max_slots,
				     xprt->num_reqs);
	spin_unlock(&xprt->reserve_lock);

	task->tk_status = 0;
	task->tk_rqstp = req;
}
EXPORT_SYMBOL_GPL(xprt_alloc_slot);

void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
{
	spin_lock(&xprt->reserve_lock);
	if (!xprt_dynamic_free_slot(xprt, req)) {
		memset(req, 0, sizeof(*req));	/* mark unused */
		list_add(&req->rq_list, &xprt->free);
	}
	xprt_wake_up_backlog(xprt);
	spin_unlock(&xprt->reserve_lock);
}
EXPORT_SYMBOL_GPL(xprt_free_slot);

static void xprt_free_all_slots(struct rpc_xprt *xprt)
{
	struct rpc_rqst *req;

	while (!list_empty(&xprt->free)) {
		req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list);
		list_del(&req->rq_list);
		kfree(req);
	}
}

struct rpc_xprt *xprt_alloc(struct net *net, size_t size,
		unsigned int num_prealloc,
		unsigned int max_alloc)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req;
	int i;

	xprt = kzalloc(size, GFP_KERNEL);
	if (xprt == NULL)
		goto out;

	xprt_init(xprt, net);

	for (i = 0; i < num_prealloc; i++) {
		req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
		if (!req)
			goto out_free;
		list_add(&req->rq_list, &xprt->free);
	}
	if (max_alloc > num_prealloc)
		xprt->max_reqs = max_alloc;
	else
		xprt->max_reqs = num_prealloc;
	xprt->min_reqs = num_prealloc;
	xprt->num_reqs = num_prealloc;

	return xprt;

out_free:
	xprt_free(xprt);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_alloc);
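
/*
 * Example: a transport setup routine embeds struct rpc_xprt in its own
 * private structure and sizes the slot table here. The "foo" names are
 * hypothetical; see xprtsock.c for the real callers:
 *
 *	xprt = xprt_alloc(net, sizeof(struct foo_xprt),
 *			  foo_slot_table_entries, FOO_MAX_SLOT_TABLE);
 *	if (xprt == NULL)
 *		return ERR_PTR(-ENOMEM);
 */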

void xprt_free(struct rpc_xprt *xprt)
{
	put_net(xprt->xprt_net);
	xprt_free_all_slots(xprt);
	kfree_rcu(xprt, rcu);
}
EXPORT_SYMBOL_GPL(xprt_free);

static void
xprt_init_connect_cookie(struct rpc_rqst *req, struct rpc_xprt *xprt)
{
	req->rq_connect_cookie = xprt_connect_cookie(xprt) - 1;
}

static __be32
xprt_alloc_xid(struct rpc_xprt *xprt)
{
	__be32 xid;

	spin_lock(&xprt->reserve_lock);
	xid = (__force __be32)xprt->xid++;
	spin_unlock(&xprt->reserve_lock);
	return xid;
}

static void
xprt_init_xid(struct rpc_xprt *xprt)
{
	xprt->xid = prandom_u32();
}

static void
xprt_request_init(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	req->rq_timeout = task->tk_client->cl_timeout->to_initval;
	req->rq_task = task;
	req->rq_xprt = xprt;
	req->rq_buffer = NULL;
	req->rq_xid = xprt_alloc_xid(xprt);
	xprt_init_connect_cookie(req, xprt);
	req->rq_snd_buf.len = 0;
	req->rq_snd_buf.buflen = 0;
	req->rq_rcv_buf.len = 0;
	req->rq_rcv_buf.buflen = 0;
	req->rq_snd_buf.bvec = NULL;
	req->rq_rcv_buf.bvec = NULL;
	req->rq_release_snd_buf = NULL;
	xprt_reset_majortimeo(req);
	dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid,
			req, ntohl(req->rq_xid));
}

static void
xprt_do_reserve(struct rpc_xprt *xprt, struct rpc_task *task)
{
	xprt->ops->alloc_slot(xprt, task);
	if (task->tk_rqstp != NULL)
		xprt_request_init(task);
}

/**
 * xprt_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If the transport is marked as being congested, or if no more
 * slots are available, place the task on the transport's
 * backlog queue.
 */
void xprt_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	if (!xprt_throttle_congested(xprt, task))
		xprt_do_reserve(xprt, task);
}

/**
 * xprt_retry_reserve - allocate an RPC request slot
 * @task: RPC task requesting a slot allocation
 *
 * If no more slots are available, place the task on the transport's
 * backlog queue.
 * Note that the only difference with xprt_reserve is that we now
 * ignore the value of the XPRT_CONGESTED flag.
 */
void xprt_retry_reserve(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_xprt;

	task->tk_status = 0;
	if (task->tk_rqstp != NULL)
		return;

	task->tk_status = -EAGAIN;
	xprt_do_reserve(xprt, task);
}

static void
xprt_request_dequeue_all(struct rpc_task *task, struct rpc_rqst *req)
{
	struct rpc_xprt *xprt = req->rq_xprt;

	if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate) ||
	    test_bit(RPC_TASK_NEED_RECV, &task->tk_runstate) ||
	    xprt_is_pinned_rqst(req)) {
		spin_lock(&xprt->queue_lock);
		xprt_request_dequeue_transmit_locked(task);
		xprt_request_dequeue_receive_locked(task);
		while (xprt_is_pinned_rqst(req)) {
			set_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
			spin_unlock(&xprt->queue_lock);
			xprt_wait_on_pinned_rqst(req);
			spin_lock(&xprt->queue_lock);
			clear_bit(RPC_TASK_MSG_PIN_WAIT, &task->tk_runstate);
		}
		spin_unlock(&xprt->queue_lock);
	}
}

/**
 * xprt_release - release an RPC request slot
 * @task: task which is finished with the slot
 *
 */
void xprt_release(struct rpc_task *task)
{
	struct rpc_xprt *xprt;
	struct rpc_rqst *req = task->tk_rqstp;

	if (req == NULL) {
		if (task->tk_client) {
			xprt = task->tk_xprt;
			xprt_release_write(xprt, task);
		}
		return;
	}

	xprt = req->rq_xprt;
	if (task->tk_ops->rpc_count_stats != NULL)
		task->tk_ops->rpc_count_stats(task, task->tk_calldata);
	else if (task->tk_client)
		rpc_count_iostats(task, task->tk_client->cl_metrics);
	xprt_request_dequeue_all(task, req);
	spin_lock_bh(&xprt->transport_lock);
	xprt->ops->release_xprt(xprt, task);
	if (xprt->ops->release_request)
		xprt->ops->release_request(task);
	xprt->last_used = jiffies;
	xprt_schedule_autodisconnect(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	xdr_free_bvec(&req->rq_rcv_buf);
	xdr_free_bvec(&req->rq_snd_buf);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
	task->tk_rqstp = NULL;
	if (req->rq_release_snd_buf)
		req->rq_release_snd_buf(req);

	dprintk("RPC: %5u release request %p\n", task->tk_pid, req);
	if (likely(!bc_prealloc(req)))
		xprt->ops->free_slot(xprt, req);
	else
		xprt_free_bc_request(req);
}

#ifdef CONFIG_SUNRPC_BACKCHANNEL
void
xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task)
{
	struct xdr_buf *xbufp = &req->rq_snd_buf;

	task->tk_rqstp = req;
	req->rq_task = task;
	xprt_init_connect_cookie(req, req->rq_xprt);
	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
		xbufp->tail[0].iov_len;
}
#endif

static void xprt_init(struct rpc_xprt *xprt, struct net *net)
{
	kref_init(&xprt->kref);

	spin_lock_init(&xprt->transport_lock);
	spin_lock_init(&xprt->reserve_lock);
	spin_lock_init(&xprt->queue_lock);

	INIT_LIST_HEAD(&xprt->free);
	xprt->recv_queue = RB_ROOT;
	INIT_LIST_HEAD(&xprt->xmit_queue);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
	spin_lock_init(&xprt->bc_pa_lock);
	INIT_LIST_HEAD(&xprt->bc_pa_list);
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
	INIT_LIST_HEAD(&xprt->xprt_switch);

	xprt->last_used = jiffies;
	xprt->cwnd = RPC_INITCWND;
	xprt->bind_index = 0;

	rpc_init_wait_queue(&xprt->binding, "xprt_binding");
	rpc_init_wait_queue(&xprt->pending, "xprt_pending");
	rpc_init_wait_queue(&xprt->sending, "xprt_sending");
	rpc_init_priority_wait_queue(&xprt->backlog, "xprt_backlog");

	xprt_init_xid(xprt);

	xprt->xprt_net = get_net(net);
}

/**
 * xprt_create_transport - create an RPC transport
 * @args: rpc transport creation arguments
 *
 */
struct rpc_xprt *xprt_create_transport(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct xprt_class *t;

	spin_lock(&xprt_list_lock);
	list_for_each_entry(t, &xprt_list, list) {
		if (t->ident == args->ident) {
			spin_unlock(&xprt_list_lock);
			goto found;
		}
	}
	spin_unlock(&xprt_list_lock);
	dprintk("RPC: transport (%d) not supported\n", args->ident);
	return ERR_PTR(-EIO);

found:
	xprt = t->setup(args);
	if (IS_ERR(xprt)) {
		dprintk("RPC: xprt_create_transport: failed, %ld\n",
				-PTR_ERR(xprt));
		goto out;
	}
	if (args->flags & XPRT_CREATE_NO_IDLE_TIMEOUT)
		xprt->idle_timeout = 0;
	INIT_WORK(&xprt->task_cleanup, xprt_autoclose);
	if (xprt_has_timer(xprt))
		timer_setup(&xprt->timer,
			    xprt_init_autodisconnect,
			    0);
	else
		timer_setup(&xprt->timer, NULL, 0);

	if (strlen(args->servername) > RPC_MAXNETNAMELEN) {
		xprt_destroy(xprt);
		return ERR_PTR(-EINVAL);
	}
	xprt->servername = kstrdup(args->servername, GFP_KERNEL);
	if (xprt->servername == NULL) {
		xprt_destroy(xprt);
		return ERR_PTR(-ENOMEM);
	}

	rpc_xprt_debugfs_register(xprt);

	dprintk("RPC: created transport %p with %u slots\n", xprt,
			xprt->max_reqs);
out:
	return xprt;
}

static void xprt_destroy_cb(struct work_struct *work)
{
	struct rpc_xprt *xprt =
		container_of(work, struct rpc_xprt, task_cleanup);

	rpc_xprt_debugfs_unregister(xprt);
	rpc_destroy_wait_queue(&xprt->binding);
	rpc_destroy_wait_queue(&xprt->pending);
	rpc_destroy_wait_queue(&xprt->sending);
	rpc_destroy_wait_queue(&xprt->backlog);
	kfree(xprt->servername);
	/*
	 * Tear down transport state and free the rpc_xprt
	 */
	xprt->ops->destroy(xprt);
}

/**
 * xprt_destroy - destroy an RPC transport, killing off all requests.
 * @xprt: transport to destroy
 *
 */
static void xprt_destroy(struct rpc_xprt *xprt)
{
	dprintk("RPC: destroying transport %p\n", xprt);

	/*
	 * Exclude transport connect/disconnect handlers and autoclose
	 */
	wait_on_bit_lock(&xprt->state, XPRT_LOCKED, TASK_UNINTERRUPTIBLE);

	del_timer_sync(&xprt->timer);

	/*
	 * Destroy sockets etc from the system workqueue so they can
	 * safely flush receive work running on rpciod.
	 */
	INIT_WORK(&xprt->task_cleanup, xprt_destroy_cb);
	schedule_work(&xprt->task_cleanup);
}

static void xprt_destroy_kref(struct kref *kref)
{
	xprt_destroy(container_of(kref, struct rpc_xprt, kref));
}

/**
 * xprt_get - return a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
struct rpc_xprt *xprt_get(struct rpc_xprt *xprt)
{
	if (xprt != NULL && kref_get_unless_zero(&xprt->kref))
		return xprt;
	return NULL;
}
EXPORT_SYMBOL_GPL(xprt_get);

/**
 * xprt_put - release a reference to an RPC transport.
 * @xprt: pointer to the transport
 *
 */
void xprt_put(struct rpc_xprt *xprt)
{
	if (xprt != NULL)
		kref_put(&xprt->kref, xprt_destroy_kref);
}
EXPORT_SYMBOL_GPL(xprt_put);
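
/*
 * Reference discipline sketch: code that caches an rpc_xprt pointer
 * beyond the caller's reference takes its own with xprt_get() and
 * drops it with xprt_put(); the final put schedules xprt_destroy()
 * through the kref release callback above:
 *
 *	xprt = xprt_get(candidate);
 *	if (xprt) {
 *		<use xprt>
 *		xprt_put(xprt);
 *	}
 */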