// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * Client connections can be in one of a number of cache states:
 *
 *  (1) INACTIVE - The connection is not held in any list and may not have been
 *      exposed to the world.  If it has been previously exposed, it was
 *      discarded from the idle list after expiring.
 *
 *  (2) WAITING - The connection is waiting for the number of client conns to
 *      drop below the maximum capacity.  Calls may be in progress upon it from
 *      when it was active and got culled.
 *
 *      The connection is on the rxnet->waiting_client_conns list which is kept
 *      in to-be-granted order.  Culled conns with waiters go to the back of
 *      the queue just like new conns.
 *
 *  (3) ACTIVE - The connection has at least one call in progress upon it, it
 *      may freely grant available channels to new calls and calls may be
 *      waiting on it for channels to become available.
 *
 *      The connection is on the rxnet->active_client_conns list which is kept
 *      in activation order for culling purposes.
 *
 *      rxnet->nr_active_client_conns is held incremented also.
 *
 *  (4) UPGRADE - As for ACTIVE, but only one call may be in progress and is
 *      being used to probe for service upgrade.
 *
 *  (5) CULLED - The connection got summarily culled to try and free up
 *      capacity.  Calls currently in progress on the connection are allowed to
 *      continue, but new calls will have to wait.  There can be no waiters in
 *      this state - the conn would have to go to the WAITING state instead.
 *
 *  (6) IDLE - The connection has no calls in progress upon it and must have
 *      been exposed to the world (ie. the EXPOSED flag must be set).  When it
 *      expires, the EXPOSED flag is cleared and the connection transitions to
 *      the INACTIVE state.
 *
 *      The connection is on the rxnet->idle_client_conns list which is kept in
 *      order of how soon they'll expire.
 *
 * There are flags of relevance to the cache:
 *
 *  (1) EXPOSED - The connection ID got exposed to the world.  If this flag is
 *      set, an extra ref is added to the connection preventing it from being
 *      reaped when it has no calls outstanding.  This flag is cleared and the
 *      ref dropped when a conn is discarded from the idle list.
 *
 *      This allows us to move terminal call state retransmission to the
 *      connection and to discard the call as soon as we think it is done
 *      with.  It also gives us a chance to reuse the connection.
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused.  This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations.  If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */
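
/* For reference, a sketch of the cache states above as an enum, using the
 * RXRPC_CONN_CLIENT_* names that appear throughout this file (the
 * authoritative definition lives in ar-internal.h):
 *
 *	enum rxrpc_conn_cache_state {
 *		RXRPC_CONN_CLIENT_INACTIVE,
 *		RXRPC_CONN_CLIENT_WAITING,
 *		RXRPC_CONN_CLIENT_ACTIVE,
 *		RXRPC_CONN_CLIENT_UPGRADE,
 *		RXRPC_CONN_CLIENT_CULLED,
 *		RXRPC_CONN_CLIENT_IDLE,
 *	};
 */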

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"

__read_mostly unsigned int rxrpc_max_client_connections = 1000;
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;
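
/* A sketch of runtime tuning, assuming the sysctl names exported by
 * net/rxrpc/sysctl.c for these limits (check that file before relying on
 * them):
 *
 *	echo 2000 > /proc/sys/net/rxrpc/max_client_conns
 *	echo 1800 > /proc/sys/net/rxrpc/reap_client_conns
 */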

/*
 * We use machine-unique IDs for our client connections.
 */
DEFINE_IDR(rxrpc_client_conn_ids);
static DEFINE_SPINLOCK(rxrpc_conn_id_lock);

static void rxrpc_cull_active_client_conns(struct rxrpc_net *);

/*
 * Get a connection ID and epoch for a client connection from the global pool.
 * The connection struct pointer is then recorded in the idr radix tree.  The
 * epoch doesn't change until the client is rebooted (or, at least, unless the
 * module is unloaded).
 */
static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn,
					  gfp_t gfp)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id;

	_enter("");

	idr_preload(gfp);
	spin_lock(&rxrpc_conn_id_lock);

	id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn,
			      1, 0x40000000, GFP_NOWAIT);
	if (id < 0)
		goto error;

	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();

	conn->proto.epoch = rxnet->epoch;
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	set_bit(RXRPC_CONN_HAS_IDR, &conn->flags);
	_leave(" [CID %x]", conn->proto.cid);
	return 0;

error:
	spin_unlock(&rxrpc_conn_id_lock);
	idr_preload_end();
	_leave(" = %d", id);
	return id;
}
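
/* A worked example of the CID layout, assuming RXRPC_CIDSHIFT is
 * ilog2(RXRPC_MAXCALLS), i.e. 2 for the four channels per connection: an IDR
 * id of 0x5 gives conn->proto.cid == 0x14, and the call on channel 2 of that
 * connection is then addressed as cid 0x16 (see
 * rxrpc_activate_one_channel()).
 */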

/*
 * Release a connection ID for a client connection from the global pool.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) {
		spin_lock(&rxrpc_conn_id_lock);
		idr_remove(&rxrpc_client_conn_ids,
			   conn->proto.cid >> RXRPC_CIDSHIFT);
		spin_unlock(&rxrpc_conn_id_lock);
	}
}

/*
 * Destroy the client connection ID tree.
 */
void rxrpc_destroy_client_conn_ids(void)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&rxrpc_client_conn_ids)) {
		idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, atomic_read(&conn->usage));
		}
		BUG();
	}

	idr_destroy(&rxrpc_client_conn_ids);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("");

	conn = rxrpc_alloc_connection(gfp);
	if (!conn) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&conn->usage, 1);
	if (cp->exclusive)
		__set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
	if (cp->upgrade)
		__set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);

	conn->params		= *cp;
	conn->out_clientflag	= RXRPC_CLIENT_INITIATED;
	conn->state		= RXRPC_CONN_CLIENT;
	conn->service_id	= cp->service_id;

	ret = rxrpc_get_client_connection_id(conn, gfp);
	if (ret < 0)
		goto error_0;

	ret = rxrpc_init_client_conn_security(conn);
	if (ret < 0)
		goto error_1;

	ret = conn->security->prime_packet_security(conn);
	if (ret < 0)
		goto error_2;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	/* We steal the caller's peer ref. */
	cp->peer = NULL;
	rxrpc_get_local(conn->params.local);
	key_get(conn->params.key);

	trace_rxrpc_conn(conn->debug_id, rxrpc_conn_new_client,
			 atomic_read(&conn->usage),
			 __builtin_return_address(0));
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	_leave(" = %p", conn);
	return conn;

error_2:
	conn->security->clear(conn);
error_1:
	rxrpc_put_client_connection_id(conn);
error_0:
	kfree(conn);
	_leave(" = %d", ret);
	return ERR_PTR(ret);
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	int id_cursor, id, distance, limit;

	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if (conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
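	/* E.g. with the default rxrpc_max_client_connections of 1000, the
	 * reuse window below works out at max(4 * 1000, 1024U) = 4000 IDs
	 * away from the allocation cursor in either direction.
	 */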
	id_cursor = idr_get_cursor(&rxrpc_client_conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max(rxrpc_max_client_connections * 4, 1024U);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Create or find a client connection to use for a call.
 *
 * If we return with a connection, the call will be on its waiting list.  It's
 * left to the caller to assign a channel and wake up the call.
 */
static int rxrpc_get_client_conn(struct rxrpc_sock *rx,
				 struct rxrpc_call *call,
				 struct rxrpc_conn_parameters *cp,
				 struct sockaddr_rxrpc *srx,
				 gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rxrpc_local *local = cp->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	int ret = -ENOMEM;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
	if (!cp->peer)
		goto error;

	call->cong_cwnd = cp->peer->cong_cwnd;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	/* If the connection is not meant to be exclusive, search the available
	 * connections to see if the connection we want to use already exists.
	 */
	if (!cp->exclusive) {
		_debug("search 1");
		spin_lock(&local->client_conns_lock);
		p = local->client_conns.rb_node;
		while (p) {
			conn = rb_entry(p, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)cp->X)
			diff = (cmp(peer) ?:
				cmp(key) ?:
				cmp(security_level) ?:
				cmp(upgrade));
#undef cmp
			if (diff < 0) {
				p = p->rb_left;
			} else if (diff > 0) {
				p = p->rb_right;
			} else {
				if (rxrpc_may_reuse_conn(conn) &&
				    rxrpc_get_connection_maybe(conn))
					goto found_extant_conn;
				/* The connection needs replacing.  It's better
				 * to effect that when we have something to
				 * replace it with so that we don't have to
				 * rebalance the tree twice.
				 */
				break;
			}
		}
		spin_unlock(&local->client_conns_lock);
	}

	/* There wasn't a connection yet or we need an exclusive connection.
	 * We need to create a candidate and then potentially redo the search
	 * in case we're racing with another thread also trying to connect on a
	 * shareable connection.
	 */
	_debug("new conn");
	candidate = rxrpc_alloc_client_connection(cp, gfp);
	if (IS_ERR(candidate)) {
		ret = PTR_ERR(candidate);
		goto error_peer;
	}

	/* Add the call to the new connection's waiting list in case we're
	 * going to have to wait for the connection to come live.  It's our
	 * connection, so we want first dibs on the channel slots.  We would
	 * normally have to take channel_lock but we do this before anyone else
	 * can see the connection.
	 */
	list_add(&call->chan_wait_link, &candidate->waiting_calls);

	if (cp->exclusive) {
		call->conn = candidate;
		call->security_ix = candidate->security_ix;
		call->service_id = candidate->service_id;
		_leave(" = 0 [exclusive %d]", candidate->debug_id);
		return 0;
	}

	/* Publish the new connection for userspace to find.  We need to redo
	 * the search before doing this lest we race with someone else adding a
	 * conflicting instance.
	 */
	_debug("search 2");
	spin_lock(&local->client_conns_lock);

	pp = &local->client_conns.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		conn = rb_entry(parent, struct rxrpc_connection, client_node);

#define cmp(X) ((long)conn->params.X - (long)candidate->params.X)
		diff = (cmp(peer) ?:
			cmp(key) ?:
			cmp(security_level) ?:
			cmp(upgrade));
#undef cmp
		if (diff < 0) {
			pp = &(*pp)->rb_left;
		} else if (diff > 0) {
			pp = &(*pp)->rb_right;
		} else {
			if (rxrpc_may_reuse_conn(conn) &&
			    rxrpc_get_connection_maybe(conn))
				goto found_extant_conn;
			/* The old connection is from an outdated epoch. */
			_debug("replace conn");
			clear_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags);
			rb_replace_node(&conn->client_node,
					&candidate->client_node,
					&local->client_conns);
			trace_rxrpc_client(conn, -1, rxrpc_client_replace);
			goto candidate_published;
		}
	}

	_debug("new conn");
	rb_link_node(&candidate->client_node, parent, pp);
	rb_insert_color(&candidate->client_node, &local->client_conns);

candidate_published:
	set_bit(RXRPC_CONN_IN_CLIENT_CONNS, &candidate->flags);
	call->conn = candidate;
	call->security_ix = candidate->security_ix;
	call->service_id = candidate->service_id;
	spin_unlock(&local->client_conns_lock);
	_leave(" = 0 [new %d]", candidate->debug_id);
	return 0;

	/* We come here if we found a suitable connection already in existence.
	 * Discard any candidate we may have allocated, and try to get a
	 * channel on this one.
	 */
found_extant_conn:
	_debug("found conn");
	spin_unlock(&local->client_conns_lock);

	if (candidate) {
		trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate);
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	spin_lock(&conn->channel_lock);
	call->conn = conn;
	call->security_ix = conn->security_ix;
	call->service_id = conn->service_id;
	list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
	spin_unlock(&conn->channel_lock);
	_leave(" = 0 [extant %d]", conn->debug_id);
	return 0;

error_peer:
	rxrpc_put_peer(cp->peer);
	cp->peer = NULL;
error:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Activate a connection.
 */
static void rxrpc_activate_conn(struct rxrpc_net *rxnet,
				struct rxrpc_connection *conn)
{
	if (test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_upgrade);
		conn->cache_state = RXRPC_CONN_CLIENT_UPGRADE;
	} else {
		trace_rxrpc_client(conn, -1, rxrpc_client_to_active);
		conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
	}
	rxnet->nr_active_client_conns++;
	list_move_tail(&conn->cache_link, &rxnet->active_client_conns);
}

/*
 * Attempt to animate a connection for a new call.
 *
 * If it's not exclusive, the connection is in the endpoint tree, and we're in
 * the conn's list of those waiting to grab a channel.  There is, however, a
 * limit on the number of live connections allowed at any one time, so we may
 * have to wait for capacity to become available.
 *
 * Note that a connection on the waiting queue might *also* have active
 * channels if it has been culled to make space and then re-requested by a new
 * call.
 */
static void rxrpc_animate_client_conn(struct rxrpc_net *rxnet,
				      struct rxrpc_connection *conn)
{
	unsigned int nr_conns;

	_enter("%d,%d", conn->debug_id, conn->cache_state);

	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE ||
	    conn->cache_state == RXRPC_CONN_CLIENT_UPGRADE)
		goto out;

	spin_lock(&rxnet->client_conn_cache_lock);

	nr_conns = rxnet->nr_client_conns;
	if (!test_and_set_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_count);
		rxnet->nr_client_conns = nr_conns + 1;
	}

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
	case RXRPC_CONN_CLIENT_UPGRADE:
	case RXRPC_CONN_CLIENT_WAITING:
		break;

	case RXRPC_CONN_CLIENT_INACTIVE:
	case RXRPC_CONN_CLIENT_CULLED:
	case RXRPC_CONN_CLIENT_IDLE:
		if (nr_conns >= rxrpc_max_client_connections)
			goto wait_for_capacity;
		goto activate_conn;

	default:
		BUG();
	}

out_unlock:
	spin_unlock(&rxnet->client_conn_cache_lock);
out:
	_leave(" [%d]", conn->cache_state);
	return;

activate_conn:
	_debug("activate");
	rxrpc_activate_conn(rxnet, conn);
	goto out_unlock;

wait_for_capacity:
	_debug("wait");
	trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
	conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
	list_move_tail(&conn->cache_link, &rxnet->waiting_client_conns);
	goto out_unlock;
}

/*
 * Deactivate a channel.
 */
static void rxrpc_deactivate_one_channel(struct rxrpc_connection *conn,
					 unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];

	rcu_assign_pointer(chan->call, NULL);
	conn->active_chans &= ~(1 << channel);
}

/*
 * Assign a channel to the call at the front of the queue and wake the call
 * up.  We don't increment the callNumber counter until this number has been
 * exposed to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_call *call = list_entry(conn->waiting_calls.next,
					     struct rxrpc_call, chan_wait_link);
	u32 call_id = chan->call_counter + 1;

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
	write_unlock_bh(&call->state_lock);

	rxrpc_see_call(call);
	list_del_init(&call->chan_wait_link);
	conn->active_chans |= 1 << channel;
	call->peer	= rxrpc_get_peer(conn->params.peer);
	call->cid	= conn->proto.cid | channel;
	call->call_id	= call_id;

	trace_rxrpc_connect_call(call);
	_net("CONNECT call %08x:%08x as call %d on conn %d",
	     call->cid, call->call_id, call->debug_id, conn->debug_id);

	/* Paired with the read barrier in rxrpc_wait_for_channel().  This
	 * orders cid and epoch in the connection wrt to call_id without the
	 * need to take the channel_lock.
	 *
	 * We provisionally assign a callNumber at this point, but we don't
	 * confirm it until the call is about to be exposed.
	 *
	 * TODO: Pair with a barrier in the data_ready handler when that looks
	 * at the call ID through a connection channel.
	 */
	smp_wmb();
	chan->call_id	= call_id;
	chan->call_debug_id = call->debug_id;
	rcu_assign_pointer(chan->call, call);
	wake_up(&call->waitq);
}

/*
 * Assign channels and callNumbers to waiting calls with channel_lock
 * held by caller.
 */
static void rxrpc_activate_channels_locked(struct rxrpc_connection *conn)
{
	u8 avail, mask;

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_ACTIVE:
		mask = RXRPC_ACTIVE_CHANS_MASK;
		break;
	case RXRPC_CONN_CLIENT_UPGRADE:
		mask = 0x01;
		break;
	default:
		return;
	}

	while (!list_empty(&conn->waiting_calls) &&
	       (avail = ~conn->active_chans,
		avail &= mask,
		avail != 0))
		rxrpc_activate_one_channel(conn, __ffs(avail));
}
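
/* A quick sketch of the bitmask arithmetic above: with channels 0 and 2 busy,
 * conn->active_chans is 0x5, so avail = ~0x5 & RXRPC_ACTIVE_CHANS_MASK
 * (assumed to be 0xf for the four channels) = 0xa, and __ffs(avail) selects
 * channel 1.
 */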

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_connection *conn)
{
	_enter("%d", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_activate_chans);

	if (conn->active_chans == RXRPC_ACTIVE_CHANS_MASK)
		return;

	spin_lock(&conn->channel_lock);
	rxrpc_activate_channels_locked(conn);
	spin_unlock(&conn->channel_lock);
	_leave("");
}

/*
 * Wait for a callNumber and a channel to be granted to a call.
 */
static int rxrpc_wait_for_channel(struct rxrpc_call *call, gfp_t gfp)
{
	int ret = 0;

	_enter("%d", call->debug_id);

	if (!call->call_id) {
		DECLARE_WAITQUEUE(myself, current);

		if (!gfpflags_allow_blocking(gfp)) {
			ret = -EAGAIN;
			goto out;
		}

		add_wait_queue_exclusive(&call->waitq, &myself);
		for (;;) {
			if (test_bit(RXRPC_CALL_IS_INTR, &call->flags))
				set_current_state(TASK_INTERRUPTIBLE);
			else
				set_current_state(TASK_UNINTERRUPTIBLE);
			if (call->call_id)
				break;
			if (test_bit(RXRPC_CALL_IS_INTR, &call->flags) &&
			    signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		remove_wait_queue(&call->waitq, &myself);
		__set_current_state(TASK_RUNNING);
	}

	/* Paired with the write barrier in rxrpc_activate_one_channel(). */
	smp_rmb();

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_call *call,
		       struct rxrpc_conn_parameters *cp,
		       struct sockaddr_rxrpc *srx,
		       gfp_t gfp)
{
	struct rxrpc_net *rxnet = cp->local->rxnet;
	int ret;

	_enter("{%d,%lx},", call->debug_id, call->user_call_ID);

	rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper);
	rxrpc_cull_active_client_conns(rxnet);

	ret = rxrpc_get_client_conn(rx, call, cp, srx, gfp);
	if (ret < 0)
		goto out;

	rxrpc_animate_client_conn(rxnet, call->conn);
	rxrpc_activate_channels(call->conn);

	ret = rxrpc_wait_for_channel(call, gfp);
	if (ret < 0) {
		trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed);
		rxrpc_disconnect_client_call(call);
		goto out;
	}

	spin_lock_bh(&call->conn->params.peer->lock);
	hlist_add_head_rcu(&call->error_link,
			   &call->conn->params.peer->error_targets);
	spin_unlock_bh(&call->conn->params.peer->lock);

out:
	_leave(" = %d", ret);
	return ret;
}

/*
 * Note that a connection is about to be exposed to the world.  Once it is
 * exposed, we maintain an extra ref on it that stops it from being summarily
 * discarded before it's (a) had a chance to deal with retransmission and (b)
 * had a chance at re-use (the per-connection security negotiation is
 * expensive).
 */
static void rxrpc_expose_client_conn(struct rxrpc_connection *conn,
				     unsigned int channel)
{
	if (!test_and_set_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);
		rxrpc_get_connection(conn);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used.  If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		rxrpc_expose_client_conn(conn, channel);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet)
{
	unsigned long now = jiffies;
	unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;

	if (rxnet->live)
		timer_reduce(&rxnet->client_conn_reap_timer, reap_at);
}
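
/* Note that timer_reduce() only ever brings an expiry forward: if the reap
 * timer is already due sooner than now + rxrpc_conn_idle_client_expiry it is
 * left alone, otherwise it is pulled in to that point.
 */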

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_net *rxnet = conn->params.local->rxnet;
	unsigned int channel = -1;
	u32 cid;

	spin_lock(&conn->channel_lock);

	cid = call->cid;
	if (cid) {
		channel = cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];
	}
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);
	call->conn = NULL;

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.  If the conn didn't get used either, it will follow
	 * immediately unless someone else grabs it in the meantime.
	 */
	if (!list_empty(&call->chan_wait_link)) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->chan_wait_link);

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_unstarted);

		/* We must deactivate or idle the connection if it's now
		 * waiting for nothing.
		 */
		spin_lock(&rxnet->client_conn_cache_lock);
		if (conn->cache_state == RXRPC_CONN_CLIENT_WAITING &&
		    list_empty(&conn->waiting_calls) &&
		    !conn->active_chans)
			goto idle_connection;
		goto out;
	}

	if (rcu_access_pointer(chan->call) != call) {
		spin_unlock(&conn->channel_lock);
		BUG();
	}

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);
	}

	/* See if we can pass the channel directly to another call. */
	if (conn->cache_state == RXRPC_CONN_CLIENT_ACTIVE &&
	    !list_empty(&conn->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out_2;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call.  The first DATA packet
	 * of the follow-on call will implicitly ACK this call.
	 */
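	/* For a sense of scale, assuming a common HZ: jiffies + 2 below is two
	 * clock ticks, roughly 2ms at HZ=1000 or 20ms at HZ=100, which is the
	 * window in which a follow-on call can cancel this final ACK.
	 */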
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Things are more complex and we need the cache lock.  We might be
	 * able to simply idle the conn or it might now be lurking on the wait
	 * list.  It might even get moved back to the active list whilst we're
	 * waiting for the lock.
	 */
	spin_lock(&rxnet->client_conn_cache_lock);

	switch (conn->cache_state) {
	case RXRPC_CONN_CLIENT_UPGRADE:
		/* Deal with termination of a service upgrade probe. */
		if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
			clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			conn->cache_state = RXRPC_CONN_CLIENT_ACTIVE;
			rxrpc_activate_channels_locked(conn);
		}
		/* fall through */
	case RXRPC_CONN_CLIENT_ACTIVE:
		if (list_empty(&conn->waiting_calls)) {
			rxrpc_deactivate_one_channel(conn, channel);
			if (!conn->active_chans) {
				rxnet->nr_active_client_conns--;
				goto idle_connection;
			}
			goto out;
		}

		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		goto out;

	case RXRPC_CONN_CLIENT_CULLED:
		rxrpc_deactivate_one_channel(conn, channel);
		ASSERT(list_empty(&conn->waiting_calls));
		if (!conn->active_chans)
			goto idle_connection;
		goto out;

	case RXRPC_CONN_CLIENT_WAITING:
		rxrpc_deactivate_one_channel(conn, channel);
		goto out;

	default:
		BUG();
	}

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
out_2:
	spin_unlock(&conn->channel_lock);
	rxrpc_put_connection(conn);
	_leave("");
	return;

idle_connection:
	/* As no channels remain active, the connection gets deactivated
	 * immediately or moved to the idle list for a short while.
	 */
	if (test_bit(RXRPC_CONN_EXPOSED, &conn->flags)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;
		conn->cache_state = RXRPC_CONN_CLIENT_IDLE;
		list_move_tail(&conn->cache_link, &rxnet->idle_client_conns);
		if (rxnet->idle_client_conns.next == &conn->cache_link &&
		    !rxnet->kill_all_client_conns)
			rxrpc_set_client_reap_timer(rxnet);
	} else {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_inactive);
		conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
		list_del_init(&conn->cache_link);
	}
	goto out;
}

/*
 * Clean up a dead client connection.
 */
static struct rxrpc_connection *
rxrpc_put_one_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *next = NULL;
	struct rxrpc_local *local = conn->params.local;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_conns;

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);

	if (test_bit(RXRPC_CONN_IN_CLIENT_CONNS, &conn->flags)) {
		spin_lock(&local->client_conns_lock);
		if (test_and_clear_bit(RXRPC_CONN_IN_CLIENT_CONNS,
				       &conn->flags))
			rb_erase(&conn->client_node, &local->client_conns);
		spin_unlock(&local->client_conns_lock);
	}

	rxrpc_put_client_connection_id(conn);

	ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_INACTIVE);

	if (test_bit(RXRPC_CONN_COUNTED, &conn->flags)) {
		trace_rxrpc_client(conn, -1, rxrpc_client_uncount);
		spin_lock(&rxnet->client_conn_cache_lock);
		nr_conns = --rxnet->nr_client_conns;

		if (nr_conns < rxrpc_max_client_connections &&
		    !list_empty(&rxnet->waiting_client_conns)) {
			next = list_entry(rxnet->waiting_client_conns.next,
					  struct rxrpc_connection, cache_link);
			rxrpc_get_connection(next);
			rxrpc_activate_conn(rxnet, next);
		}

		spin_unlock(&rxnet->client_conn_cache_lock);
	}

	rxrpc_kill_connection(conn);
	if (next)
		rxrpc_activate_channels(next);

	/* We need to get rid of the temporary ref we took upon next, but we
	 * can't call rxrpc_put_connection() recursively.
	 */
	return next;
}

/*
 * Clean up dead client connections.
 */
void rxrpc_put_client_conn(struct rxrpc_connection *conn)
{
	const void *here = __builtin_return_address(0);
	unsigned int debug_id = conn->debug_id;
	int n;

	do {
		n = atomic_dec_return(&conn->usage);
		trace_rxrpc_conn(debug_id, rxrpc_conn_put_client, n, here);
		if (n > 0)
			return;
		ASSERTCMP(n, >=, 0);

		conn = rxrpc_put_one_client_conn(conn);
	} while (conn);
}

/*
 * Kill the longest-active client connections to make room for new ones.
 */
static void rxrpc_cull_active_client_conns(struct rxrpc_net *rxnet)
{
	struct rxrpc_connection *conn;
	unsigned int nr_conns = rxnet->nr_client_conns;
	unsigned int nr_active, limit;

	_enter("");

	ASSERTCMP(nr_conns, >=, 0);
	if (nr_conns < rxrpc_max_client_connections) {
		_leave(" [ok]");
		return;
	}
	limit = rxrpc_reap_client_connections;

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	while (nr_active > limit) {
		ASSERT(!list_empty(&rxnet->active_client_conns));
		conn = list_entry(rxnet->active_client_conns.next,
				  struct rxrpc_connection, cache_link);
		ASSERTIFCMP(conn->cache_state != RXRPC_CONN_CLIENT_ACTIVE,
			    conn->cache_state, ==, RXRPC_CONN_CLIENT_UPGRADE);

		if (list_empty(&conn->waiting_calls)) {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_culled);
			conn->cache_state = RXRPC_CONN_CLIENT_CULLED;
			list_del_init(&conn->cache_link);
		} else {
			trace_rxrpc_client(conn, -1, rxrpc_client_to_waiting);
			conn->cache_state = RXRPC_CONN_CLIENT_WAITING;
			list_move_tail(&conn->cache_link,
				       &rxnet->waiting_client_conns);
		}

		nr_active--;
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);
	_leave(" [culled]");
}

/*
 * Discard expired client connections from the idle list.  Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct work_struct *work)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, client_conn_reaper);
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	if (list_empty(&rxnet->idle_client_conns)) {
		_leave(" [empty]");
		return;
	}

	/* Don't double up on the discarding */
	if (!spin_trylock(&rxnet->client_conn_discard_lock)) {
		_leave(" [already]");
		return;
	}

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = rxnet->nr_client_conns;

next:
	spin_lock(&rxnet->client_conn_cache_lock);

	if (list_empty(&rxnet->idle_client_conns))
		goto out;

	conn = list_entry(rxnet->idle_client_conns.next,
			  struct rxrpc_connection, cache_link);
	ASSERT(test_bit(RXRPC_CONN_EXPOSED, &conn->flags));

	if (!rxnet->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout.  We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->params.local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;

		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
		BUG();
	conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
	list_del_init(&conn->cache_link);

	spin_unlock(&rxnet->client_conn_cache_lock);

	/* When we cleared the EXPOSED flag, we took on responsibility for the
	 * reference that the flag had on the usage count.  We deal with that
	 * here.  If someone re-sets the flag and re-gets the ref, that's fine.
	 */
	rxrpc_put_connection(conn);
	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time.  We could cancel it, but
	 * then things get messier.
	 */
	_debug("not yet");
	if (!rxnet->kill_all_client_conns)
		timer_reduce(&rxnet->client_conn_reap_timer,
			     conn_expires_at);

out:
	spin_unlock(&rxnet->client_conn_cache_lock);
	spin_unlock(&rxnet->client_conn_discard_lock);
	_leave("");
}

/*
 * Preemptively destroy all the client connection records rather than waiting
 * for them to time out.
 */
void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet)
{
	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	rxnet->kill_all_client_conns = true;
	spin_unlock(&rxnet->client_conn_cache_lock);

	del_timer_sync(&rxnet->client_conn_reap_timer);

	if (!rxrpc_queue_work(&rxnet->client_conn_reaper))
		_debug("destroy: queue failed");

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn, *tmp;
	struct rxrpc_net *rxnet = local->rxnet;
	unsigned int nr_active;
	LIST_HEAD(graveyard);

	_enter("");

	spin_lock(&rxnet->client_conn_cache_lock);
	nr_active = rxnet->nr_active_client_conns;

	list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns,
				 cache_link) {
		if (conn->params.local == local) {
			ASSERTCMP(conn->cache_state, ==, RXRPC_CONN_CLIENT_IDLE);

			trace_rxrpc_client(conn, -1, rxrpc_client_discard);
			if (!test_and_clear_bit(RXRPC_CONN_EXPOSED, &conn->flags))
				BUG();
			conn->cache_state = RXRPC_CONN_CLIENT_INACTIVE;
			list_move(&conn->cache_link, &graveyard);
			nr_active--;
		}
	}

	rxnet->nr_active_client_conns = nr_active;
	spin_unlock(&rxnet->client_conn_cache_lock);
	ASSERTCMP(nr_active, >=, 0);

	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next,
				  struct rxrpc_connection, cache_link);
		list_del_init(&conn->cache_link);

		rxrpc_put_connection(conn);
	}

	_leave(" [culled]");
}