// SPDX-License-Identifier: GPL-2.0-or-later
/* Client connection-specific management code.
 *
 * Copyright (C) 2016, 2020 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * Client connections need to be cached for a little while after they've made a
 * call so as to handle retransmitted DATA packets in case the server didn't
 * receive the final ACK or terminating ABORT we sent it.
 *
 * There are flags of relevance to the cache:
 *
 *  (2) DONT_REUSE - The connection should be discarded as soon as possible and
 *      should not be reused. This is set when an exclusive connection is used
 *      or a call ID counter overflows.
 *
 * The caching state may only be changed if the cache lock is held.
 *
 * There are two idle client connection expiry durations. If the total number
 * of connections is below the reap threshold, we use the normal duration; if
 * it's above, we use the fast duration.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/timer.h>
#include <linux/sched/signal.h>

#include "ar-internal.h"
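
/*
 * Reap tunables: the reap threshold is a connection count; the two expiry
 * durations are in jiffies, with the fast value used once the number of
 * connections exceeds the threshold.
 */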
__read_mostly unsigned int rxrpc_reap_client_connections = 900;
__read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ;
__read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ;

static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle)
{
	atomic_inc(&bundle->active);
}

/*
 * Release a connection ID for a client connection.
 */
static void rxrpc_put_client_connection_id(struct rxrpc_local *local,
					   struct rxrpc_connection *conn)
{
	idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT);
}

/*
 * Destroy the client connection ID tree.
 */
static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	int id;

	if (!idr_is_empty(&local->conn_ids)) {
		idr_for_each_entry(&local->conn_ids, conn, id) {
			pr_err("AF_RXRPC: Leaked client conn %p {%d}\n",
			       conn, refcount_read(&conn->ref));
		}
		BUG();
	}

	idr_destroy(&local->conn_ids);
}

/*
 * Allocate a connection bundle.
 */
static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call,
					       gfp_t gfp)
{
	static atomic_t rxrpc_bundle_id;
	struct rxrpc_bundle *bundle;

	bundle = kzalloc(sizeof(*bundle), gfp);
	if (bundle) {
		bundle->local = call->local;
		bundle->peer = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle);
		bundle->key = key_get(call->key);
		bundle->security = call->security;
		bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
		bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);
		bundle->service_id = call->dest_srx.srx_service;
		bundle->security_level = call->security_level;
		bundle->debug_id = atomic_inc_return(&rxrpc_bundle_id);
		refcount_set(&bundle->ref, 1);
		atomic_set(&bundle->active, 1);
		INIT_LIST_HEAD(&bundle->waiting_calls);
		trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new);

		write_lock(&bundle->local->rxnet->conn_lock);
		list_add_tail(&bundle->proc_link, &bundle->local->rxnet->bundle_proc_list);
		write_unlock(&bundle->local->rxnet->conn_lock);
	}
	return bundle;
}
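
/*
 * Get an additional reference on a bundle, recording the reason in the bundle
 * tracepoint.
 */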
struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *bundle,
				      enum rxrpc_bundle_trace why)
{
	int r;

	__refcount_inc(&bundle->ref, &r);
	trace_rxrpc_bundle(bundle->debug_id, r + 1, why);
	return bundle;
}
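
/*
 * Free a bundle, dropping the refs it holds on the peer and key and unlinking
 * it from the rxnet bundle list.
 */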
static void rxrpc_free_bundle(struct rxrpc_bundle *bundle)
{
	trace_rxrpc_bundle(bundle->debug_id, refcount_read(&bundle->ref),
			   rxrpc_bundle_free);
	write_lock(&bundle->local->rxnet->conn_lock);
	list_del(&bundle->proc_link);
	write_unlock(&bundle->local->rxnet->conn_lock);
	rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle);
	key_put(bundle->key);
	kfree(bundle);
}
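
/*
 * Drop a reference on a bundle, freeing it when the last reference goes away.
 */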
void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why)
{
	unsigned int id;
	bool dead;
	int r;

	if (bundle) {
		id = bundle->debug_id;
		dead = __refcount_dec_and_test(&bundle->ref, &r);
		trace_rxrpc_bundle(id, r - 1, why);
		if (dead)
			rxrpc_free_bundle(bundle);
	}
}

/*
 * Get rid of outstanding client connection preallocations when a local
 * endpoint is destroyed.
 */
void rxrpc_purge_client_connections(struct rxrpc_local *local)
{
	rxrpc_destroy_client_conn_ids(local);
}

/*
 * Allocate a client connection.
 */
static struct rxrpc_connection *
rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	struct rxrpc_local *local = bundle->local;
	struct rxrpc_net *rxnet = local->rxnet;
	int id;

	_enter("");

	conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN);
	if (!conn)
		return ERR_PTR(-ENOMEM);
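
	/* Allocate the client connection ID cyclically in [1, 0x40000000) so
	 * that recently released IDs aren't immediately reused; the index is
	 * shifted up by RXRPC_CIDSHIFT, leaving the low CID bits to carry the
	 * channel number.
	 */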
	id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000,
			      GFP_ATOMIC | __GFP_NOWARN);
	if (id < 0) {
		kfree(conn);
		return ERR_PTR(id);
	}

	refcount_set(&conn->ref, 1);
	conn->proto.cid = id << RXRPC_CIDSHIFT;
	conn->proto.epoch = local->rxnet->epoch;
	conn->out_clientflag = RXRPC_CLIENT_INITIATED;
	conn->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn);
	conn->local = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn);
	conn->peer = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn);
	conn->key = key_get(bundle->key);
	conn->security = bundle->security;
	conn->exclusive = bundle->exclusive;
	conn->upgrade = bundle->upgrade;
	conn->orig_service_id = bundle->service_id;
	conn->security_level = bundle->security_level;
	conn->state = RXRPC_CONN_CLIENT_UNSECURED;
	conn->service_id = conn->orig_service_id;

	if (conn->security == &rxrpc_no_security)
		conn->state = RXRPC_CONN_CLIENT;

	atomic_inc(&rxnet->nr_conns);
	write_lock(&rxnet->conn_lock);
	list_add_tail(&conn->proc_link, &rxnet->conn_proc_list);
	write_unlock(&rxnet->conn_lock);

	rxrpc_see_connection(conn, rxrpc_conn_new_client);

	atomic_inc(&rxnet->nr_client_conns);
	trace_rxrpc_client(conn, -1, rxrpc_client_alloc);
	return conn;
}

/*
 * Determine if a connection may be reused.
 */
static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_net *rxnet;
	int id_cursor, id, distance, limit;

	if (!conn)
		return false;

	rxnet = conn->rxnet;
	if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags))
		goto dont_reuse;

	if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED &&
	     conn->state != RXRPC_CONN_CLIENT) ||
	    conn->proto.epoch != rxnet->epoch)
		goto mark_dont_reuse;

	/* The IDR tree gets very expensive on memory if the connection IDs are
	 * widely scattered throughout the number space, so we shall want to
	 * kill off connections that, say, have an ID more than about four
	 * times the maximum number of client conns away from the current
	 * allocation point to try and keep the IDs concentrated.
	 */
	id_cursor = idr_get_cursor(&conn->local->conn_ids);
	id = conn->proto.cid >> RXRPC_CIDSHIFT;
	distance = id - id_cursor;
	if (distance < 0)
		distance = -distance;
	limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
	if (distance > limit)
		goto mark_dont_reuse;

	return true;

mark_dont_reuse:
	set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
dont_reuse:
	return false;
}

/*
 * Look up the conn bundle that matches the connection parameters, adding it if
 * it doesn't yet exist.
 */
int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp)
{
	struct rxrpc_bundle *bundle, *candidate;
	struct rxrpc_local *local = call->local;
	struct rb_node *p, **pp, *parent;
	long diff;
	bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags);

	_enter("{%px,%x,%u,%u}",
	       call->peer, key_serial(call->key), call->security_level,
	       upgrade);

	if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) {
		call->bundle = rxrpc_alloc_bundle(call, gfp);
		return call->bundle ? 0 : -ENOMEM;
	}

	/* First, see if the bundle is already there. */
	_debug("search 1");
	spin_lock(&local->client_bundles_lock);
	p = local->client_bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_bundle, local_node);

		/* The tree is ordered by peer, then key, then security level,
		 * then upgrade setting, comparing by subtraction.
		 */
#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			p = p->rb_left;
		else if (diff > 0)
			p = p->rb_right;
		else
			goto found_bundle;
	}
	spin_unlock(&local->client_bundles_lock);
	_debug("not found");

	/* It wasn't. We need to add one. */
	candidate = rxrpc_alloc_bundle(call, gfp);
	if (!candidate)
		return -ENOMEM;

	_debug("search 2");
	spin_lock(&local->client_bundles_lock);
	pp = &local->client_bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_bundle, local_node);

#define cmp(X, Y) ((long)(X) - (long)(Y))
		diff = (cmp(bundle->peer, call->peer) ?:
			cmp(bundle->key, call->key) ?:
			cmp(bundle->security_level, call->security_level) ?:
			cmp(bundle->upgrade, upgrade));
#undef cmp
		if (diff < 0)
			pp = &(*pp)->rb_left;
		else if (diff > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_bundle_free;
	}

	_debug("new bundle");
	rb_link_node(&candidate->local_node, parent, pp);
	rb_insert_color(&candidate->local_node, &local->client_bundles);
	call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [new]", call->bundle->debug_id);
	return 0;

found_bundle_free:
	rxrpc_free_bundle(candidate);
found_bundle:
	call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call);
	rxrpc_activate_bundle(bundle);
	spin_unlock(&local->client_bundles_lock);
	_leave(" = B=%u [found]", call->bundle->debug_id);
	return 0;
}

/*
 * Allocate a new connection and add it into a bundle.
 */
static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle,
				     unsigned int slot)
{
	struct rxrpc_connection *conn, *old;
	unsigned int shift = slot * RXRPC_MAXCALLS;
	unsigned int i;

	old = bundle->conns[slot];
	if (old) {
		bundle->conns[slot] = NULL;
		bundle->conn_ids[slot] = 0;
		trace_rxrpc_client(old, -1, rxrpc_client_replace);
		rxrpc_put_connection(old, rxrpc_conn_put_noreuse);
	}

	conn = rxrpc_alloc_client_connection(bundle);
	if (IS_ERR(conn)) {
		bundle->alloc_error = PTR_ERR(conn);
		return false;
	}

	rxrpc_activate_bundle(bundle);
	conn->bundle_shift = shift;
	bundle->conns[slot] = conn;
	bundle->conn_ids[slot] = conn->debug_id;
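
	/* Each connection contributes RXRPC_MAXCALLS channels to the bundle;
	 * mark this slot's channels as available in the channel bitmap.
	 */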
	for (i = 0; i < RXRPC_MAXCALLS; i++)
		set_bit(shift + i, &bundle->avail_chans);
	return true;
}

/*
 * Add a connection to a bundle if there are no usable connections or we have
 * connections waiting for extra capacity.
 */
static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle)
{
	int slot = -1, i, usable;

	_enter("");

	bundle->alloc_error = 0;

	/* See if there are any usable connections. */
	usable = 0;
	for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) {
		if (rxrpc_may_reuse_conn(bundle->conns[i]))
			usable++;
		else if (slot == -1)
			slot = i;
	}

	if (!usable && bundle->upgrade)
		bundle->try_upgrade = true;

	if (!usable)
		goto alloc_conn;

	if (!bundle->avail_chans &&
	    !bundle->try_upgrade &&
	    usable < ARRAY_SIZE(bundle->conns))
		goto alloc_conn;

	_leave("");
	return usable;

alloc_conn:
	return slot >= 0 ? rxrpc_add_conn_to_bundle(bundle, slot) : false;
}

/*
 * Assign a channel to the call at the front of the queue and wake the call up.
 * We don't increment the callNumber counter until this number has been exposed
 * to the world.
 */
static void rxrpc_activate_one_channel(struct rxrpc_connection *conn,
				       unsigned int channel)
{
	struct rxrpc_channel *chan = &conn->channels[channel];
	struct rxrpc_bundle *bundle = conn->bundle;
	struct rxrpc_call *call = list_entry(bundle->waiting_calls.next,
					     struct rxrpc_call, wait_link);
	u32 call_id = chan->call_counter + 1;

	_enter("C=%x,%u", conn->debug_id, channel);

	list_del_init(&call->wait_link);

	trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate);

	/* Cancel the final ACK on the previous call if it hasn't been sent yet
	 * as the DATA packet will implicitly ACK it.
	 */
	clear_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
	clear_bit(conn->bundle_shift + channel, &bundle->avail_chans);

	rxrpc_see_call(call, rxrpc_call_see_activate_client);
	call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call);
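
	/* The channel number occupies the low bits of the CID, so the call's
	 * CID is the connection's CID with the channel number OR'd in.
	 */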
	call->cid = conn->proto.cid | channel;
	call->call_id = call_id;
	call->dest_srx.srx_service = conn->service_id;
	call->cong_ssthresh = call->peer->cong_ssthresh;
	if (call->cong_cwnd >= call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
	else
		call->cong_mode = RXRPC_CALL_SLOW_START;

	chan->call_id = call_id;
	chan->call_debug_id = call->debug_id;
	chan->call = call;

	rxrpc_see_call(call, rxrpc_call_see_connected);
	trace_rxrpc_connect_call(call);
	call->tx_last_sent = ktime_get_real();
	rxrpc_start_call_timer(call);
	rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST);
	wake_up(&call->waitq);
}

/*
 * Remove a connection from the idle list if it's on it.
 */
static void rxrpc_unidle_conn(struct rxrpc_connection *conn)
{
	if (!list_empty(&conn->cache_link)) {
		list_del_init(&conn->cache_link);
		rxrpc_put_connection(conn, rxrpc_conn_put_unidle);
	}
}

/*
 * Assign channels and callNumbers to waiting calls.
 */
static void rxrpc_activate_channels(struct rxrpc_bundle *bundle)
{
	struct rxrpc_connection *conn;
	unsigned long avail, mask;
	unsigned int channel, slot;

	trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans);

	if (bundle->try_upgrade)
		mask = 1;
	else
		mask = ULONG_MAX;

	while (!list_empty(&bundle->waiting_calls)) {
		avail = bundle->avail_chans & mask;
		if (!avail)
			break;
		channel = __ffs(avail);
		clear_bit(channel, &bundle->avail_chans);
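
		/* Map the channel bit back to the owning connection slot; the
		 * low bits give the channel index within that connection.
		 */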
		slot = channel / RXRPC_MAXCALLS;
		conn = bundle->conns[slot];
		if (!conn)
			break;

		if (bundle->try_upgrade)
			set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags);
		rxrpc_unidle_conn(conn);

		channel &= (RXRPC_MAXCALLS - 1);
		conn->act_chans |= 1 << channel;
		rxrpc_activate_one_channel(conn, channel);
	}
}

/*
 * Connect waiting channels (called from the I/O thread).
 */
void rxrpc_connect_client_calls(struct rxrpc_local *local)
{
	struct rxrpc_call *call;

	while ((call = list_first_entry_or_null(&local->new_client_calls,
						struct rxrpc_call, wait_link))
	       ) {
		struct rxrpc_bundle *bundle = call->bundle;

		spin_lock(&local->client_call_lock);
		list_move_tail(&call->wait_link, &bundle->waiting_calls);
		spin_unlock(&local->client_call_lock);

		if (rxrpc_bundle_has_space(bundle))
			rxrpc_activate_channels(bundle);
	}
}

/*
 * Note that a call, and thus a connection, is about to be exposed to the
 * world.
 */
void rxrpc_expose_client_call(struct rxrpc_call *call)
{
	unsigned int channel = call->cid & RXRPC_CHANNELMASK;
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_channel *chan = &conn->channels[channel];

	if (!test_and_set_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		/* Mark the call ID as being used. If the callNumber counter
		 * exceeds ~2 billion, we kill the connection after its
		 * outstanding calls have finished so that the counter doesn't
		 * wrap.
		 */
		chan->call_counter++;
		if (chan->call_counter >= INT_MAX)
			set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags);
		trace_rxrpc_client(conn, channel, rxrpc_client_exposed);

		spin_lock(&call->peer->lock);
		hlist_add_head(&call->error_link, &call->peer->error_targets);
		spin_unlock(&call->peer->lock);
	}
}

/*
 * Set the reap timer.
 */
static void rxrpc_set_client_reap_timer(struct rxrpc_local *local)
{
	if (!local->kill_all_client_conns) {
		unsigned long now = jiffies;
		unsigned long reap_at = now + rxrpc_conn_idle_client_expiry;
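
		/* timer_reduce() only brings the timer's expiry forward, so an
		 * already-pending earlier reap time is left in place.
		 */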
		if (local->rxnet->live)
			timer_reduce(&local->client_conn_reap_timer, reap_at);
	}
}

/*
 * Disconnect a client call.
 */
void rxrpc_disconnect_client_call(struct rxrpc_bundle *bundle, struct rxrpc_call *call)
{
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan = NULL;
	struct rxrpc_local *local = bundle->local;
	unsigned int channel;
	bool may_reuse;
	u32 cid;

	_enter("c=%x", call->debug_id);

	/* Calls that have never actually been assigned a channel can simply be
	 * discarded.
	 */
	conn = call->conn;
	if (!conn) {
		_debug("call is waiting");
		ASSERTCMP(call->call_id, ==, 0);
		ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags));
		list_del_init(&call->wait_link);
		return;
	}

	cid = call->cid;
	channel = cid & RXRPC_CHANNELMASK;
	chan = &conn->channels[channel];
	trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect);

	if (WARN_ON(chan->call != call))
		return;

	may_reuse = rxrpc_may_reuse_conn(conn);

	/* If a client call was exposed to the world, we save the result for
	 * retransmission.
	 *
	 * We use a barrier here so that the call number and abort code can be
	 * read without needing to take a lock.
	 *
	 * TODO: Make the incoming packet handler check this and handle
	 * terminal retransmission without requiring access to the call.
	 */
	if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		_debug("exposed %u,%u", call->call_id, call->abort_code);
		__rxrpc_disconnect_call(conn, call);

		if (test_and_clear_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) {
			trace_rxrpc_client(conn, channel, rxrpc_client_to_active);
			bundle->try_upgrade = false;
			if (may_reuse)
				rxrpc_activate_channels(bundle);
		}
	}

	/* See if we can pass the channel directly to another call. */
	if (may_reuse && !list_empty(&bundle->waiting_calls)) {
		trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass);
		rxrpc_activate_one_channel(conn, channel);
		return;
	}

	/* Schedule the final ACK to be transmitted in a short while so that it
	 * can be skipped if we find a follow-on call. The first DATA packet
	 * of the follow on call will implicitly ACK this call.
	 */
	if (call->completion == RXRPC_CALL_SUCCEEDED &&
	    test_bit(RXRPC_CALL_EXPOSED, &call->flags)) {
		unsigned long final_ack_at = jiffies + 2;

		WRITE_ONCE(chan->final_ack_at, final_ack_at);
		smp_wmb(); /* vs rxrpc_process_delayed_final_acks() */
		set_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags);
		rxrpc_reduce_conn_timer(conn, final_ack_at);
	}

	/* Deactivate the channel. */
	chan->call = NULL;
	set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans);
	conn->act_chans &= ~(1 << channel);

	/* If no channels remain active, then put the connection on the idle
	 * list for a short while. Give it a ref to stop it going away if it
	 * becomes unbundled.
	 */
	if (!conn->act_chans) {
		trace_rxrpc_client(conn, channel, rxrpc_client_to_idle);
		conn->idle_timestamp = jiffies;

		rxrpc_get_connection(conn, rxrpc_conn_get_idle);
		list_move_tail(&conn->cache_link, &local->idle_client_conns);

		rxrpc_set_client_reap_timer(local);
	}
}

/*
 * Remove a connection from a bundle.
 */
static void rxrpc_unbundle_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_bundle *bundle = conn->bundle;
	unsigned int bindex;
	int i;

	_enter("C=%x", conn->debug_id);

	if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK)
		rxrpc_process_delayed_final_acks(conn, true);

	bindex = conn->bundle_shift / RXRPC_MAXCALLS;
	if (bundle->conns[bindex] == conn) {
		_debug("clear slot %u", bindex);
		bundle->conns[bindex] = NULL;
		bundle->conn_ids[bindex] = 0;
		for (i = 0; i < RXRPC_MAXCALLS; i++)
			clear_bit(conn->bundle_shift + i, &bundle->avail_chans);
		rxrpc_put_client_connection_id(bundle->local, conn);
		rxrpc_deactivate_bundle(bundle);
		rxrpc_put_connection(conn, rxrpc_conn_put_unbundle);
	}
}

/*
 * Drop the active count on a bundle.
 */
void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle)
{
	struct rxrpc_local *local;
	bool need_put = false;

	if (!bundle)
		return;

	local = bundle->local;
	if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) {
		if (!bundle->exclusive) {
			_debug("erase bundle");
			rb_erase(&bundle->local_node, &local->client_bundles);
			need_put = true;
		}

		spin_unlock(&local->client_bundles_lock);
		if (need_put)
			rxrpc_put_bundle(bundle, rxrpc_bundle_put_discard);
	}
}

/*
 * Clean up a dead client connection.
 */
void rxrpc_kill_client_conn(struct rxrpc_connection *conn)
{
	struct rxrpc_local *local = conn->local;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("C=%x", conn->debug_id);

	trace_rxrpc_client(conn, -1, rxrpc_client_cleanup);
	atomic_dec(&rxnet->nr_client_conns);

	rxrpc_put_client_connection_id(local, conn);
}

/*
 * Discard expired client connections from the idle list. Each conn in the
 * idle list has been exposed and holds an extra ref because of that.
 *
 * This may be called from conn setup or from a work item so cannot be
 * considered non-reentrant.
 */
void rxrpc_discard_expired_client_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;
	unsigned long expiry, conn_expires_at, now;
	unsigned int nr_conns;

	_enter("");

	/* We keep an estimate of what the number of conns ought to be after
	 * we've discarded some so that we don't overdo the discarding.
	 */
	nr_conns = atomic_read(&local->rxnet->nr_client_conns);

next:
	conn = list_first_entry_or_null(&local->idle_client_conns,
					struct rxrpc_connection, cache_link);
	if (!conn)
		return;

	if (!local->kill_all_client_conns) {
		/* If the number of connections is over the reap limit, we
		 * expedite discard by reducing the expiry timeout. We must,
		 * however, have at least a short grace period to be able to do
		 * final-ACK or ABORT retransmission.
		 */
		expiry = rxrpc_conn_idle_client_expiry;
		if (nr_conns > rxrpc_reap_client_connections)
			expiry = rxrpc_conn_idle_client_fast_expiry;
		if (conn->local->service_closed)
			expiry = rxrpc_closed_conn_expiry * HZ;

		conn_expires_at = conn->idle_timestamp + expiry;
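
		/* Both values are in jiffies; time_after() compares them in a
		 * wraparound-safe way.
		 */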
		now = READ_ONCE(jiffies);
		if (time_after(conn_expires_at, now))
			goto not_yet_expired;
	}

	atomic_dec(&conn->active);
	trace_rxrpc_client(conn, -1, rxrpc_client_discard);
	list_del_init(&conn->cache_link);

	rxrpc_unbundle_conn(conn);
	/* Drop the ->cache_link ref */
	rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle);

	nr_conns--;
	goto next;

not_yet_expired:
	/* The connection at the front of the queue hasn't yet expired, so
	 * schedule the work item for that point if we discarded something.
	 *
	 * We don't worry if the work item is already scheduled - it can look
	 * after rescheduling itself at a later time. We could cancel it, but
	 * then things get messier.
	 */
	if (!local->kill_all_client_conns)
		timer_reduce(&local->client_conn_reap_timer, conn_expires_at);

	_leave("");
}

/*
 * Clean up the client connections on a local endpoint.
 */
void rxrpc_clean_up_local_conns(struct rxrpc_local *local)
{
	struct rxrpc_connection *conn;

	_enter("");

	local->kill_all_client_conns = true;

	del_timer_sync(&local->client_conn_reap_timer);

	while ((conn = list_first_entry_or_null(&local->idle_client_conns,
						struct rxrpc_connection, cache_link))) {
		list_del_init(&conn->cache_link);
		atomic_dec(&conn->active);
		trace_rxrpc_client(conn, -1, rxrpc_client_discard);
		rxrpc_unbundle_conn(conn);
		rxrpc_put_connection(conn, rxrpc_conn_put_local_dead);
	}
}