/* RxRPC packet reception
 *
 * Copyright (C) 2007, 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/net_namespace.h>
#include "ar-internal.h"
static void rxrpc_proto_abort(const char *why,
			      struct rxrpc_call *call, rxrpc_seq_t seq)
{
	if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) {
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
}
/*
 * Do TCP-style congestion management [RFC 5681].
 */
static void rxrpc_congestion_management(struct rxrpc_call *call,
					struct sk_buff *skb,
					struct rxrpc_ack_summary *summary,
					rxrpc_serial_t acked_serial)
{
	enum rxrpc_congest_change change = rxrpc_cong_no_change;
	unsigned int cumulative_acks = call->cong_cumul_acks;
	unsigned int cwnd = call->cong_cwnd;
	bool resend = false;

	summary->flight_size =
		(call->tx_top - call->tx_hard_ack) - summary->nr_acks;
	if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
		summary->retrans_timeo = true;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = 1;
		if (cwnd >= call->cong_ssthresh &&
		    call->cong_mode == RXRPC_CALL_SLOW_START) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
			cumulative_acks = 0;
		}
	}
	cumulative_acks += summary->nr_new_acks;
	cumulative_acks += summary->nr_rot_new_acks;
	if (cumulative_acks > 255)
		cumulative_acks = 255;

	summary->mode = call->cong_mode;
	summary->cwnd = call->cong_cwnd;
	summary->ssthresh = call->cong_ssthresh;
	summary->cumulative_acks = cumulative_acks;
	summary->dup_acks = call->cong_dup_acks;
	switch (call->cong_mode) {
	case RXRPC_CALL_SLOW_START:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;
		if (summary->cumulative_acks > 0)
			cwnd += 1;
		if (cwnd >= call->cong_ssthresh) {
			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
			call->cong_tstamp = skb->tstamp;
		}
		goto out;
	case RXRPC_CALL_CONGEST_AVOIDANCE:
		if (summary->nr_nacks > 0)
			goto packet_loss_detected;

		/* We analyse the number of packets that get ACK'd per RTT
		 * period and increase the window if we managed to fill it.
		 */
		if (call->peer->rtt_usage == 0)
			goto out;
		if (ktime_before(skb->tstamp,
				 ktime_add_ns(call->cong_tstamp,
					      call->peer->rtt)))
			goto out_no_clear_ca;
		change = rxrpc_cong_rtt_window_end;
		call->cong_tstamp = skb->tstamp;
		if (cumulative_acks >= cwnd)
			cwnd++;
		goto out;
	case RXRPC_CALL_PACKET_LOSS:
		if (summary->nr_nacks == 0)
			goto resume_normality;

		if (summary->new_low_nack) {
			change = rxrpc_cong_new_low_nack;
			call->cong_dup_acks = 1;
			if (call->cong_extra > 1)
				call->cong_extra = 1;
			goto send_extra_data;
		}

		call->cong_dup_acks++;
		if (call->cong_dup_acks < 3)
			goto send_extra_data;

		change = rxrpc_cong_begin_retransmission;
		call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
		call->cong_ssthresh = max_t(unsigned int,
					    summary->flight_size / 2, 2);
		cwnd = call->cong_ssthresh + 3;
		call->cong_extra = 0;
		call->cong_dup_acks = 0;
		resend = true;
		goto out;
	case RXRPC_CALL_FAST_RETRANSMIT:
		if (!summary->new_low_nack) {
			if (summary->nr_new_acks == 0)
				cwnd += 1;
			call->cong_dup_acks++;
			if (call->cong_dup_acks == 2) {
				change = rxrpc_cong_retransmit_again;
				call->cong_dup_acks = 0;
				resend = true;
			}
		} else {
			change = rxrpc_cong_progress;
			cwnd = call->cong_ssthresh;
			if (summary->nr_nacks == 0)
				goto resume_normality;
		}
		goto out;

	default:
		BUG();
		goto out;
	}

resume_normality:
	change = rxrpc_cong_cleared_nacks;
	call->cong_dup_acks = 0;
	call->cong_extra = 0;
	call->cong_tstamp = skb->tstamp;
	if (cwnd < call->cong_ssthresh)
		call->cong_mode = RXRPC_CALL_SLOW_START;
	else
		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
out:
	cumulative_acks = 0;
out_no_clear_ca:
	if (cwnd >= RXRPC_RXTX_BUFF_SIZE - 1)
		cwnd = RXRPC_RXTX_BUFF_SIZE - 1;
	call->cong_cwnd = cwnd;
	call->cong_cumul_acks = cumulative_acks;
	trace_rxrpc_congest(call, summary, acked_serial, change);
	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
	return;
packet_loss_detected:
	change = rxrpc_cong_saw_nack;
	call->cong_mode = RXRPC_CALL_PACKET_LOSS;
	call->cong_dup_acks = 0;
	goto send_extra_data;

send_extra_data:
	/* Send some previously unsent DATA if we have some to advance the ACK
	 * state.
	 */
	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST ||
	    summary->nr_acks != call->tx_top - call->tx_hard_ack) {
		call->cong_extra++;
		wake_up(&call->waitq);
	}
	goto out_no_clear_ca;
}
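
/* Editor's summary of the congestion state machine above (a reading of the
 * code, not part of the original file):
 *
 *	SLOW_START --(cwnd >= ssthresh)--> CONGEST_AVOIDANCE
 *	SLOW_START / CONGEST_AVOIDANCE --(NACK seen)--> PACKET_LOSS
 *	PACKET_LOSS --(three duplicate ACKs)--> FAST_RETRANSMIT
 *	PACKET_LOSS / FAST_RETRANSMIT --(NACKs cleared)--> SLOW_START or
 *		CONGEST_AVOIDANCE, depending on cwnd vs ssthresh
 *
 * cwnd grows by at most one per ACK processed in slow start and by one per
 * filled RTT window in congestion avoidance, and is clamped to
 * RXRPC_RXTX_BUFF_SIZE - 1 on the way out.
 */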
/*
 * Ping the other end to fill our RTT cache and to retrieve the rwind
 * and MTU parameters.
 */
static void rxrpc_send_ping(struct rxrpc_call *call, struct sk_buff *skb,
			    int skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	ktime_t now = skb->tstamp;

	if (call->peer->rtt_usage < 3 ||
	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  true, true,
				  rxrpc_propose_ack_ping_for_params);
}
/*
 * Apply a hard ACK by advancing the Tx window.
 */
static bool rxrpc_rotate_tx_window(struct rxrpc_call *call, rxrpc_seq_t to,
				   struct rxrpc_ack_summary *summary)
{
	struct sk_buff *skb, *list = NULL;
	bool rot_last = false;
	int ix;
	u8 annotation;

	if (call->acks_lowest_nak == call->tx_hard_ack) {
		call->acks_lowest_nak = to;
	} else if (before_eq(call->acks_lowest_nak, to)) {
		summary->new_low_nack = true;
		call->acks_lowest_nak = to;
	}

	spin_lock(&call->lock);

	while (before(call->tx_hard_ack, to)) {
		call->tx_hard_ack++;
		ix = call->tx_hard_ack & RXRPC_RXTX_BUFF_MASK;
		skb = call->rxtx_buffer[ix];
		annotation = call->rxtx_annotations[ix];
		rxrpc_see_skb(skb, rxrpc_skb_tx_rotated);
		call->rxtx_buffer[ix] = NULL;
		call->rxtx_annotations[ix] = 0;
		skb->next = list;
		list = skb;

		if (annotation & RXRPC_TX_ANNO_LAST) {
			set_bit(RXRPC_CALL_TX_LAST, &call->flags);
			rot_last = true;
		}
		if ((annotation & RXRPC_TX_ANNO_MASK) != RXRPC_TX_ANNO_ACK)
			summary->nr_rot_new_acks++;
	}

	spin_unlock(&call->lock);

	trace_rxrpc_transmit(call, (rot_last ?
				    rxrpc_transmit_rotate_last :
				    rxrpc_transmit_rotate));
	wake_up(&call->waitq);

	while (list) {
		skb = list;
		list = skb->next;
		skb->next = NULL;
		rxrpc_free_skb(skb, rxrpc_skb_tx_freed);
	}

	return rot_last;
}
/*
 * End the transmission phase of a call.
 *
 * This occurs when we get an ACKALL packet, the first DATA packet of a reply,
 * or a final ACK packet.
 */
static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun,
			       const char *abort_why)
{
	unsigned int state;

	ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags));

	write_lock(&call->state_lock);

	state = call->state;
	switch (state) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
		if (reply_begun)
			call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY;
		else
			call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY;
		break;

	case RXRPC_CALL_SERVER_AWAIT_ACK:
		__rxrpc_call_completed(call);
		rxrpc_notify_socket(call);
		state = call->state;
		break;

	default:
		goto bad_state;
	}

	write_unlock(&call->state_lock);
	if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY)
		trace_rxrpc_transmit(call, rxrpc_transmit_await_reply);
	else
		trace_rxrpc_transmit(call, rxrpc_transmit_end);
	_leave(" = ok");
	return true;

bad_state:
	write_unlock(&call->state_lock);
	kdebug("end_tx %s", rxrpc_call_states[call->state]);
	rxrpc_proto_abort(abort_why, call, call->tx_top);
	return false;
}
/*
 * Begin the reply reception phase of a call.
 */
static bool rxrpc_receiving_reply(struct rxrpc_call *call)
{
	struct rxrpc_ack_summary summary = { 0 };
	unsigned long now, timo;
	rxrpc_seq_t top = READ_ONCE(call->tx_top);

	if (call->ackr_reason) {
		spin_lock_bh(&call->lock);
		call->ackr_reason = 0;
		spin_unlock_bh(&call->lock);
		now = jiffies;
		timo = now + MAX_JIFFY_OFFSET;
		WRITE_ONCE(call->resend_at, timo);
		WRITE_ONCE(call->ack_at, timo);
		trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now);
	}

	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) {
		if (!rxrpc_rotate_tx_window(call, top, &summary)) {
			rxrpc_proto_abort("TXL", call, top);
			return false;
		}
	}
	if (!rxrpc_end_tx_phase(call, true, "ETD"))
		return false;
	call->tx_phase = false;
	return true;
}
/*
 * Scan a jumbo packet to validate its structure and to work out how many
 * subpackets it contains.
 *
 * A jumbo packet is a collection of consecutive packets glued together with
 * little headers between that indicate how to change the initial header for
 * each subpacket.
 *
 * RXRPC_JUMBO_PACKET must be set on all but the last subpacket - and all but
 * the last are RXRPC_JUMBO_DATALEN in size.  The last subpacket may be of any
 * size.
 */
static bool rxrpc_validate_jumbo(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int len = skb->len;
	int nr_jumbo = 1;
	u8 flags = sp->hdr.flags;

	do {
		nr_jumbo++;
		if (len - offset < RXRPC_JUMBO_SUBPKTLEN)
			goto protocol_error;
		if (flags & RXRPC_LAST_PACKET)
			goto protocol_error;
		offset += RXRPC_JUMBO_DATALEN;
		if (skb_copy_bits(skb, offset, &flags, 1) < 0)
			goto protocol_error;
		offset += sizeof(struct rxrpc_jumbo_header);
	} while (flags & RXRPC_JUMBO_PACKET);

	sp->nr_jumbo = nr_jumbo;
	return true;

protocol_error:
	return false;
}
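
/* Editor's sketch of the jumbo layout the validator above walks (derived from
 * the loop logic; not authoritative): the wire header is followed by
 * alternating fixed-size payloads and 4-byte jumbo headers, ending in one
 * variable-size payload:
 *
 *	| wire hdr | JUMBO_DATALEN payload | jumbo hdr | ... | last payload |
 *
 * Each jumbo header supplies the flags byte for the subpacket that follows
 * it; a clear RXRPC_JUMBO_PACKET flag there marks the final subpacket.
 */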
/*
 * Handle reception of a duplicate packet.
 *
 * We have to take care to avoid an attack here whereby we're given a series of
 * jumbograms, each with a sequence number one before the preceding one and
 * filled up to maximum UDP size.  If they never send us the first packet in
 * the sequence, they can cause us to have to hold on to around 2MiB of kernel
 * space until the call times out.
 *
 * We limit the space usage by only accepting three duplicate jumbo packets per
 * call.  After that, we tell the other side we're no longer accepting jumbos
 * (that information is encoded in the ACK packet).
 */
static void rxrpc_input_dup_data(struct rxrpc_call *call, rxrpc_seq_t seq,
				 u8 annotation, bool *_jumbo_bad)
{
	/* Discard normal packets that are duplicates. */
	if (annotation == 0)
		return;

	/* Skip jumbo subpackets that are duplicates.  When we've had three or
	 * more partially duplicate jumbo packets, we refuse to take any more
	 * jumbos for this call.
	 */
	if (!*_jumbo_bad) {
		call->nr_jumbo_bad++;
		*_jumbo_bad = true;
	}
}
/*
 * Process a DATA packet, adding the packet to the Rx ring.
 */
static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
			     u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	enum rxrpc_call_state state;
	unsigned int offset = sizeof(struct rxrpc_wire_header);
	unsigned int ix;
	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
	bool immediate_ack = false, jumbo_bad = false, queued;
	u16 len;
	u8 ack = 0, flags, annotation = 0;

	_enter("{%u,%u},{%u,%u}",
	       call->rx_hard_ack, call->rx_top, skb->len, seq);

	_proto("Rx DATA %%%u { #%u f=%02x }",
	       sp->hdr.serial, seq, sp->hdr.flags);

	state = READ_ONCE(call->state);
	if (state >= RXRPC_CALL_COMPLETE)
		return;

	if (state == RXRPC_CALL_SERVER_RECV_REQUEST) {
		unsigned long timo = READ_ONCE(call->next_req_timo);
		unsigned long now, expect_req_by;

		if (timo) {
			now = jiffies;
			expect_req_by = now + timo;
			WRITE_ONCE(call->expect_req_by, expect_req_by);
			rxrpc_reduce_call_timer(call, expect_req_by, now,
						rxrpc_timer_set_for_idle);
		}
	}
	spin_lock(&call->input_lock);

	/* Received data implicitly ACKs all of the request packets we sent
	 * when we're acting as a client.
	 */
	if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST ||
	     state == RXRPC_CALL_CLIENT_AWAIT_REPLY) &&
	    !rxrpc_receiving_reply(call))
		goto unlock;

	call->ackr_prev_seq = seq;

	hard_ack = READ_ONCE(call->rx_hard_ack);
	if (after(seq, hard_ack + call->rx_winsize)) {
		ack = RXRPC_ACK_EXCEEDS_WINDOW;
		ack_serial = serial;
		goto ack;
	}

	flags = sp->hdr.flags;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (call->nr_jumbo_bad > 3) {
			ack = RXRPC_ACK_NOSPACE;
			ack_serial = serial;
			goto ack;
		}
		annotation = 1;
	}

next_subpacket:
	queued = false;
	ix = seq & RXRPC_RXTX_BUFF_MASK;
	len = skb->len;
	if (flags & RXRPC_JUMBO_PACKET)
		len = RXRPC_JUMBO_DATALEN;

	if (flags & RXRPC_LAST_PACKET) {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    seq != call->rx_top) {
			rxrpc_proto_abort("LSN", call, seq);
			goto unlock;
		}
	} else {
		if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) &&
		    after_eq(seq, call->rx_top)) {
			rxrpc_proto_abort("LSA", call, seq);
			goto unlock;
		}
	}

	trace_rxrpc_rx_data(call->debug_id, seq, serial, flags, annotation);
	if (before_eq(seq, hard_ack)) {
		ack = RXRPC_ACK_DUPLICATE;
		ack_serial = serial;
		goto skip;
	}

	if (flags & RXRPC_REQUEST_ACK && !ack) {
		ack = RXRPC_ACK_REQUESTED;
		ack_serial = serial;
	}

	if (call->rxtx_buffer[ix]) {
		rxrpc_input_dup_data(call, seq, annotation, &jumbo_bad);
		if (ack != RXRPC_ACK_DUPLICATE) {
			ack = RXRPC_ACK_DUPLICATE;
			ack_serial = serial;
		}
		immediate_ack = true;
		goto skip;
	}
	/* Queue the packet.  We use a couple of memory barriers here as need
	 * to make sure that rx_top is perceived to be set after the buffer
	 * pointer and that the buffer pointer is set after the annotation and
	 * the skb data.
	 *
	 * Barriers against rxrpc_recvmsg_data() and rxrpc_rotate_rx_window()
	 * and also rxrpc_fill_out_ack().
	 */
	rxrpc_get_skb(skb, rxrpc_skb_rx_got);
	call->rxtx_annotations[ix] = annotation;
	smp_wmb();
	call->rxtx_buffer[ix] = skb;
	if (after(seq, call->rx_top)) {
		smp_store_release(&call->rx_top, seq);
	} else if (before(seq, call->rx_top)) {
		/* Send an immediate ACK if we fill in a hole */
		if (!ack) {
			ack = RXRPC_ACK_DELAY;
			ack_serial = serial;
		}
		immediate_ack = true;
	}
	if (flags & RXRPC_LAST_PACKET) {
		set_bit(RXRPC_CALL_RX_LAST, &call->flags);
		trace_rxrpc_receive(call, rxrpc_receive_queue_last, serial, seq);
	} else {
		trace_rxrpc_receive(call, rxrpc_receive_queue, serial, seq);
	}
	queued = true;

	if (after_eq(seq, call->rx_expect_next)) {
		if (after(seq, call->rx_expect_next)) {
			_net("OOS %u > %u", seq, call->rx_expect_next);
			ack = RXRPC_ACK_OUT_OF_SEQUENCE;
			ack_serial = serial;
		}
		call->rx_expect_next = seq + 1;
	}

skip:
	offset += len;
	if (flags & RXRPC_JUMBO_PACKET) {
		if (skb_copy_bits(skb, offset, &flags, 1) < 0) {
			rxrpc_proto_abort("XJF", call, seq);
			goto unlock;
		}
		offset += sizeof(struct rxrpc_jumbo_header);
		seq++;
		serial++;
		annotation++;
		if (!(flags & RXRPC_JUMBO_PACKET))
			annotation |= RXRPC_RX_ANNO_JLAST;
		if (after(seq, hard_ack + call->rx_winsize)) {
			ack = RXRPC_ACK_EXCEEDS_WINDOW;
			ack_serial = serial;
			if (!jumbo_bad) {
				call->nr_jumbo_bad++;
				jumbo_bad = true;
			}
			goto ack;
		}

		_proto("Rx DATA Jumbo %%%u", serial);
		goto next_subpacket;
	}

	if (queued && flags & RXRPC_LAST_PACKET && !ack) {
		ack = RXRPC_ACK_DELAY;
		ack_serial = serial;
	}

ack:
	if (ack)
		rxrpc_propose_ACK(call, ack, skew, ack_serial,
				  immediate_ack, true,
				  rxrpc_propose_ack_input_data);
	else
		rxrpc_propose_ACK(call, RXRPC_ACK_DELAY, skew, serial,
				  false, true,
				  rxrpc_propose_ack_input_data);

	trace_rxrpc_notify_socket(call->debug_id, serial);
	rxrpc_notify_socket(call);

unlock:
	spin_unlock(&call->input_lock);
	_leave(" [queued]");
}
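
/* Editor's note on the barrier usage in rxrpc_input_data() (an inference from
 * the comment above the queueing code, not a statement from the original):
 * the smp_wmb() between the annotation store and the buffer-pointer store,
 * plus the smp_store_release() on rx_top, order annotation -> buffer ->
 * rx_top.  A reader such as rxrpc_recvmsg_data() that observes rx_top with
 * acquire semantics is thereby guaranteed to see the matching buffer pointer
 * and annotation.
 */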
/*
 * Process a requested ACK.
 */
static void rxrpc_input_requested_ack(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	struct rxrpc_skb_priv *sp;
	struct sk_buff *skb;
	ktime_t sent_at;
	int ix;

	for (ix = 0; ix < RXRPC_RXTX_BUFF_SIZE; ix++) {
		skb = call->rxtx_buffer[ix];
		if (!skb)
			continue;

		sent_at = skb->tstamp;
		smp_rmb(); /* Read timestamp before serial. */
		sp = rxrpc_skb(skb);
		if (sp->hdr.serial != orig_serial)
			continue;
		goto found;
	}

	return;

found:
	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_requested_ack,
			   orig_serial, ack_serial, sent_at, resp_time);
}
/*
 * Process the response to a ping that we sent to find out if we lost an ACK.
 *
 * If we got back a ping response that indicates a lower tx_top than what we
 * had at the time of the ping transmission, we adjudge all the DATA packets
 * sent between the response tx_top and the ping-time tx_top to have been lost.
 */
static void rxrpc_input_check_for_lost_ack(struct rxrpc_call *call)
{
	rxrpc_seq_t top, bottom, seq;
	bool resend = false;

	spin_lock_bh(&call->lock);

	bottom = call->tx_hard_ack + 1;
	top = call->acks_lost_top;
	if (before(bottom, top)) {
		for (seq = bottom; before_eq(seq, top); seq++) {
			int ix = seq & RXRPC_RXTX_BUFF_MASK;
			u8 annotation = call->rxtx_annotations[ix];
			u8 anno_type = annotation & RXRPC_TX_ANNO_MASK;

			if (anno_type != RXRPC_TX_ANNO_UNACK)
				continue;
			annotation &= ~RXRPC_TX_ANNO_MASK;
			annotation |= RXRPC_TX_ANNO_RETRANS;
			call->rxtx_annotations[ix] = annotation;
			resend = true;
		}
	}

	spin_unlock_bh(&call->lock);

	if (resend && !test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
		rxrpc_queue_call(call);
}
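
/* Editor's illustration: if the ping was sent when tx_top was 8 (saved in
 * acks_lost_top) and the response still shows tx_hard_ack at 3, then packets
 * 4-8 that remain in the UNACK state are re-annotated RETRANS and a resend
 * is queued.
 */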
/*
 * Process a ping response.
 */
static void rxrpc_input_ping_response(struct rxrpc_call *call,
				      ktime_t resp_time,
				      rxrpc_serial_t orig_serial,
				      rxrpc_serial_t ack_serial)
{
	rxrpc_serial_t ping_serial;
	ktime_t ping_time;

	ping_time = call->ping_time;
	smp_rmb();
	ping_serial = READ_ONCE(call->ping_serial);

	if (orig_serial == call->acks_lost_ping)
		rxrpc_input_check_for_lost_ack(call);

	if (before(orig_serial, ping_serial) ||
	    !test_and_clear_bit(RXRPC_CALL_PINGING, &call->flags))
		return;
	if (after(orig_serial, ping_serial))
		return;

	rxrpc_peer_add_rtt(call, rxrpc_rtt_rx_ping_response,
			   orig_serial, ack_serial, ping_time, resp_time);
}
/*
 * Process the extra information that may be appended to an ACK packet
 */
static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
				struct rxrpc_ackinfo *ackinfo)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_peer *peer;
	unsigned int mtu;
	bool wake = false;
	u32 rwind = ntohl(ackinfo->rwind);

	_proto("Rx ACK %%%u Info { rx=%u max=%u rwin=%u jm=%u }",
	       sp->hdr.serial,
	       ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU),
	       rwind, ntohl(ackinfo->jumbo_max));

	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
	if (call->tx_winsize != rwind) {
		if (rwind > call->tx_winsize)
			wake = true;
		trace_rxrpc_rx_rwind_change(call, sp->hdr.serial, rwind, wake);
		call->tx_winsize = rwind;
	}

	if (call->cong_ssthresh > rwind)
		call->cong_ssthresh = rwind;

	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));

	peer = call->peer;
	if (mtu < peer->maxdata) {
		spin_lock_bh(&peer->lock);
		peer->maxdata = mtu;
		peer->mtu = mtu + peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
	}

	if (wake)
		wake_up(&call->waitq);
}
/*
 * Process individual soft ACKs.
 *
 * Each ACK in the array corresponds to one packet and can be either an ACK or
 * a NAK.  If we find an explicitly NAK'd packet we resend immediately;
 * packets that lie beyond the end of the ACK list are scheduled for resend by
 * the timer on the basis that the peer might just not have processed them at
 * the time the ACK was sent.
 */
static void rxrpc_input_soft_acks(struct rxrpc_call *call, u8 *acks,
				  rxrpc_seq_t seq, int nr_acks,
				  struct rxrpc_ack_summary *summary)
{
	int ix;
	u8 annotation, anno_type;

	for (; nr_acks > 0; nr_acks--, seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		switch (*acks++) {
		case RXRPC_ACK_TYPE_ACK:
			summary->nr_acks++;
			if (anno_type == RXRPC_TX_ANNO_ACK)
				continue;
			summary->nr_new_acks++;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_ACK | annotation;
			break;
		case RXRPC_ACK_TYPE_NACK:
			if (!summary->nr_nacks &&
			    call->acks_lowest_nak != seq) {
				call->acks_lowest_nak = seq;
				summary->new_low_nack = true;
			}
			summary->nr_nacks++;
			if (anno_type == RXRPC_TX_ANNO_NAK)
				continue;
			summary->nr_new_nacks++;
			if (anno_type == RXRPC_TX_ANNO_RETRANS)
				continue;
			call->rxtx_annotations[ix] =
				RXRPC_TX_ANNO_NAK | annotation;
			break;
		default:
			return rxrpc_proto_abort("SFT", call, 0);
		}
	}
}
/*
 * Return true if the ACK is valid - ie. it doesn't appear to have regressed
 * with respect to the ack state conveyed by preceding ACKs.
 */
static bool rxrpc_is_ack_valid(struct rxrpc_call *call,
			       rxrpc_seq_t first_pkt, rxrpc_seq_t prev_pkt)
{
	rxrpc_seq_t base = READ_ONCE(call->ackr_first_seq);

	if (after(first_pkt, base))
		return true; /* The window advanced */

	if (before(first_pkt, base))
		return false; /* firstPacket regressed */

	if (after_eq(prev_pkt, call->ackr_prev_seq))
		return true; /* previousPacket hasn't regressed. */

	/* Some rx implementations put a serial number in previousPacket. */
	if (after_eq(prev_pkt, base + call->tx_winsize))
		return false;

	return true;
}
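
/* Editor's illustration of the checks above: before()/after() compare
 * sequence numbers modulo 2^32, so the tests survive wrap.  With base 5, an
 * ACK with firstPacket 7 is accepted (window advanced) and firstPacket 3 is
 * discarded (regression).  At firstPacket 5, previousPacket decides: no
 * regression means acceptance; a regressed value at or beyond
 * base + tx_winsize is taken for a serial number and the ACK is discarded;
 * a small regression is tolerated.
 */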
/*
 * Process an ACK packet.
 *
 * ack.firstPacket is the sequence number of the first soft-ACK'd/NAK'd packet
 * in the ACK array.  Anything before that is hard-ACK'd and may be discarded.
 *
 * A hard-ACK means that a packet has been processed and may be discarded; a
 * soft-ACK means that the packet may be discarded and retransmission
 * requested.  A phase is complete when all packets are hard-ACK'd.
 */
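
/* Editor's sketch of the on-the-wire sequence the parser below assumes
 * (derived from the offsets it uses; not authoritative): the wire header,
 * then struct rxrpc_ackpacket, then nAcks soft-ACK/NAK bytes, then three
 * bytes that are skipped over, then an optional struct rxrpc_ackinfo trailer
 * if enough of the packet remains.
 */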
static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
			    u16 skew)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	union {
		struct rxrpc_ackpacket ack;
		struct rxrpc_ackinfo info;
		u8 acks[RXRPC_MAXACKS];
	} buf;
	rxrpc_serial_t acked_serial;
	rxrpc_seq_t first_soft_ack, hard_ack, prev_pkt;
	int nr_acks, offset, ioffset;

	_enter("");

	offset = sizeof(struct rxrpc_wire_header);
	if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
		_debug("extraction failure");
		return rxrpc_proto_abort("XAK", call, 0);
	}
	offset += sizeof(buf.ack);

	acked_serial = ntohl(buf.ack.serial);
	first_soft_ack = ntohl(buf.ack.firstPacket);
	prev_pkt = ntohl(buf.ack.previousPacket);
	hard_ack = first_soft_ack - 1;
	nr_acks = buf.ack.nAcks;
	summary.ack_reason = (buf.ack.reason < RXRPC_ACK__INVALID ?
			      buf.ack.reason : RXRPC_ACK__INVALID);

	trace_rxrpc_rx_ack(call, sp->hdr.serial, acked_serial,
			   first_soft_ack, prev_pkt,
			   summary.ack_reason, nr_acks);
	if (buf.ack.reason == RXRPC_ACK_PING_RESPONSE)
		rxrpc_input_ping_response(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);
	if (buf.ack.reason == RXRPC_ACK_REQUESTED)
		rxrpc_input_requested_ack(call, skb->tstamp, acked_serial,
					  sp->hdr.serial);

	if (buf.ack.reason == RXRPC_ACK_PING) {
		_proto("Rx ACK %%%u PING Request", sp->hdr.serial);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING_RESPONSE,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ping);
	} else if (sp->hdr.flags & RXRPC_REQUEST_ACK) {
		rxrpc_propose_ACK(call, RXRPC_ACK_REQUESTED,
				  skew, sp->hdr.serial, true, true,
				  rxrpc_propose_ack_respond_to_ack);
	}

	/* Discard any out-of-order or duplicate ACKs (outside lock). */
	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
		trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
					   first_soft_ack, call->ackr_first_seq,
					   prev_pkt, call->ackr_prev_seq);
		return;
	}

	buf.info.rxMTU = 0;
	ioffset = offset + nr_acks + 3;
	if (skb->len >= ioffset + sizeof(buf.info) &&
	    skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
		return rxrpc_proto_abort("XAI", call, 0);
	spin_lock(&call->input_lock);

	/* Discard any out-of-order or duplicate ACKs (inside lock). */
	if (!rxrpc_is_ack_valid(call, first_soft_ack, prev_pkt)) {
		trace_rxrpc_rx_discard_ack(call->debug_id, sp->hdr.serial,
					   first_soft_ack, call->ackr_first_seq,
					   prev_pkt, call->ackr_prev_seq);
		goto out;
	}
	call->acks_latest_ts = skb->tstamp;
	call->acks_latest = sp->hdr.serial;

	call->ackr_first_seq = first_soft_ack;
	call->ackr_prev_seq = prev_pkt;

	/* Parse rwind and mtu sizes if provided. */
	if (buf.info.rxMTU)
		rxrpc_input_ackinfo(call, skb, &buf.info);

	if (first_soft_ack == 0) {
		rxrpc_proto_abort("AK0", call, 0);
		goto out;
	}

	/* Ignore ACKs unless we are or have just been transmitting. */
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_CLIENT_SEND_REQUEST:
	case RXRPC_CALL_CLIENT_AWAIT_REPLY:
	case RXRPC_CALL_SERVER_SEND_REPLY:
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		break;
	default:
		goto out;
	}

	if (before(hard_ack, call->tx_hard_ack) ||
	    after(hard_ack, call->tx_top)) {
		rxrpc_proto_abort("AKW", call, 0);
		goto out;
	}
	if (nr_acks > call->tx_top - hard_ack) {
		rxrpc_proto_abort("AKN", call, 0);
		goto out;
	}

	if (after(hard_ack, call->tx_hard_ack)) {
		if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) {
			rxrpc_end_tx_phase(call, false, "ETA");
			goto out;
		}
	}
	if (nr_acks > 0) {
		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0) {
			rxrpc_proto_abort("XSA", call, 0);
			goto out;
		}
		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
				      &summary);
	}

	if (call->rxtx_annotations[call->tx_top & RXRPC_RXTX_BUFF_MASK] &
	    RXRPC_TX_ANNO_LAST &&
	    summary.nr_acks == call->tx_top - hard_ack &&
	    rxrpc_is_client_call(call))
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, skew, sp->hdr.serial,
				  false, true,
				  rxrpc_propose_ack_ping_for_lost_reply);

	rxrpc_congestion_management(call, skb, &summary, acked_serial);
out:
	spin_unlock(&call->input_lock);
}
/*
 * Process an ACKALL packet.
 */
static void rxrpc_input_ackall(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_ack_summary summary = { 0 };
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);

	_proto("Rx ACKALL %%%u", sp->hdr.serial);

	spin_lock(&call->input_lock);

	if (rxrpc_rotate_tx_window(call, call->tx_top, &summary))
		rxrpc_end_tx_phase(call, false, "ETL");

	spin_unlock(&call->input_lock);
}
/*
 * Process an ABORT packet directed at a call.
 */
static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	__be32 wtmp;
	u32 abort_code = RX_CALL_DEAD;

	_enter("");

	if (skb->len >= 4 &&
	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
			  &wtmp, sizeof(wtmp)) >= 0)
		abort_code = ntohl(wtmp);

	trace_rxrpc_rx_abort(call, sp->hdr.serial, abort_code);

	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);

	if (rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED,
				      abort_code, -ECONNABORTED))
		rxrpc_notify_socket(call);
}
/*
 * Process an incoming call packet.
 */
static void rxrpc_input_call_packet(struct rxrpc_call *call,
				    struct sk_buff *skb, u16 skew)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	unsigned long timo;

	_enter("%p,%p", call, skb);

	timo = READ_ONCE(call->next_rx_timo);
	if (timo) {
		unsigned long now = jiffies, expect_rx_by;

		expect_rx_by = now + timo;
		WRITE_ONCE(call->expect_rx_by, expect_rx_by);
		rxrpc_reduce_call_timer(call, expect_rx_by, now,
					rxrpc_timer_set_for_normal);
	}

	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_DATA:
		rxrpc_input_data(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_ACK:
		rxrpc_input_ack(call, skb, skew);
		break;

	case RXRPC_PACKET_TYPE_BUSY:
		_proto("Rx BUSY %%%u", sp->hdr.serial);

		/* Just ignore BUSY packets from the server; the retry and
		 * lifespan timers will take care of business.  BUSY packets
		 * from the client don't make sense.
		 */
		break;

	case RXRPC_PACKET_TYPE_ABORT:
		rxrpc_input_abort(call, skb);
		break;

	case RXRPC_PACKET_TYPE_ACKALL:
		rxrpc_input_ackall(call, skb);
		break;

	default:
		break;
	}

	_leave("");
}
/*
 * Handle a new service call on a channel implicitly completing the preceding
 * call on that channel.  This does not apply to client conns.
 *
 * TODO: If callNumber > call_id + 1, renegotiate security.
 */
static void rxrpc_input_implicit_end_call(struct rxrpc_sock *rx,
					  struct rxrpc_connection *conn,
					  struct rxrpc_call *call)
{
	switch (READ_ONCE(call->state)) {
	case RXRPC_CALL_SERVER_AWAIT_ACK:
		rxrpc_call_completed(call);
		/* Fall through */
	case RXRPC_CALL_COMPLETE:
		break;
	default:
		if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) {
			set_bit(RXRPC_CALL_EV_ABORT, &call->events);
			rxrpc_queue_call(call);
		}
		trace_rxrpc_improper_term(call);
		break;
	}

	spin_lock(&rx->incoming_lock);
	__rxrpc_disconnect_call(conn, call);
	spin_unlock(&rx->incoming_lock);
	rxrpc_notify_socket(call);
}
/*
 * post connection-level events to the connection
 * - this includes challenges, responses, some aborts and call terminal packet
 *   retransmission
 */
static void rxrpc_post_packet_to_conn(struct rxrpc_connection *conn,
				      struct sk_buff *skb)
{
	_enter("%p,%p", conn, skb);

	skb_queue_tail(&conn->rx_queue, skb);
	rxrpc_queue_conn(conn);
}
/*
 * post endpoint-level events to the local endpoint
 * - this includes debug and version messages
 */
static void rxrpc_post_packet_to_local(struct rxrpc_local *local,
				       struct sk_buff *skb)
{
	_enter("%p,%p", local, skb);

	if (rxrpc_get_local_maybe(local)) {
		skb_queue_tail(&local->event_queue, skb);
		rxrpc_queue_local(local);
	} else {
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	}
}
/*
 * put a packet up for transport-level abort
 */
static void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);

	if (rxrpc_get_local_maybe(local)) {
		skb_queue_tail(&local->reject_queue, skb);
		rxrpc_queue_local(local);
	} else {
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
	}
}
/*
 * Extract the wire header from a packet and translate the byte order.
 */
static noinline
int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
{
	struct rxrpc_wire_header whdr;

	/* dig out the RxRPC connection details */
	if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) {
		trace_rxrpc_rx_eproto(NULL, sp->hdr.serial,
				      tracepoint_string("bad_hdr"));
		return -EBADMSG;
	}

	memset(sp, 0, sizeof(*sp));
	sp->hdr.epoch		= ntohl(whdr.epoch);
	sp->hdr.cid		= ntohl(whdr.cid);
	sp->hdr.callNumber	= ntohl(whdr.callNumber);
	sp->hdr.seq		= ntohl(whdr.seq);
	sp->hdr.serial		= ntohl(whdr.serial);
	sp->hdr.flags		= whdr.flags;
	sp->hdr.type		= whdr.type;
	sp->hdr.userStatus	= whdr.userStatus;
	sp->hdr.securityIndex	= whdr.securityIndex;
	sp->hdr._rsvd		= ntohs(whdr._rsvd);
	sp->hdr.serviceId	= ntohs(whdr.serviceId);
	return 0;
}
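
/* For reference, the wire header converted above looks roughly like this
 * (editor's sketch; every multi-byte field is big-endian on the wire):
 *
 *	struct rxrpc_wire_header {
 *		__be32	epoch;
 *		__be32	cid;		// connection and channel ID
 *		__be32	callNumber;
 *		__be32	seq;
 *		__be32	serial;
 *		u8	type;
 *		u8	flags;
 *		u8	userStatus;
 *		u8	securityIndex;
 *		__be16	_rsvd;		// also carries a checksum for some
 *		__be16	serviceId;	// security classes
 *	};
 */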
/*
 * handle data received on the local endpoint
 * - may be called in interrupt context
 *
 * [!] Note that as this is called from the encap_rcv hook, the socket is not
 * held locked by the caller and nothing prevents sk_user_data on the UDP from
 * being cleared in the middle of processing this function.
 *
 * Called with the RCU read lock held from the IP layer via UDP.
 */
int rxrpc_input_packet(struct sock *udp_sk, struct sk_buff *skb)
{
	struct rxrpc_local *local = rcu_dereference_sk_user_data(udp_sk);
	struct rxrpc_connection *conn;
	struct rxrpc_channel *chan;
	struct rxrpc_call *call = NULL;
	struct rxrpc_skb_priv *sp;
	struct rxrpc_peer *peer = NULL;
	struct rxrpc_sock *rx = NULL;
	unsigned int channel;
	int skew = 0;

	_enter("%p", udp_sk);

	if (unlikely(!local)) {
		kfree_skb(skb);
		return 0;
	}
	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();

	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	skb_pull(skb, sizeof(struct udphdr));

	/* The UDP protocol already released all skb resources;
	 * we are free to add our own data there.
	 */
	sp = rxrpc_skb(skb);

	/* dig out the RxRPC connection details */
	if (rxrpc_extract_header(sp, skb) < 0)
		goto bad_message;

	if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) {
		static int lose;
		if ((lose++ & 7) == 7) {
			trace_rxrpc_rx_lose(sp);
			rxrpc_free_skb(skb, rxrpc_skb_rx_lost);
			return 0;
		}
	}

	if (skb->tstamp == 0)
		skb->tstamp = ktime_get_real();
	trace_rxrpc_rx_packet(sp);
	switch (sp->hdr.type) {
	case RXRPC_PACKET_TYPE_VERSION:
		if (rxrpc_to_client(sp))
			goto discard;
		rxrpc_post_packet_to_local(local, skb);
		goto out;

	case RXRPC_PACKET_TYPE_BUSY:
		if (rxrpc_to_server(sp))
			goto discard;
		/* Fall through */
	case RXRPC_PACKET_TYPE_ACK:
	case RXRPC_PACKET_TYPE_ACKALL:
		if (sp->hdr.callNumber == 0)
			goto bad_message;
		/* Fall through */
	case RXRPC_PACKET_TYPE_ABORT:
		break;

	case RXRPC_PACKET_TYPE_DATA:
		if (sp->hdr.callNumber == 0 ||
		    sp->hdr.seq == 0)
			goto bad_message;
		if (sp->hdr.flags & RXRPC_JUMBO_PACKET &&
		    !rxrpc_validate_jumbo(skb))
			goto bad_message;
		break;

	case RXRPC_PACKET_TYPE_CHALLENGE:
		if (rxrpc_to_server(sp))
			goto discard;
		break;
	case RXRPC_PACKET_TYPE_RESPONSE:
		if (rxrpc_to_client(sp))
			goto discard;
		break;

	/* Packet types 9-11 should just be ignored. */
	case RXRPC_PACKET_TYPE_PARAMS:
	case RXRPC_PACKET_TYPE_10:
	case RXRPC_PACKET_TYPE_11:
		goto discard;

	default:
		_proto("Rx Bad Packet Type %u", sp->hdr.type);
		goto bad_message;
	}

	if (sp->hdr.serviceId == 0)
		goto bad_message;
	if (rxrpc_to_server(sp)) {
		/* Weed out packets to services we're not offering.  Packets
		 * that would begin a call are explicitly rejected and the rest
		 * are just discarded.
		 */
		rx = rcu_dereference(local->service);
		if (!rx || (sp->hdr.serviceId != rx->srx.srx_service &&
			    sp->hdr.serviceId != rx->second_service)) {
			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA &&
			    sp->hdr.seq == 1)
				goto unsupported_service;
			goto discard;
		}
	}
	conn = rxrpc_find_connection_rcu(local, skb, &peer);
	if (conn) {
		if (sp->hdr.securityIndex != conn->security_ix)
			goto wrong_security;

		if (sp->hdr.serviceId != conn->service_id) {
			int old_id;

			if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags))
				goto reupgrade;
			old_id = cmpxchg(&conn->service_id, conn->params.service_id,
					 sp->hdr.serviceId);

			if (old_id != conn->params.service_id &&
			    old_id != sp->hdr.serviceId)
				goto reupgrade;
		}

		if (sp->hdr.callNumber == 0) {
			/* Connection-level packet */
			_debug("CONN %p {%d}", conn, conn->debug_id);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out;
		}

		/* Note the serial number skew here */
		skew = (int)sp->hdr.serial - (int)conn->hi_serial;
		if (skew >= 0) {
			if (skew > 0)
				conn->hi_serial = sp->hdr.serial;
		} else {
			skew = -skew;
			skew = min(skew, 65535);
		}

		/* Call-bound packets are routed by connection channel. */
		channel = sp->hdr.cid & RXRPC_CHANNELMASK;
		chan = &conn->channels[channel];
		/* Ignore really old calls */
		if (sp->hdr.callNumber < chan->last_call)
			goto discard;

		if (sp->hdr.callNumber == chan->last_call) {
			if (chan->call ||
			    sp->hdr.type == RXRPC_PACKET_TYPE_ABORT)
				goto discard;

			/* For the previous service call, if completed
			 * successfully, we discard all further packets.
			 */
			if (rxrpc_conn_is_service(conn) &&
			    chan->last_type == RXRPC_PACKET_TYPE_ACK)
				goto discard;

			/* But otherwise we need to retransmit the final packet
			 * from data cached in the connection record.
			 */
			if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
				trace_rxrpc_rx_data(chan->call_debug_id,
						    sp->hdr.seq,
						    sp->hdr.serial,
						    sp->hdr.flags, 0);
			rxrpc_post_packet_to_conn(conn, skb);
			goto out;
		}

		call = rcu_dereference(chan->call);

		if (sp->hdr.callNumber > chan->call_id) {
			if (rxrpc_to_client(sp))
				goto reject_packet;
			if (call)
				rxrpc_input_implicit_end_call(rx, conn, call);
			call = NULL;
		}

		if (call) {
			if (sp->hdr.serviceId != call->service_id)
				call->service_id = sp->hdr.serviceId;
			if ((int)sp->hdr.serial - (int)call->rx_serial > 0)
				call->rx_serial = sp->hdr.serial;
			if (!test_bit(RXRPC_CALL_RX_HEARD, &call->flags))
				set_bit(RXRPC_CALL_RX_HEARD, &call->flags);
		}
	}
	if (!call || atomic_read(&call->usage) == 0) {
		if (rxrpc_to_client(sp) ||
		    sp->hdr.type != RXRPC_PACKET_TYPE_DATA)
			goto bad_message;
		if (sp->hdr.seq != 1)
			goto discard;
		call = rxrpc_new_incoming_call(local, rx, skb);
		if (!call)
			goto reject_packet;
		rxrpc_send_ping(call, skb, skew);
		mutex_unlock(&call->user_mutex);
	}

	rxrpc_input_call_packet(call, skb, skew);
	goto discard;

discard:
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
out:
	trace_rxrpc_rx_done(0, 0);
	return 0;
wrong_security:
	trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RXKADINCONSISTENCY, EBADMSG);
	skb->priority = RXKADINCONSISTENCY;
	goto post_abort;

unsupported_service:
	trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_INVALID_OPERATION, EOPNOTSUPP);
	skb->priority = RX_INVALID_OPERATION;
	goto post_abort;

reupgrade:
	trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
	goto protocol_error;

bad_message:
	trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq,
			  RX_PROTOCOL_ERROR, EBADMSG);
protocol_error:
	skb->priority = RX_PROTOCOL_ERROR;
post_abort:
	skb->mark = RXRPC_SKB_MARK_REJECT_ABORT;
reject_packet:
	trace_rxrpc_rx_done(skb->mark, skb->priority);
	rxrpc_reject_packet(local, skb);
	_leave(" [badmsg]");
	return 0;
}