1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* SCTP kernel implementation
3 * (C) Copyright IBM Corp. 2001, 2004
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
6 * Copyright (c) 2001 Intel Corp.
8 * This file is part of the SCTP kernel implementation
10 * This file contains sctp stream manipulation primitives and helpers.
12 * Please send any bug reports or fixes you make to the
14 * lksctp developers <linux-sctp@vger.kernel.org>
16 * Written or modified by:
17 * Xin Long <lucien.xin@gmail.com>
20 #include <linux/list.h>
21 #include <net/sctp/sctp.h>
22 #include <net/sctp/sm.h>
23 #include <net/sctp/stream_sched.h>
/* Drop queued-but-unscheduled outgoing chunks whose stream would no longer
 * exist after the outgoing stream table is shrunk to @outcnt entries.
 * Dropped chunks are reported to the user as failed sends with
 * SCTP_ERROR_INV_STRM, and the PR-SCTP removable-chunk accounting is
 * adjusted for prio-policy chunks.
 */
25 static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
27 struct sctp_association *asoc;
28 struct sctp_chunk *ch, *temp;
29 struct sctp_outq *outq;
/* The stream table is embedded in the association, so recover the owner. */
31 asoc = container_of(stream, struct sctp_association, stream);
32 outq = &asoc->outqueue;
/* Safe iteration: chunks are removed from the list while walking it. */
34 list_for_each_entry_safe(ch, temp, &outq->out_chunk_list, list) {
35 __u16 sid = sctp_chunk_stream_no(ch);
/* NOTE(review): sid is presumably compared against outcnt to skip
 * chunks on surviving streams — that check is elided in this view.
 */
40 sctp_sched_dequeue_common(outq, ch);
41 /* No need to call dequeue_done here because
42 * the chunks are not scheduled by now.
45 /* Mark as failed send. */
46 sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
/* A PR-SCTP priority chunk was counted as removable; undo that. */
47 if (asoc->peer.prsctp_capable &&
48 SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
49 asoc->sent_cnt_removable--;
55 /* Migrates chunks from stream queues to new stream queues if needed,
56 * but not across associations. Also, removes those chunks to streams
57 * higher than the new max.
59 static void sctp_stream_outq_migrate(struct sctp_stream *stream,
60 struct sctp_stream *new, __u16 outcnt)
/* Shrinking: purge chunks destined to streams that will disappear. */
64 if (stream->outcnt > outcnt)
65 sctp_stream_shrink_out(stream, outcnt);
/* NOTE(review): the move below only makes sense when @new is non-NULL
 * (sctp_stream_init passes NULL); the guard appears elided in this view.
 */
68 /* Here we actually move the old ext stuff into the new
69 * buffer, because we want to keep it. Then
70 * sctp_stream_update will swap ->out pointers.
72 for (i = 0; i < outcnt; i++) {
73 kfree(SCTP_SO(new, i)->ext);
74 SCTP_SO(new, i)->ext = SCTP_SO(stream, i)->ext;
75 SCTP_SO(stream, i)->ext = NULL;
/* Free per-stream ext state of streams being removed by the shrink. */
79 for (i = outcnt; i < stream->outcnt; i++) {
80 kfree(SCTP_SO(stream, i)->ext);
81 SCTP_SO(stream, i)->ext = NULL;
/* Grow the outgoing stream genradix to hold @outcnt entries.
 * No-op (shrink happens elsewhere) when @outcnt does not exceed the
 * current count.  On success stream->outcnt is updated to @outcnt.
 */
85 static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
90 if (outcnt <= stream->outcnt)
93 ret = genradix_prealloc(&stream->out, outcnt, gfp);
98 stream->outcnt = outcnt;
/* Grow the incoming stream genradix to hold @incnt entries.
 * Mirror of sctp_stream_alloc_out() for the inbound direction;
 * on success stream->incnt is updated to @incnt.
 */
102 static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
107 if (incnt <= stream->incnt)
110 ret = genradix_prealloc(&stream->in, incnt, gfp);
115 stream->incnt = incnt;
/* (Re)initialize an association's stream tables for @outcnt outgoing and
 * @incnt incoming streams.  Existing queued chunks on streams beyond the
 * new outgoing count are purged first, then both genradix tables are
 * (re)allocated and outgoing streams are marked open.
 */
119 int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
122 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
127 /* Initial stream->out size may be very big, so free it and alloc
128 * a new one with new outcnt to save memory if needed.
130 if (outcnt == stream->outcnt)
133 /* Filter out chunks queued on streams that won't exist anymore */
134 sched->unsched_all(stream);
135 sctp_stream_outq_migrate(stream, NULL, outcnt);
136 sched->sched_all(stream);
138 ret = sctp_stream_alloc_out(stream, outcnt, gfp);
142 for (i = 0; i < stream->outcnt; i++)
143 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
/* Select the data-chunk/idata operations for this stream set. */
146 sctp_stream_interleave_init(stream);
150 ret = sctp_stream_alloc_in(stream, incnt, gfp);
/* NOTE(review): the two frees below look like error-path cleanup
 * (goto targets elided in this view) releasing both radix trees.
 */
158 genradix_free(&stream->in);
160 genradix_free(&stream->out);
/* Allocate the per-stream extended state (->ext) for outgoing stream @sid
 * and register the stream with the scheduler.  On scheduler-init failure
 * the just-allocated ext is freed and the pointer cleared again.
 */
166 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
168 struct sctp_stream_out_ext *soute;
171 soute = kzalloc(sizeof(*soute), GFP_KERNEL);
174 SCTP_SO(stream, sid)->ext = soute;
176 ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
178 kfree(SCTP_SO(stream, sid)->ext);
179 SCTP_SO(stream, sid)->ext = NULL;
/* Release everything owned by a stream table: per-stream ext state for
 * each outgoing stream, then both genradix trees.  The scheduler ops
 * pointer is fetched here, presumably for a sched teardown call that is
 * elided in this view.
 */
185 void sctp_stream_free(struct sctp_stream *stream)
187 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
191 for (i = 0; i < stream->outcnt; i++)
192 kfree(SCTP_SO(stream, i)->ext);
193 genradix_free(&stream->out);
194 genradix_free(&stream->in);
/* Reset the message-id counters (and unordered mids on the out side) of
 * every incoming and outgoing stream to zero, e.g. after a stream reset.
 */
197 void sctp_stream_clear(struct sctp_stream *stream)
201 for (i = 0; i < stream->outcnt; i++) {
202 SCTP_SO(stream, i)->mid = 0;
203 SCTP_SO(stream, i)->mid_uo = 0;
206 for (i = 0; i < stream->incnt; i++)
207 SCTP_SI(stream, i)->mid = 0;
/* Replace @stream's tables with @new's (used when an association restart
 * negotiates new stream counts).  Chunk ext state is migrated first, the
 * old tables are freed, then the genradix handles and counts are copied
 * over.  Finally @new's tree roots are nulled so freeing @new later does
 * not free the trees now owned by @stream.
 */
210 void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
212 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
/* Quiesce scheduling while the tables are swapped out underneath it. */
214 sched->unsched_all(stream);
215 sctp_stream_outq_migrate(stream, new, new->outcnt);
216 sctp_stream_free(stream);
218 stream->out = new->out;
219 stream->in = new->in;
220 stream->outcnt = new->outcnt;
221 stream->incnt = new->incnt;
223 sched->sched_all(stream);
/* Ownership of the radix trees moved to @stream; detach them from @new. */
225 new->out.tree.root = NULL;
226 new->in.tree.root = NULL;
/* Hand a built RECONF chunk to the SCTP primitive layer for transmission
 * on @asoc.  The chunk is freed here on failure (the visible
 * sctp_chunk_free() is presumably on the error path; the branch is
 * elided in this view).
 */
231 static int sctp_send_reconf(struct sctp_association *asoc,
232 struct sctp_chunk *chunk)
234 struct net *net = sock_net(asoc->base.sk);
237 retval = sctp_primitive_RECONF(net, asoc, chunk);
239 sctp_chunk_free(chunk);
/* Return whether no data is pending for any of the @str_nums streams in
 * @str_list (network-order stream ids): both the association-wide
 * outqueue and each listed stream's scheduler queue must be empty.
 */
244 static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
245 __u16 str_nums, __be16 *str_list)
247 struct sctp_association *asoc;
250 asoc = container_of(stream, struct sctp_association, stream);
/* Fast path: nothing queued at all on this association. */
251 if (!asoc->outqueue.out_qlen)
257 for (i = 0; i < str_nums; i++) {
258 __u16 sid = ntohs(str_list[i]);
/* A stream with no ext has never queued anything; otherwise check
 * its per-stream scheduler outq.
 */
260 if (SCTP_SO(stream, sid)->ext &&
261 !list_empty(&SCTP_SO(stream, sid)->ext->outq))
/* Build and send an RFC 6525 outgoing/incoming SSN reset request for the
 * streams in @params (or all streams when the list is empty).  Validates
 * peer capability, that no other reconf is outstanding, stream-id ranges
 * and chunk size; marks affected outgoing streams closed while the
 * request is pending and rolls that back if sending fails.
 * Returns 0 on success or a negative errno.
 */
268 int sctp_send_reset_streams(struct sctp_association *asoc,
269 struct sctp_reset_streams *params)
271 struct sctp_stream *stream = &asoc->stream;
272 __u16 i, str_nums, *str_list;
273 struct sctp_chunk *chunk;
274 int retval = -EINVAL;
/* Peer must support RECONF and stream reset must be locally enabled. */
278 if (!asoc->peer.reconf_capable ||
279 !(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) {
280 retval = -ENOPROTOOPT;
/* Only one reconf request may be in flight at a time. */
284 if (asoc->strreset_outstanding) {
285 retval = -EINPROGRESS;
289 out = params->srs_flags & SCTP_STREAM_RESET_OUTGOING;
290 in = params->srs_flags & SCTP_STREAM_RESET_INCOMING;
294 str_nums = params->srs_number_streams;
295 str_list = params->srs_stream_list;
/* Every listed stream id must exist in the direction being reset. */
300 for (i = 0; i < str_nums; i++)
301 if (str_list[i] >= stream->outcnt)
304 param_len = str_nums * sizeof(__u16) +
305 sizeof(struct sctp_strreset_outreq);
309 for (i = 0; i < str_nums; i++)
310 if (str_list[i] >= stream->incnt)
313 param_len += str_nums * sizeof(__u16) +
314 sizeof(struct sctp_strreset_inreq);
/* The combined request parameters must fit inside one chunk. */
317 if (param_len > SCTP_MAX_CHUNK_LEN -
318 sizeof(struct sctp_reconf_chunk))
/* Build a network-order copy of the stream list for the wire/checks. */
322 nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
328 for (i = 0; i < str_nums; i++)
329 nstr_list[i] = htons(str_list[i]);
/* Outgoing reset requires the listed streams to have nothing queued. */
331 if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
337 chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
/* Block further sends on the affected streams until the response:
 * either only the listed streams or (empty list) every stream.
 */
348 for (i = 0; i < str_nums; i++)
349 SCTP_SO(stream, str_list[i])->state =
352 for (i = 0; i < stream->outcnt; i++)
353 SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
/* Remember the request so the matching response can be paired up. */
356 asoc->strreset_chunk = chunk;
357 sctp_chunk_hold(asoc->strreset_chunk);
359 retval = sctp_send_reconf(asoc, chunk);
/* Send failed: drop our reference and reopen the streams again. */
361 sctp_chunk_put(asoc->strreset_chunk);
362 asoc->strreset_chunk = NULL;
367 for (i = 0; i < str_nums; i++)
368 SCTP_SO(stream, str_list[i])->state =
371 for (i = 0; i < stream->outcnt; i++)
372 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
/* One outstanding sub-request per direction actually requested. */
377 asoc->strreset_outstanding = out + in;
/* Build and send an SSN/TSN reset (association reset) request per
 * RFC 6525.  Requires peer RECONF support, the assoc-reset option
 * enabled, no outstanding reconf and an empty outqueue.  All outgoing
 * streams are closed while the request is pending; reopened on send
 * failure.  Returns 0 on success or a negative errno.
 */
383 int sctp_send_reset_assoc(struct sctp_association *asoc)
385 struct sctp_stream *stream = &asoc->stream;
386 struct sctp_chunk *chunk = NULL;
390 if (!asoc->peer.reconf_capable ||
391 !(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
394 if (asoc->strreset_outstanding)
/* A TSN reset is only legal once all queued data has been sent. */
397 if (!sctp_outq_is_empty(&asoc->outqueue))
400 chunk = sctp_make_strreset_tsnreq(asoc);
404 /* Block further xmit of data until this request is completed */
405 for (i = 0; i < stream->outcnt; i++)
406 SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
408 asoc->strreset_chunk = chunk;
409 sctp_chunk_hold(asoc->strreset_chunk);
411 retval = sctp_send_reconf(asoc, chunk);
/* Send failed: undo the hold and reopen all streams. */
413 sctp_chunk_put(asoc->strreset_chunk);
414 asoc->strreset_chunk = NULL;
416 for (i = 0; i < stream->outcnt; i++)
417 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
422 asoc->strreset_outstanding = 1;
/* Build and send an Add Streams request for @params->sas_outstrms extra
 * outgoing and @params->sas_instrms extra incoming streams (RFC 6525).
 * The outgoing table is grown locally up front; the incoming side only
 * grows when the peer's matching request arrives.
 * Returns 0 on success or a negative errno.
 */
427 int sctp_send_add_streams(struct sctp_association *asoc,
428 struct sctp_add_streams *params)
430 struct sctp_stream *stream = &asoc->stream;
431 struct sctp_chunk *chunk = NULL;
436 if (!asoc->peer.reconf_capable ||
437 !(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
438 retval = -ENOPROTOOPT;
442 if (asoc->strreset_outstanding) {
443 retval = -EINPROGRESS;
447 out = params->sas_outstrms;
448 in = params->sas_instrms;
/* New totals must stay within the protocol's stream-id space. */
449 outcnt = stream->outcnt + out;
450 incnt = stream->incnt + in;
451 if (outcnt > SCTP_MAX_STREAM || incnt > SCTP_MAX_STREAM ||
458 retval = sctp_stream_alloc_out(stream, outcnt, GFP_KERNEL);
463 chunk = sctp_make_strreset_addstrm(asoc, out, in);
469 asoc->strreset_chunk = chunk;
470 sctp_chunk_hold(asoc->strreset_chunk);
472 retval = sctp_send_reconf(asoc, chunk);
/* Send failed: release the pending-request reference. */
474 sctp_chunk_put(asoc->strreset_chunk);
475 asoc->strreset_chunk = NULL;
/* One outstanding sub-request per direction that was requested. */
479 asoc->strreset_outstanding = !!out + !!in;
/* Find a reconf parameter inside the locally outstanding RECONF chunk.
 * @resp_seq: match the parameter whose request_seq equals it (0 = any);
 * @type: match the parameter type (0 = any).  Returns the parameter
 * header on a match; the not-found return path is elided in this view.
 */
485 static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
486 struct sctp_association *asoc, __be32 resp_seq,
489 struct sctp_chunk *chunk = asoc->strreset_chunk;
490 struct sctp_reconf_chunk *hdr;
491 union sctp_params param;
496 hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
497 sctp_walk_params(param, hdr, params) {
498 /* sctp_strreset_tsnreq is actually the basic structure
499 * of all stream reconf params, so it's safe to use it
500 * to access request_seq.
502 struct sctp_strreset_tsnreq *req = param.v;
504 if ((!resp_seq || req->request_seq == resp_seq) &&
505 (!type || type == req->param_hdr.type))
/* Record @result in the two-deep history of recent reconf outcomes,
 * used to answer retransmitted requests with their original result.
 */
512 static void sctp_update_strreset_result(struct sctp_association *asoc,
515 asoc->strreset_result[1] = asoc->strreset_result[0];
516 asoc->strreset_result[0] = result;
/* Handle a peer's Outgoing SSN Reset Request parameter: the peer is
 * resetting streams it sends on, so we reset the corresponding incoming
 * SSN state.  Validates the request sequence window, replays cached
 * results for retransmits, clears the matching in-stream mids, raises a
 * stream-reset event and returns the response chunk to send back.
 */
519 struct sctp_chunk *sctp_process_strreset_outreq(
520 struct sctp_association *asoc,
521 union sctp_params param,
522 struct sctp_ulpevent **evp)
524 struct sctp_strreset_outreq *outreq = param.v;
525 struct sctp_stream *stream = &asoc->stream;
526 __u32 result = SCTP_STRRESET_DENIED;
527 __be16 *str_p = NULL;
531 request_seq = ntohl(outreq->request_seq);
/* Peer still has data in flight up to send_reset_at_tsn; defer. */
533 if (ntohl(outreq->send_reset_at_tsn) >
534 sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) {
535 result = SCTP_STRRESET_IN_PROGRESS;
/* Sequence-number window check: future seqs are bad; seqs within the
 * last two are retransmits answered from the cached result history.
 */
539 if (TSN_lt(asoc->strreset_inseq, request_seq) ||
540 TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
541 result = SCTP_STRRESET_ERR_BAD_SEQNO;
543 } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
544 i = asoc->strreset_inseq - request_seq - 1;
545 result = asoc->strreset_result[i];
548 asoc->strreset_inseq++;
550 /* Check strreset_enable after inseq inc, as sender cannot tell
551 * the peer doesn't enable strreset after receiving response with
552 * result denied, as well as to keep consistent with bsd.
554 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
/* Stream list length is derived from the parameter length. */
557 nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
558 str_p = outreq->list_of_streams;
559 for (i = 0; i < nums; i++) {
560 if (ntohs(str_p[i]) >= stream->incnt) {
561 result = SCTP_STRRESET_ERR_WRONG_SSN;
/* If we have our own reconf outstanding, this request is only
 * acceptable when it answers our in-request; otherwise the pending
 * sub-request is now satisfied and its bookkeeping is retired.
 */
566 if (asoc->strreset_chunk) {
567 if (!sctp_chunk_lookup_strreset_param(
568 asoc, outreq->response_seq,
569 SCTP_PARAM_RESET_IN_REQUEST)) {
570 /* same process with outstanding isn't 0 */
571 result = SCTP_STRRESET_ERR_IN_PROGRESS;
575 asoc->strreset_outstanding--;
576 asoc->strreset_outseq++;
578 if (!asoc->strreset_outstanding) {
579 struct sctp_transport *t;
/* Stop the reconf retransmit timer; drop its transport ref. */
581 t = asoc->strreset_chunk->transport;
582 if (del_timer(&t->reconf_timer))
583 sctp_transport_put(t);
585 sctp_chunk_put(asoc->strreset_chunk);
586 asoc->strreset_chunk = NULL;
/* Reset incoming mids: listed streams only, or all when list empty. */
591 for (i = 0; i < nums; i++)
592 SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
594 for (i = 0; i < stream->incnt; i++)
595 SCTP_SI(stream, i)->mid = 0;
597 result = SCTP_STRRESET_PERFORMED;
599 *evp = sctp_ulpevent_make_stream_reset_event(asoc,
600 SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
603 sctp_update_strreset_result(asoc, result);
605 return sctp_make_strreset_resp(asoc, result, request_seq);
/* Handle a peer's Incoming SSN Reset Request: the peer asks us to reset
 * streams we send on.  After sequence/enable/in-progress checks, we turn
 * it into our own outgoing reset request (sent back to the peer) and
 * close the affected outgoing streams while it is pending.  Returns the
 * chunk to transmit (request on success, else an error response).
 */
608 struct sctp_chunk *sctp_process_strreset_inreq(
609 struct sctp_association *asoc,
610 union sctp_params param,
611 struct sctp_ulpevent **evp)
613 struct sctp_strreset_inreq *inreq = param.v;
614 struct sctp_stream *stream = &asoc->stream;
615 __u32 result = SCTP_STRRESET_DENIED;
616 struct sctp_chunk *chunk = NULL;
/* Sequence-number window check, with cached-result replay for
 * retransmitted requests (see sctp_process_strreset_outreq).
 */
621 request_seq = ntohl(inreq->request_seq);
622 if (TSN_lt(asoc->strreset_inseq, request_seq) ||
623 TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
624 result = SCTP_STRRESET_ERR_BAD_SEQNO;
626 } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
627 i = asoc->strreset_inseq - request_seq - 1;
628 result = asoc->strreset_result[i];
629 if (result == SCTP_STRRESET_PERFORMED)
633 asoc->strreset_inseq++;
635 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
/* We cannot start a new request while one of ours is in flight. */
638 if (asoc->strreset_outstanding) {
639 result = SCTP_STRRESET_ERR_IN_PROGRESS;
643 nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
644 str_p = inreq->list_of_streams;
/* All listed stream ids must be valid outgoing streams here. */
645 for (i = 0; i < nums; i++) {
646 if (ntohs(str_p[i]) >= stream->outcnt) {
647 result = SCTP_STRRESET_ERR_WRONG_SSN;
/* Data still queued on the listed streams: ask the peer to retry. */
652 if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
653 result = SCTP_STRRESET_IN_PROGRESS;
654 asoc->strreset_inseq--;
/* Build our own out-reset request covering the same streams. */
658 chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
/* Close the affected outgoing streams (or all, when list empty)
 * until the reset completes.
 */
663 for (i = 0; i < nums; i++)
664 SCTP_SO(stream, ntohs(str_p[i]))->state =
667 for (i = 0; i < stream->outcnt; i++)
668 SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
670 asoc->strreset_chunk = chunk;
671 asoc->strreset_outstanding = 1;
672 sctp_chunk_hold(asoc->strreset_chunk);
674 result = SCTP_STRRESET_PERFORMED;
677 sctp_update_strreset_result(asoc, result);
/* Error paths respond with an explicit result chunk instead. */
680 chunk = sctp_make_strreset_resp(asoc, result, request_seq);
/* Handle a peer's SSN/TSN Reset Request (association reset, RFC 6525
 * sec. 6.3.4).  Performs the G1-G5 steps quoted inline below: flush the
 * reassembly state, reinitialize the peer TSN map at ctsn + 2^31, flush
 * our outqueue, bump our own TSN bookkeeping and zero all stream mids.
 * Returns the TSN-response chunk to send back.
 */
685 struct sctp_chunk *sctp_process_strreset_tsnreq(
686 struct sctp_association *asoc,
687 union sctp_params param,
688 struct sctp_ulpevent **evp)
690 __u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
691 struct sctp_strreset_tsnreq *tsnreq = param.v;
692 struct sctp_stream *stream = &asoc->stream;
693 __u32 result = SCTP_STRRESET_DENIED;
/* Sequence window check; a retransmit of a performed request must be
 * answered with the TSN values that were handed out the first time.
 */
697 request_seq = ntohl(tsnreq->request_seq);
698 if (TSN_lt(asoc->strreset_inseq, request_seq) ||
699 TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
700 result = SCTP_STRRESET_ERR_BAD_SEQNO;
702 } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
703 i = asoc->strreset_inseq - request_seq - 1;
704 result = asoc->strreset_result[i];
705 if (result == SCTP_STRRESET_PERFORMED) {
706 next_tsn = asoc->ctsn_ack_point + 1;
708 sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
/* A TSN reset can only proceed once our own outqueue is drained. */
713 if (!sctp_outq_is_empty(&asoc->outqueue)) {
714 result = SCTP_STRRESET_IN_PROGRESS;
718 asoc->strreset_inseq++;
720 if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
723 if (asoc->strreset_outstanding) {
724 result = SCTP_STRRESET_ERR_IN_PROGRESS;
728 /* G4: The same processing as though a FWD-TSN chunk (as defined in
729 * [RFC3758]) with all streams affected and a new cumulative TSN
730 * ACK of the Receiver's Next TSN minus 1 were received MUST be
733 max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
734 asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
736 /* G1: Compute an appropriate value for the Receiver's Next TSN -- the
737 * TSN that the peer should use to send the next DATA chunk. The
738 * value SHOULD be the smallest TSN not acknowledged by the
739 * receiver of the request plus 2^31.
741 init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
742 sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
743 init_tsn, GFP_ATOMIC);
745 /* G3: The same processing as though a SACK chunk with no gap report
746 * and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
747 * received MUST be performed.
749 sctp_outq_free(&asoc->outqueue);
751 /* G2: Compute an appropriate value for the local endpoint's next TSN,
752 * i.e., the next TSN assigned by the receiver of the SSN/TSN reset
753 * chunk. The value SHOULD be the highest TSN sent by the receiver
754 * of the request plus 1.
756 next_tsn = asoc->next_tsn;
757 asoc->ctsn_ack_point = next_tsn - 1;
758 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
760 /* G5: The next expected and outgoing SSNs MUST be reset to 0 for all
761 * incoming and outgoing streams.
763 for (i = 0; i < stream->outcnt; i++) {
764 SCTP_SO(stream, i)->mid = 0;
765 SCTP_SO(stream, i)->mid_uo = 0;
767 for (i = 0; i < stream->incnt; i++)
768 SCTP_SI(stream, i)->mid = 0;
770 result = SCTP_STRRESET_PERFORMED;
772 *evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn,
773 next_tsn, GFP_ATOMIC);
776 sctp_update_strreset_result(asoc, result);
778 return sctp_make_strreset_tsnresp(asoc, result, request_seq,
/* Handle a peer's Add Outgoing Streams request: the peer wants to send
 * on more streams, so grow our incoming stream table.  Validates the
 * sequence window, the requested count and the allocation; retires a
 * matching outstanding add-in sub-request of ours if present; raises a
 * stream-change event and returns the response chunk.
 */
782 struct sctp_chunk *sctp_process_strreset_addstrm_out(
783 struct sctp_association *asoc,
784 union sctp_params param,
785 struct sctp_ulpevent **evp)
787 struct sctp_strreset_addstrm *addstrm = param.v;
788 struct sctp_stream *stream = &asoc->stream;
789 __u32 result = SCTP_STRRESET_DENIED;
790 __u32 request_seq, incnt;
/* Sequence window check with cached-result replay for retransmits. */
793 request_seq = ntohl(addstrm->request_seq);
794 if (TSN_lt(asoc->strreset_inseq, request_seq) ||
795 TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
796 result = SCTP_STRRESET_ERR_BAD_SEQNO;
798 } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
799 i = asoc->strreset_inseq - request_seq - 1;
800 result = asoc->strreset_result[i];
803 asoc->strreset_inseq++;
805 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
/* New incoming total must be non-trivial and within protocol limit. */
808 in = ntohs(addstrm->number_of_streams);
809 incnt = stream->incnt + in;
810 if (!in || incnt > SCTP_MAX_STREAM)
813 if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
/* If our own reconf is outstanding, this must pair with our pending
 * add-in sub-request; retire it (same pattern as the out-req path).
 */
816 if (asoc->strreset_chunk) {
817 if (!sctp_chunk_lookup_strreset_param(
818 asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
819 /* same process with outstanding isn't 0 */
820 result = SCTP_STRRESET_ERR_IN_PROGRESS;
824 asoc->strreset_outstanding--;
825 asoc->strreset_outseq++;
827 if (!asoc->strreset_outstanding) {
828 struct sctp_transport *t;
830 t = asoc->strreset_chunk->transport;
831 if (del_timer(&t->reconf_timer))
832 sctp_transport_put(t);
834 sctp_chunk_put(asoc->strreset_chunk);
835 asoc->strreset_chunk = NULL;
/* Commit the new incoming stream count (genradix already grown). */
839 stream->incnt = incnt;
841 result = SCTP_STRRESET_PERFORMED;
843 *evp = sctp_ulpevent_make_stream_change_event(asoc,
844 0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC);
847 sctp_update_strreset_result(asoc, result);
849 return sctp_make_strreset_resp(asoc, result, request_seq);
/* Handle a peer's Add Incoming Streams request: the peer wants to be
 * able to receive on more streams, so we grow our outgoing table and
 * send back an Add Outgoing Streams request of our own covering the
 * same count.  Returns the chunk to transmit (our request on success,
 * else an error response).
 */
852 struct sctp_chunk *sctp_process_strreset_addstrm_in(
853 struct sctp_association *asoc,
854 union sctp_params param,
855 struct sctp_ulpevent **evp)
857 struct sctp_strreset_addstrm *addstrm = param.v;
858 struct sctp_stream *stream = &asoc->stream;
859 __u32 result = SCTP_STRRESET_DENIED;
860 struct sctp_chunk *chunk = NULL;
861 __u32 request_seq, outcnt;
/* Sequence window check with cached-result replay for retransmits. */
865 request_seq = ntohl(addstrm->request_seq);
866 if (TSN_lt(asoc->strreset_inseq, request_seq) ||
867 TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
868 result = SCTP_STRRESET_ERR_BAD_SEQNO;
870 } else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
871 i = asoc->strreset_inseq - request_seq - 1;
872 result = asoc->strreset_result[i];
873 if (result == SCTP_STRRESET_PERFORMED)
877 asoc->strreset_inseq++;
879 if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
/* Cannot start a counter-request while our own reconf is pending. */
882 if (asoc->strreset_outstanding) {
883 result = SCTP_STRRESET_ERR_IN_PROGRESS;
/* New outgoing total must be non-trivial and within protocol limit. */
887 out = ntohs(addstrm->number_of_streams);
888 outcnt = stream->outcnt + out;
889 if (!out || outcnt > SCTP_MAX_STREAM)
892 ret = sctp_stream_alloc_out(stream, outcnt, GFP_ATOMIC);
/* Our mirrored add-out request back to the peer. */
896 chunk = sctp_make_strreset_addstrm(asoc, out, 0);
900 asoc->strreset_chunk = chunk;
901 asoc->strreset_outstanding = 1;
902 sctp_chunk_hold(asoc->strreset_chunk);
904 stream->outcnt = outcnt;
906 result = SCTP_STRRESET_PERFORMED;
909 sctp_update_strreset_result(asoc, result);
/* Error paths respond with an explicit result chunk instead. */
912 chunk = sctp_make_strreset_resp(asoc, result, request_seq);
917 struct sctp_chunk *sctp_process_strreset_resp(
918 struct sctp_association *asoc,
919 union sctp_params param,
920 struct sctp_ulpevent **evp)
922 struct sctp_stream *stream = &asoc->stream;
923 struct sctp_strreset_resp *resp = param.v;
924 struct sctp_transport *t;
925 __u16 i, nums, flags = 0;
926 struct sctp_paramhdr *req;
929 req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
933 result = ntohl(resp->result);
934 if (result != SCTP_STRRESET_PERFORMED) {
935 /* if in progress, do nothing but retransmit */
936 if (result == SCTP_STRRESET_IN_PROGRESS)
938 else if (result == SCTP_STRRESET_DENIED)
939 flags = SCTP_STREAM_RESET_DENIED;
941 flags = SCTP_STREAM_RESET_FAILED;
944 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
945 struct sctp_strreset_outreq *outreq;
948 outreq = (struct sctp_strreset_outreq *)req;
949 str_p = outreq->list_of_streams;
950 nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
953 if (result == SCTP_STRRESET_PERFORMED) {
954 struct sctp_stream_out *sout;
956 for (i = 0; i < nums; i++) {
957 sout = SCTP_SO(stream, ntohs(str_p[i]));
962 for (i = 0; i < stream->outcnt; i++) {
963 sout = SCTP_SO(stream, i);
970 flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
972 for (i = 0; i < stream->outcnt; i++)
973 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
975 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
976 nums, str_p, GFP_ATOMIC);
977 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
978 struct sctp_strreset_inreq *inreq;
981 /* if the result is performed, it's impossible for inreq */
982 if (result == SCTP_STRRESET_PERFORMED)
985 inreq = (struct sctp_strreset_inreq *)req;
986 str_p = inreq->list_of_streams;
987 nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
990 flags |= SCTP_STREAM_RESET_INCOMING_SSN;
992 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
993 nums, str_p, GFP_ATOMIC);
994 } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
995 struct sctp_strreset_resptsn *resptsn;
998 /* check for resptsn, as sctp_verify_reconf didn't do it*/
999 if (ntohs(param.p->length) != sizeof(*resptsn))
1002 resptsn = (struct sctp_strreset_resptsn *)resp;
1003 stsn = ntohl(resptsn->senders_next_tsn);
1004 rtsn = ntohl(resptsn->receivers_next_tsn);
1006 if (result == SCTP_STRRESET_PERFORMED) {
1007 __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
1008 &asoc->peer.tsn_map);
1011 asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
1013 sctp_tsnmap_init(&asoc->peer.tsn_map,
1014 SCTP_TSN_MAP_INITIAL,
1017 /* Clean up sacked and abandoned queues only. As the
1018 * out_chunk_list may not be empty, splice it to temp,
1019 * then get it back after sctp_outq_free is done.
1021 list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
1022 sctp_outq_free(&asoc->outqueue);
1023 list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
1025 asoc->next_tsn = rtsn;
1026 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1027 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1029 for (i = 0; i < stream->outcnt; i++) {
1030 SCTP_SO(stream, i)->mid = 0;
1031 SCTP_SO(stream, i)->mid_uo = 0;
1033 for (i = 0; i < stream->incnt; i++)
1034 SCTP_SI(stream, i)->mid = 0;
1037 for (i = 0; i < stream->outcnt; i++)
1038 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1040 *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
1041 stsn, rtsn, GFP_ATOMIC);
1042 } else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
1043 struct sctp_strreset_addstrm *addstrm;
1046 addstrm = (struct sctp_strreset_addstrm *)req;
1047 nums = ntohs(addstrm->number_of_streams);
1048 number = stream->outcnt - nums;
1050 if (result == SCTP_STRRESET_PERFORMED) {
1051 for (i = number; i < stream->outcnt; i++)
1052 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1054 sctp_stream_shrink_out(stream, number);
1055 stream->outcnt = number;
1058 *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
1059 0, nums, GFP_ATOMIC);
1060 } else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
1061 struct sctp_strreset_addstrm *addstrm;
1063 /* if the result is performed, it's impossible for addstrm in
1066 if (result == SCTP_STRRESET_PERFORMED)
1069 addstrm = (struct sctp_strreset_addstrm *)req;
1070 nums = ntohs(addstrm->number_of_streams);
1072 *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
1073 nums, 0, GFP_ATOMIC);
1076 asoc->strreset_outstanding--;
1077 asoc->strreset_outseq++;
1079 /* remove everything for this reconf request */
1080 if (!asoc->strreset_outstanding) {
1081 t = asoc->strreset_chunk->transport;
1082 if (del_timer(&t->reconf_timer))
1083 sctp_transport_put(t);
1085 sctp_chunk_put(asoc->strreset_chunk);
1086 asoc->strreset_chunk = NULL;