1 /* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001 Intel Corp.
7 * This file is part of the SCTP kernel implementation
9 * This file contains sctp stream manipulation primitives and helpers.
11 * This SCTP implementation is free software;
12 * you can redistribute it and/or modify it under the terms of
13 * the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
17 * This SCTP implementation is distributed in the hope that it
18 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
19 * ************************
20 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
21 * See the GNU General Public License for more details.
23 * You should have received a copy of the GNU General Public License
24 * along with GNU CC; see the file COPYING. If not, see
25 * <http://www.gnu.org/licenses/>.
27 * Please send any bug reports or fixes you make to the
29 * lksctp developers <linux-sctp@vger.kernel.org>
31 * Written or modified by:
32 * Xin Long <lucien.xin@gmail.com>
35 #include <linux/list.h>
36 #include <net/sctp/sctp.h>
37 #include <net/sctp/sm.h>
38 #include <net/sctp/stream_sched.h>
/* Allocate a flex_array of elem_count elements of elem_size bytes and
 * prealloc all of its parts so later gets/puts cannot fail.  On prealloc
 * failure the array is freed again.
 * NOTE(review): this listing is gappy — the gfp parameter line, the NULL
 * checks and the return statements are missing from this view.
 */
40 static struct flex_array *fa_alloc(size_t elem_size, size_t elem_count,
43 	struct flex_array *result;
46 	result = flex_array_alloc(elem_size, elem_count, gfp);
48 	err = flex_array_prealloc(result, 0, elem_count, gfp);
50 		flex_array_free(result);
/* Release a flex_array previously obtained from fa_alloc().
 * NOTE(review): function body lines are missing from this view;
 * presumably just flex_array_free(fa) — confirm against the full file.
 */
58 static void fa_free(struct flex_array *fa)
/* Copy 'count' elements starting at 'index' from 'from' into 'fa' at the
 * same indices.  Both arrays must already be preallocated (fa_alloc), so
 * flex_array_put() with gfp 0 cannot fail here.
 * NOTE(review): the surrounding loop lines are missing from this view.
 */
64 static void fa_copy(struct flex_array *fa, struct flex_array *from,
65 		    size_t index, size_t count)
70 		elem = flex_array_get(from, index);
71 		flex_array_put(fa, index, elem, 0);
/* Zero 'count' elements of 'fa' starting at 'index'.
 * NOTE(review): loop header/footer lines are missing from this view.
 */
76 static void fa_zero(struct flex_array *fa, size_t index, size_t count)
81 		elem = flex_array_get(fa, index);
82 		memset(elem, 0, fa->element_size);
/* Linear-search the first 'count' elements of 'fa' for the element whose
 * address equals 'elem'; used to translate an element pointer into its
 * index so it can be re-looked-up in a replacement array.
 * NOTE(review): the loop and the not-found return value (apparently
 * 'count', given the BUG_ON(index == stream->outcnt) at the caller) are
 * missing from this view.
 */
87 static size_t fa_index(struct flex_array *fa, void *elem, size_t count)
92 		if (elem == flex_array_get(fa, index))
/* Drop not-yet-scheduled outqueue chunks destined for streams >= outcnt,
 * i.e. streams that are about to disappear when the stream count shrinks.
 * Each dropped chunk is reported to the user as a failed send with
 * SCTP_ERROR_INV_STRM, and PR-SCTP accounting is adjusted.
 * NOTE(review): the sid-vs-outcnt comparison line inside the loop is
 * missing from this view.
 */
100 static void sctp_stream_shrink_out(struct sctp_stream *stream, __u16 outcnt)
102 	struct sctp_association *asoc;
103 	struct sctp_chunk *ch, *temp;
104 	struct sctp_outq *outq;
106 	asoc = container_of(stream, struct sctp_association, stream);
107 	outq = &asoc->outqueue;
109 	list_for_each_entry_safe(ch, temp, &outq->out_chunk_list, list) {
110 		__u16 sid = sctp_chunk_stream_no(ch);
115 		sctp_sched_dequeue_common(outq, ch);
116 		/* No need to call dequeue_done here because
117 		 * the chunks are not scheduled by now.
120 		/* Mark as failed send. */
121 		sctp_chunk_fail(ch, (__force __u32)SCTP_ERROR_INV_STRM);
122 		if (asoc->peer.prsctp_capable &&
123 		    SCTP_PR_PRIO_ENABLED(ch->sinfo.sinfo_flags))
124 			asoc->sent_cnt_removable--;
130 /* Migrates chunks from stream queues to new stream queues if needed,
131 * but not across associations. Also, removes those chunks to streams
132 * higher than the new max.
/* Prepare 'stream' for a resize to 'outcnt' streams: purge queued chunks
 * on streams that will no longer exist, move per-stream ext state into
 * 'new' (when non-NULL) so sctp_stream_update() can swap arrays, and free
 * ext state of streams beyond the new count.
 * NOTE(review): the 'if (new)' guard around the ext-move loop is missing
 * from this gappy view — the move presumably only happens when new != NULL
 * (sctp_stream_init passes NULL); confirm against the full file.
 */
134 static void sctp_stream_outq_migrate(struct sctp_stream *stream,
135 				     struct sctp_stream *new, __u16 outcnt)
139 	if (stream->outcnt > outcnt)
140 		sctp_stream_shrink_out(stream, outcnt);
143 	/* Here we actually move the old ext stuff into the new
144 	 * buffer, because we want to keep it. Then
145 	 * sctp_stream_update will swap ->out pointers.
147 	for (i = 0; i < outcnt; i++) {
148 		kfree(SCTP_SO(new, i)->ext);
149 		SCTP_SO(new, i)->ext	= SCTP_SO(stream, i)->ext;
150 		SCTP_SO(stream, i)->ext	= NULL;
	/* Release ext state of streams being removed by the shrink. */
154 	for (i = outcnt; i < stream->outcnt; i++) {
155 		kfree(SCTP_SO(stream, i)->ext);
156 		SCTP_SO(stream, i)->ext	= NULL;
/* (Re)allocate stream->out to hold 'outcnt' sctp_stream_out entries:
 * copy existing entries into the new flex_array, re-point out_curr at its
 * element in the new array (same index), free the old array and zero any
 * newly added tail entries.  Returns 0 on success / -ENOMEM on failure.
 * NOTE(review): the allocation-failure return, the 'if (stream->out)'
 * guard and the final 'stream->out = out' assignment are missing from
 * this gappy view.
 */
160 static int sctp_stream_alloc_out(struct sctp_stream *stream, __u16 outcnt,
163 	struct flex_array *out;
164 	size_t elem_size = sizeof(struct sctp_stream_out);
166 	out = fa_alloc(elem_size, outcnt, gfp);
171 		fa_copy(out, stream->out, 0, min(outcnt, stream->outcnt));
172 		if (stream->out_curr) {
173 			size_t index = fa_index(stream->out, stream->out_curr,
			/* out_curr must live inside the old array. */
176 			BUG_ON(index == stream->outcnt);
177 			stream->out_curr = flex_array_get(out, index);
179 		fa_free(stream->out);
182 	if (outcnt > stream->outcnt)
183 		fa_zero(out, stream->outcnt, (outcnt - stream->outcnt));
/* (Re)allocate stream->in to hold 'incnt' sctp_stream_in entries,
 * preserving existing entries and zeroing any newly added tail.
 * Returns 0 on success / -ENOMEM on failure.
 * NOTE(review): failure return, old-array free and the final
 * 'stream->in = in' assignment are missing from this gappy view.
 */
190 static int sctp_stream_alloc_in(struct sctp_stream *stream, __u16 incnt,
193 	struct flex_array *in;
194 	size_t elem_size = sizeof(struct sctp_stream_in);
196 	in = fa_alloc(elem_size, incnt, gfp);
201 		fa_copy(in, stream->in, 0, min(incnt, stream->incnt));
205 	if (incnt > stream->incnt)
206 		fa_zero(in, stream->incnt, (incnt - stream->incnt));
/* Initialize (or re-initialize) an association's stream arrays to
 * 'outcnt' outgoing and 'incnt' incoming streams.  Skips the out-side
 * work when outcnt already matches; otherwise unschedules everything,
 * purges chunks on disappearing streams, reallocates ->out, opens all
 * outgoing streams, then allocates ->in.  On in-side allocation failure
 * the out array is freed again.  Returns 0 or a -errno.
 * NOTE(review): several lines (gfp parameter, 'goto in'/'goto out'
 * labels, error returns) are missing from this gappy view.
 */
213 int sctp_stream_init(struct sctp_stream *stream, __u16 outcnt, __u16 incnt,
216 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
221 	/* Initial stream->out size may be very big, so free it and alloc
222 	 * a new one with new outcnt to save memory if needed.
224 	if (outcnt == stream->outcnt)
227 	/* Filter out chunks queued on streams that won't exist anymore */
228 	sched->unsched_all(stream);
229 	sctp_stream_outq_migrate(stream, NULL, outcnt);
230 	sched->sched_all(stream);
232 	ret = sctp_stream_alloc_out(stream, outcnt, gfp);
236 	stream->outcnt = outcnt;
237 	for (i = 0; i < stream->outcnt; i++)
238 		SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
241 	sctp_stream_interleave_init(stream);
245 	ret = sctp_stream_alloc_in(stream, incnt, gfp);
		/* Roll back the out-side allocation on in-side failure. */
248 		fa_free(stream->out);
254 	stream->incnt = incnt;
/* Lazily allocate the extended (scheduler) state for outgoing stream
 * 'sid' and let the active scheduler initialize it.  On scheduler-init
 * failure the ext struct is freed again.  Returns 0 or a -errno.
 * NOTE(review): the kzalloc -ENOMEM check and return statements are
 * missing from this gappy view.
 */
260 int sctp_stream_init_ext(struct sctp_stream *stream, __u16 sid)
262 	struct sctp_stream_out_ext *soute;
265 	soute = kzalloc(sizeof(*soute), GFP_KERNEL);
268 	SCTP_SO(stream, sid)->ext = soute;
270 	ret = sctp_sched_init_sid(stream, sid, GFP_KERNEL);
272 		kfree(SCTP_SO(stream, sid)->ext);
273 		SCTP_SO(stream, sid)->ext = NULL;
/* Free all stream resources: per-stream ext state, the out flex_array
 * and (in the lines missing from this view) presumably the scheduler
 * state and the in flex_array — confirm against the full file.
 */
279 void sctp_stream_free(struct sctp_stream *stream)
281 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
285 	for (i = 0; i < stream->outcnt; i++)
286 		kfree(SCTP_SO(stream, i)->ext);
287 	fa_free(stream->out);
/* Reset the message-id counters (mid, and mid_uo for unordered) of every
 * outgoing and incoming stream back to zero.
 */
291 void sctp_stream_clear(struct sctp_stream *stream)
295 	for (i = 0; i < stream->outcnt; i++) {
296 		SCTP_SO(stream, i)->mid = 0;
297 		SCTP_SO(stream, i)->mid_uo = 0;
300 	for (i = 0; i < stream->incnt; i++)
301 		SCTP_SI(stream, i)->mid = 0;
/* Replace 'stream' with 'new': migrate queued chunks and ext state into
 * the new arrays, free the old ones, then take over new's out/in arrays
 * and counts.  Scheduling is suspended around the swap.
 * NOTE(review): lines that presumably NULL out new->out/new->in after the
 * ownership transfer are missing from this gappy view.
 */
304 void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new)
306 	struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream);
308 	sched->unsched_all(stream);
309 	sctp_stream_outq_migrate(stream, new, new->outcnt);
310 	sctp_stream_free(stream);
312 	stream->out = new->out;
313 	stream->in  = new->in;
314 	stream->outcnt = new->outcnt;
315 	stream->incnt  = new->incnt;
317 	sched->sched_all(stream);
/* Hand a RECONF chunk to the state machine via the RECONF primitive;
 * on failure the chunk is freed here.  Returns the primitive's result.
 * NOTE(review): the retval declaration, error check and return are
 * missing from this gappy view.
 */
325 static int sctp_send_reconf(struct sctp_association *asoc,
326 			    struct sctp_chunk *chunk)
328 	struct net *net = sock_net(asoc->base.sk);
331 	retval = sctp_primitive_RECONF(net, asoc, chunk);
333 		sctp_chunk_free(chunk);
/* Return true when no data is pending for any of the str_nums streams in
 * str_list (network byte order): the association outqueue is empty, and
 * no listed stream's per-stream scheduler queue has chunks.
 * NOTE(review): the 'return false'/'return true' lines are missing from
 * this gappy view.
 */
338 static bool sctp_stream_outq_is_empty(struct sctp_stream *stream,
339 				      __u16 str_nums, __be16 *str_list)
341 	struct sctp_association *asoc;
344 	asoc = container_of(stream, struct sctp_association, stream);
345 	if (!asoc->outqueue.out_qlen)
351 	for (i = 0; i < str_nums; i++) {
352 		__u16 sid = ntohs(str_list[i]);
354 		if (SCTP_SO(stream, sid)->ext &&
355 		    !list_empty(&SCTP_SO(stream, sid)->ext->outq))
/* Userspace entry point (SCTP_RESET_STREAMS sockopt path): build and send
 * an outgoing and/or incoming stream reset request for the streams in
 * params->srs_stream_list (all streams when the list is empty).
 * Validates reconf capability/enable flags, rejects concurrent requests,
 * bounds-checks stream ids and the resulting parameter length, and
 * requires the affected outgoing queues to be empty.  On success the
 * affected outgoing streams are marked CLOSED until the peer responds;
 * on send failure the state change is rolled back.
 * Returns 0 or -ENOPROTOOPT/-EINPROGRESS/-EINVAL/-ENOMEM/-EAGAIN.
 * NOTE(review): many lines (goto labels, kfree(nstr_list), returns) are
 * missing from this gappy view.
 */
362 int sctp_send_reset_streams(struct sctp_association *asoc,
363 			    struct sctp_reset_streams *params)
365 	struct sctp_stream *stream = &asoc->stream;
366 	__u16 i, str_nums, *str_list;
367 	struct sctp_chunk *chunk;
368 	int retval = -EINVAL;
372 	if (!asoc->peer.reconf_capable ||
373 	    !(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) {
374 		retval = -ENOPROTOOPT;
378 	if (asoc->strreset_outstanding) {
379 		retval = -EINPROGRESS;
383 	out = params->srs_flags & SCTP_STREAM_RESET_OUTGOING;
384 	in  = params->srs_flags & SCTP_STREAM_RESET_INCOMING;
388 	str_nums = params->srs_number_streams;
389 	str_list = params->srs_stream_list;
	/* Every listed stream id must exist on the side(s) being reset. */
394 		for (i = 0; i < str_nums; i++)
395 			if (str_list[i] >= stream->outcnt)
398 		param_len = str_nums * sizeof(__u16) +
399 			    sizeof(struct sctp_strreset_outreq);
403 		for (i = 0; i < str_nums; i++)
404 			if (str_list[i] >= stream->incnt)
407 		param_len += str_nums * sizeof(__u16) +
408 			     sizeof(struct sctp_strreset_inreq);
411 	if (param_len > SCTP_MAX_CHUNK_LEN -
412 			sizeof(struct sctp_reconf_chunk))
	/* Convert the host-order user list to network order for the wire. */
416 	nstr_list = kcalloc(str_nums, sizeof(__be16), GFP_KERNEL);
422 	for (i = 0; i < str_nums; i++)
423 		nstr_list[i] = htons(str_list[i]);
425 	if (out && !sctp_stream_outq_is_empty(stream, str_nums, nstr_list)) {
431 	chunk = sctp_make_strreset_req(asoc, str_nums, nstr_list, out, in);
	/* Block sends on the affected outgoing streams until resolved. */
442 			for (i = 0; i < str_nums; i++)
443 				SCTP_SO(stream, str_list[i])->state =
446 			for (i = 0; i < stream->outcnt; i++)
447 				SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
450 	asoc->strreset_chunk = chunk;
451 	sctp_chunk_hold(asoc->strreset_chunk);
453 	retval = sctp_send_reconf(asoc, chunk);
	/* Send failed: drop our ref and reopen the streams we closed. */
455 		sctp_chunk_put(asoc->strreset_chunk);
456 		asoc->strreset_chunk = NULL;
461 			for (i = 0; i < str_nums; i++)
462 				SCTP_SO(stream, str_list[i])->state =
465 			for (i = 0; i < stream->outcnt; i++)
466 				SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
	/* One outstanding request per direction included in the chunk. */
471 	asoc->strreset_outstanding = out + in;
/* Build and send an SSN/TSN (association) reset request.  Requires
 * reconf capability with ASSOC_REQ enabled, no outstanding reconf
 * request, and a fully drained outqueue.  All outgoing streams are
 * closed until the peer responds; on send failure they are reopened.
 * NOTE(review): the error-return values for the early guards and the
 * final return are missing from this gappy view.
 */
477 int sctp_send_reset_assoc(struct sctp_association *asoc)
479 	struct sctp_stream *stream = &asoc->stream;
480 	struct sctp_chunk *chunk = NULL;
484 	if (!asoc->peer.reconf_capable ||
485 	    !(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
488 	if (asoc->strreset_outstanding)
491 	if (!sctp_outq_is_empty(&asoc->outqueue))
494 	chunk = sctp_make_strreset_tsnreq(asoc);
498 	/* Block further xmit of data until this request is completed */
499 	for (i = 0; i < stream->outcnt; i++)
500 		SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
502 	asoc->strreset_chunk = chunk;
503 	sctp_chunk_hold(asoc->strreset_chunk);
505 	retval = sctp_send_reconf(asoc, chunk);
		/* Send failed: undo the hold and reopen all streams. */
507 		sctp_chunk_put(asoc->strreset_chunk);
508 		asoc->strreset_chunk = NULL;
510 		for (i = 0; i < stream->outcnt; i++)
511 			SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
516 	asoc->strreset_outstanding = 1;
/* Build and send an ADD STREAMS request for params->sas_outstrms extra
 * outgoing and/or params->sas_instrms extra incoming streams.  The new
 * totals are bounded by SCTP_MAX_STREAM; the out array is grown before
 * sending so the local side is ready if the peer accepts.
 * NOTE(review): several guard/return lines (overflow check tail, chunk
 * NULL check, 'goto out' labels) are missing from this gappy view.
 */
521 int sctp_send_add_streams(struct sctp_association *asoc,
522 			  struct sctp_add_streams *params)
524 	struct sctp_stream *stream = &asoc->stream;
525 	struct sctp_chunk *chunk = NULL;
530 	if (!asoc->peer.reconf_capable ||
531 	    !(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ)) {
532 		retval = -ENOPROTOOPT;
536 	if (asoc->strreset_outstanding) {
537 		retval = -EINPROGRESS;
541 	out = params->sas_outstrms;
542 	in  = params->sas_instrms;
543 	outcnt = stream->outcnt + out;
544 	incnt = stream->incnt + in;
545 	if (outcnt > SCTP_MAX_STREAM || incnt > SCTP_MAX_STREAM ||
		/* Grow the local out array up front. */
552 		retval = sctp_stream_alloc_out(stream, outcnt, GFP_KERNEL);
557 	chunk = sctp_make_strreset_addstrm(asoc, out, in);
563 	asoc->strreset_chunk = chunk;
564 	sctp_chunk_hold(asoc->strreset_chunk);
566 	retval = sctp_send_reconf(asoc, chunk);
567 		sctp_chunk_put(asoc->strreset_chunk);
569 		asoc->strreset_chunk = NULL;
573 	stream->outcnt = outcnt;
	/* One outstanding request per direction actually requested. */
575 	asoc->strreset_outstanding = !!out + !!in;
/* Walk the parameters of the locally outstanding strreset chunk and
 * return the first one matching 'resp_seq' (0 = any sequence) and 'type'
 * (0 = any type), or NULL when there is no outstanding chunk / no match.
 * NOTE(review): the NULL-chunk early return and the return statements
 * are missing from this gappy view.
 */
581 static struct sctp_paramhdr *sctp_chunk_lookup_strreset_param(
582 			struct sctp_association *asoc, __be32 resp_seq,
585 	struct sctp_chunk *chunk = asoc->strreset_chunk;
586 	struct sctp_reconf_chunk *hdr;
587 	union sctp_params param;
592 	hdr = (struct sctp_reconf_chunk *)chunk->chunk_hdr;
593 	sctp_walk_params(param, hdr, params) {
594 		/* sctp_strreset_tsnreq is actually the basic structure
595 		 * of all stream reconf params, so it's safe to use it
596 		 * to access request_seq.
598 		struct sctp_strreset_tsnreq *req = param.v;
600 		if ((!resp_seq || req->request_seq == resp_seq) &&
601 		    (!type || type == req->param_hdr.type))
/* Remember the result of the two most recently handled inbound reconf
 * requests ([0] = newest, [1] = previous) so retransmitted requests can
 * be answered with the same result.
 */
608 static void sctp_update_strreset_result(struct sctp_association *asoc,
611 	asoc->strreset_result[1] = asoc->strreset_result[0];
612 	asoc->strreset_result[0] = result;
/* Handle a peer's OUTGOING SSN RESET request (which resets OUR incoming
 * streams).  Sequence-checks the request (answering retransmissions from
 * the cached results), validates the listed stream ids against incnt,
 * settles any of our own outstanding IN-request this responds to, resets
 * the incoming mids (listed streams, or all when the list is empty),
 * queues an SCTP_STREAM_RESET_INCOMING_SSN event and replies with a
 * RECONF response chunk carrying 'result'.
 * NOTE(review): several goto/err labels and the empty-list branch header
 * are missing from this gappy view.
 */
615 struct sctp_chunk *sctp_process_strreset_outreq(
616 				struct sctp_association *asoc,
617 				union sctp_params param,
618 				struct sctp_ulpevent **evp)
620 	struct sctp_strreset_outreq *outreq = param.v;
621 	struct sctp_stream *stream = &asoc->stream;
622 	__u32 result = SCTP_STRRESET_DENIED;
623 	__be16 *str_p = NULL;
627 	request_seq = ntohl(outreq->request_seq);
	/* Peer still has data in flight beyond our cumulative TSN ack. */
629 	if (ntohl(outreq->send_reset_at_tsn) >
630 	    sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) {
631 		result = SCTP_STRRESET_IN_PROGRESS;
	/* Sequence window check; replay cached result for retransmits. */
635 	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
636 	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
637 		result = SCTP_STRRESET_ERR_BAD_SEQNO;
639 	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
640 		i = asoc->strreset_inseq - request_seq - 1;
641 		result = asoc->strreset_result[i];
644 	asoc->strreset_inseq++;
646 	/* Check strreset_enable after inseq inc, as sender cannot tell
647 	 * the peer doesn't enable strreset after receiving response with
648 	 * result denied, as well as to keep consistent with bsd.
650 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
653 	nums = (ntohs(param.p->length) - sizeof(*outreq)) / sizeof(__u16);
654 	str_p = outreq->list_of_streams;
655 	for (i = 0; i < nums; i++) {
656 		if (ntohs(str_p[i]) >= stream->incnt) {
657 			result = SCTP_STRRESET_ERR_WRONG_SSN;
	/* If we have our own request outstanding, this outreq must be the
	 * answer to our IN request, else the peer is in progress too.
	 */
662 	if (asoc->strreset_chunk) {
663 		if (!sctp_chunk_lookup_strreset_param(
664 				asoc, outreq->response_seq,
665 				SCTP_PARAM_RESET_IN_REQUEST)) {
666 			/* same process with outstanding isn't 0 */
667 			result = SCTP_STRRESET_ERR_IN_PROGRESS;
671 		asoc->strreset_outstanding--;
672 		asoc->strreset_outseq++;
674 		if (!asoc->strreset_outstanding) {
675 			struct sctp_transport *t;
677 			t = asoc->strreset_chunk->transport;
678 			if (del_timer(&t->reconf_timer))
679 				sctp_transport_put(t);
681 			sctp_chunk_put(asoc->strreset_chunk);
682 			asoc->strreset_chunk = NULL;
	/* Reset incoming mids: listed streams, or all when list empty. */
687 		for (i = 0; i < nums; i++)
688 			SCTP_SI(stream, ntohs(str_p[i]))->mid = 0;
690 		for (i = 0; i < stream->incnt; i++)
691 			SCTP_SI(stream, i)->mid = 0;
693 	result = SCTP_STRRESET_PERFORMED;
695 	*evp = sctp_ulpevent_make_stream_reset_event(asoc,
696 		SCTP_STREAM_RESET_INCOMING_SSN, nums, str_p, GFP_ATOMIC);
699 	sctp_update_strreset_result(asoc, result);
701 	return sctp_make_strreset_resp(asoc, result, request_seq);
/* Handle a peer's INCOMING SSN RESET request, which asks US to reset our
 * outgoing streams: after sequence/enable/outstanding checks and stream
 * id validation against outcnt, we answer not with a plain response but
 * with our own OUT reset request (sctp_make_strreset_req(..., 1, 0)),
 * closing the affected outgoing streams meanwhile.  A plain response
 * chunk is only built on the error paths.
 * NOTE(review): goto/err labels, the empty-list branch header and the
 * final return are missing from this gappy view.
 */
704 struct sctp_chunk *sctp_process_strreset_inreq(
705 				struct sctp_association *asoc,
706 				union sctp_params param,
707 				struct sctp_ulpevent **evp)
709 	struct sctp_strreset_inreq *inreq = param.v;
710 	struct sctp_stream *stream = &asoc->stream;
711 	__u32 result = SCTP_STRRESET_DENIED;
712 	struct sctp_chunk *chunk = NULL;
717 	request_seq = ntohl(inreq->request_seq);
718 	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
719 	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
720 		result = SCTP_STRRESET_ERR_BAD_SEQNO;
722 	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
723 		i = asoc->strreset_inseq - request_seq - 1;
724 		result = asoc->strreset_result[i];
		/* A retransmit of an already-performed inreq needs no
		 * new response chunk.
		 */
725 		if (result == SCTP_STRRESET_PERFORMED)
729 	asoc->strreset_inseq++;
731 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ))
734 	if (asoc->strreset_outstanding) {
735 		result = SCTP_STRRESET_ERR_IN_PROGRESS;
739 	nums = (ntohs(param.p->length) - sizeof(*inreq)) / sizeof(__u16);
740 	str_p = inreq->list_of_streams;
741 	for (i = 0; i < nums; i++) {
742 		if (ntohs(str_p[i]) >= stream->outcnt) {
743 			result = SCTP_STRRESET_ERR_WRONG_SSN;
	/* Can't reset streams that still have queued data. */
748 	if (!sctp_stream_outq_is_empty(stream, nums, str_p)) {
749 		result = SCTP_STRRESET_IN_PROGRESS;
750 		asoc->strreset_inseq--;
	/* Answer with our own outgoing-reset request. */
754 	chunk = sctp_make_strreset_req(asoc, nums, str_p, 1, 0);
759 		for (i = 0; i < nums; i++)
760 			SCTP_SO(stream, ntohs(str_p[i]))->state =
763 		for (i = 0; i < stream->outcnt; i++)
764 			SCTP_SO(stream, i)->state = SCTP_STREAM_CLOSED;
766 	asoc->strreset_chunk = chunk;
767 	asoc->strreset_outstanding = 1;
768 	sctp_chunk_hold(asoc->strreset_chunk);
770 	result = SCTP_STRRESET_PERFORMED;
773 	sctp_update_strreset_result(asoc, result);
	/* Error paths: reply with a plain response carrying 'result'. */
776 		chunk = sctp_make_strreset_resp(asoc, result, request_seq);
/* Handle a peer's SSN/TSN (association) reset request per RFC 6525
 * section 6.2.4: after sequence/enable/outstanding checks and requiring
 * an empty outqueue, perform steps G1-G5 — flush the reassembly state via
 * a synthetic FWD-TSN (G4), pick and install a new receiver's next TSN
 * init_tsn = peer ctsn + 2^31 (G1), drop the outqueue as if fully SACKed
 * (G3), advance our own ack points (G2) and zero all stream mids (G5) —
 * then queue an assoc-reset event and reply with a TSN response carrying
 * next_tsn/init_tsn.
 * NOTE(review): goto/err labels and parts of the retransmit branch are
 * missing from this gappy view.
 */
781 struct sctp_chunk *sctp_process_strreset_tsnreq(
782 				struct sctp_association *asoc,
783 				union sctp_params param,
784 				struct sctp_ulpevent **evp)
786 	__u32 init_tsn = 0, next_tsn = 0, max_tsn_seen;
787 	struct sctp_strreset_tsnreq *tsnreq = param.v;
788 	struct sctp_stream *stream = &asoc->stream;
789 	__u32 result = SCTP_STRRESET_DENIED;
793 	request_seq = ntohl(tsnreq->request_seq);
794 	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
795 	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
796 		result = SCTP_STRRESET_ERR_BAD_SEQNO;
798 	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
		/* Retransmit: rebuild the TSN pair from current state. */
799 		i = asoc->strreset_inseq - request_seq - 1;
800 		result = asoc->strreset_result[i];
801 		if (result == SCTP_STRRESET_PERFORMED) {
802 			next_tsn = asoc->ctsn_ack_point + 1;
804 				sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
	/* Cannot reset TSNs while we still have data queued. */
809 	if (!sctp_outq_is_empty(&asoc->outqueue)) {
810 		result = SCTP_STRRESET_IN_PROGRESS;
814 	asoc->strreset_inseq++;
816 	if (!(asoc->strreset_enable & SCTP_ENABLE_RESET_ASSOC_REQ))
819 	if (asoc->strreset_outstanding) {
820 		result = SCTP_STRRESET_ERR_IN_PROGRESS;
824 	/* G4: The same processing as though a FWD-TSN chunk (as defined in
825 	 *     [RFC3758]) with all streams affected and a new cumulative TSN
826 	 *     ACK of the Receiver's Next TSN minus 1 were received MUST be
829 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
830 	asoc->stream.si->report_ftsn(&asoc->ulpq, max_tsn_seen);
832 	/* G1: Compute an appropriate value for the Receiver's Next TSN -- the
833 	 *     TSN that the peer should use to send the next DATA chunk.  The
834 	 *     value SHOULD be the smallest TSN not acknowledged by the
835 	 *     receiver of the request plus 2^31.
837 	init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
838 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
839 			 init_tsn, GFP_ATOMIC);
841 	/* G3: The same processing as though a SACK chunk with no gap report
842 	 *     and a cumulative TSN ACK of the Sender's Next TSN minus 1 were
843 	 *     received MUST be performed.
845 	sctp_outq_free(&asoc->outqueue);
847 	/* G2: Compute an appropriate value for the local endpoint's next TSN,
848 	 *     i.e., the next TSN assigned by the receiver of the SSN/TSN reset
849 	 *     chunk.  The value SHOULD be the highest TSN sent by the receiver
850 	 *     of the request plus 1.
852 	next_tsn = asoc->next_tsn;
853 	asoc->ctsn_ack_point = next_tsn - 1;
854 	asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
856 	/* G5:  The next expected and outgoing SSNs MUST be reset to 0 for all
857 	 *      incoming and outgoing streams.
859 	for (i = 0; i < stream->outcnt; i++) {
860 		SCTP_SO(stream, i)->mid = 0;
861 		SCTP_SO(stream, i)->mid_uo = 0;
863 	for (i = 0; i < stream->incnt; i++)
864 		SCTP_SI(stream, i)->mid = 0;
866 	result = SCTP_STRRESET_PERFORMED;
868 	*evp = sctp_ulpevent_make_assoc_reset_event(asoc, 0, init_tsn,
869 						    next_tsn, GFP_ATOMIC);
872 	sctp_update_strreset_result(asoc, result);
874 	return sctp_make_strreset_tsnresp(asoc, result, request_seq,
/* Handle a peer's ADD OUTGOING STREAMS request, which grows OUR incoming
 * side: after sequence/enable checks, bound the new incnt by
 * SCTP_MAX_STREAM, grow the in flex_array, settle any of our own
 * outstanding ADD_IN request this answers, commit the new incnt, queue a
 * stream-change event and reply with a response chunk.
 * NOTE(review): goto/err labels and a few guard lines are missing from
 * this gappy view.
 */
878 struct sctp_chunk *sctp_process_strreset_addstrm_out(
879 				struct sctp_association *asoc,
880 				union sctp_params param,
881 				struct sctp_ulpevent **evp)
883 	struct sctp_strreset_addstrm *addstrm = param.v;
884 	struct sctp_stream *stream = &asoc->stream;
885 	__u32 result = SCTP_STRRESET_DENIED;
886 	__u32 request_seq, incnt;
889 	request_seq = ntohl(addstrm->request_seq);
890 	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
891 	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
892 		result = SCTP_STRRESET_ERR_BAD_SEQNO;
894 	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
895 		i = asoc->strreset_inseq - request_seq - 1;
896 		result = asoc->strreset_result[i];
899 	asoc->strreset_inseq++;
901 	if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
904 	in = ntohs(addstrm->number_of_streams);
905 	incnt = stream->incnt + in;
906 	if (!in || incnt > SCTP_MAX_STREAM)
909 	if (sctp_stream_alloc_in(stream, incnt, GFP_ATOMIC))
	/* If we have our own request outstanding, this must answer our
	 * ADD_IN_STREAMS request, else the peer is in progress too.
	 */
912 	if (asoc->strreset_chunk) {
913 		if (!sctp_chunk_lookup_strreset_param(
914 			asoc, 0, SCTP_PARAM_RESET_ADD_IN_STREAMS)) {
915 			/* same process with outstanding isn't 0 */
916 			result = SCTP_STRRESET_ERR_IN_PROGRESS;
920 		asoc->strreset_outstanding--;
921 		asoc->strreset_outseq++;
923 		if (!asoc->strreset_outstanding) {
924 			struct sctp_transport *t;
926 			t = asoc->strreset_chunk->transport;
927 			if (del_timer(&t->reconf_timer))
928 				sctp_transport_put(t);
930 			sctp_chunk_put(asoc->strreset_chunk);
931 			asoc->strreset_chunk = NULL;
935 	stream->incnt = incnt;
937 	result = SCTP_STRRESET_PERFORMED;
939 	*evp = sctp_ulpevent_make_stream_change_event(asoc,
940 		0, ntohs(addstrm->number_of_streams), 0, GFP_ATOMIC);
943 	sctp_update_strreset_result(asoc, result);
945 	return sctp_make_strreset_resp(asoc, result, request_seq);
/* Handle a peer's ADD INCOMING STREAMS request, which asks US to add
 * outgoing streams: after sequence/enable/outstanding checks and the
 * SCTP_MAX_STREAM bound, grow our out flex_array and answer with our own
 * ADD OUTGOING STREAMS request rather than a plain response; the plain
 * response chunk is only built on error paths.
 * NOTE(review): goto/err labels and the final return are missing from
 * this gappy view.
 */
948 struct sctp_chunk *sctp_process_strreset_addstrm_in(
949 				struct sctp_association *asoc,
950 				union sctp_params param,
951 				struct sctp_ulpevent **evp)
953 	struct sctp_strreset_addstrm *addstrm = param.v;
954 	struct sctp_stream *stream = &asoc->stream;
955 	__u32 result = SCTP_STRRESET_DENIED;
956 	struct sctp_chunk *chunk = NULL;
957 	__u32 request_seq, outcnt;
961 	request_seq = ntohl(addstrm->request_seq);
962 	if (TSN_lt(asoc->strreset_inseq, request_seq) ||
963 	    TSN_lt(request_seq, asoc->strreset_inseq - 2)) {
964 		result = SCTP_STRRESET_ERR_BAD_SEQNO;
966 	} else if (TSN_lt(request_seq, asoc->strreset_inseq)) {
967 		i = asoc->strreset_inseq - request_seq - 1;
968 		result = asoc->strreset_result[i];
		/* A retransmit of a performed request needs no new reply. */
969 		if (result == SCTP_STRRESET_PERFORMED)
973 	asoc->strreset_inseq++;
975 	if (!(asoc->strreset_enable & SCTP_ENABLE_CHANGE_ASSOC_REQ))
978 	if (asoc->strreset_outstanding) {
979 		result = SCTP_STRRESET_ERR_IN_PROGRESS;
983 	out = ntohs(addstrm->number_of_streams);
984 	outcnt = stream->outcnt + out;
985 	if (!out || outcnt > SCTP_MAX_STREAM)
988 	ret = sctp_stream_alloc_out(stream, outcnt, GFP_ATOMIC);
	/* Answer with our own add-outgoing-streams request. */
992 	chunk = sctp_make_strreset_addstrm(asoc, out, 0);
996 	asoc->strreset_chunk = chunk;
997 	asoc->strreset_outstanding = 1;
998 	sctp_chunk_hold(asoc->strreset_chunk);
1000 	stream->outcnt = outcnt;
1002 	result = SCTP_STRRESET_PERFORMED;
1005 	sctp_update_strreset_result(asoc, result);
	/* Error paths: reply with a plain response carrying 'result'. */
1008 		chunk = sctp_make_strreset_resp(asoc, result, request_seq);
1013 struct sctp_chunk *sctp_process_strreset_resp(
1014 struct sctp_association *asoc,
1015 union sctp_params param,
1016 struct sctp_ulpevent **evp)
1018 struct sctp_stream *stream = &asoc->stream;
1019 struct sctp_strreset_resp *resp = param.v;
1020 struct sctp_transport *t;
1021 __u16 i, nums, flags = 0;
1022 struct sctp_paramhdr *req;
1025 req = sctp_chunk_lookup_strreset_param(asoc, resp->response_seq, 0);
1029 result = ntohl(resp->result);
1030 if (result != SCTP_STRRESET_PERFORMED) {
1031 /* if in progress, do nothing but retransmit */
1032 if (result == SCTP_STRRESET_IN_PROGRESS)
1034 else if (result == SCTP_STRRESET_DENIED)
1035 flags = SCTP_STREAM_RESET_DENIED;
1037 flags = SCTP_STREAM_RESET_FAILED;
1040 if (req->type == SCTP_PARAM_RESET_OUT_REQUEST) {
1041 struct sctp_strreset_outreq *outreq;
1044 outreq = (struct sctp_strreset_outreq *)req;
1045 str_p = outreq->list_of_streams;
1046 nums = (ntohs(outreq->param_hdr.length) - sizeof(*outreq)) /
1049 if (result == SCTP_STRRESET_PERFORMED) {
1050 struct sctp_stream_out *sout;
1052 for (i = 0; i < nums; i++) {
1053 sout = SCTP_SO(stream, ntohs(str_p[i]));
1058 for (i = 0; i < stream->outcnt; i++) {
1059 sout = SCTP_SO(stream, i);
1066 flags |= SCTP_STREAM_RESET_OUTGOING_SSN;
1068 for (i = 0; i < stream->outcnt; i++)
1069 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1071 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
1072 nums, str_p, GFP_ATOMIC);
1073 } else if (req->type == SCTP_PARAM_RESET_IN_REQUEST) {
1074 struct sctp_strreset_inreq *inreq;
1077 /* if the result is performed, it's impossible for inreq */
1078 if (result == SCTP_STRRESET_PERFORMED)
1081 inreq = (struct sctp_strreset_inreq *)req;
1082 str_p = inreq->list_of_streams;
1083 nums = (ntohs(inreq->param_hdr.length) - sizeof(*inreq)) /
1086 flags |= SCTP_STREAM_RESET_INCOMING_SSN;
1088 *evp = sctp_ulpevent_make_stream_reset_event(asoc, flags,
1089 nums, str_p, GFP_ATOMIC);
1090 } else if (req->type == SCTP_PARAM_RESET_TSN_REQUEST) {
1091 struct sctp_strreset_resptsn *resptsn;
1094 /* check for resptsn, as sctp_verify_reconf didn't do it*/
1095 if (ntohs(param.p->length) != sizeof(*resptsn))
1098 resptsn = (struct sctp_strreset_resptsn *)resp;
1099 stsn = ntohl(resptsn->senders_next_tsn);
1100 rtsn = ntohl(resptsn->receivers_next_tsn);
1102 if (result == SCTP_STRRESET_PERFORMED) {
1103 __u32 mtsn = sctp_tsnmap_get_max_tsn_seen(
1104 &asoc->peer.tsn_map);
1107 asoc->stream.si->report_ftsn(&asoc->ulpq, mtsn);
1109 sctp_tsnmap_init(&asoc->peer.tsn_map,
1110 SCTP_TSN_MAP_INITIAL,
1113 /* Clean up sacked and abandoned queues only. As the
1114 * out_chunk_list may not be empty, splice it to temp,
1115 * then get it back after sctp_outq_free is done.
1117 list_splice_init(&asoc->outqueue.out_chunk_list, &temp);
1118 sctp_outq_free(&asoc->outqueue);
1119 list_splice_init(&temp, &asoc->outqueue.out_chunk_list);
1121 asoc->next_tsn = rtsn;
1122 asoc->ctsn_ack_point = asoc->next_tsn - 1;
1123 asoc->adv_peer_ack_point = asoc->ctsn_ack_point;
1125 for (i = 0; i < stream->outcnt; i++) {
1126 SCTP_SO(stream, i)->mid = 0;
1127 SCTP_SO(stream, i)->mid_uo = 0;
1129 for (i = 0; i < stream->incnt; i++)
1130 SCTP_SI(stream, i)->mid = 0;
1133 for (i = 0; i < stream->outcnt; i++)
1134 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1136 *evp = sctp_ulpevent_make_assoc_reset_event(asoc, flags,
1137 stsn, rtsn, GFP_ATOMIC);
1138 } else if (req->type == SCTP_PARAM_RESET_ADD_OUT_STREAMS) {
1139 struct sctp_strreset_addstrm *addstrm;
1142 addstrm = (struct sctp_strreset_addstrm *)req;
1143 nums = ntohs(addstrm->number_of_streams);
1144 number = stream->outcnt - nums;
1146 if (result == SCTP_STRRESET_PERFORMED) {
1147 for (i = number; i < stream->outcnt; i++)
1148 SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
1150 sctp_stream_shrink_out(stream, number);
1151 stream->outcnt = number;
1154 *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
1155 0, nums, GFP_ATOMIC);
1156 } else if (req->type == SCTP_PARAM_RESET_ADD_IN_STREAMS) {
1157 struct sctp_strreset_addstrm *addstrm;
1159 /* if the result is performed, it's impossible for addstrm in
1162 if (result == SCTP_STRRESET_PERFORMED)
1165 addstrm = (struct sctp_strreset_addstrm *)req;
1166 nums = ntohs(addstrm->number_of_streams);
1168 *evp = sctp_ulpevent_make_stream_change_event(asoc, flags,
1169 nums, 0, GFP_ATOMIC);
1172 asoc->strreset_outstanding--;
1173 asoc->strreset_outseq++;
1175 /* remove everything for this reconf request */
1176 if (!asoc->strreset_outstanding) {
1177 t = asoc->strreset_chunk->transport;
1178 if (del_timer(&t->reconf_timer))
1179 sctp_transport_put(t);
1181 sctp_chunk_put(asoc->strreset_chunk);
1182 asoc->strreset_chunk = NULL;