xref: /src/sys/netinet/sctp_indata.c (revision a25439b68651d176ae05867f5090d45fd85e9f24)
1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4  * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions are met:
8  *
9  * a) Redistributions of source code must retain the above copyright notice,
10  *    this list of conditions and the following disclaimer.
11  *
12  * b) Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in
14  *    the documentation and/or other materials provided with the distribution.
15  *
16  * c) Neither the name of Cisco Systems, Inc. nor the names of its
17  *    contributors may be used to endorse or promote products derived
18  *    from this software without specific prior written permission.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30  * THE POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
47 
48 
49 /*
50  * NOTES: On the outbound side of things I need to check the sack timer to
51  * see if I should generate a sack into the chunk queue (if I have data to
52  * send, that is, and will be sending it) ... for bundling.
53  *
54  * The callback in sctp_usrreq.c will get called when the socket is read from.
55  * This will cause sctp_service_queues() to get called on the top entry in
56  * the list.
57  */
58 
59 void
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
61 {
62 	asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
63 }
64 
65 /* Calculate what the rwnd would be */
66 uint32_t
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
68 {
69 	uint32_t calc = 0;
70 
71 	/*
72 	 * This is really set wrong with respect to a 1-to-many socket. Since
73 	 * sb_cc is the count that everyone has put up, when we re-write
74 	 * sctp_soreceive we will fix this so that ONLY this
75 	 * association's data is taken into account.
76 	 */
77 	if (stcb->sctp_socket == NULL)
78 		return (calc);
79 
80 	if (stcb->asoc.sb_cc == 0 &&
81 	    asoc->size_on_reasm_queue == 0 &&
82 	    asoc->size_on_all_streams == 0) {
83 		/* Full rwnd granted */
84 		calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
85 		return (calc);
86 	}
87 	/* get actual space */
88 	calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
89 
90 	/*
91 	 * take out what has NOT been put on the socket queue and that we
92 	 * still hold for putting up.
93 	 */
94 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 	    asoc->cnt_on_reasm_queue * MSIZE));
96 	calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 	    asoc->cnt_on_all_streams * MSIZE));
98 
99 	if (calc == 0) {
100 		/* out of space */
101 		return (calc);
102 	}
103 	/* what is the overhead of all these rwnd's */
104 	calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
105 	/*
106 	 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 	 * even if it is 0. SWS avoidance engaged.
108 	 */
109 	if (calc < stcb->asoc.my_rwnd_control_len) {
110 		calc = 1;
111 	}
112 	return (calc);
113 }
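/*
 * Illustrative sketch (annotation, not part of the original file): a worked
 * example of the calculation above, assuming a 64000-byte receive buffer and
 * an MSIZE of 256 bytes, with 2000 bytes in 4 chunks on the reassembly queue
 * and 3000 bytes in 6 chunks on the stream queues:
 *
 *   calc  = sctp_sbspace(...)                       e.g. 60000
 *   calc -= 2000 + 4 * 256   (reassembly data + per-mbuf overhead) -> 56976
 *   calc -= 3000 + 6 * 256   (stream queue data + per-mbuf overhead) -> 52440
 *   calc -= my_rwnd_control_len                     (control overhead)
 *
 * If what remains after removing the control overhead is still smaller than
 * my_rwnd_control_len, it is clamped to 1 rather than 0 so the peer keeps
 * probing instead of stalling on a silly-window-syndrome sized window.
 */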
114 
115 
116 
117 /*
118  * Build out our readq entry based on the incoming packet.
119  */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122     struct sctp_nets *net,
123     uint32_t tsn, uint32_t ppid,
124     uint32_t context, uint16_t stream_no,
125     uint16_t stream_seq, uint8_t flags,
126     struct mbuf *dm)
127 {
128 	struct sctp_queued_to_read *read_queue_e = NULL;
129 
130 	sctp_alloc_a_readq(stcb, read_queue_e);
131 	if (read_queue_e == NULL) {
132 		goto failed_build;
133 	}
134 	read_queue_e->sinfo_stream = stream_no;
135 	read_queue_e->sinfo_ssn = stream_seq;
136 	read_queue_e->sinfo_flags = (flags << 8);
137 	read_queue_e->sinfo_ppid = ppid;
138 	read_queue_e->sinfo_context = context;
139 	read_queue_e->sinfo_timetolive = 0;
140 	read_queue_e->sinfo_tsn = tsn;
141 	read_queue_e->sinfo_cumtsn = tsn;
142 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 	read_queue_e->whoFrom = net;
144 	read_queue_e->length = 0;
145 	atomic_add_int(&net->ref_count, 1);
146 	read_queue_e->data = dm;
147 	read_queue_e->spec_flags = 0;
148 	read_queue_e->tail_mbuf = NULL;
149 	read_queue_e->aux_data = NULL;
150 	read_queue_e->stcb = stcb;
151 	read_queue_e->port_from = stcb->rport;
152 	read_queue_e->do_not_ref_stcb = 0;
153 	read_queue_e->end_added = 0;
154 	read_queue_e->some_taken = 0;
155 	read_queue_e->pdapi_aborted = 0;
156 failed_build:
157 	return (read_queue_e);
158 }
159 
160 
161 /*
162  * Build out our readq entry based on the incoming chunk.
163  */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166     struct sctp_tmit_chunk *chk)
167 {
168 	struct sctp_queued_to_read *read_queue_e = NULL;
169 
170 	sctp_alloc_a_readq(stcb, read_queue_e);
171 	if (read_queue_e == NULL) {
172 		goto failed_build;
173 	}
174 	read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 	read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 	read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 	read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 	read_queue_e->sinfo_context = stcb->asoc.context;
179 	read_queue_e->sinfo_timetolive = 0;
180 	read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 	read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 	read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 	read_queue_e->whoFrom = chk->whoTo;
184 	read_queue_e->aux_data = NULL;
185 	read_queue_e->length = 0;
186 	atomic_add_int(&chk->whoTo->ref_count, 1);
187 	read_queue_e->data = chk->data;
188 	read_queue_e->tail_mbuf = NULL;
189 	read_queue_e->stcb = stcb;
190 	read_queue_e->port_from = stcb->rport;
191 	read_queue_e->spec_flags = 0;
192 	read_queue_e->do_not_ref_stcb = 0;
193 	read_queue_e->end_added = 0;
194 	read_queue_e->some_taken = 0;
195 	read_queue_e->pdapi_aborted = 0;
196 failed_build:
197 	return (read_queue_e);
198 }
199 
200 
201 struct mbuf *
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
203 {
204 	struct sctp_extrcvinfo *seinfo;
205 	struct sctp_sndrcvinfo *outinfo;
206 	struct sctp_rcvinfo *rcvinfo;
207 	struct sctp_nxtinfo *nxtinfo;
208 	struct cmsghdr *cmh;
209 	struct mbuf *ret;
210 	int len;
211 	int use_extended;
212 	int provide_nxt;
213 
214 	if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 	    sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 		/* user does not want any ancillary data */
218 		return (NULL);
219 	}
220 	len = 0;
221 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 		len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
223 	}
224 	seinfo = (struct sctp_extrcvinfo *)sinfo;
225 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 	    (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
227 		provide_nxt = 1;
228 		len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
229 	} else {
230 		provide_nxt = 0;
231 	}
232 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 		if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
234 			use_extended = 1;
235 			len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
236 		} else {
237 			use_extended = 0;
238 			len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
239 		}
240 	} else {
241 		use_extended = 0;
242 	}
243 
244 	ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
245 	if (ret == NULL) {
246 		/* No space */
247 		return (ret);
248 	}
249 	SCTP_BUF_LEN(ret) = 0;
250 
251 	/* We need a CMSG header followed by the struct */
252 	cmh = mtod(ret, struct cmsghdr *);
253 	/*
254 	 * Make sure that there is no un-initialized padding between the
255 	 * cmsg header and cmsg data and after the cmsg data.
256 	 */
257 	memset(cmh, 0, len);
258 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259 		cmh->cmsg_level = IPPROTO_SCTP;
260 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261 		cmh->cmsg_type = SCTP_RCVINFO;
262 		rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263 		rcvinfo->rcv_sid = sinfo->sinfo_stream;
264 		rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265 		rcvinfo->rcv_flags = sinfo->sinfo_flags;
266 		rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267 		rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268 		rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269 		rcvinfo->rcv_context = sinfo->sinfo_context;
270 		rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
273 	}
274 	if (provide_nxt) {
275 		cmh->cmsg_level = IPPROTO_SCTP;
276 		cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277 		cmh->cmsg_type = SCTP_NXTINFO;
278 		nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279 		nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
280 		nxtinfo->nxt_flags = 0;
281 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282 			nxtinfo->nxt_flags |= SCTP_UNORDERED;
283 		}
284 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285 			nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
286 		}
287 		if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288 			nxtinfo->nxt_flags |= SCTP_COMPLETE;
289 		}
290 		nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
291 		nxtinfo->nxt_length = seinfo->sreinfo_next_length;
292 		nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
293 		cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294 		SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
295 	}
296 	if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297 		cmh->cmsg_level = IPPROTO_SCTP;
298 		outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
299 		if (use_extended) {
300 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301 			cmh->cmsg_type = SCTP_EXTRCV;
302 			memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
304 		} else {
305 			cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306 			cmh->cmsg_type = SCTP_SNDRCV;
307 			*outinfo = *sinfo;
308 			SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
309 		}
310 	}
311 	return (ret);
312 }
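/*
 * Illustrative sketch (annotation, not part of the original file) of the
 * ancillary data built above.  With SCTP_RECVRCVINFO and SCTP_RECVNXTINFO
 * both enabled and a next message pending, the returned mbuf carries two
 * cmsgs packed back to back:
 *
 *   | cmsghdr{IPPROTO_SCTP, SCTP_RCVINFO} | struct sctp_rcvinfo | pad |
 *   | cmsghdr{IPPROTO_SCTP, SCTP_NXTINFO} | struct sctp_nxtinfo | pad |
 *
 * Each entry consumes CMSG_SPACE(sizeof(struct ...)) bytes, which is why the
 * total length is pre-computed with CMSG_SPACE() while each cmsg_len is set
 * with CMSG_LEN(); the memset() zeroes the alignment padding between and
 * after the structures so no uninitialized kernel memory reaches userland.
 */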
313 
314 
315 static void
316 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
317 {
318 	uint32_t gap, i, cumackp1;
319 	int fnd = 0;
320 
321 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
322 		return;
323 	}
324 	cumackp1 = asoc->cumulative_tsn + 1;
325 	if (SCTP_TSN_GT(cumackp1, tsn)) {
326 		/*
327 		 * this TSN is behind the cum ack and thus we don't need to
328 		 * worry about it being moved from one mapping array to the other.
329 		 */
330 		return;
331 	}
332 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
333 	if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
334 		SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
335 		sctp_print_mapping_array(asoc);
336 #ifdef INVARIANTS
337 		panic("Things are really messed up now!!");
338 #endif
339 	}
340 	SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
341 	SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
342 	if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
343 		asoc->highest_tsn_inside_nr_map = tsn;
344 	}
345 	if (tsn == asoc->highest_tsn_inside_map) {
346 		/* We must back down to see what the new highest is */
347 		for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
348 			SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
349 			if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
350 				asoc->highest_tsn_inside_map = i;
351 				fnd = 1;
352 				break;
353 			}
354 		}
355 		if (!fnd) {
356 			asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
357 		}
358 	}
359 }
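/*
 * Illustrative sketch (annotation, not part of the original file): with
 * mapping_array_base_tsn = 1000, marking TSN 1003 non-revokable computes
 * gap = 1003 - 1000 = 3, sets bit 3 in nr_mapping_array and clears bit 3 in
 * mapping_array.  If 1003 was also highest_tsn_inside_map, the loop above
 * walks back from TSN 1002 until it finds a TSN still present in
 * mapping_array (falling back to mapping_array_base_tsn - 1), keeping the
 * revokable and non-revokable maps consistent for the next SACK/NR-SACK.
 */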
360 
361 
362 /*
363  * We are currently delivering from the reassembly queue. We must continue to
364  * deliver until we either: 1) run out of space, 2) run out of sequential
365  * TSNs, or 3) hit the SCTP_DATA_LAST_FRAG flag.
366  */
367 static void
368 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
369 {
370 	struct sctp_tmit_chunk *chk, *nchk;
371 	uint16_t nxt_todel;
372 	uint16_t stream_no;
373 	int end = 0;
374 	int cntDel;
375 	struct sctp_queued_to_read *control, *ctl, *nctl;
376 
377 	if (stcb == NULL)
378 		return;
379 
380 	cntDel = stream_no = 0;
381 	if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
382 	    (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
383 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
384 		/* socket above is long gone or going.. */
385 abandon:
386 		asoc->fragmented_delivery_inprogress = 0;
387 		TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
388 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
389 			asoc->size_on_reasm_queue -= chk->send_size;
390 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
391 			/*
392 			 * Lose the data pointer, since it's in the socket
393 			 * buffer
394 			 */
395 			if (chk->data) {
396 				sctp_m_freem(chk->data);
397 				chk->data = NULL;
398 			}
399 			/* Now free the address and data */
400 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
401 			/* sa_ignore FREED_MEMORY */
402 		}
403 		return;
404 	}
405 	SCTP_TCB_LOCK_ASSERT(stcb);
406 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
407 		if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408 			/* Can't deliver more :< */
409 			return;
410 		}
411 		stream_no = chk->rec.data.stream_number;
412 		nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
413 		if (nxt_todel != chk->rec.data.stream_seq &&
414 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
415 			/*
416 			 * Not the next sequence to deliver in its stream
417 			 * and not unordered, so we cannot deliver it yet.
418 			 */
419 			return;
420 		}
421 		if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
422 
423 			control = sctp_build_readq_entry_chk(stcb, chk);
424 			if (control == NULL) {
425 				/* out of memory? */
426 				return;
427 			}
428 			/* save it off for our future deliveries */
429 			stcb->asoc.control_pdapi = control;
430 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
431 				end = 1;
432 			else
433 				end = 0;
434 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
435 			sctp_add_to_readq(stcb->sctp_ep,
436 			    stcb, control, &stcb->sctp_socket->so_rcv, end,
437 			    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
438 			cntDel++;
439 		} else {
440 			if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
441 				end = 1;
442 			else
443 				end = 0;
444 			sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
445 			if (sctp_append_to_readq(stcb->sctp_ep, stcb,
446 			    stcb->asoc.control_pdapi,
447 			    chk->data, end, chk->rec.data.TSN_seq,
448 			    &stcb->sctp_socket->so_rcv)) {
449 				/*
450 				 * something is very wrong, either
451 				 * control_pdapi is NULL, or the tail_mbuf
452 				 * is corrupt, or there is an EOM already on
453 				 * the mbuf chain.
454 				 */
455 				if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
456 					goto abandon;
457 				} else {
458 #ifdef INVARIANTS
459 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
460 						panic("This should not happen control_pdapi NULL?");
461 					}
462 					/* if we did not panic, it was an EOM */
463 					panic("Bad chunking ??");
464 #else
465 					if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
466 						SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
467 					}
468 					SCTP_PRINTF("Bad chunking ??\n");
469 					SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
470 
471 #endif
472 					goto abandon;
473 				}
474 			}
475 			cntDel++;
476 		}
477 		/* pull it off; we delivered it */
478 		TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
479 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
480 			asoc->fragmented_delivery_inprogress = 0;
481 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
482 				asoc->strmin[stream_no].last_sequence_delivered++;
483 			}
484 			if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
485 				SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
486 			}
487 		} else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
488 			/*
489 			 * turn the flag back on since we just delivered
490 			 * yet another one.
491 			 */
492 			asoc->fragmented_delivery_inprogress = 1;
493 		}
494 		asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
495 		asoc->last_flags_delivered = chk->rec.data.rcv_flags;
496 		asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
497 		asoc->last_strm_no_delivered = chk->rec.data.stream_number;
498 
499 		asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
500 		asoc->size_on_reasm_queue -= chk->send_size;
501 		sctp_ucount_decr(asoc->cnt_on_reasm_queue);
502 		/* free up the chk */
503 		chk->data = NULL;
504 		sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
505 
506 		if (asoc->fragmented_delivery_inprogress == 0) {
507 			/*
508 			 * Now let's see if we can deliver the next one on
509 			 * the stream
510 			 */
511 			struct sctp_stream_in *strm;
512 
513 			strm = &asoc->strmin[stream_no];
514 			nxt_todel = strm->last_sequence_delivered + 1;
515 			TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
516 				/* Deliver more if we can. */
517 				if (nxt_todel == ctl->sinfo_ssn) {
518 					TAILQ_REMOVE(&strm->inqueue, ctl, next);
519 					asoc->size_on_all_streams -= ctl->length;
520 					sctp_ucount_decr(asoc->cnt_on_all_streams);
521 					strm->last_sequence_delivered++;
522 					sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
523 					sctp_add_to_readq(stcb->sctp_ep, stcb,
524 					    ctl,
525 					    &stcb->sctp_socket->so_rcv, 1,
526 					    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
527 				} else {
528 					break;
529 				}
530 				nxt_todel = strm->last_sequence_delivered + 1;
531 			}
532 			break;
533 		}
534 	}
535 }
536 
537 /*
538  * Queue the chunk either right into the socket buffer, if it is the next one
539  * to go, OR put it in the correct place in the delivery queue.  If we do
540  * append to the so_buf, keep doing so until we run out of in-order data. One
541  * big question still remains: what to do when the socket buffer is FULL?
542  */
543 static void
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545     struct sctp_queued_to_read *control, int *abort_flag)
546 {
547 	/*
548 	 * FIX-ME maybe? What happens when the SSN wraps? If we are getting
549 	 * all the data in one stream this could happen quite rapidly. One
550 	 * could use the TSN to keep track of things, but this scheme breaks
551 	 * down in the other type of stream usage that could occur. Send a
552 	 * single msg to stream 0, send 4 billion messages to stream 1, now
553 	 * send a message to stream 0. You have a situation where the TSN
554 	 * has wrapped but the SSN in that stream has not. Is this worth
555 	 * worrying about, or should we just change our queue sort at the
556 	 * bottom to be by TSN?
557 	 *
558 	 * Could it also be legal for a peer to send SSN 1 with TSN 2 and SSN 2
559 	 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
560 	 * assignment this could happen... and I don't see how this would be
561 	 * a violation. So for now I am undecided and will leave the sort by
562 	 * SSN alone. Maybe a hybrid approach is the answer.
563 	 *
564 	 */
565 	struct sctp_stream_in *strm;
566 	struct sctp_queued_to_read *at;
567 	int queue_needed;
568 	uint16_t nxt_todel;
569 	struct mbuf *op_err;
570 	char msg[SCTP_DIAG_INFO_LEN];
571 
572 	queue_needed = 1;
573 	asoc->size_on_all_streams += control->length;
574 	sctp_ucount_incr(asoc->cnt_on_all_streams);
575 	strm = &asoc->strmin[control->sinfo_stream];
576 	nxt_todel = strm->last_sequence_delivered + 1;
577 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578 		sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
579 	}
580 	SCTPDBG(SCTP_DEBUG_INDATA1,
581 	    "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
582 	    (uint32_t) control->sinfo_stream,
583 	    (uint32_t) strm->last_sequence_delivered,
584 	    (uint32_t) nxt_todel);
585 	if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
586 		/* The incoming sseq is behind where we last delivered? */
587 		SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
588 		    control->sinfo_ssn, strm->last_sequence_delivered);
589 protocol_error:
590 		/*
591 		 * throw it in the stream so it gets cleaned up in
592 		 * association destruction
593 		 */
594 		TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
595 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
596 		    strm->last_sequence_delivered, control->sinfo_tsn,
597 		    control->sinfo_stream, control->sinfo_ssn);
598 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
599 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
600 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
601 		*abort_flag = 1;
602 		return;
603 
604 	}
605 	if (nxt_todel == control->sinfo_ssn) {
606 		/* can be delivered right away? */
607 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
608 			sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
609 		}
610 		/* EY it won't be queued if it could be delivered directly */
611 		queue_needed = 0;
612 		asoc->size_on_all_streams -= control->length;
613 		sctp_ucount_decr(asoc->cnt_on_all_streams);
614 		strm->last_sequence_delivered++;
615 
616 		sctp_mark_non_revokable(asoc, control->sinfo_tsn);
617 		sctp_add_to_readq(stcb->sctp_ep, stcb,
618 		    control,
619 		    &stcb->sctp_socket->so_rcv, 1,
620 		    SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
621 		TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
622 			/* all delivered */
623 			nxt_todel = strm->last_sequence_delivered + 1;
624 			if (nxt_todel == control->sinfo_ssn) {
625 				TAILQ_REMOVE(&strm->inqueue, control, next);
626 				asoc->size_on_all_streams -= control->length;
627 				sctp_ucount_decr(asoc->cnt_on_all_streams);
628 				strm->last_sequence_delivered++;
629 				/*
630 				 * We ignore the return of deliver_data here
631 				 * since we always can hold the chunk on the
632 				 * d-queue. And we have a finite number that
633 				 * can be delivered from the strq.
634 				 */
635 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
636 					sctp_log_strm_del(control, NULL,
637 					    SCTP_STR_LOG_FROM_IMMED_DEL);
638 				}
639 				sctp_mark_non_revokable(asoc, control->sinfo_tsn);
640 				sctp_add_to_readq(stcb->sctp_ep, stcb,
641 				    control,
642 				    &stcb->sctp_socket->so_rcv, 1,
643 				    SCTP_READ_LOCK_NOT_HELD,
644 				    SCTP_SO_NOT_LOCKED);
645 				continue;
646 			}
647 			break;
648 		}
649 	}
650 	if (queue_needed) {
651 		/*
652 		 * Ok, we did not deliver this guy, find the correct place
653 		 * to put it on the queue.
654 		 */
655 		if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
656 			goto protocol_error;
657 		}
658 		if (TAILQ_EMPTY(&strm->inqueue)) {
659 			/* Empty queue */
660 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
661 				sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
662 			}
663 			TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
664 		} else {
665 			TAILQ_FOREACH(at, &strm->inqueue, next) {
666 				if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
667 					/*
668 					 * one in queue is bigger than the
669 					 * new one, insert before this one
670 					 */
671 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
672 						sctp_log_strm_del(control, at,
673 						    SCTP_STR_LOG_FROM_INSERT_MD);
674 					}
675 					TAILQ_INSERT_BEFORE(at, control, next);
676 					break;
677 				} else if (at->sinfo_ssn == control->sinfo_ssn) {
678 					/*
679 					 * Gak, the peer sent me a duplicate
680 					 * stream seq number
681 					 */
682 					/*
683 					 * foo bar, I guess I will just free
684 					 * this new guy; should we abort
685 					 * too? FIX ME MAYBE? Or it COULD be
686 					 * that the SSNs have wrapped.
687 					 * Maybe I should compare to the TSN
688 					 * somehow... sigh, for now just blow
689 					 * away the chunk!
690 					 */
691 
692 					if (control->data)
693 						sctp_m_freem(control->data);
694 					control->data = NULL;
695 					asoc->size_on_all_streams -= control->length;
696 					sctp_ucount_decr(asoc->cnt_on_all_streams);
697 					if (control->whoFrom) {
698 						sctp_free_remote_addr(control->whoFrom);
699 						control->whoFrom = NULL;
700 					}
701 					sctp_free_a_readq(stcb, control);
702 					return;
703 				} else {
704 					if (TAILQ_NEXT(at, next) == NULL) {
705 						/*
706 						 * We are at the end, insert
707 						 * it after this one
708 						 */
709 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
710 							sctp_log_strm_del(control, at,
711 							    SCTP_STR_LOG_FROM_INSERT_TL);
712 						}
713 						TAILQ_INSERT_AFTER(&strm->inqueue,
714 						    at, control, next);
715 						break;
716 					}
717 				}
718 			}
719 		}
720 	}
721 }
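/*
 * Illustrative sketch (annotation, not part of the original file) of the
 * serial-number arithmetic behind the SSN checks above: SCTP_SSN_GE() and
 * SCTP_SSN_GT() compare 16-bit sequence numbers modulo 2^16.  With
 * last_sequence_delivered = 65535, an arriving SSN of 0 counts as "ahead"
 * (65535 + 1 wraps to 0) and is delivered immediately, while an arriving
 * SSN of 65000 counts as "behind" and takes the protocol_error path.  The
 * FIX-ME above concerns the pathological case where the TSN space wraps
 * while a given stream's SSN has barely advanced.
 */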
722 
723 /*
724  * Returns two things: the total size (via *t_size) of the deliverable parts
725  * of the first fragmented message on the reassembly queue, and 1 if all of
726  * the message is ready or 0 if the message is still incomplete.
727  */
728 static int
729 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
730 {
731 	struct sctp_tmit_chunk *chk;
732 	uint32_t tsn;
733 
734 	*t_size = 0;
735 	chk = TAILQ_FIRST(&asoc->reasmqueue);
736 	if (chk == NULL) {
737 		/* nothing on the queue */
738 		return (0);
739 	}
740 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
741 		/* Not a first on the queue */
742 		return (0);
743 	}
744 	tsn = chk->rec.data.TSN_seq;
745 	TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
746 		if (tsn != chk->rec.data.TSN_seq) {
747 			return (0);
748 		}
749 		*t_size += chk->send_size;
750 		if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
751 			return (1);
752 		}
753 		tsn++;
754 	}
755 	return (0);
756 }
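/*
 * Illustrative sketch (annotation, not part of the original file): if the
 * reassembly queue holds consecutive TSNs 100 (B-bit), 101 and 102 with
 * send_size 1200/1200/800, the walk above sets *t_size = 3200 and returns 1
 * only when 102 carries the E-bit.  A missing E-bit, or a hole such as
 * 100, 101, 103, returns 0 with *t_size counting only the contiguous prefix.
 */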
757 
758 static void
759 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
760 {
761 	struct sctp_tmit_chunk *chk;
762 	uint16_t nxt_todel;
763 	uint32_t tsize, pd_point;
764 
765 doit_again:
766 	chk = TAILQ_FIRST(&asoc->reasmqueue);
767 	if (chk == NULL) {
768 		/* Huh? */
769 		asoc->size_on_reasm_queue = 0;
770 		asoc->cnt_on_reasm_queue = 0;
771 		return;
772 	}
773 	if (asoc->fragmented_delivery_inprogress == 0) {
774 		nxt_todel =
775 		    asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
776 		if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
777 		    (nxt_todel == chk->rec.data.stream_seq ||
778 		    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
779 			/*
780 			 * Yep, the first one is here and it's ok to deliver
781 			 * but should we?
782 			 */
783 			if (stcb->sctp_socket) {
784 				pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
785 				    stcb->sctp_ep->partial_delivery_point);
786 			} else {
787 				pd_point = stcb->sctp_ep->partial_delivery_point;
788 			}
789 			if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
790 				/*
791 				 * Yes, we set up to start reception by
792 				 * backing down the TSN, just in case we
793 				 * can't deliver.
794 				 */
795 				asoc->fragmented_delivery_inprogress = 1;
796 				asoc->tsn_last_delivered =
797 				    chk->rec.data.TSN_seq - 1;
798 				asoc->str_of_pdapi =
799 				    chk->rec.data.stream_number;
800 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
801 				asoc->pdapi_ppid = chk->rec.data.payloadtype;
802 				asoc->fragment_flags = chk->rec.data.rcv_flags;
803 				sctp_service_reassembly(stcb, asoc);
804 			}
805 		}
806 	} else {
807 		/*
808 		 * Service re-assembly will deliver stream data queued at
809 		 * the end of fragmented delivery... but it won't know to go
810 		 * back and call itself again... we do that here with the
811 		 * goto doit_again.
812 		 */
813 		sctp_service_reassembly(stcb, asoc);
814 		if (asoc->fragmented_delivery_inprogress == 0) {
815 			/*
816 			 * finished our Fragmented delivery, could be more
817 			 * waiting?
818 			 */
819 			goto doit_again;
820 		}
821 	}
822 }
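/*
 * Illustrative sketch (annotation, not part of the original file): assuming
 * a 64000-byte receive buffer and an SCTP_PARTIAL_DELIVERY_SHIFT of, say, 2,
 * pd_point = min(64000 >> 2, sctp_ep->partial_delivery_point).  Fragmented
 * (PD-API) delivery therefore starts once the first fragment is present,
 * it is deliverable on its stream (or unordered), and either the complete
 * message or at least pd_point bytes of its leading fragments are queued.
 */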
823 
824 /*
825  * Dump onto the re-assembly queue, in its proper place. After dumping on the
826  * queue, see if anything can be delivered. If so, pull it off (or as much as
827  * we can). If we run out of space then we must dump what we can and set the
828  * appropriate flag to say we queued what we could.
829  */
830 static void
831 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
832     struct sctp_tmit_chunk *chk, int *abort_flag)
833 {
834 	struct mbuf *op_err;
835 	char msg[SCTP_DIAG_INFO_LEN];
836 	uint32_t cum_ackp1, prev_tsn, post_tsn;
837 	struct sctp_tmit_chunk *at, *prev, *next;
838 
839 	prev = next = NULL;
840 	cum_ackp1 = asoc->tsn_last_delivered + 1;
841 	if (TAILQ_EMPTY(&asoc->reasmqueue)) {
842 		/* This is the first one on the queue */
843 		TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
844 		/*
845 		 * we do not check for delivery of anything when only one
846 		 * fragment is here
847 		 */
848 		asoc->size_on_reasm_queue = chk->send_size;
849 		sctp_ucount_incr(asoc->cnt_on_reasm_queue);
850 		if (chk->rec.data.TSN_seq == cum_ackp1) {
851 			if (asoc->fragmented_delivery_inprogress == 0 &&
852 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
853 			    SCTP_DATA_FIRST_FRAG) {
854 				/*
855 				 * An empty queue, no delivery in progress,
856 				 * we hit the next one and it does NOT have
857 				 * a FIRST fragment mark.
858 				 */
859 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
860 				snprintf(msg, sizeof(msg),
861 				    "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
862 				    chk->rec.data.TSN_seq,
863 				    chk->rec.data.stream_number,
864 				    chk->rec.data.stream_seq);
865 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
866 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
867 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
868 				*abort_flag = 1;
869 			} else if (asoc->fragmented_delivery_inprogress &&
870 			    (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
871 				/*
872 				 * We are doing a partial delivery and the
873 				 * NEXT chunk MUST be either the LAST or
874 				 * MIDDLE fragment NOT a FIRST
875 				 */
876 				SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
877 				snprintf(msg, sizeof(msg),
878 				    "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
879 				    chk->rec.data.TSN_seq,
880 				    chk->rec.data.stream_number,
881 				    chk->rec.data.stream_seq);
882 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
883 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
884 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
885 				*abort_flag = 1;
886 			} else if (asoc->fragmented_delivery_inprogress) {
887 				/*
888 				 * Here we are ok with a MIDDLE or LAST
889 				 * piece
890 				 */
891 				if (chk->rec.data.stream_number !=
892 				    asoc->str_of_pdapi) {
893 					/* Got to be the right STR No */
894 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
895 					    chk->rec.data.stream_number,
896 					    asoc->str_of_pdapi);
897 					snprintf(msg, sizeof(msg),
898 					    "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
899 					    asoc->str_of_pdapi,
900 					    chk->rec.data.TSN_seq,
901 					    chk->rec.data.stream_number,
902 					    chk->rec.data.stream_seq);
903 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
904 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
905 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
906 					*abort_flag = 1;
907 				} else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
908 					    SCTP_DATA_UNORDERED &&
909 				    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
910 					/* Got to be the right STR Seq */
911 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
912 					    chk->rec.data.stream_seq,
913 					    asoc->ssn_of_pdapi);
914 					snprintf(msg, sizeof(msg),
915 					    "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
916 					    asoc->ssn_of_pdapi,
917 					    chk->rec.data.TSN_seq,
918 					    chk->rec.data.stream_number,
919 					    chk->rec.data.stream_seq);
920 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
921 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
922 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
923 					*abort_flag = 1;
924 				}
925 			}
926 		}
927 		return;
928 	}
929 	/* Find its place */
930 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
931 		if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
932 			/*
933 			 * one in queue is bigger than the new one, insert
934 			 * before this one
935 			 */
936 			/* A check */
937 			asoc->size_on_reasm_queue += chk->send_size;
938 			sctp_ucount_incr(asoc->cnt_on_reasm_queue);
939 			next = at;
940 			TAILQ_INSERT_BEFORE(at, chk, sctp_next);
941 			break;
942 		} else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
943 			/* Gak, the peer sent me a duplicate TSN */
944 			/*
945 			 * foo bar, I guess I will just free this new guy,
946 			 * should we abort too? FIX ME MAYBE? Or it COULD be
947 			 * that the SSN's have wrapped. Maybe I should
948 			 * compare to TSN somehow... sigh for now just blow
949 			 * away the chunk!
950 			 */
951 			if (chk->data) {
952 				sctp_m_freem(chk->data);
953 				chk->data = NULL;
954 			}
955 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
956 			return;
957 		} else {
958 			prev = at;
959 			if (TAILQ_NEXT(at, sctp_next) == NULL) {
960 				/*
961 				 * We are at the end, insert it after this
962 				 * one
963 				 */
964 				/* check it first */
965 				asoc->size_on_reasm_queue += chk->send_size;
966 				sctp_ucount_incr(asoc->cnt_on_reasm_queue);
967 				TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
968 				break;
969 			}
970 		}
971 	}
972 	/* Now the audits */
973 	if (prev) {
974 		prev_tsn = chk->rec.data.TSN_seq - 1;
975 		if (prev_tsn == prev->rec.data.TSN_seq) {
976 			/*
977 			 * Ok the one I am dropping onto the end is the
978 			 * NEXT. A bit of validation here.
979 			 */
980 			if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
981 			    SCTP_DATA_FIRST_FRAG ||
982 			    (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
983 			    SCTP_DATA_MIDDLE_FRAG) {
984 				/*
985 				 * Insert chk MUST be a MIDDLE or LAST
986 				 * fragment
987 				 */
988 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
989 				    SCTP_DATA_FIRST_FRAG) {
990 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
991 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
992 					snprintf(msg, sizeof(msg),
993 					    "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
994 					    chk->rec.data.TSN_seq,
995 					    chk->rec.data.stream_number,
996 					    chk->rec.data.stream_seq);
997 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
998 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
999 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1000 					*abort_flag = 1;
1001 					return;
1002 				}
1003 				if (chk->rec.data.stream_number !=
1004 				    prev->rec.data.stream_number) {
1005 					/*
1006 					 * Huh, need the correct STR here,
1007 					 * they must be the same.
1008 					 */
1009 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1010 					    chk->rec.data.stream_number,
1011 					    prev->rec.data.stream_number);
1012 					snprintf(msg, sizeof(msg),
1013 					    "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1014 					    prev->rec.data.stream_number,
1015 					    chk->rec.data.TSN_seq,
1016 					    chk->rec.data.stream_number,
1017 					    chk->rec.data.stream_seq);
1018 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1019 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1020 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1021 					*abort_flag = 1;
1022 					return;
1023 				}
1024 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1025 				    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1026 					/*
1027 					 * Huh, need the same ordering here,
1028 					 * they must be the same.
1029 					 */
1030 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
1031 					snprintf(msg, sizeof(msg),
1032 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1033 					    (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1034 					    chk->rec.data.TSN_seq,
1035 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1036 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1037 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1038 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1039 					*abort_flag = 1;
1040 					return;
1041 				}
1042 				if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1043 				    chk->rec.data.stream_seq !=
1044 				    prev->rec.data.stream_seq) {
1045 					/*
1046 					 * Huh, need the correct STR here,
1047 					 * they must be the same.
1048 					 */
1049 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1050 					    chk->rec.data.stream_seq,
1051 					    prev->rec.data.stream_seq);
1052 					snprintf(msg, sizeof(msg),
1053 					    "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1054 					    prev->rec.data.stream_seq,
1055 					    chk->rec.data.TSN_seq,
1056 					    chk->rec.data.stream_number,
1057 					    chk->rec.data.stream_seq);
1058 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1059 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1060 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1061 					*abort_flag = 1;
1062 					return;
1063 				}
1064 			} else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1065 			    SCTP_DATA_LAST_FRAG) {
1066 				/* Insert chk MUST be a FIRST */
1067 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1068 				    SCTP_DATA_FIRST_FRAG) {
1069 					SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1070 					snprintf(msg, sizeof(msg),
1071 					    "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1072 					    chk->rec.data.TSN_seq,
1073 					    chk->rec.data.stream_number,
1074 					    chk->rec.data.stream_seq);
1075 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1076 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1077 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1078 					*abort_flag = 1;
1079 					return;
1080 				}
1081 			}
1082 		}
1083 	}
1084 	if (next) {
1085 		post_tsn = chk->rec.data.TSN_seq + 1;
1086 		if (post_tsn == next->rec.data.TSN_seq) {
1087 			/*
1088 			 * Ok the one I am inserting ahead of is my NEXT
1089 			 * one. A bit of validation here.
1090 			 */
1091 			if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1092 				/* Insert chk MUST be a last fragment */
1093 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1094 				    != SCTP_DATA_LAST_FRAG) {
1095 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1096 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1097 					snprintf(msg, sizeof(msg),
1098 					    "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1099 					    chk->rec.data.TSN_seq,
1100 					    chk->rec.data.stream_number,
1101 					    chk->rec.data.stream_seq);
1102 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1103 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1104 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1105 					*abort_flag = 1;
1106 					return;
1107 				}
1108 			} else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1109 				    SCTP_DATA_MIDDLE_FRAG ||
1110 				    (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1111 			    SCTP_DATA_LAST_FRAG) {
1112 				/*
1113 				 * Insert chk CAN be MIDDLE or FIRST NOT
1114 				 * LAST
1115 				 */
1116 				if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1117 				    SCTP_DATA_LAST_FRAG) {
1118 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1119 					SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1120 					snprintf(msg, sizeof(msg),
1121 					    "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1122 					    chk->rec.data.TSN_seq,
1123 					    chk->rec.data.stream_number,
1124 					    chk->rec.data.stream_seq);
1125 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1126 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1127 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1128 					*abort_flag = 1;
1129 					return;
1130 				}
1131 				if (chk->rec.data.stream_number !=
1132 				    next->rec.data.stream_number) {
1133 					/*
1134 					 * Huh, need the correct STR here,
1135 					 * they must be the same.
1136 					 */
1137 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sid:%d not the same as at:%d\n",
1138 					    chk->rec.data.stream_number,
1139 					    next->rec.data.stream_number);
1140 					snprintf(msg, sizeof(msg),
1141 					    "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1142 					    next->rec.data.stream_number,
1143 					    chk->rec.data.TSN_seq,
1144 					    chk->rec.data.stream_number,
1145 					    chk->rec.data.stream_seq);
1146 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1147 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1148 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1149 					*abort_flag = 1;
1150 					return;
1151 				}
1152 				if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1153 				    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1154 					/*
1155 					 * Huh, need the same ordering here,
1156 					 * they must be the same.
1157 					 */
1158 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
1159 					snprintf(msg, sizeof(msg),
1160 					    "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1161 					    (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1162 					    chk->rec.data.TSN_seq,
1163 					    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1164 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1165 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1166 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1167 					*abort_flag = 1;
1168 					return;
1169 				}
1170 				if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1171 				    chk->rec.data.stream_seq !=
1172 				    next->rec.data.stream_seq) {
1173 					/*
1174 					 * Huh, need the correct STR here,
1175 					 * they must be the same.
1176 					 */
1177 					SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1178 					    chk->rec.data.stream_seq,
1179 					    next->rec.data.stream_seq);
1180 					snprintf(msg, sizeof(msg),
1181 					    "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1182 					    next->rec.data.stream_seq,
1183 					    chk->rec.data.TSN_seq,
1184 					    chk->rec.data.stream_number,
1185 					    chk->rec.data.stream_seq);
1186 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1187 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1188 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1189 					*abort_flag = 1;
1190 					return;
1191 				}
1192 			}
1193 		}
1194 	}
1195 	/* Do we need to do some delivery? check */
1196 	sctp_deliver_reasm_check(stcb, asoc);
1197 }
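/*
 * Illustrative sketch (annotation, not part of the original file) of the
 * prev/next audits above: a valid fragment train for one message looks like
 *
 *   TSN 10: B-bit set      (SCTP_DATA_FIRST_FRAG)
 *   TSN 11: neither bit    (SCTP_DATA_MIDDLE_FRAG)
 *   TSN 12: E-bit set      (SCTP_DATA_LAST_FRAG)
 *
 * with all three carrying the same SID, the same U-bit and, for ordered
 * data, the same SSN.  A chunk whose flags or stream fields contradict its
 * newly adjacent neighbours (for example a B-bit chunk landing immediately
 * after a middle fragment) is reported as a protocol violation and the
 * association is aborted.
 */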
1198 
1199 /*
1200  * This is an unfortunate routine. It checks to make sure an evil guy is not
1201  * stuffing us full of bad packet fragments. A broken peer could also do this,
1202  * but this is doubtful. It is too bad I must worry about evil crackers; sigh
1203  * :< more cycles.
1204  */
1205 static int
1206 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1207     uint32_t TSN_seq)
1208 {
1209 	struct sctp_tmit_chunk *at;
1210 	uint32_t tsn_est;
1211 
1212 	TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1213 		if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1214 			/* is it one bigger? */
1215 			tsn_est = at->rec.data.TSN_seq + 1;
1216 			if (tsn_est == TSN_seq) {
1217 				/* yep. It better be a last then */
1218 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1219 				    SCTP_DATA_LAST_FRAG) {
1220 					/*
1221 					 * Ok this guy belongs next to a guy
1222 					 * that is NOT last, it should be a
1223 					 * middle/last, not a complete
1224 					 * chunk.
1225 					 */
1226 					return (1);
1227 				} else {
1228 					/*
1229 					 * This guy is ok since it's a LAST
1230 					 * and the new chunk is a fully
1231 					 * self-contained one.
1232 					 */
1233 					return (0);
1234 				}
1235 			}
1236 		} else if (TSN_seq == at->rec.data.TSN_seq) {
1237 			/* Software error since I have a dup? */
1238 			return (1);
1239 		} else {
1240 			/*
1241 			 * Ok, 'at' is larger than the new chunk, but does it
1242 			 * need to be right before it?
1243 			 */
1244 			tsn_est = TSN_seq + 1;
1245 			if (tsn_est == at->rec.data.TSN_seq) {
1246 				/* Yep, It better be a first */
1247 				if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1248 				    SCTP_DATA_FIRST_FRAG) {
1249 					return (1);
1250 				} else {
1251 					return (0);
1252 				}
1253 			}
1254 		}
1255 	}
1256 	return (0);
1257 }
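/*
 * Illustrative sketch (annotation, not part of the original file): if the
 * reassembly queue holds TSN 50 flagged as a MIDDLE fragment, then TSN 51
 * is claimed by that in-progress message and this routine returns 1; a
 * non-fragmented chunk arriving with TSN 51 would overlap the message.  If
 * TSN 50 instead carried the E-bit (LAST fragment), TSN 51 returns 0 and
 * is acceptable as the start of a new message.
 */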
1258 
1259 static int
1260 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1261     struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1262     struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1263     int *break_flag, int last_chunk)
1264 {
1265 	/* Process a data chunk */
1267 	struct sctp_tmit_chunk *chk;
1268 	uint32_t tsn, gap;
1269 	struct mbuf *dmbuf;
1270 	int the_len;
1271 	int need_reasm_check = 0;
1272 	uint16_t strmno, strmseq;
1273 	struct mbuf *op_err;
1274 	char msg[SCTP_DIAG_INFO_LEN];
1275 	struct sctp_queued_to_read *control;
1276 	int ordered;
1277 	uint32_t protocol_id;
1278 	uint8_t chunk_flags;
1279 	struct sctp_stream_reset_list *liste;
1280 
1281 	chk = NULL;
1282 	tsn = ntohl(ch->dp.tsn);
1283 	chunk_flags = ch->ch.chunk_flags;
1284 	if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1285 		asoc->send_sack = 1;
1286 	}
1287 	protocol_id = ch->dp.protocol_id;
1288 	ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1289 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1290 		sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1291 	}
1292 	if (stcb == NULL) {
1293 		return (0);
1294 	}
1295 	SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1296 	if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1297 		/* It is a duplicate */
1298 		SCTP_STAT_INCR(sctps_recvdupdata);
1299 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1300 			/* Record a dup for the next outbound sack */
1301 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1302 			asoc->numduptsns++;
1303 		}
1304 		asoc->send_sack = 1;
1305 		return (0);
1306 	}
1307 	/* Calculate the number of TSN's between the base and this TSN */
1308 	SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1309 	if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1310 		/* Can't hold the bit in the mapping at max array, toss it */
1311 		return (0);
1312 	}
1313 	if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1314 		SCTP_TCB_LOCK_ASSERT(stcb);
1315 		if (sctp_expand_mapping_array(asoc, gap)) {
1316 			/* Can't expand, drop it */
1317 			return (0);
1318 		}
1319 	}
1320 	if (SCTP_TSN_GT(tsn, *high_tsn)) {
1321 		*high_tsn = tsn;
1322 	}
1323 	/* See if we have received this one already */
1324 	if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1325 	    SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1326 		SCTP_STAT_INCR(sctps_recvdupdata);
1327 		if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1328 			/* Record a dup for the next outbound sack */
1329 			asoc->dup_tsns[asoc->numduptsns] = tsn;
1330 			asoc->numduptsns++;
1331 		}
1332 		asoc->send_sack = 1;
1333 		return (0);
1334 	}
1335 	/*
1336 	 * Check to see about the GONE flag; duplicates would have caused a sack
1337 	 * to be sent up above.
1338 	 */
1339 	if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1340 	    (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1341 	    (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1342 		/*
1343 		 * wait a minute, this guy is gone, there is no longer a
1344 		 * receiver. Send peer an ABORT!
1345 		 */
1346 		op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1347 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1348 		*abort_flag = 1;
1349 		return (0);
1350 	}
1351 	/*
1352 	 * Now before going further we see if there is room. If NOT then we
1353 	 * MAY let one through only IF this TSN is the one we are waiting
1354 	 * for on a partial delivery API.
1355 	 */
1356 
1357 	/* now do the tests */
1358 	if (((asoc->cnt_on_all_streams +
1359 	    asoc->cnt_on_reasm_queue +
1360 	    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1361 	    (((int)asoc->my_rwnd) <= 0)) {
1362 		/*
1363 		 * When we have NO room in the rwnd we check to make sure
1364 		 * the reader is doing its job...
1365 		 */
1366 		if (stcb->sctp_socket->so_rcv.sb_cc) {
1367 			/* some to read, wake-up */
1368 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1369 			struct socket *so;
1370 
1371 			so = SCTP_INP_SO(stcb->sctp_ep);
1372 			atomic_add_int(&stcb->asoc.refcnt, 1);
1373 			SCTP_TCB_UNLOCK(stcb);
1374 			SCTP_SOCKET_LOCK(so, 1);
1375 			SCTP_TCB_LOCK(stcb);
1376 			atomic_subtract_int(&stcb->asoc.refcnt, 1);
1377 			if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1378 				/* assoc was freed while we were unlocked */
1379 				SCTP_SOCKET_UNLOCK(so, 1);
1380 				return (0);
1381 			}
1382 #endif
1383 			sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1384 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1385 			SCTP_SOCKET_UNLOCK(so, 1);
1386 #endif
1387 		}
1388 		/* now is it in the mapping array of what we have accepted? */
1389 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1390 		    SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1391 			/* Nope not in the valid range dump it */
1392 			/* Nope, not in the valid range; dump it */
1393 			if ((asoc->cnt_on_all_streams +
1394 			    asoc->cnt_on_reasm_queue +
1395 			    asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1396 				SCTP_STAT_INCR(sctps_datadropchklmt);
1397 			} else {
1398 				SCTP_STAT_INCR(sctps_datadroprwnd);
1399 			}
1400 			*break_flag = 1;
1401 			return (0);
1402 		}
1403 	}
1404 	strmno = ntohs(ch->dp.stream_id);
1405 	if (strmno >= asoc->streamincnt) {
1406 		struct sctp_paramhdr *phdr;
1407 		struct mbuf *mb;
1408 
1409 		mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1410 		    0, M_NOWAIT, 1, MT_DATA);
1411 		if (mb != NULL) {
1412 			/* add some space up front so prepend will work well */
1413 			SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1414 			phdr = mtod(mb, struct sctp_paramhdr *);
1415 			/*
1416 			 * Error causes are just params, and this one has
1417 			 * two back-to-back phdrs, one with the error type
1418 			 * and size, the other with the stream id and a reserved field.
1419 			 */
1420 			SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1421 			phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1422 			phdr->param_length =
1423 			    htons(sizeof(struct sctp_paramhdr) * 2);
1424 			phdr++;
1425 			/* We insert the stream in the type field */
1426 			phdr->param_type = ch->dp.stream_id;
1427 			/* And set the length to 0 for the rsvd field */
1428 			phdr->param_length = 0;
1429 			sctp_queue_op_err(stcb, mb);
1430 		}
1431 		SCTP_STAT_INCR(sctps_badsid);
1432 		SCTP_TCB_LOCK_ASSERT(stcb);
1433 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1434 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1435 			asoc->highest_tsn_inside_nr_map = tsn;
1436 		}
1437 		if (tsn == (asoc->cumulative_tsn + 1)) {
1438 			/* Update cum-ack */
1439 			asoc->cumulative_tsn = tsn;
1440 		}
1441 		return (0);
1442 	}
1443 	/*
1444 	 * Before we continue let's validate that we are not being fooled by
1445 	 * an evil attacker. We can only have 4k chunks based on our TSN
1446 	 * spread allowed by the mapping array 512 * 8 bits, so there is no
1447 	 * way our stream sequence numbers could have wrapped. We of course
1448 	 * only validate the FIRST fragment so the bit must be set.
1449 	 */
1450 	strmseq = ntohs(ch->dp.stream_sequence);
1451 #ifdef SCTP_ASOCLOG_OF_TSNS
1452 	SCTP_TCB_LOCK_ASSERT(stcb);
1453 	if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1454 		asoc->tsn_in_at = 0;
1455 		asoc->tsn_in_wrapped = 1;
1456 	}
1457 	asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1458 	asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1459 	asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1460 	asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1461 	asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1462 	asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1463 	asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1464 	asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1465 	asoc->tsn_in_at++;
1466 #endif
1467 	if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1468 	    (TAILQ_EMPTY(&asoc->resetHead)) &&
1469 	    (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1470 	    SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1471 		/* The incoming sseq is behind where we last delivered? */
1472 		SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1473 		    strmseq, asoc->strmin[strmno].last_sequence_delivered);
1474 
1475 		snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1476 		    asoc->strmin[strmno].last_sequence_delivered,
1477 		    tsn, strmno, strmseq);
1478 		op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1479 		stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1480 		sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1481 		*abort_flag = 1;
1482 		return (0);
1483 	}
1484 	/************************************
1485 	 * From here down we may find ch-> invalid
1486 	 * so it's a good idea NOT to use it.
1487 	 *************************************/
1488 
1489 	the_len = (chk_length - sizeof(struct sctp_data_chunk));
1490 	if (last_chunk == 0) {
1491 		dmbuf = SCTP_M_COPYM(*m,
1492 		    (offset + sizeof(struct sctp_data_chunk)),
1493 		    the_len, M_NOWAIT);
1494 #ifdef SCTP_MBUF_LOGGING
1495 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1496 			sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1497 		}
1498 #endif
1499 	} else {
1500 		/* We can steal the last chunk */
1501 		int l_len;
1502 
1503 		dmbuf = *m;
1504 		/* lop off the top part */
1505 		m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1506 		if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1507 			l_len = SCTP_BUF_LEN(dmbuf);
1508 		} else {
1509 			/*
1510 			 * need to count up the size; hopefully we do not
1511 			 * hit this too often :-0
1512 			 */
1513 			struct mbuf *lat;
1514 
1515 			l_len = 0;
1516 			for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1517 				l_len += SCTP_BUF_LEN(lat);
1518 			}
1519 		}
1520 		if (l_len > the_len) {
1521 			/* Trim the excess bytes off the end too */
1522 			m_adj(dmbuf, -(l_len - the_len));
1523 		}
1524 	}
1525 	if (dmbuf == NULL) {
1526 		SCTP_STAT_INCR(sctps_nomem);
1527 		return (0);
1528 	}
1529 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1530 	    asoc->fragmented_delivery_inprogress == 0 &&
1531 	    TAILQ_EMPTY(&asoc->resetHead) &&
1532 	    ((ordered == 0) ||
1533 	    ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1534 	    TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1535 		/* Candidate for express delivery */
1536 		/*
1537 		 * It's not fragmented, no PD-API is up, nothing in the
1538 		 * delivery queue, it's un-ordered OR ordered and the next to
1539 		 * deliver AND nothing else is stuck on the stream queue,
1540 		 * and there is room for it in the socket buffer. Let's just
1541 		 * stuff it up the buffer....
1542 		 */
1543 
1544 		/* It would be nice to avoid this copy if we could :< */
1545 		sctp_alloc_a_readq(stcb, control);
1546 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1547 		    protocol_id,
1548 		    strmno, strmseq,
1549 		    chunk_flags,
1550 		    dmbuf);
1551 		if (control == NULL) {
1552 			goto failed_express_del;
1553 		}
1554 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1555 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1556 			asoc->highest_tsn_inside_nr_map = tsn;
1557 		}
1558 		sctp_add_to_readq(stcb->sctp_ep, stcb,
1559 		    control, &stcb->sctp_socket->so_rcv,
1560 		    1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1561 
1562 		if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1563 			/* for ordered, bump what we delivered */
1564 			asoc->strmin[strmno].last_sequence_delivered++;
1565 		}
1566 		SCTP_STAT_INCR(sctps_recvexpress);
1567 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1568 			sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1569 			    SCTP_STR_LOG_FROM_EXPRS_DEL);
1570 		}
1571 		control = NULL;
1572 
1573 		goto finish_express_del;
1574 	}
1575 failed_express_del:
1576 	/* If we reach here this is a new chunk */
1577 	chk = NULL;
1578 	control = NULL;
1579 	/* Express for fragmented delivery? */
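	/*
	 * If a partial delivery (PD-API) is already in progress for this
	 * stream and stream sequence number, and this chunk carries the next
	 * TSN, append it directly to the in-progress read queue entry
	 * instead of going through the reassembly queue.
	 */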
1580 	if ((asoc->fragmented_delivery_inprogress) &&
1581 	    (stcb->asoc.control_pdapi) &&
1582 	    (asoc->str_of_pdapi == strmno) &&
1583 	    (asoc->ssn_of_pdapi == strmseq)
1584 	    ) {
1585 		control = stcb->asoc.control_pdapi;
1586 		if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1587 			/* There can't be another first fragment */
1588 			goto failed_pdapi_express_del;
1589 		}
1590 		if (tsn == (control->sinfo_tsn + 1)) {
1591 			/* Yep, we can add it on */
1592 			int end = 0;
1593 
1594 			if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1595 				end = 1;
1596 			}
1597 			if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1598 			    tsn,
1599 			    &stcb->sctp_socket->so_rcv)) {
1600 				SCTP_PRINTF("Append fails end:%d\n", end);
1601 				goto failed_pdapi_express_del;
1602 			}
1603 			SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1604 			if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1605 				asoc->highest_tsn_inside_nr_map = tsn;
1606 			}
1607 			SCTP_STAT_INCR(sctps_recvexpressm);
1608 			asoc->tsn_last_delivered = tsn;
1609 			asoc->fragment_flags = chunk_flags;
1610 			asoc->tsn_of_pdapi_last_delivered = tsn;
1611 			asoc->last_flags_delivered = chunk_flags;
1612 			asoc->last_strm_seq_delivered = strmseq;
1613 			asoc->last_strm_no_delivered = strmno;
1614 			if (end) {
1615 				/* clean up the flags and such */
1616 				asoc->fragmented_delivery_inprogress = 0;
1617 				if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1618 					asoc->strmin[strmno].last_sequence_delivered++;
1619 				}
1620 				stcb->asoc.control_pdapi = NULL;
1621 				if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1622 					/*
1623 					 * There could be another message
1624 					 * ready
1625 					 */
1626 					need_reasm_check = 1;
1627 				}
1628 			}
1629 			control = NULL;
1630 			goto finish_express_del;
1631 		}
1632 	}
1633 failed_pdapi_express_del:
1634 	control = NULL;
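	/*
	 * Record the TSN in the proper mapping array. If draining (reneging)
	 * is disabled via the sctp_do_drain sysctl, everything goes into the
	 * non-renegable (nr) map; otherwise it goes into the regular map so
	 * it remains eligible for reneging later under memory pressure.
	 */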
1635 	if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1636 		SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1637 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1638 			asoc->highest_tsn_inside_nr_map = tsn;
1639 		}
1640 	} else {
1641 		SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1642 		if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1643 			asoc->highest_tsn_inside_map = tsn;
1644 		}
1645 	}
1646 	if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1647 		sctp_alloc_a_chunk(stcb, chk);
1648 		if (chk == NULL) {
1649 			/* No memory so we drop the chunk */
1650 			SCTP_STAT_INCR(sctps_nomem);
1651 			if (last_chunk == 0) {
1652 				/* we copied it, free the copy */
1653 				sctp_m_freem(dmbuf);
1654 			}
1655 			return (0);
1656 		}
1657 		chk->rec.data.TSN_seq = tsn;
1658 		chk->no_fr_allowed = 0;
1659 		chk->rec.data.stream_seq = strmseq;
1660 		chk->rec.data.stream_number = strmno;
1661 		chk->rec.data.payloadtype = protocol_id;
1662 		chk->rec.data.context = stcb->asoc.context;
1663 		chk->rec.data.doing_fast_retransmit = 0;
1664 		chk->rec.data.rcv_flags = chunk_flags;
1665 		chk->asoc = asoc;
1666 		chk->send_size = the_len;
1667 		chk->whoTo = net;
1668 		atomic_add_int(&net->ref_count, 1);
1669 		chk->data = dmbuf;
1670 	} else {
1671 		sctp_alloc_a_readq(stcb, control);
1672 		sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1673 		    protocol_id,
1674 		    strmno, strmseq,
1675 		    chunk_flags,
1676 		    dmbuf);
1677 		if (control == NULL) {
1678 			/* No memory so we drop the chunk */
1679 			SCTP_STAT_INCR(sctps_nomem);
1680 			if (last_chunk == 0) {
1681 				/* we copied it, free the copy */
1682 				sctp_m_freem(dmbuf);
1683 			}
1684 			return (0);
1685 		}
1686 		control->length = the_len;
1687 	}
1688 
1689 	/* Mark it as received */
1690 	/* Now queue it where it belongs */
1691 	if (control != NULL) {
1692 		/* First a sanity check */
1693 		if (asoc->fragmented_delivery_inprogress) {
1694 			/*
1695 			 * Ok, we have a fragmented delivery in progress; if
1696 			 * this chunk is next to deliver OR belongs, in our
1697 			 * view, in the reassembly queue, the peer is evil
1698 			 * or broken.
1699 			 */
1700 			uint32_t estimate_tsn;
1701 
1702 			estimate_tsn = asoc->tsn_last_delivered + 1;
1703 			if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1704 			    (estimate_tsn == control->sinfo_tsn)) {
1705 				/* Evil/Broken peer */
1706 				sctp_m_freem(control->data);
1707 				control->data = NULL;
1708 				if (control->whoFrom) {
1709 					sctp_free_remote_addr(control->whoFrom);
1710 					control->whoFrom = NULL;
1711 				}
1712 				sctp_free_a_readq(stcb, control);
1713 				snprintf(msg, sizeof(msg), "Reas. queue empty, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1714 				    tsn, strmno, strmseq);
1715 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1716 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1717 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1718 				*abort_flag = 1;
1719 				if (last_chunk) {
1720 					*m = NULL;
1721 				}
1722 				return (0);
1723 			} else {
1724 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1725 					sctp_m_freem(control->data);
1726 					control->data = NULL;
1727 					if (control->whoFrom) {
1728 						sctp_free_remote_addr(control->whoFrom);
1729 						control->whoFrom = NULL;
1730 					}
1731 					sctp_free_a_readq(stcb, control);
1732 					snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1733 					    tsn, strmno, strmseq);
1734 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1735 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1736 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1737 					*abort_flag = 1;
1738 					if (last_chunk) {
1739 						*m = NULL;
1740 					}
1741 					return (0);
1742 				}
1743 			}
1744 		} else {
1745 			/* No PDAPI running */
1746 			if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1747 				/*
1748 				 * Reassembly queue is NOT empty; validate
1749 				 * that this TSN does not need to be in the
1750 				 * reassembly queue. If it does, then our peer
1751 				 * is broken or evil.
1752 				 */
1753 				if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1754 					sctp_m_freem(control->data);
1755 					control->data = NULL;
1756 					if (control->whoFrom) {
1757 						sctp_free_remote_addr(control->whoFrom);
1758 						control->whoFrom = NULL;
1759 					}
1760 					sctp_free_a_readq(stcb, control);
1761 					snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1762 					    tsn, strmno, strmseq);
1763 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1764 					stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1765 					sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1766 					*abort_flag = 1;
1767 					if (last_chunk) {
1768 						*m = NULL;
1769 					}
1770 					return (0);
1771 				}
1772 			}
1773 		}
1774 		/* ok, if we reach here we have passed the sanity checks */
1775 		if (chunk_flags & SCTP_DATA_UNORDERED) {
1776 			/* queue directly into socket buffer */
1777 			sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1778 			sctp_add_to_readq(stcb->sctp_ep, stcb,
1779 			    control,
1780 			    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1781 		} else {
1782 			/*
1783 			 * Special check for when streams are resetting. We
1784 			 * could be smarter about this and check the
1785 			 * actual stream to see if it is not being reset..
1786 			 * that way we would not create a HOLB when amongst
1787 			 * streams being reset and those not being reset.
1788 			 *
1789 			 * We take complete messages that have a stream reset
1790 			 * intervening (aka the TSN is after where our
1791 			 * cum-ack needs to be) off and put them on a
1792 			 * pending_reply_queue. The reassembly ones we do
1793 			 * not have to worry about since they are all sorted
1794 			 * and processed in TSN order. It is only the
1795 			 * singletons I must worry about.
1796 			 */
1797 			if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1798 			    SCTP_TSN_GT(tsn, liste->tsn)) {
1799 				/*
1800 				 * yep it's past where we need to reset... go
1801 				 * ahead and queue it.
1802 				 */
1803 				if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1804 					/* first one on */
1805 					TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1806 				} else {
1807 					struct sctp_queued_to_read *ctlOn,
1808 					                   *nctlOn;
1809 					unsigned char inserted = 0;
1810 
1811 					TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1812 						if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1813 							continue;
1814 						} else {
1815 							/* found it */
1816 							TAILQ_INSERT_BEFORE(ctlOn, control, next);
1817 							inserted = 1;
1818 							break;
1819 						}
1820 					}
1821 					if (inserted == 0) {
1822 						/*
1823 						 * nothing in the queue had a
1824 						 * larger TSN, so it must be
1825 						 * put at the end.
1826 						 */
1827 						TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1828 					}
1829 				}
1830 			} else {
1831 				sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1832 				if (*abort_flag) {
1833 					if (last_chunk) {
1834 						*m = NULL;
1835 					}
1836 					return (0);
1837 				}
1838 			}
1839 		}
1840 	} else {
1841 		/* Into the re-assembly queue */
1842 		sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1843 		if (*abort_flag) {
1844 			/*
1845 			 * the assoc is now gone and chk was put onto the
1846 			 * reasm queue, which has all been freed.
1847 			 */
1848 			if (last_chunk) {
1849 				*m = NULL;
1850 			}
1851 			return (0);
1852 		}
1853 	}
1854 finish_express_del:
1855 	if (tsn == (asoc->cumulative_tsn + 1)) {
1856 		/* Update cum-ack */
1857 		asoc->cumulative_tsn = tsn;
1858 	}
1859 	if (last_chunk) {
1860 		*m = NULL;
1861 	}
1862 	if (ordered) {
1863 		SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1864 	} else {
1865 		SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1866 	}
1867 	SCTP_STAT_INCR(sctps_recvdata);
1868 	/* Set it present please */
1869 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1870 		sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1871 	}
1872 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1873 		sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1874 		    asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1875 	}
1876 	/* check the special flag for stream resets */
1877 	if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1878 	    SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1879 		/*
1880 		 * we have finished working through the backlogged TSNs, now
1881 		 * it is time to reset streams. 1: call the reset function. 2: free
1882 		 * the pending_reply space. 3: distribute any chunks in the
1883 		 * pending_reply_queue.
1884 		 */
1885 		struct sctp_queued_to_read *ctl, *nctl;
1886 
1887 		sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1888 		TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1889 		SCTP_FREE(liste, SCTP_M_STRESET);
1890 		/* sa_ignore FREED_MEMORY */
1891 		liste = TAILQ_FIRST(&asoc->resetHead);
1892 		if (TAILQ_EMPTY(&asoc->resetHead)) {
1893 			/* All can be removed */
1894 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1895 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1896 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1897 				if (*abort_flag) {
1898 					return (0);
1899 				}
1900 			}
1901 		} else {
1902 			TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1903 				if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1904 					break;
1905 				}
1906 				/*
1907 				 * if ctl->sinfo_tsn is <= liste->tsn we can
1908 				 * process it, which is the negation of
1909 				 * ctl->sinfo_tsn > liste->tsn
1910 				 */
1911 				TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1912 				sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1913 				if (*abort_flag) {
1914 					return (0);
1915 				}
1916 			}
1917 		}
1918 		/*
1919 		 * Now service reassembly to pick up anything that has been
1920 		 * held on the reassembly queue.
1921 		 */
1922 		sctp_deliver_reasm_check(stcb, asoc);
1923 		need_reasm_check = 0;
1924 	}
1925 	if (need_reasm_check) {
1926 		/* Another one waits ? */
1927 		sctp_deliver_reasm_check(stcb, asoc);
1928 	}
1929 	return (1);
1930 }
1931 
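/*
 * For each possible byte value, this table gives the number of consecutive
 * one bits starting from the least significant bit, i.e. the position of
 * the first zero bit (8 if the byte is 0xff). It is used when walking the
 * mapping arrays to find how far the cumulative ack extends into a
 * partially filled byte.
 */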
1932 int8_t sctp_map_lookup_tab[256] = {
1933 	0, 1, 0, 2, 0, 1, 0, 3,
1934 	0, 1, 0, 2, 0, 1, 0, 4,
1935 	0, 1, 0, 2, 0, 1, 0, 3,
1936 	0, 1, 0, 2, 0, 1, 0, 5,
1937 	0, 1, 0, 2, 0, 1, 0, 3,
1938 	0, 1, 0, 2, 0, 1, 0, 4,
1939 	0, 1, 0, 2, 0, 1, 0, 3,
1940 	0, 1, 0, 2, 0, 1, 0, 6,
1941 	0, 1, 0, 2, 0, 1, 0, 3,
1942 	0, 1, 0, 2, 0, 1, 0, 4,
1943 	0, 1, 0, 2, 0, 1, 0, 3,
1944 	0, 1, 0, 2, 0, 1, 0, 5,
1945 	0, 1, 0, 2, 0, 1, 0, 3,
1946 	0, 1, 0, 2, 0, 1, 0, 4,
1947 	0, 1, 0, 2, 0, 1, 0, 3,
1948 	0, 1, 0, 2, 0, 1, 0, 7,
1949 	0, 1, 0, 2, 0, 1, 0, 3,
1950 	0, 1, 0, 2, 0, 1, 0, 4,
1951 	0, 1, 0, 2, 0, 1, 0, 3,
1952 	0, 1, 0, 2, 0, 1, 0, 5,
1953 	0, 1, 0, 2, 0, 1, 0, 3,
1954 	0, 1, 0, 2, 0, 1, 0, 4,
1955 	0, 1, 0, 2, 0, 1, 0, 3,
1956 	0, 1, 0, 2, 0, 1, 0, 6,
1957 	0, 1, 0, 2, 0, 1, 0, 3,
1958 	0, 1, 0, 2, 0, 1, 0, 4,
1959 	0, 1, 0, 2, 0, 1, 0, 3,
1960 	0, 1, 0, 2, 0, 1, 0, 5,
1961 	0, 1, 0, 2, 0, 1, 0, 3,
1962 	0, 1, 0, 2, 0, 1, 0, 4,
1963 	0, 1, 0, 2, 0, 1, 0, 3,
1964 	0, 1, 0, 2, 0, 1, 0, 8
1965 };
1966 
1967 
1968 void
1969 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1970 {
1971 	/*
1972 	 * Now we also need to check the mapping array in a couple of ways.
1973 	 * 1) Did we move the cum-ack point?
1974 	 *
1975 	 * When you first glance at this you might think that all entries that
1976 	 * make up the position of the cum-ack would be in the nr-mapping
1977 	 * array only.. i.e. things up to the cum-ack are always
1978 	 * deliverable. That's true with one exception: when it's a fragmented
1979 	 * message we may not deliver the data until some threshold (or all
1980 	 * of it) is in place. So we must OR the nr_mapping_array and
1981 	 * mapping_array to get a true picture of the cum-ack.
1982 	 */
1983 	struct sctp_association *asoc;
1984 	int at;
1985 	uint8_t val;
1986 	int slide_from, slide_end, lgap, distance;
1987 	uint32_t old_cumack, old_base, old_highest, highest_tsn;
1988 
1989 	asoc = &stcb->asoc;
1990 
1991 	old_cumack = asoc->cumulative_tsn;
1992 	old_base = asoc->mapping_array_base_tsn;
1993 	old_highest = asoc->highest_tsn_inside_map;
1994 	/*
1995 	 * We could probably improve this a small bit by calculating the
1996 	 * offset of the current cum-ack as the starting point.
1997 	 */
1998 	at = 0;
1999 	for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2000 		val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2001 		if (val == 0xff) {
2002 			at += 8;
2003 		} else {
2004 			/* there is a 0 bit */
2005 			at += sctp_map_lookup_tab[val];
2006 			break;
2007 		}
2008 	}
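	/*
	 * 'at' now counts how many TSNs in a row (starting at the base of
	 * the mapping arrays) have been received, so the cumulative TSN is
	 * the last one of that run.
	 */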
2009 	asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2010 
2011 	if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2012 	    SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2013 #ifdef INVARIANTS
2014 		panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2015 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2016 #else
2017 		SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2018 		    asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2019 		sctp_print_mapping_array(asoc);
2020 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2021 			sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2022 		}
2023 		asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2024 		asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2025 #endif
2026 	}
2027 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2028 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2029 	} else {
2030 		highest_tsn = asoc->highest_tsn_inside_map;
2031 	}
2032 	if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2033 		/* The complete array was completed by a single FR */
2034 		/* highest becomes the cum-ack */
2035 		int clr;
2036 
2037 #ifdef INVARIANTS
2038 		unsigned int i;
2039 
2040 #endif
2041 
2042 		/* clear the array */
2043 		clr = ((at + 7) >> 3);
2044 		if (clr > asoc->mapping_array_size) {
2045 			clr = asoc->mapping_array_size;
2046 		}
2047 		memset(asoc->mapping_array, 0, clr);
2048 		memset(asoc->nr_mapping_array, 0, clr);
2049 #ifdef INVARIANTS
2050 		for (i = 0; i < asoc->mapping_array_size; i++) {
2051 			if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2052 				SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2053 				sctp_print_mapping_array(asoc);
2054 			}
2055 		}
2056 #endif
2057 		asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2058 		asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2059 	} else if (at >= 8) {
2060 		/* we can slide the mapping array down */
2061 		/* slide_from holds where we hit the first NON 0xff byte */
2062 
2063 		/*
2064 		 * now calculate the ceiling of the move using our highest
2065 		 * TSN value
2066 		 */
2067 		SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2068 		slide_end = (lgap >> 3);
2069 		if (slide_end < slide_from) {
2070 			sctp_print_mapping_array(asoc);
2071 #ifdef INVARIANTS
2072 			panic("impossible slide");
2073 #else
2074 			SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2075 			    lgap, slide_end, slide_from, at);
2076 			return;
2077 #endif
2078 		}
2079 		if (slide_end > asoc->mapping_array_size) {
2080 #ifdef INVARIANTS
2081 			panic("would overrun buffer");
2082 #else
2083 			SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2084 			    asoc->mapping_array_size, slide_end);
2085 			slide_end = asoc->mapping_array_size;
2086 #endif
2087 		}
2088 		distance = (slide_end - slide_from) + 1;
2089 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2090 			sctp_log_map(old_base, old_cumack, old_highest,
2091 			    SCTP_MAP_PREPARE_SLIDE);
2092 			sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2093 			    (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2094 		}
2095 		if (distance + slide_from > asoc->mapping_array_size ||
2096 		    distance < 0) {
2097 			/*
2098 			 * Here we do NOT slide forward the array so that
2099 			 * hopefully when more data comes in to fill it up
2100 			 * we will be able to slide it forward. Really I
2101 			 * don't think this should happen :-0
2102 			 */
2103 
2104 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2105 				sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2106 				    (uint32_t) asoc->mapping_array_size,
2107 				    SCTP_MAP_SLIDE_NONE);
2108 			}
2109 		} else {
2110 			int ii;
2111 
2112 			for (ii = 0; ii < distance; ii++) {
2113 				asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2114 				asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2115 
2116 			}
2117 			for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2118 				asoc->mapping_array[ii] = 0;
2119 				asoc->nr_mapping_array[ii] = 0;
2120 			}
2121 			if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2122 				asoc->highest_tsn_inside_map += (slide_from << 3);
2123 			}
2124 			if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2125 				asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2126 			}
2127 			asoc->mapping_array_base_tsn += (slide_from << 3);
2128 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2129 				sctp_log_map(asoc->mapping_array_base_tsn,
2130 				    asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2131 				    SCTP_MAP_SLIDE_RESULT);
2132 			}
2133 		}
2134 	}
2135 }
2136 
2137 void
2138 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2139 {
2140 	struct sctp_association *asoc;
2141 	uint32_t highest_tsn;
2142 
2143 	asoc = &stcb->asoc;
2144 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2145 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2146 	} else {
2147 		highest_tsn = asoc->highest_tsn_inside_map;
2148 	}
2149 
2150 	/*
2151 	 * Now we need to see if we need to queue a sack or just start the
2152 	 * timer (if allowed).
2153 	 */
2154 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2155 		/*
2156 		 * Ok, special case: in the SHUTDOWN-SENT state we make
2157 		 * sure the SACK timer is off and instead send a SHUTDOWN and a
2158 		 * SACK
2159 		 */
2160 		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2161 			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2162 			    stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2163 		}
2164 		sctp_send_shutdown(stcb,
2165 		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2166 		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2167 	} else {
2168 		int is_a_gap;
2169 
2170 		/* is there a gap now ? */
2171 		is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2172 
2173 		/*
2174 		 * CMT DAC algorithm: increase number of packets received
2175 		 * since last ack
2176 		 */
2177 		stcb->asoc.cmt_dac_pkts_rcvd++;
2178 
2179 		if ((stcb->asoc.send_sack == 1) ||	/* We need to send a
2180 							 * SACK */
2181 		    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but no
2182 							 * longer is one */
2183 		    (stcb->asoc.numduptsns) ||	/* we have dup's */
2184 		    (is_a_gap) ||	/* is still a gap */
2185 		    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
2186 		    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
2187 		    ) {
2188 
2189 			if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2190 			    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2191 			    (stcb->asoc.send_sack == 0) &&
2192 			    (stcb->asoc.numduptsns == 0) &&
2193 			    (stcb->asoc.delayed_ack) &&
2194 			    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2195 
2196 				/*
2197 				 * CMT DAC algorithm: With CMT, delay acks
2198 				 * even in the face of reordering.
2199 				 *
2200 				 * Therefore, acks that do not have to be
2201 				 * sent because of the above reasons will be
2202 				 * delayed. That is, acks that would have
2203 				 * been sent due to gap reports will be
2204 				 * delayed with DAC. Start the delayed ack
2205 				 * timer.
2206 				 */
2207 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2208 				    stcb->sctp_ep, stcb, NULL);
2209 			} else {
2210 				/*
2211 				 * Ok we must build a SACK since the timer
2212 				 * is pending, we got our first packet OR
2213 				 * there are gaps or duplicates.
2214 				 */
2215 				(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2216 				sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2217 			}
2218 		} else {
2219 			if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2220 				sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2221 				    stcb->sctp_ep, stcb, NULL);
2222 			}
2223 		}
2224 	}
2225 }
2226 
2227 void
2228 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2229 {
2230 	struct sctp_tmit_chunk *chk;
2231 	uint32_t tsize, pd_point;
2232 	uint16_t nxt_todel;
2233 
2234 	if (asoc->fragmented_delivery_inprogress) {
2235 		sctp_service_reassembly(stcb, asoc);
2236 	}
2237 	/* Can we proceed further, i.e. the PD-API is complete */
2238 	if (asoc->fragmented_delivery_inprogress) {
2239 		/* no */
2240 		return;
2241 	}
2242 	/*
2243 	 * Now is there some other chunk I can deliver from the reassembly
2244 	 * queue.
2245 	 */
2246 doit_again:
2247 	chk = TAILQ_FIRST(&asoc->reasmqueue);
2248 	if (chk == NULL) {
2249 		asoc->size_on_reasm_queue = 0;
2250 		asoc->cnt_on_reasm_queue = 0;
2251 		return;
2252 	}
2253 	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2254 	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2255 	    ((nxt_todel == chk->rec.data.stream_seq) ||
2256 	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2257 		/*
2258 		 * Yep, the first one is here. We set up to start reception
2259 		 * by backing down the TSN just in case we can't deliver.
2260 		 */
2261 
2262 		/*
2263 		 * Before we start, though, either all of the message should
2264 		 * be here, or at least enough of it (the partial delivery
2265 		 * point) so that something can be delivered.
2266 		 */
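		/*
		 * The partial delivery point is the smaller of the endpoint's
		 * configured partial_delivery_point and a fraction of the
		 * receive socket buffer limit (when a socket is attached).
		 */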
2267 		if (stcb->sctp_socket) {
2268 			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2269 			    stcb->sctp_ep->partial_delivery_point);
2270 		} else {
2271 			pd_point = stcb->sctp_ep->partial_delivery_point;
2272 		}
2273 		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2274 			asoc->fragmented_delivery_inprogress = 1;
2275 			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2276 			asoc->str_of_pdapi = chk->rec.data.stream_number;
2277 			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2278 			asoc->pdapi_ppid = chk->rec.data.payloadtype;
2279 			asoc->fragment_flags = chk->rec.data.rcv_flags;
2280 			sctp_service_reassembly(stcb, asoc);
2281 			if (asoc->fragmented_delivery_inprogress == 0) {
2282 				goto doit_again;
2283 			}
2284 		}
2285 	}
2286 }
2287 
2288 int
2289 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2290     struct sockaddr *src, struct sockaddr *dst,
2291     struct sctphdr *sh, struct sctp_inpcb *inp,
2292     struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2293     uint8_t mflowtype, uint32_t mflowid,
2294     uint32_t vrf_id, uint16_t port)
2295 {
2296 	struct sctp_data_chunk *ch, chunk_buf;
2297 	struct sctp_association *asoc;
2298 	int num_chunks = 0;	/* number of control chunks processed */
2299 	int stop_proc = 0;
2300 	int chk_length, break_flag, last_chunk;
2301 	int abort_flag = 0, was_a_gap;
2302 	struct mbuf *m;
2303 	uint32_t highest_tsn;
2304 
2305 	/* set the rwnd */
2306 	sctp_set_rwnd(stcb, &stcb->asoc);
2307 
2308 	m = *mm;
2309 	SCTP_TCB_LOCK_ASSERT(stcb);
2310 	asoc = &stcb->asoc;
2311 	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2312 		highest_tsn = asoc->highest_tsn_inside_nr_map;
2313 	} else {
2314 		highest_tsn = asoc->highest_tsn_inside_map;
2315 	}
2316 	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2317 	/*
2318 	 * setup where we got the last DATA packet from for any SACK that
2319 	 * may need to go out. Don't bump the net. This is done ONLY when a
2320 	 * chunk is assigned.
2321 	 */
2322 	asoc->last_data_chunk_from = net;
2323 
2324 	/*-
2325 	 * Now before we proceed we must figure out if this is a wasted
2326 	 * cluster... i.e. it is a small packet sent in and yet the driver
2327 	 * underneath allocated a full cluster for it. If so we must copy it
2328 	 * to a smaller mbuf and free up the cluster mbuf. This will help
2329 	 * with cluster starvation. Note for __Panda__ we don't do this
2330 	 * since it has clusters all the way down to 64 bytes.
2331 	 */
2332 	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2333 		/* we only handle mbufs that are singletons.. not chains */
2334 		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2335 		if (m) {
2336 			/* ok, let's see if we can copy the data up */
2337 			caddr_t *from, *to;
2338 
2339 			/* get the pointers and copy */
2340 			to = mtod(m, caddr_t *);
2341 			from = mtod((*mm), caddr_t *);
2342 			memcpy(to, from, SCTP_BUF_LEN((*mm)));
2343 			/* copy the length and free up the old */
2344 			SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2345 			sctp_m_freem(*mm);
2346 			/* success, back copy */
2347 			*mm = m;
2348 		} else {
2349 			/* We are in trouble in the mbuf world .. yikes */
2350 			m = *mm;
2351 		}
2352 	}
2353 	/* get pointer to the first chunk header */
2354 	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2355 	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2356 	if (ch == NULL) {
2357 		return (1);
2358 	}
2359 	/*
2360 	 * process all DATA chunks...
2361 	 */
2362 	*high_tsn = asoc->cumulative_tsn;
2363 	break_flag = 0;
2364 	asoc->data_pkts_seen++;
2365 	while (stop_proc == 0) {
2366 		/* validate chunk length */
2367 		chk_length = ntohs(ch->ch.chunk_length);
2368 		if (length - *offset < chk_length) {
2369 			/* all done, mutilated chunk */
2370 			stop_proc = 1;
2371 			continue;
2372 		}
2373 		if (ch->ch.chunk_type == SCTP_DATA) {
2374 			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2375 				/*
2376 				 * Need to send an abort since we had an
2377 				 * invalid data chunk.
2378 				 */
2379 				struct mbuf *op_err;
2380 				char msg[SCTP_DIAG_INFO_LEN];
2381 
2382 				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2383 				    chk_length);
2384 				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2385 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2386 				sctp_abort_association(inp, stcb, m, iphlen,
2387 				    src, dst, sh, op_err,
2388 				    mflowtype, mflowid,
2389 				    vrf_id, port);
2390 				return (2);
2391 			}
2392 			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2393 				/*
2394 				 * Need to send an abort since we had an
2395 				 * empty data chunk.
2396 				 */
2397 				struct mbuf *op_err;
2398 
2399 				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2400 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2401 				sctp_abort_association(inp, stcb, m, iphlen,
2402 				    src, dst, sh, op_err,
2403 				    mflowtype, mflowid,
2404 				    vrf_id, port);
2405 				return (2);
2406 			}
2407 #ifdef SCTP_AUDITING_ENABLED
2408 			sctp_audit_log(0xB1, 0);
2409 #endif
2410 			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2411 				last_chunk = 1;
2412 			} else {
2413 				last_chunk = 0;
2414 			}
2415 			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2416 			    chk_length, net, high_tsn, &abort_flag, &break_flag,
2417 			    last_chunk)) {
2418 				num_chunks++;
2419 			}
2420 			if (abort_flag)
2421 				return (2);
2422 
2423 			if (break_flag) {
2424 				/*
2425 				 * Set because of out of rwnd space and no
2426 				 * drop rep space left.
2427 				 */
2428 				stop_proc = 1;
2429 				continue;
2430 			}
2431 		} else {
2432 			/* not a data chunk in the data region */
2433 			switch (ch->ch.chunk_type) {
2434 			case SCTP_INITIATION:
2435 			case SCTP_INITIATION_ACK:
2436 			case SCTP_SELECTIVE_ACK:
2437 			case SCTP_NR_SELECTIVE_ACK:
2438 			case SCTP_HEARTBEAT_REQUEST:
2439 			case SCTP_HEARTBEAT_ACK:
2440 			case SCTP_ABORT_ASSOCIATION:
2441 			case SCTP_SHUTDOWN:
2442 			case SCTP_SHUTDOWN_ACK:
2443 			case SCTP_OPERATION_ERROR:
2444 			case SCTP_COOKIE_ECHO:
2445 			case SCTP_COOKIE_ACK:
2446 			case SCTP_ECN_ECHO:
2447 			case SCTP_ECN_CWR:
2448 			case SCTP_SHUTDOWN_COMPLETE:
2449 			case SCTP_AUTHENTICATION:
2450 			case SCTP_ASCONF_ACK:
2451 			case SCTP_PACKET_DROPPED:
2452 			case SCTP_STREAM_RESET:
2453 			case SCTP_FORWARD_CUM_TSN:
2454 			case SCTP_ASCONF:
2455 				/*
2456 				 * Now, what do we do with KNOWN chunks that
2457 				 * are NOT in the right place?
2458 				 *
2459 				 * For now, I do nothing but ignore them. We
2460 				 * may later want to add sysctl stuff to
2461 				 * switch out and do either an ABORT() or
2462 				 * possibly process them.
2463 				 */
2464 				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2465 					struct mbuf *op_err;
2466 
2467 					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2468 					sctp_abort_association(inp, stcb,
2469 					    m, iphlen,
2470 					    src, dst,
2471 					    sh, op_err,
2472 					    mflowtype, mflowid,
2473 					    vrf_id, port);
2474 					return (2);
2475 				}
2476 				break;
2477 			default:
2478 				/* unknown chunk type, use bit rules */
2479 				if (ch->ch.chunk_type & 0x40) {
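				/*
				 * The two high-order bits of the chunk type
				 * tell us how to handle an unrecognized
				 * chunk: if 0x40 is set we report it to the
				 * peer in an operation error; if 0x80 is
				 * clear we stop processing the rest of the
				 * packet, otherwise we skip just this chunk.
				 */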
2480 					/* Add a error report to the queue */
2481 					struct mbuf *merr;
2482 					struct sctp_paramhdr *phd;
2483 
2484 					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2485 					if (merr) {
2486 						phd = mtod(merr, struct sctp_paramhdr *);
2487 						/*
2488 						 * We cheat and use param
2489 						 * type since we did not
2490 						 * bother to define an error
2491 						 * cause struct. They are
2492 						 * the same basic format
2493 						 * with different names.
2494 						 */
2495 						phd->param_type =
2496 						    htons(SCTP_CAUSE_UNRECOG_CHUNK);
2497 						phd->param_length =
2498 						    htons(chk_length + sizeof(*phd));
2499 						SCTP_BUF_LEN(merr) = sizeof(*phd);
2500 						SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2501 						if (SCTP_BUF_NEXT(merr)) {
2502 							if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
2503 								sctp_m_freem(merr);
2504 							} else {
2505 								sctp_queue_op_err(stcb, merr);
2506 							}
2507 						} else {
2508 							sctp_m_freem(merr);
2509 						}
2510 					}
2511 				}
2512 				if ((ch->ch.chunk_type & 0x80) == 0) {
2513 					/* discard the rest of this packet */
2514 					stop_proc = 1;
2515 				}	/* else skip this bad chunk and
2516 					 * continue... */
2517 				break;
2518 			}	/* switch of chunk type */
2519 		}
2520 		*offset += SCTP_SIZE32(chk_length);
2521 		if ((*offset >= length) || stop_proc) {
2522 			/* no more data left in the mbuf chain */
2523 			stop_proc = 1;
2524 			continue;
2525 		}
2526 		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2527 		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2528 		if (ch == NULL) {
2529 			*offset = length;
2530 			stop_proc = 1;
2531 			continue;
2532 		}
2533 	}
2534 	if (break_flag) {
2535 		/*
2536 		 * we need to report rwnd overrun drops.
2537 		 */
2538 		sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2539 	}
2540 	if (num_chunks) {
2541 		/*
2542 		 * Did we get data? If so, update the time for auto-close and
2543 		 * give peer credit for being alive.
2544 		 */
2545 		SCTP_STAT_INCR(sctps_recvpktwithdata);
2546 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2547 			sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2548 			    stcb->asoc.overall_error_count,
2549 			    0,
2550 			    SCTP_FROM_SCTP_INDATA,
2551 			    __LINE__);
2552 		}
2553 		stcb->asoc.overall_error_count = 0;
2554 		(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2555 	}
2556 	/* now service all of the reassm queue if needed */
2557 	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2558 		sctp_service_queues(stcb, asoc);
2559 
2560 	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2561 		/* Assure that we ack right away */
2562 		stcb->asoc.send_sack = 1;
2563 	}
2564 	/* Start a sack timer or QUEUE a SACK for sending */
2565 	sctp_sack_check(stcb, was_a_gap);
2566 	return (0);
2567 }
2568 
2569 static int
2570 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2571     uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2572     int *num_frs,
2573     uint32_t * biggest_newly_acked_tsn,
2574     uint32_t * this_sack_lowest_newack,
2575     int *rto_ok)
2576 {
2577 	struct sctp_tmit_chunk *tp1;
2578 	unsigned int theTSN;
2579 	int j, wake_him = 0, circled = 0;
2580 
2581 	/* Recover the tp1 we last saw */
2582 	tp1 = *p_tp1;
2583 	if (tp1 == NULL) {
2584 		tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2585 	}
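	/*
	 * Gap ack block boundaries are offsets relative to the cumulative
	 * TSN ack (last_tsn), so each offset j in the block corresponds to
	 * TSN last_tsn + j.
	 */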
2586 	for (j = frag_strt; j <= frag_end; j++) {
2587 		theTSN = j + last_tsn;
2588 		while (tp1) {
2589 			if (tp1->rec.data.doing_fast_retransmit)
2590 				(*num_frs) += 1;
2591 
2592 			/*-
2593 			 * CMT: CUCv2 algorithm. For each TSN being
2594 			 * processed from the sent queue, track the
2595 			 * next expected pseudo-cumack, or
2596 			 * rtx_pseudo_cumack, if required. Separate
2597 			 * cumack trackers for first transmissions,
2598 			 * and retransmissions.
2599 			 */
2600 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2601 			    (tp1->whoTo->find_pseudo_cumack == 1) &&
2602 			    (tp1->snd_count == 1)) {
2603 				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2604 				tp1->whoTo->find_pseudo_cumack = 0;
2605 			}
2606 			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
2607 			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
2608 			    (tp1->snd_count > 1)) {
2609 				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2610 				tp1->whoTo->find_rtx_pseudo_cumack = 0;
2611 			}
2612 			if (tp1->rec.data.TSN_seq == theTSN) {
2613 				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2614 					/*-
2615 					 * must be held until
2616 					 * cum-ack passes
2617 					 */
2618 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2619 						/*-
2620 						 * If it is less than RESEND, it is
2621 						 * now no-longer in flight.
2622 						 * Higher values may already be set
2623 						 * via previous Gap Ack Blocks...
2624 						 * i.e. ACKED or RESEND.
2625 						 */
2626 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2627 						    *biggest_newly_acked_tsn)) {
2628 							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2629 						}
2630 						/*-
2631 						 * CMT: SFR algo (and HTNA) - set
2632 						 * saw_newack to 1 for dest being
2633 						 * newly acked. update
2634 						 * this_sack_highest_newack if
2635 						 * appropriate.
2636 						 */
2637 						if (tp1->rec.data.chunk_was_revoked == 0)
2638 							tp1->whoTo->saw_newack = 1;
2639 
2640 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2641 						    tp1->whoTo->this_sack_highest_newack)) {
2642 							tp1->whoTo->this_sack_highest_newack =
2643 							    tp1->rec.data.TSN_seq;
2644 						}
2645 						/*-
2646 						 * CMT DAC algo: also update
2647 						 * this_sack_lowest_newack
2648 						 */
2649 						if (*this_sack_lowest_newack == 0) {
2650 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2651 								sctp_log_sack(*this_sack_lowest_newack,
2652 								    last_tsn,
2653 								    tp1->rec.data.TSN_seq,
2654 								    0,
2655 								    0,
2656 								    SCTP_LOG_TSN_ACKED);
2657 							}
2658 							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2659 						}
2660 						/*-
2661 						 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2662 						 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2663 						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2664 						 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2665 						 * Separate pseudo_cumack trackers for first transmissions and
2666 						 * retransmissions.
2667 						 */
2668 						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2669 							if (tp1->rec.data.chunk_was_revoked == 0) {
2670 								tp1->whoTo->new_pseudo_cumack = 1;
2671 							}
2672 							tp1->whoTo->find_pseudo_cumack = 1;
2673 						}
2674 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2675 							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2676 						}
2677 						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2678 							if (tp1->rec.data.chunk_was_revoked == 0) {
2679 								tp1->whoTo->new_pseudo_cumack = 1;
2680 							}
2681 							tp1->whoTo->find_rtx_pseudo_cumack = 1;
2682 						}
2683 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2684 							sctp_log_sack(*biggest_newly_acked_tsn,
2685 							    last_tsn,
2686 							    tp1->rec.data.TSN_seq,
2687 							    frag_strt,
2688 							    frag_end,
2689 							    SCTP_LOG_TSN_ACKED);
2690 						}
2691 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2692 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2693 							    tp1->whoTo->flight_size,
2694 							    tp1->book_size,
2695 							    (uintptr_t) tp1->whoTo,
2696 							    tp1->rec.data.TSN_seq);
2697 						}
2698 						sctp_flight_size_decrease(tp1);
2699 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2700 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2701 							    tp1);
2702 						}
2703 						sctp_total_flight_decrease(stcb, tp1);
2704 
2705 						tp1->whoTo->net_ack += tp1->send_size;
2706 						if (tp1->snd_count < 2) {
2707 							/*-
2708 							 * True non-retransmitted chunk
2709 							 */
2710 							tp1->whoTo->net_ack2 += tp1->send_size;
2711 
2712 							/*-
2713 							 * update RTO too ?
2714 							 */
2715 							if (tp1->do_rtt) {
2716 								if (*rto_ok) {
2717 									tp1->whoTo->RTO =
2718 									    sctp_calculate_rto(stcb,
2719 									    &stcb->asoc,
2720 									    tp1->whoTo,
2721 									    &tp1->sent_rcv_time,
2722 									    sctp_align_safe_nocopy,
2723 									    SCTP_RTT_FROM_DATA);
2724 									*rto_ok = 0;
2725 								}
2726 								if (tp1->whoTo->rto_needed == 0) {
2727 									tp1->whoTo->rto_needed = 1;
2728 								}
2729 								tp1->do_rtt = 0;
2730 							}
2731 						}
2732 					}
2733 					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2734 						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2735 						    stcb->asoc.this_sack_highest_gap)) {
2736 							stcb->asoc.this_sack_highest_gap =
2737 							    tp1->rec.data.TSN_seq;
2738 						}
2739 						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2740 							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2741 #ifdef SCTP_AUDITING_ENABLED
2742 							sctp_audit_log(0xB2,
2743 							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2744 #endif
2745 						}
2746 					}
2747 					/*-
2748 					 * All chunks NOT UNSENT fall through here and are marked
2749 					 * (leave PR-SCTP ones that are to skip alone though)
2750 					 */
2751 					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2752 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2753 						tp1->sent = SCTP_DATAGRAM_MARKED;
2754 					}
2755 					if (tp1->rec.data.chunk_was_revoked) {
2756 						/* deflate the cwnd */
2757 						tp1->whoTo->cwnd -= tp1->book_size;
2758 						tp1->rec.data.chunk_was_revoked = 0;
2759 					}
2760 					/* NR Sack code here */
2761 					if (nr_sacking &&
2762 					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2763 						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2764 							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2765 #ifdef INVARIANTS
2766 						} else {
2767 							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2768 #endif
2769 						}
2770 						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2771 						if (tp1->data) {
2772 							/*
2773 							 * sa_ignore
2774 							 * NO_NULL_CHK
2775 							 */
2776 							sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2777 							sctp_m_freem(tp1->data);
2778 							tp1->data = NULL;
2779 						}
2780 						wake_him++;
2781 					}
2782 				}
2783 				break;
2784 			}	/* if (tp1->TSN_seq == theTSN) */
2785 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2786 				break;
2787 			}
2788 			tp1 = TAILQ_NEXT(tp1, sctp_next);
2789 			if ((tp1 == NULL) && (circled == 0)) {
2790 				circled++;
2791 				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2792 			}
2793 		}		/* end while (tp1) */
2794 		if (tp1 == NULL) {
2795 			circled = 0;
2796 			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2797 		}
2798 		/* In case the fragments were not in order we must reset */
2799 	}			/* end for (j = fragStart */
2800 	*p_tp1 = tp1;
2801 	return (wake_him);	/* Return value only used for nr-sack */
2802 }
2803 
2804 
2805 static int
2806 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2807     uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2808     uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2809     int num_seg, int num_nr_seg, int *rto_ok)
2810 {
2811 	struct sctp_gap_ack_block *frag, block;
2812 	struct sctp_tmit_chunk *tp1;
2813 	int i;
2814 	int num_frs = 0;
2815 	int chunk_freed;
2816 	int non_revocable;
2817 	uint16_t frag_strt, frag_end, prev_frag_end;
2818 
2819 	tp1 = TAILQ_FIRST(&asoc->sent_queue);
2820 	prev_frag_end = 0;
2821 	chunk_freed = 0;
2822 
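	/*
	 * Walk the regular gap ack blocks first and then the nr gap ack
	 * blocks. Both kinds are reported relative to the same cumulative
	 * ack, so when we cross into the nr blocks we restart the scan from
	 * the head of the sent queue.
	 */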
2823 	for (i = 0; i < (num_seg + num_nr_seg); i++) {
2824 		if (i == num_seg) {
2825 			prev_frag_end = 0;
2826 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2827 		}
2828 		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2829 		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2830 		*offset += sizeof(block);
2831 		if (frag == NULL) {
2832 			return (chunk_freed);
2833 		}
2834 		frag_strt = ntohs(frag->start);
2835 		frag_end = ntohs(frag->end);
2836 
2837 		if (frag_strt > frag_end) {
2838 			/* This gap report is malformed, skip it. */
2839 			continue;
2840 		}
2841 		if (frag_strt <= prev_frag_end) {
2842 			/* This gap report is not in order, so restart. */
2843 			tp1 = TAILQ_FIRST(&asoc->sent_queue);
2844 		}
2845 		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2846 			*biggest_tsn_acked = last_tsn + frag_end;
2847 		}
2848 		if (i < num_seg) {
2849 			non_revocable = 0;
2850 		} else {
2851 			non_revocable = 1;
2852 		}
2853 		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2854 		    non_revocable, &num_frs, biggest_newly_acked_tsn,
2855 		    this_sack_lowest_newack, rto_ok)) {
2856 			chunk_freed = 1;
2857 		}
2858 		prev_frag_end = frag_end;
2859 	}
2860 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2861 		if (num_frs)
2862 			sctp_log_fr(*biggest_tsn_acked,
2863 			    *biggest_newly_acked_tsn,
2864 			    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2865 	}
2866 	return (chunk_freed);
2867 }
2868 
2869 static void
2870 sctp_check_for_revoked(struct sctp_tcb *stcb,
2871     struct sctp_association *asoc, uint32_t cumack,
2872     uint32_t biggest_tsn_acked)
2873 {
2874 	struct sctp_tmit_chunk *tp1;
2875 
2876 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2877 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2878 			/*
2879 			 * ok this guy is either ACKED or MARKED. If it is
2880 			 * ACKED it has been previously acked but not this
2881 			 * time, i.e. revoked. If it is MARKED it was ACK'ed
2882 			 * again.
2883 			 */
2884 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2885 				break;
2886 			}
2887 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2888 				/* it has been revoked */
2889 				tp1->sent = SCTP_DATAGRAM_SENT;
2890 				tp1->rec.data.chunk_was_revoked = 1;
2891 				/*
2892 				 * We must add this stuff back in to assure
2893 				 * timers and such get started.
2894 				 */
2895 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2896 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2897 					    tp1->whoTo->flight_size,
2898 					    tp1->book_size,
2899 					    (uintptr_t) tp1->whoTo,
2900 					    tp1->rec.data.TSN_seq);
2901 				}
2902 				sctp_flight_size_increase(tp1);
2903 				sctp_total_flight_increase(stcb, tp1);
2904 				/*
2905 				 * We inflate the cwnd to compensate for our
2906 				 * artificial inflation of the flight_size.
2907 				 */
2908 				tp1->whoTo->cwnd += tp1->book_size;
2909 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2910 					sctp_log_sack(asoc->last_acked_seq,
2911 					    cumack,
2912 					    tp1->rec.data.TSN_seq,
2913 					    0,
2914 					    0,
2915 					    SCTP_LOG_TSN_REVOKED);
2916 				}
2917 			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2918 				/* it has been re-acked in this SACK */
2919 				tp1->sent = SCTP_DATAGRAM_ACKED;
2920 			}
2921 		}
2922 		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2923 			break;
2924 	}
2925 }
2926 
2927 
2928 static void
2929 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2930     uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2931 {
2932 	struct sctp_tmit_chunk *tp1;
2933 	int strike_flag = 0;
2934 	struct timeval now;
2935 	int tot_retrans = 0;
2936 	uint32_t sending_seq;
2937 	struct sctp_nets *net;
2938 	int num_dests_sacked = 0;
2939 
2940 	/*
2941 	 * select the sending_seq, this is either the next thing ready to be
2942 	 * sent but not transmitted, OR, the next seq we assign.
2943 	 */
2944 	tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2945 	if (tp1 == NULL) {
2946 		sending_seq = asoc->sending_seq;
2947 	} else {
2948 		sending_seq = tp1->rec.data.TSN_seq;
2949 	}
2950 
2951 	/* CMT DAC algo: finding out if SACK is a mixed SACK */
2952 	if ((asoc->sctp_cmt_on_off > 0) &&
2953 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2954 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2955 			if (net->saw_newack)
2956 				num_dests_sacked++;
2957 		}
2958 	}
2959 	if (stcb->asoc.prsctp_supported) {
2960 		(void)SCTP_GETTIME_TIMEVAL(&now);
2961 	}
2962 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2963 		strike_flag = 0;
2964 		if (tp1->no_fr_allowed) {
2965 			/* this one had a timeout or something */
2966 			continue;
2967 		}
2968 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2969 			if (tp1->sent < SCTP_DATAGRAM_RESEND)
2970 				sctp_log_fr(biggest_tsn_newly_acked,
2971 				    tp1->rec.data.TSN_seq,
2972 				    tp1->sent,
2973 				    SCTP_FR_LOG_CHECK_STRIKE);
2974 		}
2975 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2976 		    tp1->sent == SCTP_DATAGRAM_UNSENT) {
2977 			/* done */
2978 			break;
2979 		}
2980 		if (stcb->asoc.prsctp_supported) {
2981 			if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2982 				/* Is it expired? */
2983 				if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2984 					/* Yes so drop it */
2985 					if (tp1->data != NULL) {
2986 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2987 						    SCTP_SO_NOT_LOCKED);
2988 					}
2989 					continue;
2990 				}
2991 			}
2992 		}
2993 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2994 			/* we are beyond the tsn in the sack  */
2995 			break;
2996 		}
2997 		if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2998 			/* either a RESEND, ACKED, or MARKED */
2999 			/* skip */
3000 			if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3001 				/* Continue striking FWD-TSN chunks */
3002 				tp1->rec.data.fwd_tsn_cnt++;
3003 			}
3004 			continue;
3005 		}
3006 		/*
3007 		 * CMT : SFR algo (covers part of DAC and HTNA as well)
3008 		 */
3009 		if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3010 			/*
3011 			 * No new acks were received for data sent to this
3012 			 * dest. Therefore, according to the SFR algo for
3013 			 * CMT, no data sent to this dest can be marked for
3014 			 * FR using this SACK.
3015 			 */
3016 			continue;
3017 		} else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3018 		    tp1->whoTo->this_sack_highest_newack)) {
3019 			/*
3020 			 * CMT: New acks were received for data sent to
3021 			 * this dest. But no new acks were seen for data
3022 			 * sent after tp1. Therefore, according to the SFR
3023 			 * algo for CMT, tp1 cannot be marked for FR using
3024 			 * this SACK. This step covers part of the DAC algo
3025 			 * and the HTNA algo as well.
3026 			 */
3027 			continue;
3028 		}
3029 		/*
3030 		 * Here we check to see if we have already done a FR
3031 		 * and if so we see if the biggest TSN we saw in the sack is
3032 		 * smaller than the recovery point. If so we don't strike
3033 		 * the tsn... otherwise we CAN strike the TSN.
3034 		 */
3035 		/*
3036 		 * @@@ JRI: Check for CMT if (accum_moved &&
3037 		 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3038 		 * 0)) {
3039 		 */
3040 		if (accum_moved && asoc->fast_retran_loss_recovery) {
3041 			/*
3042 			 * Strike the TSN if in fast-recovery and cum-ack
3043 			 * moved.
3044 			 */
3045 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3046 				sctp_log_fr(biggest_tsn_newly_acked,
3047 				    tp1->rec.data.TSN_seq,
3048 				    tp1->sent,
3049 				    SCTP_FR_LOG_STRIKE_CHUNK);
3050 			}
3051 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3052 				tp1->sent++;
3053 			}
3054 			if ((asoc->sctp_cmt_on_off > 0) &&
3055 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3056 				/*
3057 				 * CMT DAC algorithm: If the SACK DAC flag is
3058 				 * set to 0, the lowest_newack test will not
3059 				 * pass because it would have been set to the
3060 				 * cumack earlier. If the chunk is not already
3061 				 * marked for rtx, this is not a mixed sack,
3062 				 * and tp1 is not between two sacked TSNs,
3063 				 * then mark it one more time. NOTE that we
3064 				 * mark one additional time since the SACK DAC
3065 				 * flag indicates that two packets have been
3066 				 * received after this missing TSN.
3067 				 */
3068 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3069 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3070 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3071 						sctp_log_fr(16 + num_dests_sacked,
3072 						    tp1->rec.data.TSN_seq,
3073 						    tp1->sent,
3074 						    SCTP_FR_LOG_STRIKE_CHUNK);
3075 					}
3076 					tp1->sent++;
3077 				}
3078 			}
3079 		} else if ((tp1->rec.data.doing_fast_retransmit) &&
3080 		    (asoc->sctp_cmt_on_off == 0)) {
3081 			/*
3082 			 * For those that have done a FR we must take
3083 			 * special consideration if we strike. I.e. the
3084 			 * biggest_newly_acked must be higher than the
3085 			 * sending_seq at the time we did the FR.
3086 			 */
3087 			if (
3088 #ifdef SCTP_FR_TO_ALTERNATE
3089 			/*
3090 			 * If FR's go to new networks, then we must only do
3091 			 * this for singly homed asoc's. However if the FR's
3092 			 * go to the same network (Armando's work) then its
3093 			 * go to the same network (Armando's work) then it's
3094 			 */
3095 			    (asoc->numnets < 2)
3096 #else
3097 			    (1)
3098 #endif
3099 			    ) {
3100 
3101 				if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3102 				    tp1->rec.data.fast_retran_tsn)) {
3103 					/*
3104 					 * Strike the TSN, since this ack is
3105 					 * beyond where things were when we
3106 					 * did a FR.
3107 					 */
3108 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3109 						sctp_log_fr(biggest_tsn_newly_acked,
3110 						    tp1->rec.data.TSN_seq,
3111 						    tp1->sent,
3112 						    SCTP_FR_LOG_STRIKE_CHUNK);
3113 					}
3114 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3115 						tp1->sent++;
3116 					}
3117 					strike_flag = 1;
3118 					if ((asoc->sctp_cmt_on_off > 0) &&
3119 					    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3120 						/*
3121 						 * CMT DAC algorithm: If the
3122 						 * SACK DAC flag is set to 0,
3123 						 * the lowest_newack test
3124 						 * will not pass because it
3125 						 * would have been set to
3126 						 * the cumack earlier. If the
3127 						 * chunk is not already
3128 						 * marked for rtx, this is
3129 						 * not a mixed sack, and tp1
3130 						 * is not between two sacked
3131 						 * TSNs, then mark it one
3132 						 * more time. NOTE that we
3133 						 * mark one additional time
3134 						 * since the SACK DAC flag
3135 						 * indicates that two packets
3136 						 * have been received after
3137 						 * this missing TSN.
3138 						 */
3139 						if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3140 						    (num_dests_sacked == 1) &&
3141 						    SCTP_TSN_GT(this_sack_lowest_newack,
3142 						    tp1->rec.data.TSN_seq)) {
3143 							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3144 								sctp_log_fr(32 + num_dests_sacked,
3145 								    tp1->rec.data.TSN_seq,
3146 								    tp1->sent,
3147 								    SCTP_FR_LOG_STRIKE_CHUNK);
3148 							}
3149 							if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3150 								tp1->sent++;
3151 							}
3152 						}
3153 					}
3154 				}
3155 			}
3156 			/*
3157 			 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3158 			 * algo covers HTNA.
3159 			 */
3160 		} else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3161 		    biggest_tsn_newly_acked)) {
3162 			/*
3163 			 * We don't strike these: this is the HTNA
3164 			 * algorithm, i.e. we don't strike if our TSN is
3165 			 * larger than the Highest TSN Newly Acked.
3166 			 */
3167 			;
3168 		} else {
3169 			/* Strike the TSN */
3170 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3171 				sctp_log_fr(biggest_tsn_newly_acked,
3172 				    tp1->rec.data.TSN_seq,
3173 				    tp1->sent,
3174 				    SCTP_FR_LOG_STRIKE_CHUNK);
3175 			}
3176 			if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3177 				tp1->sent++;
3178 			}
3179 			if ((asoc->sctp_cmt_on_off > 0) &&
3180 			    SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3181 				/*
3182 				 * CMT DAC algorithm: If the SACK DAC flag is
3183 				 * set to 0, the lowest_newack test will not
3184 				 * pass because it would have been set to the
3185 				 * cumack earlier. If the chunk is not already
3186 				 * marked for rtx, this is not a mixed sack,
3187 				 * and tp1 is not between two sacked TSNs,
3188 				 * then mark it one more time. NOTE that we
3189 				 * mark one additional time since the SACK DAC
3190 				 * flag indicates that two packets have been
3191 				 * received after this missing TSN.
3192 				 */
3193 				if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3194 				    SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3195 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3196 						sctp_log_fr(48 + num_dests_sacked,
3197 						    tp1->rec.data.TSN_seq,
3198 						    tp1->sent,
3199 						    SCTP_FR_LOG_STRIKE_CHUNK);
3200 					}
3201 					tp1->sent++;
3202 				}
3203 			}
3204 		}
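		/*
		 * If the strikes above advanced this chunk to
		 * SCTP_DATAGRAM_RESEND, it is now eligible for fast
		 * retransmit: fix the flight/rwnd accounting, pick the
		 * destination for the retransmission and record the FR
		 * state below.
		 */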
3205 		if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3206 			struct sctp_nets *alt;
3207 
3208 			/* fix counts and things */
3209 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3210 				sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3211 				    (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3212 				    tp1->book_size,
3213 				    (uintptr_t) tp1->whoTo,
3214 				    tp1->rec.data.TSN_seq);
3215 			}
3216 			if (tp1->whoTo) {
3217 				tp1->whoTo->net_ack++;
3218 				sctp_flight_size_decrease(tp1);
3219 				if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3220 					(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3221 					    tp1);
3222 				}
3223 			}
3224 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3225 				sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3226 				    asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3227 			}
3228 			/* add back to the rwnd */
3229 			asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3230 
3231 			/* remove from the total flight */
3232 			sctp_total_flight_decrease(stcb, tp1);
3233 
3234 			if ((stcb->asoc.prsctp_supported) &&
3235 			    (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3236 				/*
3237 				 * Has it been retransmitted tv_sec times? -
3238 				 * we store the retran count there.
3239 				 */
3240 				if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3241 					/* Yes, so drop it */
3242 					if (tp1->data != NULL) {
3243 						(void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3244 						    SCTP_SO_NOT_LOCKED);
3245 					}
3246 					/* Make sure to flag we had a FR */
3247 					tp1->whoTo->net_ack++;
3248 					continue;
3249 				}
3250 			}
3251 			/*
3252 			 * SCTP_PRINTF("OK, we are now ready to FR this
3253 			 * guy\n");
3254 			 */
3255 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3256 				sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3257 				    0, SCTP_FR_MARKED);
3258 			}
3259 			if (strike_flag) {
3260 				/* This is a subsequent FR */
3261 				SCTP_STAT_INCR(sctps_sendmultfastretrans);
3262 			}
3263 			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3264 			if (asoc->sctp_cmt_on_off > 0) {
3265 				/*
3266 				 * CMT: Using RTX_SSTHRESH policy for CMT.
3267 				 * If CMT is being used, then pick dest with
3268 				 * largest ssthresh for any retransmission.
3269 				 */
3270 				tp1->no_fr_allowed = 1;
3271 				alt = tp1->whoTo;
3272 				/* sa_ignore NO_NULL_CHK */
3273 				if (asoc->sctp_cmt_pf > 0) {
3274 					/*
3275 					 * JRS 5/18/07 - If CMT PF is on,
3276 					 * use the PF version of
3277 					 * find_alt_net()
3278 					 */
3279 					alt = sctp_find_alternate_net(stcb, alt, 2);
3280 				} else {
3281 					/*
3282 					 * JRS 5/18/07 - If only CMT is on,
3283 					 * use the CMT version of
3284 					 * find_alt_net()
3285 					 */
3286 					/* sa_ignore NO_NULL_CHK */
3287 					alt = sctp_find_alternate_net(stcb, alt, 1);
3288 				}
3289 				if (alt == NULL) {
3290 					alt = tp1->whoTo;
3291 				}
3292 				/*
3293 				 * CUCv2: If a different dest is picked for
3294 				 * the retransmission, then new
3295 				 * (rtx-)pseudo_cumack needs to be tracked
3296 				 * for orig dest. Let CUCv2 track new (rtx-)
3297 				 * pseudo-cumack always.
3298 				 */
3299 				if (tp1->whoTo) {
3300 					tp1->whoTo->find_pseudo_cumack = 1;
3301 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3302 				}
3303 			} else {/* CMT is OFF */
3304 
3305 #ifdef SCTP_FR_TO_ALTERNATE
3306 				/* Can we find an alternate? */
3307 				alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3308 #else
3309 				/*
3310 				 * default behavior is to NOT retransmit
3311 				 * FR's to an alternate. Armando Caro's
3312 				 * paper details why.
3313 				 */
3314 				alt = tp1->whoTo;
3315 #endif
3316 			}
3317 
3318 			tp1->rec.data.doing_fast_retransmit = 1;
3319 			tot_retrans++;
3320 			/* mark the sending seq for possible subsequent FR's */
3321 			/*
3322 			 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3323 			 * (uint32_t)tp1->rec.data.TSN_seq);
3324 			 */
3325 			if (TAILQ_EMPTY(&asoc->send_queue)) {
3326 				/*
3327 				 * If the send queue is empty then this is
3328 				 * the next sequence number that will be
3329 				 * assigned, so we subtract one from it to
3330 				 * get the one we last sent.
3331 				 */
3332 				tp1->rec.data.fast_retran_tsn = sending_seq;
3333 			} else {
3334 				/*
3335 				 * If there are chunks on the send queue
3336 				 * (unsent data that has made it from the
3337 				 * stream queues but not out the door), we
3338 				 * take the first one (which will have the
3339 				 * lowest TSN) and subtract one to get the
3340 				 * one we last sent.
3341 				 */
3342 				struct sctp_tmit_chunk *ttt;
3343 
3344 				ttt = TAILQ_FIRST(&asoc->send_queue);
3345 				tp1->rec.data.fast_retran_tsn =
3346 				    ttt->rec.data.TSN_seq;
3347 			}
3348 
3349 			if (tp1->do_rtt) {
3350 				/*
3351 				 * this guy had an RTO calculation pending on
3352 				 * it, cancel it
3353 				 */
3354 				if ((tp1->whoTo != NULL) &&
3355 				    (tp1->whoTo->rto_needed == 0)) {
3356 					tp1->whoTo->rto_needed = 1;
3357 				}
3358 				tp1->do_rtt = 0;
3359 			}
3360 			if (alt != tp1->whoTo) {
3361 				/* yes, there is an alternate. */
3362 				sctp_free_remote_addr(tp1->whoTo);
3363 				/* sa_ignore FREED_MEMORY */
3364 				tp1->whoTo = alt;
3365 				atomic_add_int(&alt->ref_count, 1);
3366 			}
3367 		}
3368 	}
3369 }
3370 
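/*
 * Walk the sent queue and advance the PR-SCTP "advanced peer ack point"
 * past chunks that have been abandoned (marked FORWARD_TSN_SKIP or
 * NR_ACKED).  Returns the last chunk covered by the new ack point (or
 * NULL), which the caller uses when deciding whether to send a
 * FORWARD-TSN.
 */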
3371 struct sctp_tmit_chunk *
3372 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3373     struct sctp_association *asoc)
3374 {
3375 	struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3376 	struct timeval now;
3377 	int now_filled = 0;
3378 
3379 	if (asoc->prsctp_supported == 0) {
3380 		return (NULL);
3381 	}
3382 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3383 		if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3384 		    tp1->sent != SCTP_DATAGRAM_RESEND &&
3385 		    tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3386 			/* no chance to advance, out of here */
3387 			break;
3388 		}
3389 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3390 			if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3391 			    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3392 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3393 				    asoc->advanced_peer_ack_point,
3394 				    tp1->rec.data.TSN_seq, 0, 0);
3395 			}
3396 		}
3397 		if (!PR_SCTP_ENABLED(tp1->flags)) {
3398 			/*
3399 			 * We can't fwd-tsn past any that are reliable, i.e.
3400 			 * retransmitted until the asoc fails.
3401 			 */
3402 			break;
3403 		}
3404 		if (!now_filled) {
3405 			(void)SCTP_GETTIME_TIMEVAL(&now);
3406 			now_filled = 1;
3407 		}
3408 		/*
3409 		 * Now we have a chunk which is marked for another
3410 		 * retransmission to a PR-stream but may have run out of its
3411 		 * chances already OR has been marked to be skipped. Can we
3412 		 * skip it if it's a resend?
3413 		 */
3414 		if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3415 		    (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3416 			/*
3417 			 * Now is this one marked for resend and its time is
3418 			 * now up?
3419 			 */
3420 			if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3421 				/* Yes so drop it */
3422 				if (tp1->data) {
3423 					(void)sctp_release_pr_sctp_chunk(stcb, tp1,
3424 					    1, SCTP_SO_NOT_LOCKED);
3425 				}
3426 			} else {
3427 				/*
3428 				 * No, we are done when we hit one marked for
3429 				 * resend whose time has not expired.
3430 				 */
3431 				break;
3432 			}
3433 		}
3434 		/*
3435 		 * Ok, now if this chunk is marked to be dropped we can clean
3436 		 * it up, advance our peer ack point and check the next
3437 		 * chunk.
3438 		 */
3439 		if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3440 		    (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3441 			/* advanced_peer_ack_point moves forward */
3442 			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3443 				asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3444 				a_adv = tp1;
3445 			} else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3446 				/* No update but we do save the chk */
3447 				a_adv = tp1;
3448 			}
3449 		} else {
3450 			/*
3451 			 * If it is still in RESEND we can advance no
3452 			 * further
3453 			 */
3454 			break;
3455 		}
3456 	}
3457 	return (a_adv);
3458 }
3459 
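/*
 * Consistency check used when the flight size has apparently drained:
 * count the sent-queue chunks by state and complain (or panic under
 * INVARIANTS) if any are still accounted as in flight.  Returns non-zero
 * when the caller should rebuild the flight-size bookkeeping.
 */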
3460 static int
3461 sctp_fs_audit(struct sctp_association *asoc)
3462 {
3463 	struct sctp_tmit_chunk *chk;
3464 	int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3465 	int entry_flight, entry_cnt, ret;
3466 
3467 	entry_flight = asoc->total_flight;
3468 	entry_cnt = asoc->total_flight_count;
3469 	ret = 0;
3470 
3471 	if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3472 		return (0);
3473 
3474 	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3475 		if (chk->sent < SCTP_DATAGRAM_RESEND) {
3476 			SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3477 			    chk->rec.data.TSN_seq,
3478 			    chk->send_size,
3479 			    chk->snd_count);
3480 			inflight++;
3481 		} else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3482 			resend++;
3483 		} else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3484 			inbetween++;
3485 		} else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3486 			above++;
3487 		} else {
3488 			acked++;
3489 		}
3490 	}
3491 
3492 	if ((inflight > 0) || (inbetween > 0)) {
3493 #ifdef INVARIANTS
3494 		panic("Flight size-express incorrect? \n");
3495 #else
3496 		SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3497 		    entry_flight, entry_cnt);
3498 
3499 		SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3500 		    inflight, inbetween, resend, above, acked);
3501 		ret = 1;
3502 #endif
3503 	}
3504 	return (ret);
3505 }
3506 
3507 
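/*
 * A chunk that was sent as a window probe is pulled back out of flight
 * and marked SCTP_DATAGRAM_RESEND so it will be retransmitted normally
 * once the peer's window has reopened.
 */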
3508 static void
3509 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3510     struct sctp_association *asoc,
3511     struct sctp_tmit_chunk *tp1)
3512 {
3513 	tp1->window_probe = 0;
3514 	if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3515 		/* TSNs skipped; we do NOT move back. */
3516 		sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3517 		    tp1->whoTo ? tp1->whoTo->flight_size : 0,
3518 		    tp1->book_size,
3519 		    (uintptr_t) tp1->whoTo,
3520 		    tp1->rec.data.TSN_seq);
3521 		return;
3522 	}
3523 	/* First setup this by shrinking flight */
3524 	if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3525 		(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3526 		    tp1);
3527 	}
3528 	sctp_flight_size_decrease(tp1);
3529 	sctp_total_flight_decrease(stcb, tp1);
3530 	/* Now mark for resend */
3531 	tp1->sent = SCTP_DATAGRAM_RESEND;
3532 	sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3533 
3534 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3535 		sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3536 		    tp1->whoTo->flight_size,
3537 		    tp1->book_size,
3538 		    (uintptr_t) tp1->whoTo,
3539 		    tp1->rec.data.TSN_seq);
3540 	}
3541 }
3542 
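/*
 * Express (fast-path) SACK handling: only a new cumulative ack and rwnd
 * are processed here; no gap-ack blocks are walked.
 */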
3543 void
3544 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3545     uint32_t rwnd, int *abort_now, int ecne_seen)
3546 {
3547 	struct sctp_nets *net;
3548 	struct sctp_association *asoc;
3549 	struct sctp_tmit_chunk *tp1, *tp2;
3550 	uint32_t old_rwnd;
3551 	int win_probe_recovery = 0;
3552 	int win_probe_recovered = 0;
3553 	int j, done_once = 0;
3554 	int rto_ok = 1;
3555 
3556 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3557 		sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3558 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3559 	}
3560 	SCTP_TCB_LOCK_ASSERT(stcb);
3561 #ifdef SCTP_ASOCLOG_OF_TSNS
3562 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3563 	stcb->asoc.cumack_log_at++;
3564 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3565 		stcb->asoc.cumack_log_at = 0;
3566 	}
3567 #endif
3568 	asoc = &stcb->asoc;
3569 	old_rwnd = asoc->peers_rwnd;
3570 	if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3571 		/* old ack */
3572 		return;
3573 	} else if (asoc->last_acked_seq == cumack) {
3574 		/* Window update sack */
3575 		asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3576 		    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3577 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3578 			/* SWS sender side engages */
3579 			asoc->peers_rwnd = 0;
3580 		}
3581 		if (asoc->peers_rwnd > old_rwnd) {
3582 			goto again;
3583 		}
3584 		return;
3585 	}
3586 	/* First setup for CC stuff */
3587 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3588 		if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3589 			/* Drag along the window_tsn for cwr's */
3590 			net->cwr_window_tsn = cumack;
3591 		}
3592 		net->prev_cwnd = net->cwnd;
3593 		net->net_ack = 0;
3594 		net->net_ack2 = 0;
3595 
3596 		/*
3597 		 * CMT: Reset CUC and Fast recovery algo variables before
3598 		 * SACK processing
3599 		 */
3600 		net->new_pseudo_cumack = 0;
3601 		net->will_exit_fast_recovery = 0;
3602 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3603 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3604 		}
3605 	}
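	/*
	 * When strict SACK checking is enabled, verify the cum-ack does not
	 * reach beyond the highest TSN we have actually sent; if it does,
	 * abort the association with a protocol violation cause.
	 */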
3606 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3607 		uint32_t send_s;
3608 
3609 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3610 			tp1 = TAILQ_LAST(&asoc->sent_queue,
3611 			    sctpchunk_listhead);
3612 			send_s = tp1->rec.data.TSN_seq + 1;
3613 		} else {
3614 			send_s = asoc->sending_seq;
3615 		}
3616 		if (SCTP_TSN_GE(cumack, send_s)) {
3617 			struct mbuf *op_err;
3618 			char msg[SCTP_DIAG_INFO_LEN];
3619 
3620 			*abort_now = 1;
3621 			/* XXX */
3622 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3623 			    cumack, send_s);
3624 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3625 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3626 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3627 			return;
3628 		}
3629 	}
3630 	asoc->this_sack_highest_gap = cumack;
3631 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3632 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3633 		    stcb->asoc.overall_error_count,
3634 		    0,
3635 		    SCTP_FROM_SCTP_INDATA,
3636 		    __LINE__);
3637 	}
3638 	stcb->asoc.overall_error_count = 0;
3639 	if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3640 		/* process the new consecutive TSN first */
3641 		TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3642 			if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3643 				if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3644 					SCTP_PRINTF("Warning, an unsent is now acked?\n");
3645 				}
3646 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3647 					/*
3648 					 * If it is less than ACKED, it is
3649 					 * now no longer in flight. Higher
3650 					 * values may occur during marking
3651 					 */
3652 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3653 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3654 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3655 							    tp1->whoTo->flight_size,
3656 							    tp1->book_size,
3657 							    (uintptr_t) tp1->whoTo,
3658 							    tp1->rec.data.TSN_seq);
3659 						}
3660 						sctp_flight_size_decrease(tp1);
3661 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3662 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3663 							    tp1);
3664 						}
3665 						/* sa_ignore NO_NULL_CHK */
3666 						sctp_total_flight_decrease(stcb, tp1);
3667 					}
3668 					tp1->whoTo->net_ack += tp1->send_size;
3669 					if (tp1->snd_count < 2) {
3670 						/*
3671 						 * True non-retransmitted
3672 						 * chunk
3673 						 */
3674 						tp1->whoTo->net_ack2 +=
3675 						    tp1->send_size;
3676 
3677 						/* update RTO too? */
3678 						if (tp1->do_rtt) {
3679 							if (rto_ok) {
3680 								tp1->whoTo->RTO =
3681 								/*
3682 								 * sa_ignore
3683 								 * NO_NULL_CH
3684 								 * NO_NULL_CHK
3686 								    sctp_calculate_rto(stcb,
3687 								    asoc, tp1->whoTo,
3688 								    &tp1->sent_rcv_time,
3689 								    sctp_align_safe_nocopy,
3690 								    SCTP_RTT_FROM_DATA);
3691 								rto_ok = 0;
3692 							}
3693 							if (tp1->whoTo->rto_needed == 0) {
3694 								tp1->whoTo->rto_needed = 1;
3695 							}
3696 							tp1->do_rtt = 0;
3697 						}
3698 					}
3699 					/*
3700 					 * CMT: CUCv2 algorithm. From the
3701 					 * cumack'd TSNs, for each TSN being
3702 					 * acked for the first time, set the
3703 					 * following variables for the
3704 					 * corresp destination.
3705 					 * new_pseudo_cumack will trigger a
3706 					 * cwnd update.
3707 					 * find_(rtx_)pseudo_cumack will
3708 					 * trigger search for the next
3709 					 * expected (rtx-)pseudo-cumack.
3710 					 */
3711 					tp1->whoTo->new_pseudo_cumack = 1;
3712 					tp1->whoTo->find_pseudo_cumack = 1;
3713 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
3714 
3715 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3716 						/* sa_ignore NO_NULL_CHK */
3717 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3718 					}
3719 				}
3720 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3721 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3722 				}
3723 				if (tp1->rec.data.chunk_was_revoked) {
3724 					/* deflate the cwnd */
3725 					tp1->whoTo->cwnd -= tp1->book_size;
3726 					tp1->rec.data.chunk_was_revoked = 0;
3727 				}
3728 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3729 					if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3730 						asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3731 #ifdef INVARIANTS
3732 					} else {
3733 						panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3734 #endif
3735 					}
3736 				}
3737 				TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3738 				if (tp1->data) {
3739 					/* sa_ignore NO_NULL_CHK */
3740 					sctp_free_bufspace(stcb, asoc, tp1, 1);
3741 					sctp_m_freem(tp1->data);
3742 					tp1->data = NULL;
3743 				}
3744 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3745 					sctp_log_sack(asoc->last_acked_seq,
3746 					    cumack,
3747 					    tp1->rec.data.TSN_seq,
3748 					    0,
3749 					    0,
3750 					    SCTP_LOG_FREE_SENT);
3751 				}
3752 				asoc->sent_queue_cnt--;
3753 				sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3754 			} else {
3755 				break;
3756 			}
3757 		}
3758 
3759 	}
3760 	/* sa_ignore NO_NULL_CHK */
3761 	if (stcb->sctp_socket) {
3762 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3763 		struct socket *so;
3764 
3765 #endif
3766 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3767 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3768 			/* sa_ignore NO_NULL_CHK */
3769 			sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3770 		}
3771 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3772 		so = SCTP_INP_SO(stcb->sctp_ep);
3773 		atomic_add_int(&stcb->asoc.refcnt, 1);
3774 		SCTP_TCB_UNLOCK(stcb);
3775 		SCTP_SOCKET_LOCK(so, 1);
3776 		SCTP_TCB_LOCK(stcb);
3777 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
3778 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3779 			/* assoc was freed while we were unlocked */
3780 			SCTP_SOCKET_UNLOCK(so, 1);
3781 			return;
3782 		}
3783 #endif
3784 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3785 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3786 		SCTP_SOCKET_UNLOCK(so, 1);
3787 #endif
3788 	} else {
3789 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3790 			sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3791 		}
3792 	}
3793 
3794 	/* JRS - Use the congestion control given in the CC module */
3795 	if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3796 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3797 			if (net->net_ack2 > 0) {
3798 				/*
3799 				 * Karn's rule applies to clearing error
3800 				 * count, this is optional.
3801 				 */
3802 				net->error_count = 0;
3803 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3804 					/* addr came good */
3805 					net->dest_state |= SCTP_ADDR_REACHABLE;
3806 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3807 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
3808 				}
3809 				if (net == stcb->asoc.primary_destination) {
3810 					if (stcb->asoc.alternate) {
3811 						/*
3812 						 * release the alternate,
3813 						 * primary is good
3814 						 */
3815 						sctp_free_remote_addr(stcb->asoc.alternate);
3816 						stcb->asoc.alternate = NULL;
3817 					}
3818 				}
3819 				if (net->dest_state & SCTP_ADDR_PF) {
3820 					net->dest_state &= ~SCTP_ADDR_PF;
3821 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3822 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3823 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3824 					/* Done with this net */
3825 					net->net_ack = 0;
3826 				}
3827 				/* restore any doubled timers */
3828 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3829 				if (net->RTO < stcb->asoc.minrto) {
3830 					net->RTO = stcb->asoc.minrto;
3831 				}
3832 				if (net->RTO > stcb->asoc.maxrto) {
3833 					net->RTO = stcb->asoc.maxrto;
3834 				}
3835 			}
3836 		}
3837 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3838 	}
3839 	asoc->last_acked_seq = cumack;
3840 
3841 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
3842 		/* nothing left in-flight */
3843 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3844 			net->flight_size = 0;
3845 			net->partial_bytes_acked = 0;
3846 		}
3847 		asoc->total_flight = 0;
3848 		asoc->total_flight_count = 0;
3849 	}
3850 	/* RWND update */
3851 	asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3852 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3853 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3854 		/* SWS sender side engages */
3855 		asoc->peers_rwnd = 0;
3856 	}
3857 	if (asoc->peers_rwnd > old_rwnd) {
3858 		win_probe_recovery = 1;
3859 	}
3860 	/* Now assure a timer where data is queued at */
3861 again:
3862 	j = 0;
3863 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3864 		int to_ticks;
3865 
3866 		if (win_probe_recovery && (net->window_probe)) {
3867 			win_probe_recovered = 1;
3868 			/*
3869 			 * Find the first chunk that was used with the window
3870 			 * probe and clear its sent state.
3871 			 */
3872 			/* sa_ignore FREED_MEMORY */
3873 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3874 				if (tp1->window_probe) {
3875 					/* move back to data send queue */
3876 					sctp_window_probe_recovery(stcb, asoc, tp1);
3877 					break;
3878 				}
3879 			}
3880 		}
3881 		if (net->RTO == 0) {
3882 			to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3883 		} else {
3884 			to_ticks = MSEC_TO_TICKS(net->RTO);
3885 		}
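		/*
		 * Keep the T3-rxt timer running on any destination that
		 * still has data in flight; otherwise make sure it is
		 * stopped (unless a window probe still needs it).
		 */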
3886 		if (net->flight_size) {
3887 			j++;
3888 			(void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3889 			    sctp_timeout_handler, &net->rxt_timer);
3890 			if (net->window_probe) {
3891 				net->window_probe = 0;
3892 			}
3893 		} else {
3894 			if (net->window_probe) {
3895 				/*
3896 				 * In window probes we must assure a timer
3897 				 * is still running there
3898 				 */
3899 				net->window_probe = 0;
3900 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3901 					SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3902 					    sctp_timeout_handler, &net->rxt_timer);
3903 				}
3904 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3905 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3906 				    stcb, net,
3907 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
3908 			}
3909 		}
3910 	}
3911 	if ((j == 0) &&
3912 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3913 	    (asoc->sent_queue_retran_cnt == 0) &&
3914 	    (win_probe_recovered == 0) &&
3915 	    (done_once == 0)) {
3916 		/*
3917 		 * huh, this should not happen unless all packets are
3918 		 * PR-SCTP and marked to skip of course.
3919 		 */
3920 		if (sctp_fs_audit(asoc)) {
3921 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3922 				net->flight_size = 0;
3923 			}
3924 			asoc->total_flight = 0;
3925 			asoc->total_flight_count = 0;
3926 			asoc->sent_queue_retran_cnt = 0;
3927 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3928 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3929 					sctp_flight_size_increase(tp1);
3930 					sctp_total_flight_increase(stcb, tp1);
3931 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3932 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3933 				}
3934 			}
3935 		}
3936 		done_once = 1;
3937 		goto again;
3938 	}
3939 	/**********************************/
3940 	/* Now what about shutdown issues */
3941 	/**********************************/
3942 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3943 		/* nothing left on sendqueue.. consider done */
3944 		/* clean up */
3945 		if ((asoc->stream_queue_cnt == 1) &&
3946 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3947 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3948 		    (asoc->locked_on_sending)
3949 		    ) {
3950 			struct sctp_stream_queue_pending *sp;
3951 
3952 			/*
3953 			 * I may be in a state where we got it all across, but
3954 			 * cannot write more due to a shutdown... we abort
3955 			 * since the user did not indicate EOR in this case.
3956 			 * The sp will be cleaned during free of the asoc.
3957 			 */
3958 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3959 			    sctp_streamhead);
3960 			if ((sp) && (sp->length == 0)) {
3961 				/* Let cleanup code purge it */
3962 				if (sp->msg_is_complete) {
3963 					asoc->stream_queue_cnt--;
3964 				} else {
3965 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3966 					asoc->locked_on_sending = NULL;
3967 					asoc->stream_queue_cnt--;
3968 				}
3969 			}
3970 		}
3971 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3972 		    (asoc->stream_queue_cnt == 0)) {
3973 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3974 				/* Need to abort here */
3975 				struct mbuf *op_err;
3976 
3977 		abort_out_now:
3978 				*abort_now = 1;
3979 				/* XXX */
3980 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3981 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
3982 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3983 			} else {
3984 				struct sctp_nets *netp;
3985 
3986 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3987 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3988 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3989 				}
3990 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
3991 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3992 				sctp_stop_timers_for_shutdown(stcb);
3993 				if (asoc->alternate) {
3994 					netp = asoc->alternate;
3995 				} else {
3996 					netp = asoc->primary_destination;
3997 				}
3998 				sctp_send_shutdown(stcb, netp);
3999 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4000 				    stcb->sctp_ep, stcb, netp);
4001 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4002 				    stcb->sctp_ep, stcb, netp);
4003 			}
4004 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4005 		    (asoc->stream_queue_cnt == 0)) {
4006 			struct sctp_nets *netp;
4007 
4008 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4009 				goto abort_out_now;
4010 			}
4011 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4012 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4013 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4014 			sctp_stop_timers_for_shutdown(stcb);
4015 			if (asoc->alternate) {
4016 				netp = asoc->alternate;
4017 			} else {
4018 				netp = asoc->primary_destination;
4019 			}
4020 			sctp_send_shutdown_ack(stcb, netp);
4021 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4022 			    stcb->sctp_ep, stcb, netp);
4023 		}
4024 	}
4025 	/*********************************************/
4026 	/* Here we perform PR-SCTP procedures        */
4027 	/* (section 4.2)                             */
4028 	/*********************************************/
4029 	/* C1. update advancedPeerAckPoint */
4030 	if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4031 		asoc->advanced_peer_ack_point = cumack;
4032 	}
4033 	/* PR-Sctp issues need to be addressed too */
4034 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4035 		struct sctp_tmit_chunk *lchk;
4036 		uint32_t old_adv_peer_ack_point;
4037 
4038 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4039 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4040 		/* C3. See if we need to send a Fwd-TSN */
4041 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4042 			/*
4043 			 * ISSUE with ECN, see FWD-TSN processing.
4044 			 */
4045 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4046 				send_forward_tsn(stcb, asoc);
4047 			} else if (lchk) {
4048 				/* try to FR fwd-tsn's that get lost too */
4049 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4050 					send_forward_tsn(stcb, asoc);
4051 				}
4052 			}
4053 		}
4054 		if (lchk) {
4055 			/* Assure a timer is up */
4056 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4057 			    stcb->sctp_ep, stcb, lchk->whoTo);
4058 		}
4059 	}
4060 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4061 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4062 		    rwnd,
4063 		    stcb->asoc.peers_rwnd,
4064 		    stcb->asoc.total_flight,
4065 		    stcb->asoc.total_output_queue_size);
4066 	}
4067 }
4068 
4069 void
4070 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4071     struct sctp_tcb *stcb,
4072     uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4073     int *abort_now, uint8_t flags,
4074     uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4075 {
4076 	struct sctp_association *asoc;
4077 	struct sctp_tmit_chunk *tp1, *tp2;
4078 	uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4079 	uint16_t wake_him = 0;
4080 	uint32_t send_s = 0;
4081 	long j;
4082 	int accum_moved = 0;
4083 	int will_exit_fast_recovery = 0;
4084 	uint32_t a_rwnd, old_rwnd;
4085 	int win_probe_recovery = 0;
4086 	int win_probe_recovered = 0;
4087 	struct sctp_nets *net = NULL;
4088 	int done_once;
4089 	int rto_ok = 1;
4090 	uint8_t reneged_all = 0;
4091 	uint8_t cmt_dac_flag;
4092 
4093 	/*
4094 	 * we take any chance we can to service our queues since we cannot
4095 	 * get awoken when the socket is read from :<
4096 	 */
4097 	/*
4098 	 * Now perform the actual SACK handling: 1) Verify that it is not an
4099 	 * old sack, if so discard. 2) If there is nothing left in the send
4100 	 * queue (cum-ack is equal to last acked) then you have a duplicate
4101 	 * too, update any rwnd change and verify no timers are running.
4102 	 * then return. 3) Process any new consecutive data i.e. cum-ack
4103 	 * moved process these first and note that it moved. 4) Process any
4104 	 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4105 	 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4106 	 * sync up flightsizes and things, stop all timers and also check
4107 	 * for shutdown_pending state. If so then go ahead and send off the
4108 	 * shutdown. If in shutdown recv, send off the shutdown-ack and
4109 	 * start that timer, Ret. 9) Strike any non-acked things and do FR
4110 	 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4111 	 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4112 	 * if in shutdown_recv state.
4113 	 */
4114 	SCTP_TCB_LOCK_ASSERT(stcb);
4115 	/* CMT DAC algo */
4116 	this_sack_lowest_newack = 0;
4117 	SCTP_STAT_INCR(sctps_slowpath_sack);
4118 	last_tsn = cum_ack;
4119 	cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4120 #ifdef SCTP_ASOCLOG_OF_TSNS
4121 	stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4122 	stcb->asoc.cumack_log_at++;
4123 	if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4124 		stcb->asoc.cumack_log_at = 0;
4125 	}
4126 #endif
4127 	a_rwnd = rwnd;
4128 
4129 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4130 		sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4131 		    rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4132 	}
4133 	old_rwnd = stcb->asoc.peers_rwnd;
4134 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4135 		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4136 		    stcb->asoc.overall_error_count,
4137 		    0,
4138 		    SCTP_FROM_SCTP_INDATA,
4139 		    __LINE__);
4140 	}
4141 	stcb->asoc.overall_error_count = 0;
4142 	asoc = &stcb->asoc;
4143 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4144 		sctp_log_sack(asoc->last_acked_seq,
4145 		    cum_ack,
4146 		    0,
4147 		    num_seg,
4148 		    num_dup,
4149 		    SCTP_LOG_NEW_SACK);
4150 	}
4151 	if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4152 		uint16_t i;
4153 		uint32_t *dupdata, dblock;
4154 
4155 		for (i = 0; i < num_dup; i++) {
4156 			dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4157 			    sizeof(uint32_t), (uint8_t *) & dblock);
4158 			if (dupdata == NULL) {
4159 				break;
4160 			}
4161 			sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4162 		}
4163 	}
4164 	if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4165 		/* reality check */
4166 		if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4167 			tp1 = TAILQ_LAST(&asoc->sent_queue,
4168 			    sctpchunk_listhead);
4169 			send_s = tp1->rec.data.TSN_seq + 1;
4170 		} else {
4171 			tp1 = NULL;
4172 			send_s = asoc->sending_seq;
4173 		}
4174 		if (SCTP_TSN_GE(cum_ack, send_s)) {
4175 			struct mbuf *op_err;
4176 			char msg[SCTP_DIAG_INFO_LEN];
4177 
4178 			/*
4179 			 * no way, we have not even sent this TSN out yet.
4180 			 * Peer is hopelessly messed up with us.
4181 			 */
4182 			SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4183 			    cum_ack, send_s);
4184 			if (tp1) {
4185 				SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4186 				    tp1->rec.data.TSN_seq, (void *)tp1);
4187 			}
4188 	hopeless_peer:
4189 			*abort_now = 1;
4190 			/* XXX */
4191 			snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4192 			    cum_ack, send_s);
4193 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4194 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4195 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4196 			return;
4197 		}
4198 	}
4199 	/**********************/
4200 	/* 1) check the range */
4201 	/**********************/
4202 	if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4203 		/* acking something behind */
4204 		return;
4205 	}
4206 	/* update the Rwnd of the peer */
4207 	if (TAILQ_EMPTY(&asoc->sent_queue) &&
4208 	    TAILQ_EMPTY(&asoc->send_queue) &&
4209 	    (asoc->stream_queue_cnt == 0)) {
4210 		/* nothing left on send/sent and strmq */
4211 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4212 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4213 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4214 		}
4215 		asoc->peers_rwnd = a_rwnd;
4216 		if (asoc->sent_queue_retran_cnt) {
4217 			asoc->sent_queue_retran_cnt = 0;
4218 		}
4219 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4220 			/* SWS sender side engages */
4221 			asoc->peers_rwnd = 0;
4222 		}
4223 		/* stop any timers */
4224 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4225 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4226 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4227 			net->partial_bytes_acked = 0;
4228 			net->flight_size = 0;
4229 		}
4230 		asoc->total_flight = 0;
4231 		asoc->total_flight_count = 0;
4232 		return;
4233 	}
4234 	/*
4235 	 * We init net_ack and net_ack2 to 0. These are used to track two
4236 	 * things. The total byte count acked is tracked in net_ack AND
4237 	 * net_ack2 is used to track the total bytes acked that are
4238 	 * unambiguous and were never retransmitted. We track these on a
4239 	 * per destination address basis.
4240 	 */
4241 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4242 		if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4243 			/* Drag along the window_tsn for cwr's */
4244 			net->cwr_window_tsn = cum_ack;
4245 		}
4246 		net->prev_cwnd = net->cwnd;
4247 		net->net_ack = 0;
4248 		net->net_ack2 = 0;
4249 
4250 		/*
4251 		 * CMT: Reset CUC and Fast recovery algo variables before
4252 		 * SACK processing
4253 		 */
4254 		net->new_pseudo_cumack = 0;
4255 		net->will_exit_fast_recovery = 0;
4256 		if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4257 			(*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4258 		}
4259 	}
4260 	/* process the new consecutive TSN first */
4261 	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4262 		if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4263 			if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4264 				accum_moved = 1;
4265 				if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4266 					/*
4267 					 * If it is less than ACKED, it is
4268 					 * now no longer in flight. Higher
4269 					 * values may occur during marking
4270 					 */
4271 					if ((tp1->whoTo->dest_state &
4272 					    SCTP_ADDR_UNCONFIRMED) &&
4273 					    (tp1->snd_count < 2)) {
4274 						/*
4275 						 * If there was no retran
4276 						 * and the address is
4277 						 * un-confirmed and we sent
4278 						 * there and are now
4279 						 * sacked... it's confirmed,
4280 						 * mark it so.
4281 						 */
4282 						tp1->whoTo->dest_state &=
4283 						    ~SCTP_ADDR_UNCONFIRMED;
4284 					}
4285 					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4286 						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4287 							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4288 							    tp1->whoTo->flight_size,
4289 							    tp1->book_size,
4290 							    (uintptr_t) tp1->whoTo,
4291 							    tp1->rec.data.TSN_seq);
4292 						}
4293 						sctp_flight_size_decrease(tp1);
4294 						sctp_total_flight_decrease(stcb, tp1);
4295 						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4296 							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4297 							    tp1);
4298 						}
4299 					}
4300 					tp1->whoTo->net_ack += tp1->send_size;
4301 
4302 					/* CMT SFR and DAC algos */
4303 					this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4304 					tp1->whoTo->saw_newack = 1;
4305 
4306 					if (tp1->snd_count < 2) {
4307 						/*
4308 						 * True non-retransmitted
4309 						 * chunk
4310 						 */
4311 						tp1->whoTo->net_ack2 +=
4312 						    tp1->send_size;
4313 
4314 						/* update RTO too? */
4315 						if (tp1->do_rtt) {
4316 							if (rto_ok) {
4317 								tp1->whoTo->RTO =
4318 								    sctp_calculate_rto(stcb,
4319 								    asoc, tp1->whoTo,
4320 								    &tp1->sent_rcv_time,
4321 								    sctp_align_safe_nocopy,
4322 								    SCTP_RTT_FROM_DATA);
4323 								rto_ok = 0;
4324 							}
4325 							if (tp1->whoTo->rto_needed == 0) {
4326 								tp1->whoTo->rto_needed = 1;
4327 							}
4328 							tp1->do_rtt = 0;
4329 						}
4330 					}
4331 					/*
4332 					 * CMT: CUCv2 algorithm. From the
4333 					 * cumack'd TSNs, for each TSN being
4334 					 * acked for the first time, set the
4335 					 * following variables for the
4336 					 * corresp destination.
4337 					 * new_pseudo_cumack will trigger a
4338 					 * cwnd update.
4339 					 * find_(rtx_)pseudo_cumack will
4340 					 * trigger search for the next
4341 					 * expected (rtx-)pseudo-cumack.
4342 					 */
4343 					tp1->whoTo->new_pseudo_cumack = 1;
4344 					tp1->whoTo->find_pseudo_cumack = 1;
4345 					tp1->whoTo->find_rtx_pseudo_cumack = 1;
4346 
4347 
4348 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4349 						sctp_log_sack(asoc->last_acked_seq,
4350 						    cum_ack,
4351 						    tp1->rec.data.TSN_seq,
4352 						    0,
4353 						    0,
4354 						    SCTP_LOG_TSN_ACKED);
4355 					}
4356 					if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4357 						sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4358 					}
4359 				}
4360 				if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4361 					sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4362 #ifdef SCTP_AUDITING_ENABLED
4363 					sctp_audit_log(0xB3,
4364 					    (asoc->sent_queue_retran_cnt & 0x000000ff));
4365 #endif
4366 				}
4367 				if (tp1->rec.data.chunk_was_revoked) {
4368 					/* deflate the cwnd */
4369 					tp1->whoTo->cwnd -= tp1->book_size;
4370 					tp1->rec.data.chunk_was_revoked = 0;
4371 				}
4372 				if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4373 					tp1->sent = SCTP_DATAGRAM_ACKED;
4374 				}
4375 			}
4376 		} else {
4377 			break;
4378 		}
4379 	}
4380 	biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4381 	/* always set this up to cum-ack */
4382 	asoc->this_sack_highest_gap = last_tsn;
4383 
4384 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4385 
4386 		/*
4387 		 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4388 		 * to be greater than the cumack. Also reset saw_newack to 0
4389 		 * for all dests.
4390 		 */
4391 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4392 			net->saw_newack = 0;
4393 			net->this_sack_highest_newack = last_tsn;
4394 		}
4395 
4396 		/*
4397 		 * thisSackHighestGap will increase while handling NEW
4398 		 * segments this_sack_highest_newack will increase while
4399 		 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4400 		 * used for CMT DAC algo. saw_newack will also change.
4401 		 */
4402 		if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4403 		    &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4404 		    num_seg, num_nr_seg, &rto_ok)) {
4405 			wake_him++;
4406 		}
4407 		if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4408 			/*
4409 			 * validate the biggest_tsn_acked in the gap acks if
4410 			 * strict adherence is wanted.
4411 			 */
4412 			if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4413 				/*
4414 				 * peer is either confused or we are under
4415 				 * attack. We must abort.
4416 				 */
4417 				SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4418 				    biggest_tsn_acked, send_s);
4419 				goto hopeless_peer;
4420 			}
4421 		}
4422 	}
4423 	/*******************************************/
4424 	/* cancel ALL T3-send timer if accum moved */
4425 	/*******************************************/
4426 	if (asoc->sctp_cmt_on_off > 0) {
4427 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4428 			if (net->new_pseudo_cumack)
4429 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4430 				    stcb, net,
4431 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4432 
4433 		}
4434 	} else {
4435 		if (accum_moved) {
4436 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4437 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4438 				    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4439 			}
4440 		}
4441 	}
4442 	/********************************************/
4443 	/* drop the acked chunks from the sentqueue */
4444 	/********************************************/
4445 	asoc->last_acked_seq = cum_ack;
4446 
4447 	TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4448 		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4449 			break;
4450 		}
4451 		if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4452 			if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4453 				asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4454 #ifdef INVARIANTS
4455 			} else {
4456 				panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4457 #endif
4458 			}
4459 		}
4460 		TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4461 		if (PR_SCTP_ENABLED(tp1->flags)) {
4462 			if (asoc->pr_sctp_cnt != 0)
4463 				asoc->pr_sctp_cnt--;
4464 		}
4465 		asoc->sent_queue_cnt--;
4466 		if (tp1->data) {
4467 			/* sa_ignore NO_NULL_CHK */
4468 			sctp_free_bufspace(stcb, asoc, tp1, 1);
4469 			sctp_m_freem(tp1->data);
4470 			tp1->data = NULL;
4471 			if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4472 				asoc->sent_queue_cnt_removeable--;
4473 			}
4474 		}
4475 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4476 			sctp_log_sack(asoc->last_acked_seq,
4477 			    cum_ack,
4478 			    tp1->rec.data.TSN_seq,
4479 			    0,
4480 			    0,
4481 			    SCTP_LOG_FREE_SENT);
4482 		}
4483 		sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4484 		wake_him++;
4485 	}
4486 	if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4487 #ifdef INVARIANTS
4488 		panic("Warning flight size is positive and should be 0");
4489 #else
4490 		SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4491 		    asoc->total_flight);
4492 #endif
4493 		asoc->total_flight = 0;
4494 	}
4495 	/* sa_ignore NO_NULL_CHK */
4496 	if ((wake_him) && (stcb->sctp_socket)) {
4497 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4498 		struct socket *so;
4499 
4500 #endif
4501 		SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4502 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4503 			sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4504 		}
4505 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4506 		so = SCTP_INP_SO(stcb->sctp_ep);
4507 		atomic_add_int(&stcb->asoc.refcnt, 1);
4508 		SCTP_TCB_UNLOCK(stcb);
4509 		SCTP_SOCKET_LOCK(so, 1);
4510 		SCTP_TCB_LOCK(stcb);
4511 		atomic_subtract_int(&stcb->asoc.refcnt, 1);
4512 		if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4513 			/* assoc was freed while we were unlocked */
4514 			SCTP_SOCKET_UNLOCK(so, 1);
4515 			return;
4516 		}
4517 #endif
4518 		sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4519 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4520 		SCTP_SOCKET_UNLOCK(so, 1);
4521 #endif
4522 	} else {
4523 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4524 			sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4525 		}
4526 	}
4527 
4528 	if (asoc->fast_retran_loss_recovery && accum_moved) {
4529 		if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4530 			/* Setup so we will exit RFC2582 fast recovery */
4531 			will_exit_fast_recovery = 1;
4532 		}
4533 	}
4534 	/*
4535 	 * Check for revoked fragments:
4536 	 *
4537 	 * If the previous SACK had no frags then we can't have any revoked.
4538 	 * If the previous SACK had frags then: if we now have frags (num_seg
4539 	 * > 0), call sctp_check_for_revoked() to tell if the peer revoked
4540 	 * some of them; else the peer revoked all ACKED fragments, since we
4541 	 * had some before and now we have NONE.
4542 	 */
4543 
4544 	if (num_seg) {
4545 		sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4546 		asoc->saw_sack_with_frags = 1;
4547 	} else if (asoc->saw_sack_with_frags) {
4548 		int cnt_revoked = 0;
4549 
4550 		/* Peer revoked all dg's marked or acked */
4551 		TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4552 			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4553 				tp1->sent = SCTP_DATAGRAM_SENT;
4554 				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4555 					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4556 					    tp1->whoTo->flight_size,
4557 					    tp1->book_size,
4558 					    (uintptr_t) tp1->whoTo,
4559 					    tp1->rec.data.TSN_seq);
4560 				}
4561 				sctp_flight_size_increase(tp1);
4562 				sctp_total_flight_increase(stcb, tp1);
4563 				tp1->rec.data.chunk_was_revoked = 1;
4564 				/*
4565 				 * To ensure that this increase in
4566 				 * flightsize, which is artificial, does not
4567 				 * throttle the sender, we also increase the
4568 				 * cwnd artificially.
4569 				 */
4570 				tp1->whoTo->cwnd += tp1->book_size;
4571 				cnt_revoked++;
4572 			}
4573 		}
4574 		if (cnt_revoked) {
4575 			reneged_all = 1;
4576 		}
4577 		asoc->saw_sack_with_frags = 0;
4578 	}
4579 	if (num_nr_seg > 0)
4580 		asoc->saw_sack_with_nr_frags = 1;
4581 	else
4582 		asoc->saw_sack_with_nr_frags = 0;
4583 
4584 	/* JRS - Use the congestion control given in the CC module */
4585 	if (ecne_seen == 0) {
4586 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4587 			if (net->net_ack2 > 0) {
4588 				/*
4589 				 * Karn's rule applies to clearing error
4590 				 * count, this is optional.
4591 				 */
4592 				net->error_count = 0;
4593 				if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4594 					/* addr came good */
4595 					net->dest_state |= SCTP_ADDR_REACHABLE;
4596 					sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4597 					    0, (void *)net, SCTP_SO_NOT_LOCKED);
4598 				}
4599 				if (net == stcb->asoc.primary_destination) {
4600 					if (stcb->asoc.alternate) {
4601 						/*
4602 						 * release the alternate,
4603 						 * primary is good
4604 						 */
4605 						sctp_free_remote_addr(stcb->asoc.alternate);
4606 						stcb->asoc.alternate = NULL;
4607 					}
4608 				}
4609 				if (net->dest_state & SCTP_ADDR_PF) {
4610 					net->dest_state &= ~SCTP_ADDR_PF;
4611 					sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4612 					sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4613 					asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4614 					/* Done with this net */
4615 					net->net_ack = 0;
4616 				}
4617 				/* restore any doubled timers */
4618 				net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4619 				if (net->RTO < stcb->asoc.minrto) {
4620 					net->RTO = stcb->asoc.minrto;
4621 				}
4622 				if (net->RTO > stcb->asoc.maxrto) {
4623 					net->RTO = stcb->asoc.maxrto;
4624 				}
4625 			}
4626 		}
4627 		asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4628 	}
4629 	if (TAILQ_EMPTY(&asoc->sent_queue)) {
4630 		/* nothing left in-flight */
4631 		TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4632 			/* stop all timers */
4633 			sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4634 			    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4635 			net->flight_size = 0;
4636 			net->partial_bytes_acked = 0;
4637 		}
4638 		asoc->total_flight = 0;
4639 		asoc->total_flight_count = 0;
4640 	}
4641 	/**********************************/
4642 	/* Now what about shutdown issues */
4643 	/**********************************/
4644 	if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4645 		/* nothing left on sendqueue.. consider done */
4646 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4647 			sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4648 			    asoc->peers_rwnd, 0, 0, a_rwnd);
4649 		}
4650 		asoc->peers_rwnd = a_rwnd;
4651 		if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4652 			/* SWS sender side engages */
4653 			asoc->peers_rwnd = 0;
4654 		}
4655 		/* clean up */
4656 		if ((asoc->stream_queue_cnt == 1) &&
4657 		    ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4658 		    (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4659 		    (asoc->locked_on_sending)
4660 		    ) {
4661 			struct sctp_stream_queue_pending *sp;
4662 
4663 			/*
4664 			 * We may be in a state where all data got across but
4665 			 * we cannot write more due to a shutdown... we abort
4666 			 * since the user did not indicate EOR in this case.
4667 			 */
4668 			sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4669 			    sctp_streamhead);
4670 			if ((sp) && (sp->length == 0)) {
4671 				asoc->locked_on_sending = NULL;
4672 				if (sp->msg_is_complete) {
4673 					asoc->stream_queue_cnt--;
4674 				} else {
4675 					asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4676 					asoc->stream_queue_cnt--;
4677 				}
4678 			}
4679 		}
4680 		if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4681 		    (asoc->stream_queue_cnt == 0)) {
4682 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4683 				/* Need to abort here */
4684 				struct mbuf *op_err;
4685 
4686 		abort_out_now:
4687 				*abort_now = 1;
4688 				/* XXX */
4689 				op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4690 				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4691 				sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4692 				return;
4693 			} else {
4694 				struct sctp_nets *netp;
4695 
4696 				if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4697 				    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4698 					SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4699 				}
4700 				SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4701 				SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4702 				sctp_stop_timers_for_shutdown(stcb);
4703 				if (asoc->alternate) {
4704 					netp = asoc->alternate;
4705 				} else {
4706 					netp = asoc->primary_destination;
4707 				}
4708 				sctp_send_shutdown(stcb, netp);
4709 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4710 				    stcb->sctp_ep, stcb, netp);
4711 				sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4712 				    stcb->sctp_ep, stcb, netp);
4713 			}
4714 			return;
4715 		} else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4716 		    (asoc->stream_queue_cnt == 0)) {
4717 			struct sctp_nets *netp;
4718 
4719 			if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4720 				goto abort_out_now;
4721 			}
4722 			SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4723 			SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4724 			SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4725 			sctp_stop_timers_for_shutdown(stcb);
4726 			if (asoc->alternate) {
4727 				netp = asoc->alternate;
4728 			} else {
4729 				netp = asoc->primary_destination;
4730 			}
4731 			sctp_send_shutdown_ack(stcb, netp);
4732 			sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4733 			    stcb->sctp_ep, stcb, netp);
4734 			return;
4735 		}
4736 	}
4737 	/*
4738 	 * Now here we are going to recycle net_ack for a different use...
4739 	 * HEADS UP.
4740 	 */
4741 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4742 		net->net_ack = 0;
4743 	}
4744 
4745 	/*
4746 	 * CMT DAC algorithm: If the SACK DAC flag was 0, then no extra
4747 	 * marking needs to be done. Setting this_sack_lowest_newack to the
4748 	 * cum_ack will automatically ensure that.
4749 	 */
4750 	if ((asoc->sctp_cmt_on_off > 0) &&
4751 	    SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4752 	    (cmt_dac_flag == 0)) {
4753 		this_sack_lowest_newack = cum_ack;
4754 	}
4755 	if ((num_seg > 0) || (num_nr_seg > 0)) {
4756 		sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4757 		    biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4758 	}
4759 	/* JRS - Use the congestion control given in the CC module */
4760 	asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4761 
4762 	/* Now are we exiting loss recovery ? */
4763 	if (will_exit_fast_recovery) {
4764 		/* Ok, we must exit fast recovery */
4765 		asoc->fast_retran_loss_recovery = 0;
4766 	}
4767 	if ((asoc->sat_t3_loss_recovery) &&
4768 	    SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4769 		/* end satellite t3 loss recovery */
4770 		asoc->sat_t3_loss_recovery = 0;
4771 	}
4772 	/*
4773 	 * CMT Fast recovery
4774 	 */
4775 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4776 		if (net->will_exit_fast_recovery) {
4777 			/* Ok, we must exit fast recovery */
4778 			net->fast_retran_loss_recovery = 0;
4779 		}
4780 	}
4781 
4782 	/* Adjust and set the new rwnd value */
4783 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4784 		sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4785 		    asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4786 	}
4787 	asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4788 	    (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4789 	if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4790 		/* SWS sender side engages */
4791 		asoc->peers_rwnd = 0;
4792 	}
4793 	if (asoc->peers_rwnd > old_rwnd) {
4794 		win_probe_recovery = 1;
4795 	}
4796 	/*
4797 	 * Now we must set up so we have a timer up for anyone with
4798 	 * outstanding data.
4799 	 */
4800 	done_once = 0;
4801 again:
4802 	j = 0;
4803 	TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4804 		if (win_probe_recovery && (net->window_probe)) {
4805 			win_probe_recovered = 1;
4806 			/*-
4807 			 * Find the first chunk that was used for a
4808 			 * window probe and clear the event. Put it
4809 			 * back into the send queue as if it had not
4810 			 * been sent.
4811 			 */
4812 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4813 				if (tp1->window_probe) {
4814 					sctp_window_probe_recovery(stcb, asoc, tp1);
4815 					break;
4816 				}
4817 			}
4818 		}
4819 		if (net->flight_size) {
4820 			j++;
4821 			if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4822 				sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4823 				    stcb->sctp_ep, stcb, net);
4824 			}
4825 			if (net->window_probe) {
4826 				net->window_probe = 0;
4827 			}
4828 		} else {
4829 			if (net->window_probe) {
4830 				/*
4831 				 * For window probes we must ensure a
4832 				 * timer is still running there.
4833 				 */
4834 				if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4835 					sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4836 					    stcb->sctp_ep, stcb, net);
4837 
4838 				}
4839 			} else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4840 				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4841 				    stcb, net,
4842 				    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4843 			}
4844 		}
4845 	}
4846 	if ((j == 0) &&
4847 	    (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4848 	    (asoc->sent_queue_retran_cnt == 0) &&
4849 	    (win_probe_recovered == 0) &&
4850 	    (done_once == 0)) {
4851 		/*
4852 		 * huh, this should not happen unless all packets are
4853 		 * PR-SCTP and marked to skip of course.
4854 		 */
4855 		if (sctp_fs_audit(asoc)) {
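			/*
			 * The audit found the flight-size accounting to be
			 * inconsistent; rebuild it from scratch by walking
			 * the sent queue below.
			 */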
4856 			TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4857 				net->flight_size = 0;
4858 			}
4859 			asoc->total_flight = 0;
4860 			asoc->total_flight_count = 0;
4861 			asoc->sent_queue_retran_cnt = 0;
4862 			TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4863 				if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4864 					sctp_flight_size_increase(tp1);
4865 					sctp_total_flight_increase(stcb, tp1);
4866 				} else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4867 					sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4868 				}
4869 			}
4870 		}
4871 		done_once = 1;
4872 		goto again;
4873 	}
4874 	/*********************************************/
4875 	/* Here we perform PR-SCTP procedures        */
4876 	/* (section 4.2)                             */
4877 	/*********************************************/
4878 	/* C1. update advancedPeerAckPoint */
4879 	if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4880 		asoc->advanced_peer_ack_point = cum_ack;
4881 	}
4882 	/* C2. try to further move advancedPeerAckPoint ahead */
4883 	if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4884 		struct sctp_tmit_chunk *lchk;
4885 		uint32_t old_adv_peer_ack_point;
4886 
4887 		old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4888 		lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4889 		/* C3. See if we need to send a Fwd-TSN */
4890 		if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4891 			/*
4892 			 * ISSUE with ECN, see FWD-TSN processing.
4893 			 */
4894 			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4895 				sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4896 				    0xee, cum_ack, asoc->advanced_peer_ack_point,
4897 				    old_adv_peer_ack_point);
4898 			}
4899 			if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4900 				send_forward_tsn(stcb, asoc);
4901 			} else if (lchk) {
4902 				/* try to FR fwd-tsn's that get lost too */
4903 				if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4904 					send_forward_tsn(stcb, asoc);
4905 				}
4906 			}
4907 		}
4908 		if (lchk) {
4909 			/* Assure a timer is up */
4910 			sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4911 			    stcb->sctp_ep, stcb, lchk->whoTo);
4912 		}
4913 	}
4914 	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4915 		sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4916 		    a_rwnd,
4917 		    stcb->asoc.peers_rwnd,
4918 		    stcb->asoc.total_flight,
4919 		    stcb->asoc.total_output_queue_size);
4920 	}
4921 }
4922 
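/*
 * Process the cumulative TSN ack carried in a SHUTDOWN chunk. It is handled
 * like a SACK containing no gap-ack blocks, with a_rwnd chosen so that the
 * peer's advertised window does not change.
 */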
4923 void
4924 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4925 {
4926 	/* Copy cum-ack */
4927 	uint32_t cum_ack, a_rwnd;
4928 
4929 	cum_ack = ntohl(cp->cumulative_tsn_ack);
4930 	/* Arrange so a_rwnd does NOT change */
4931 	a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4932 
4933 	/* Now call the express sack handling */
4934 	sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4935 }
4936 
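/*
 * After a FWD-TSN has advanced last_sequence_delivered for a stream, push
 * out of the stream's re-ordering queue everything that is now deliverable:
 * first anything at or below the new sequence number, then any chunks that
 * have become in-order.
 */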
4937 static void
4938 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4939     struct sctp_stream_in *strmin)
4940 {
4941 	struct sctp_queued_to_read *ctl, *nctl;
4942 	struct sctp_association *asoc;
4943 	uint16_t tt;
4944 
4945 	asoc = &stcb->asoc;
4946 	tt = strmin->last_sequence_delivered;
4947 	/*
4948 	 * First deliver anything prior to and including the stream
4949 	 * sequence number that came in.
4950 	 */
4951 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4952 		if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4953 			/* this is deliverable now */
4954 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4955 			/* subtract pending on streams */
4956 			asoc->size_on_all_streams -= ctl->length;
4957 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4958 			/* deliver it to at least the delivery-q */
4959 			if (stcb->sctp_socket) {
4960 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4961 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4962 				    ctl,
4963 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4964 			}
4965 		} else {
4966 			/* no more delivery now. */
4967 			break;
4968 		}
4969 	}
4970 	/*
4971 	 * Now we must deliver things in the queue the normal way, if any
4972 	 * are now ready.
4973 	 */
4974 	tt = strmin->last_sequence_delivered + 1;
4975 	TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4976 		if (tt == ctl->sinfo_ssn) {
4977 			/* this is deliverable now */
4978 			TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4979 			/* subtract pending on streams */
4980 			asoc->size_on_all_streams -= ctl->length;
4981 			sctp_ucount_decr(asoc->cnt_on_all_streams);
4982 			/* deliver it to at least the delivery-q */
4983 			strmin->last_sequence_delivered = ctl->sinfo_ssn;
4984 			if (stcb->sctp_socket) {
4985 				sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4986 				sctp_add_to_readq(stcb->sctp_ep, stcb,
4987 				    ctl,
4988 				    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4989 
4990 			}
4991 			tt = strmin->last_sequence_delivered + 1;
4992 		} else {
4993 			break;
4994 		}
4995 	}
4996 }
4997 
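/*
 * Drop from the reassembly queue all fragments belonging to the given
 * ordered stream/sequence pair that a FWD-TSN is skipping, advancing the
 * stream's last_sequence_delivered as needed.
 */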
4998 static void
4999 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5000     struct sctp_association *asoc,
5001     uint16_t stream, uint16_t seq)
5002 {
5003 	struct sctp_tmit_chunk *chk, *nchk;
5004 
5005 	/* For each one on here see if we need to toss it */
5006 	/*
5007 	 * For now, large messages held on the reasmqueue that are complete
5008 	 * will be tossed too. We could in theory do more work: spin
5009 	 * through, stop after dumping one msg (i.e. on seeing the start of
5010 	 * a new msg at the head), and call the delivery function to see if
5011 	 * it can be delivered... But for now we just dump everything on
5012 	 * the queue.
5013 	 */
5014 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5015 		/*
5016 		 * Do not toss it if it is on a different stream or marked
5017 		 * for unordered delivery, in which case the stream sequence
5018 		 * number has no meaning.
5019 		 */
5020 		if ((chk->rec.data.stream_number != stream) ||
5021 		    ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5022 			continue;
5023 		}
5024 		if (chk->rec.data.stream_seq == seq) {
5025 			/* It needs to be tossed */
5026 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5027 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5028 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5029 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5030 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5031 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5032 			}
5033 			asoc->size_on_reasm_queue -= chk->send_size;
5034 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5035 
5036 			/* Clear up any stream problem */
5037 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5038 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5039 				/*
5040 				 * We must move this stream's sequence
5041 				 * number forward if the chunk being
5042 				 * skipped is not unordered. There is a
5043 				 * chance that, if the peer does not
5044 				 * include the last fragment in its FWD-TSN,
5045 				 * we WILL have a problem here, since we
5046 				 * would have a partial chunk in the queue
5047 				 * that may not be deliverable. Also, if a
5048 				 * partial delivery API has started, the user
5049 				 * may get a partial chunk, with the next read
5050 				 * returning a new chunk... really ugly, but I
5051 				 * see no way around it! Maybe a notify??
5052 				 */
5053 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5054 			}
5055 			if (chk->data) {
5056 				sctp_m_freem(chk->data);
5057 				chk->data = NULL;
5058 			}
5059 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5060 		} else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5061 			/*
5062 			 * If the stream_seq is greater than the one being
5063 			 * purged, we are done.
5064 			 */
5065 			break;
5066 		}
5067 	}
5068 }
5069 
5070 
5071 void
5072 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5073     struct sctp_forward_tsn_chunk *fwd,
5074     int *abort_flag, struct mbuf *m, int offset)
5075 {
5076 	/* The pr-sctp fwd tsn */
5077 	/*
5078 	 * Here we will perform all the data receiver side steps for
5079 	 * processing FwdTSN, as required by the PR-SCTP draft.
5080 	 *
5081 	 * Assume we get FwdTSN(x):
5082 	 * 1) update local cumTSN to x
5083 	 * 2) try to further advance cumTSN to x + others we have
5084 	 * 3) examine and update re-ordering queue on pr-in-streams
5085 	 * 4) clean up re-assembly queue
5086 	 * 5) send a SACK to report where we are
5087 	 */
5088 	struct sctp_association *asoc;
5089 	uint32_t new_cum_tsn, gap;
5090 	unsigned int i, fwd_sz, m_size;
5091 	uint32_t str_seq;
5092 	struct sctp_stream_in *strm;
5093 	struct sctp_tmit_chunk *chk, *nchk;
5094 	struct sctp_queued_to_read *ctl, *sv;
5095 
5096 	asoc = &stcb->asoc;
5097 	if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5098 		SCTPDBG(SCTP_DEBUG_INDATA1,
5099 		    "Bad size too small/big fwd-tsn\n");
5100 		return;
5101 	}
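	/* Number of TSNs the mapping array can represent (bytes * 8 bits). */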
5102 	m_size = (stcb->asoc.mapping_array_size << 3);
5103 	/*************************************************************/
5104 	/* 1. Here we update local cumTSN and shift the bitmap array */
5105 	/*************************************************************/
5106 	new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5107 
5108 	if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5109 		/* Already got there ... */
5110 		return;
5111 	}
5112 	/*
5113 	 * now we know the new TSN is more advanced, let's find the actual
5114 	 * gap
5115 	 */
5116 	SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5117 	asoc->cumulative_tsn = new_cum_tsn;
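	/*
	 * If the new cumulative TSN lies beyond what the mapping array
	 * covers, either the advance exceeds the rwnd we could have
	 * granted (treated as an attack and aborted) or both mapping
	 * arrays are simply reset to start at the new cumulative TSN.
	 */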
5118 	if (gap >= m_size) {
5119 		if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5120 			struct mbuf *op_err;
5121 			char msg[SCTP_DIAG_INFO_LEN];
5122 
5123 			/*
5124 			 * Out of range (in terms of single-byte chunks in
5125 			 * the rwnd we give out). This must be an attacker.
5126 			 */
5127 			*abort_flag = 1;
5128 			snprintf(msg, sizeof(msg),
5129 			    "New cum ack %8.8x too high, highest TSN %8.8x",
5130 			    new_cum_tsn, asoc->highest_tsn_inside_map);
5131 			op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5132 			stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5133 			sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5134 			return;
5135 		}
5136 		SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5137 
5138 		memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5139 		asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5140 		asoc->highest_tsn_inside_map = new_cum_tsn;
5141 
5142 		memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5143 		asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5144 
5145 		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5146 			sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5147 		}
5148 	} else {
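		/*
		 * The gap fits within the mapping array: mark every TSN up
		 * to the new cumulative TSN as received in the
		 * non-renegable map.
		 */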
5149 		SCTP_TCB_LOCK_ASSERT(stcb);
5150 		for (i = 0; i <= gap; i++) {
5151 			if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5152 			    !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5153 				SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5154 				if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5155 					asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5156 				}
5157 			}
5158 		}
5159 	}
5160 	/*************************************************************/
5161 	/* 2. Clear up re-assembly queue                             */
5162 	/*************************************************************/
5163 	/*
5164 	 * First service it if the pd-api is up, just in case we can
5165 	 * progress it forward.
5166 	 */
5167 	if (asoc->fragmented_delivery_inprogress) {
5168 		sctp_service_reassembly(stcb, asoc);
5169 	}
5170 	/* For each one on here see if we need to toss it */
5171 	/*
5172 	 * For now, large messages held on the reasmqueue that are complete
5173 	 * will be tossed too. We could in theory do more work: spin
5174 	 * through, stop after dumping one msg (i.e. on seeing the start of
5175 	 * a new msg at the head), and call the delivery function to see if
5176 	 * it can be delivered... But for now we just dump everything on
5177 	 * the queue.
5178 	 */
5179 	TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5180 		if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5181 			/* It needs to be tossed */
5182 			TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5183 			if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5184 				asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5185 				asoc->str_of_pdapi = chk->rec.data.stream_number;
5186 				asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5187 				asoc->fragment_flags = chk->rec.data.rcv_flags;
5188 			}
5189 			asoc->size_on_reasm_queue -= chk->send_size;
5190 			sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5191 
5192 			/* Clear up any stream problem */
5193 			if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5194 			    SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5195 				/*
5196 				 * We must move this stream's sequence
5197 				 * number forward if the chunk being
5198 				 * skipped is not unordered. There is a
5199 				 * chance that, if the peer does not
5200 				 * include the last fragment in its FWD-TSN,
5201 				 * we WILL have a problem here, since we
5202 				 * would have a partial chunk in the queue
5203 				 * that may not be deliverable. Also, if a
5204 				 * partial delivery API has started, the user
5205 				 * may get a partial chunk, with the next read
5206 				 * returning a new chunk... really ugly, but I
5207 				 * see no way around it! Maybe a notify??
5208 				 */
5209 				asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5210 			}
5211 			if (chk->data) {
5212 				sctp_m_freem(chk->data);
5213 				chk->data = NULL;
5214 			}
5215 			sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5216 		} else {
5217 			/*
5218 			 * Ok we have gone beyond the end of the fwd-tsn's
5219 			 * mark.
5220 			 */
5221 			break;
5222 		}
5223 	}
5224 	/*******************************************************/
5225 	/* 3. Update the PR-stream re-ordering queues and fix  */
5226 	/* delivery issues as needed.                          */
5227 	/*******************************************************/
5228 	fwd_sz -= sizeof(*fwd);
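	/*
	 * Any remaining bytes in the chunk are a list of (stream, sequence)
	 * pairs giving, for each listed stream, the last ordered sequence
	 * number being skipped.
	 */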
5229 	if (m && fwd_sz) {
5230 		/* New method. */
5231 		unsigned int num_str;
5232 		struct sctp_strseq *stseq, strseqbuf;
5233 
5234 		offset += sizeof(*fwd);
5235 
5236 		SCTP_INP_READ_LOCK(stcb->sctp_ep);
5237 		num_str = fwd_sz / sizeof(struct sctp_strseq);
5238 		for (i = 0; i < num_str; i++) {
5239 			uint16_t st;
5240 
5241 			stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5242 			    sizeof(struct sctp_strseq),
5243 			    (uint8_t *) & strseqbuf);
5244 			offset += sizeof(struct sctp_strseq);
5245 			if (stseq == NULL) {
5246 				break;
5247 			}
5248 			/* Convert to host byte order, in place. */
5249 			st = ntohs(stseq->stream);
5250 			stseq->stream = st;
5251 			st = ntohs(stseq->sequence);
5252 			stseq->sequence = st;
5253 
5254 			/* now process */
5255 
5256 			/*
5257 			 * Ok, we now look for the stream/seq on the read
5258 			 * queue where it is not all delivered. If we find it,
5259 			 * we transmute the read entry into a PDI_ABORTED.
5260 			 */
5261 			if (stseq->stream >= asoc->streamincnt) {
5262 				/* screwed up streams, stop!  */
5263 				break;
5264 			}
5265 			if ((asoc->str_of_pdapi == stseq->stream) &&
5266 			    (asoc->ssn_of_pdapi == stseq->sequence)) {
5267 				/*
5268 				 * If this is the one we were partially
5269 				 * delivering now then we no longer are.
5270 				 * Note this will change with the reassembly
5271 				 * re-write.
5272 				 */
5273 				asoc->fragmented_delivery_inprogress = 0;
5274 			}
5275 			sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5276 			TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5277 				if ((ctl->sinfo_stream == stseq->stream) &&
5278 				    (ctl->sinfo_ssn == stseq->sequence)) {
5279 					str_seq = (stseq->stream << 16) | stseq->sequence;
5280 					ctl->end_added = 1;
5281 					ctl->pdapi_aborted = 1;
5282 					sv = stcb->asoc.control_pdapi;
5283 					stcb->asoc.control_pdapi = ctl;
5284 					sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5285 					    stcb,
5286 					    SCTP_PARTIAL_DELIVERY_ABORTED,
5287 					    (void *)&str_seq,
5288 					    SCTP_SO_NOT_LOCKED);
5289 					stcb->asoc.control_pdapi = sv;
5290 					break;
5291 				} else if ((ctl->sinfo_stream == stseq->stream) &&
5292 				    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5293 					/* We are past our victim SSN */
5294 					break;
5295 				}
5296 			}
5297 			strm = &asoc->strmin[stseq->stream];
5298 			if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5299 				/* Update the sequence number */
5300 				strm->last_sequence_delivered = stseq->sequence;
5301 			}
5302 			/* now kick the stream the new way */
5303 			/* sa_ignore NO_NULL_CHK */
5304 			sctp_kick_prsctp_reorder_queue(stcb, strm);
5305 		}
5306 		SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5307 	}
5308 	/*
5309 	 * Now slide the mapping arrays forward.
5310 	 */
5311 	sctp_slide_mapping_arrays(stcb);
5312 
5313 	if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5314 		/* now lets kick out and check for more fragmented delivery */
5315 		/* sa_ignore NO_NULL_CHK */
5316 		sctp_deliver_reasm_check(stcb, &stcb->asoc);
5317 	}
5318 }
5319