Lines Matching +full:shutdown +full:- +full:ack
1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
6 * Copyright (c) 2001-2003 Intel Corp.
15 * lksctp developers <linux-sctp@vger.kernel.org>
65 list_add(&ch->list, &q->out_chunk_list); in sctp_outq_head_data()
66 q->out_qlen += ch->skb->len; in sctp_outq_head_data()
69 oute = SCTP_SO(&q->asoc->stream, stream)->ext; in sctp_outq_head_data()
70 list_add(&ch->stream_list, &oute->outq); in sctp_outq_head_data()
76 return q->sched->dequeue(q); in sctp_outq_dequeue_data()
86 list_add_tail(&ch->list, &q->out_chunk_list); in sctp_outq_tail_data()
87 q->out_qlen += ch->skb->len; in sctp_outq_tail_data()
90 oute = SCTP_SO(&q->asoc->stream, stream)->ext; in sctp_outq_tail_data()
91 list_add_tail(&ch->stream_list, &oute->outq); in sctp_outq_tail_data()
95 * SFR-CACC algorithm:
110 * SFR-CACC algorithm:
120 (transport && !transport->cacc.cacc_saw_newack)) in sctp_cacc_skip_3_1_f()
126 * SFR-CACC algorithm:
136 if (!primary->cacc.cycling_changeover) { in sctp_cacc_skip_3_1()
147 * SFR-CACC algorithm:
155 if (primary->cacc.cycling_changeover && in sctp_cacc_skip_3_2()
156 TSN_lt(tsn, primary->cacc.next_tsn_at_change)) in sctp_cacc_skip_3_2()
162 * SFR-CACC algorithm:
165 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
180 if (primary->cacc.changeover_active && in sctp_cacc_skip()
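The four sctp_cacc_skip_* helpers matched above combine into a single decision: may this SACK count a given TSN as missing on this path while a destination changeover is in flight? Below is a minimal standalone sketch of that combined rule, with the kernel's transport structure flattened to the fields the decision actually reads; field names follow the source, but this is an illustration, not the kernel code.

#include <stdint.h>

struct cacc_state {
    int changeover_active;        /* a destination changeover is in progress */
    int cycling_changeover;       /* changed back to an earlier destination */
    int cacc_saw_newack;          /* this SACK newly acked data on this path */
    uint32_t next_tsn_at_change;  /* first TSN sent after the changeover */
};

/* serial-number comparison, as the kernel's TSN_lt() */
static int tsn_lt(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

/* Nonzero: do NOT increment the missing report for `tsn` on this path. */
static int cacc_skip(const struct cacc_state *primary,
                     const struct cacc_state *transport,
                     int is_primary, int count_of_newacks, uint32_t tsn)
{
    if (!primary->changeover_active)
        return 0;
    if (!primary->cycling_changeover) {
        /* 3.1 d): two or more paths saw new acks and this is not primary */
        if (count_of_newacks >= 2 && !is_primary)
            return 1;
        /* 3.1 f): fewer than two, and this path saw no new ack */
        if (count_of_newacks < 2 &&
            transport && !transport->cacc_saw_newack)
            return 1;
    }
    /* 3.2: while cycling, skip TSNs sent before the changeover point */
    if (primary->cycling_changeover &&
        tsn_lt(tsn, primary->next_tsn_at_change))
        return 1;
    return 0;
}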
195 q->asoc = asoc; in sctp_outq_init()
196 INIT_LIST_HEAD(&q->out_chunk_list); in sctp_outq_init()
197 INIT_LIST_HEAD(&q->control_chunk_list); in sctp_outq_init()
198 INIT_LIST_HEAD(&q->retransmit); in sctp_outq_init()
199 INIT_LIST_HEAD(&q->sacked); in sctp_outq_init()
200 INIT_LIST_HEAD(&q->abandoned); in sctp_outq_init()
201 sctp_sched_set_sched(asoc, sctp_sk(asoc->base.sk)->default_ss); in sctp_outq_init()
213 list_for_each_entry(transport, &q->asoc->peer.transport_addr_list, in __sctp_outq_teardown()
215 while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { in __sctp_outq_teardown()
219 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
225 list_for_each_safe(lchunk, temp, &q->sacked) { in __sctp_outq_teardown()
229 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
234 list_for_each_safe(lchunk, temp, &q->retransmit) { in __sctp_outq_teardown()
238 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
243 list_for_each_safe(lchunk, temp, &q->abandoned) { in __sctp_outq_teardown()
247 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
256 sctp_chunk_fail(chunk, q->error); in __sctp_outq_teardown()
261 list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) { in __sctp_outq_teardown()
262 list_del_init(&chunk->list); in __sctp_outq_teardown()
270 sctp_outq_init(q->asoc, q); in sctp_outq_teardown()
283 struct net *net = q->asoc->base.net; in sctp_outq_tail()
286 chunk && chunk->chunk_hdr ? in sctp_outq_tail()
287 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : in sctp_outq_tail()
295 __func__, q, chunk, chunk && chunk->chunk_hdr ? in sctp_outq_tail()
296 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : in sctp_outq_tail()
300 if (chunk->asoc->peer.prsctp_capable && in sctp_outq_tail()
301 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) in sctp_outq_tail()
302 chunk->asoc->sent_cnt_removable++; in sctp_outq_tail()
303 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) in sctp_outq_tail()
308 list_add_tail(&chunk->list, &q->control_chunk_list); in sctp_outq_tail()
312 if (!q->cork) in sctp_outq_tail()
327 ntsn = ntohl(nchunk->subh.data_hdr->tsn); in sctp_insert_list()
331 ltsn = ntohl(lchunk->subh.data_hdr->tsn); in sctp_insert_list()
333 list_add(new, pos->prev); in sctp_insert_list()
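sctp_insert_list() keeps the retransmit and abandoned queues sorted by TSN; the matched line is the insert-before-first-larger splice. A self-contained sketch of the same walk over a plain singly linked list, using serial-number comparison so TSN wraparound at 2^32 stays correct:

#include <stdint.h>

struct tsn_node {
    uint32_t tsn;
    struct tsn_node *next;
};

static int tsn_lt(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

static void tsn_insert_sorted(struct tsn_node **head, struct tsn_node *node)
{
    struct tsn_node **pos = head;

    /* walk past every entry whose TSN is not larger than the new one */
    while (*pos && !tsn_lt(node->tsn, (*pos)->tsn))
        pos = &(*pos)->next;
    node->next = *pos;      /* splice in before the first larger TSN */
    *pos = node;
}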
351 if (!chk->msg->abandoned && in sctp_prsctp_prune_sent()
352 (!SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || in sctp_prsctp_prune_sent()
353 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)) in sctp_prsctp_prune_sent()
356 chk->msg->abandoned = 1; in sctp_prsctp_prune_sent()
357 list_del_init(&chk->transmitted_list); in sctp_prsctp_prune_sent()
358 sctp_insert_list(&asoc->outqueue.abandoned, in sctp_prsctp_prune_sent()
359 &chk->transmitted_list); in sctp_prsctp_prune_sent()
361 streamout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream); in sctp_prsctp_prune_sent()
362 asoc->sent_cnt_removable--; in sctp_prsctp_prune_sent()
363 asoc->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; in sctp_prsctp_prune_sent()
364 streamout->ext->abandoned_sent[SCTP_PR_INDEX(PRIO)]++; in sctp_prsctp_prune_sent()
366 if (queue != &asoc->outqueue.retransmit && in sctp_prsctp_prune_sent()
367 !chk->tsn_gap_acked) { in sctp_prsctp_prune_sent()
368 if (chk->transport) in sctp_prsctp_prune_sent()
369 chk->transport->flight_size -= in sctp_prsctp_prune_sent()
371 asoc->outqueue.outstanding_bytes -= sctp_data_size(chk); in sctp_prsctp_prune_sent()
374 msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk); in sctp_prsctp_prune_sent()
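For SCTP_PR_SCTP_PRIO the sinfo_timetolive value acts as a priority, a smaller value meaning higher priority, so the loop above abandons already-sent chunks whose value is strictly larger (lower priority) than the incoming message's until enough bytes are freed. A reduced sketch of just that test; prsctp_prunable() and the flattened struct are invented names:

#include <stdint.h>

struct sent_chunk {
    int      msg_abandoned;   /* owning message already given up */
    int      prio_enabled;    /* SCTP_PR_PRIO_ENABLED(sinfo_flags) */
    uint32_t priority;        /* sinfo_timetolive reused as priority */
};

static int prsctp_prunable(const struct sent_chunk *chk, uint32_t new_prio)
{
    if (chk->msg_abandoned)
        return 1;             /* already abandoned: finish moving it */
    return chk->prio_enabled && chk->priority > new_prio;
}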
385 struct sctp_outq *q = &asoc->outqueue; in sctp_prsctp_prune_unsent()
389 q->sched->unsched_all(&asoc->stream); in sctp_prsctp_prune_unsent()
391 list_for_each_entry_safe(chk, temp, &q->out_chunk_list, list) { in sctp_prsctp_prune_unsent()
392 if (!chk->msg->abandoned && in sctp_prsctp_prune_unsent()
393 (!(chk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG) || in sctp_prsctp_prune_unsent()
394 !SCTP_PR_PRIO_ENABLED(chk->sinfo.sinfo_flags) || in sctp_prsctp_prune_unsent()
395 chk->sinfo.sinfo_timetolive <= sinfo->sinfo_timetolive)) in sctp_prsctp_prune_unsent()
398 chk->msg->abandoned = 1; in sctp_prsctp_prune_unsent()
400 asoc->sent_cnt_removable--; in sctp_prsctp_prune_unsent()
401 asoc->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; in sctp_prsctp_prune_unsent()
403 sout = SCTP_SO(&asoc->stream, chk->sinfo.sinfo_stream); in sctp_prsctp_prune_unsent()
404 sout->ext->abandoned_unsent[SCTP_PR_INDEX(PRIO)]++; in sctp_prsctp_prune_unsent()
407 if (asoc->stream.out_curr == sout && in sctp_prsctp_prune_unsent()
408 list_is_last(&chk->frag_list, &chk->msg->chunks)) in sctp_prsctp_prune_unsent()
409 asoc->stream.out_curr = NULL; in sctp_prsctp_prune_unsent()
411 msg_len -= chk->skb->truesize + sizeof(struct sctp_chunk); in sctp_prsctp_prune_unsent()
417 q->sched->sched_all(&asoc->stream); in sctp_prsctp_prune_unsent()
428 if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable) in sctp_prsctp_prune()
432 &asoc->outqueue.retransmit, in sctp_prsctp_prune()
437 list_for_each_entry(transport, &asoc->peer.transport_addr_list, in sctp_prsctp_prune()
440 &transport->transmitted, in sctp_prsctp_prune()
458 list_for_each_safe(lchunk, ltemp, &transport->transmitted) { in sctp_retransmit_mark()
465 sctp_insert_list(&q->abandoned, lchunk); in sctp_retransmit_mark()
472 if (!chunk->tsn_gap_acked) { in sctp_retransmit_mark()
473 if (chunk->transport) in sctp_retransmit_mark()
474 chunk->transport->flight_size -= in sctp_retransmit_mark()
476 q->outstanding_bytes -= sctp_data_size(chunk); in sctp_retransmit_mark()
477 q->asoc->peer.rwnd += sctp_data_size(chunk); in sctp_retransmit_mark()
487 (chunk->fast_retransmit == SCTP_NEED_FRTX)) || in sctp_retransmit_mark()
488 (reason != SCTP_RTXR_FAST_RTX && !chunk->tsn_gap_acked)) { in sctp_retransmit_mark()
492 * retransmission (via either T3-rtx timer expiration in sctp_retransmit_mark()
497 q->asoc->peer.rwnd += sctp_data_size(chunk); in sctp_retransmit_mark()
498 q->outstanding_bytes -= sctp_data_size(chunk); in sctp_retransmit_mark()
499 if (chunk->transport) in sctp_retransmit_mark()
500 transport->flight_size -= sctp_data_size(chunk); in sctp_retransmit_mark()
502 /* sctpimpguide-05 Section 2.8.2 in sctp_retransmit_mark()
503 * M5) If a T3-rtx timer expires, the in sctp_retransmit_mark()
507 chunk->tsn_missing_report = 0; in sctp_retransmit_mark()
515 if (chunk->rtt_in_progress) { in sctp_retransmit_mark()
516 chunk->rtt_in_progress = 0; in sctp_retransmit_mark()
517 transport->rto_pending = 0; in sctp_retransmit_mark()
524 sctp_insert_list(&q->retransmit, lchunk); in sctp_retransmit_mark()
530 transport->cwnd, transport->ssthresh, transport->flight_size, in sctp_retransmit_mark()
531 transport->partial_bytes_acked); in sctp_retransmit_mark()
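When sctp_retransmit_mark() pulls a chunk back, the lines above undo its in-flight accounting: unless the chunk was gap-acked, its bytes leave the per-path flight size and the association's outstanding counter, and the peer's receive-window credit is restored. A sketch of that bookkeeping with illustrative field names:

#include <stddef.h>

struct flight_acct {
    size_t flight_size;        /* per-transport bytes in flight */
    size_t outstanding_bytes;  /* per-association bytes in flight */
    size_t peer_rwnd;          /* our running view of the peer's window */
};

static void unaccount_for_rtx(struct flight_acct *a, size_t data_size,
                              int tsn_gap_acked)
{
    if (tsn_gap_acked)
        return;                /* still counted as received by the peer */
    a->flight_size       -= data_size;
    a->outstanding_bytes -= data_size;
    a->peer_rwnd         += data_size;
}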
540 struct net *net = q->asoc->base.net; in sctp_retransmit()
546 /* Update the retran path if the T3-rtx timer has expired for in sctp_retransmit()
549 if (transport == transport->asoc->peer.retran_path) in sctp_retransmit()
550 sctp_assoc_update_retran_path(transport->asoc); in sctp_retransmit()
551 transport->asoc->rtx_data_chunks += in sctp_retransmit()
552 transport->asoc->unack_data; in sctp_retransmit()
553 if (transport->pl.state == SCTP_PL_COMPLETE && in sctp_retransmit()
554 transport->asoc->unack_data) in sctp_retransmit()
560 q->fast_rtx = 1; in sctp_retransmit()
567 transport->asoc->init_retries++; in sctp_retransmit()
575 /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, in sctp_retransmit()
576 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by in sctp_retransmit()
577 * following the procedures outlined in C1 - C5. in sctp_retransmit()
580 q->asoc->stream.si->generate_ftsn(q, q->asoc->ctsn_ack_point); in sctp_retransmit()
594 * We assume that pkt->transport has already been set.
601 struct sctp_transport *transport = pkt->transport; in __sctp_outq_flush_rtx()
610 lqueue = &q->retransmit; in __sctp_outq_flush_rtx()
611 fast_rtx = q->fast_rtx; in __sctp_outq_flush_rtx()
613 /* This loop handles time-out retransmissions, fast retransmissions, in __sctp_outq_flush_rtx()
616 * RFC 2960 6.3.3 Handle T3-rtx Expiration in __sctp_outq_flush_rtx()
620 * T3-rtx has expired will fit into a single packet, subject in __sctp_outq_flush_rtx()
639 list_del_init(&chunk->transmitted_list); in __sctp_outq_flush_rtx()
640 sctp_insert_list(&q->abandoned, in __sctp_outq_flush_rtx()
641 &chunk->transmitted_list); in __sctp_outq_flush_rtx()
650 if (chunk->tsn_gap_acked) { in __sctp_outq_flush_rtx()
651 list_move_tail(&chunk->transmitted_list, in __sctp_outq_flush_rtx()
652 &transport->transmitted); in __sctp_outq_flush_rtx()
656 /* If we are doing fast retransmit, ignore non-fast-retransmit in __sctp_outq_flush_rtx()
659 if (fast_rtx && !chunk->fast_retransmit) in __sctp_outq_flush_rtx()
668 if (!pkt->has_data && !pkt->has_cookie_echo) { in __sctp_outq_flush_rtx()
716 list_move_tail(&chunk->transmitted_list, in __sctp_outq_flush_rtx()
717 &transport->transmitted); in __sctp_outq_flush_rtx()
722 if (chunk->fast_retransmit == SCTP_NEED_FRTX) in __sctp_outq_flush_rtx()
723 chunk->fast_retransmit = SCTP_DONT_FRTX; in __sctp_outq_flush_rtx()
725 q->asoc->stats.rtxchunks++; in __sctp_outq_flush_rtx()
744 if (chunk1->fast_retransmit == SCTP_NEED_FRTX) in __sctp_outq_flush_rtx()
745 chunk1->fast_retransmit = SCTP_DONT_FRTX; in __sctp_outq_flush_rtx()
753 q->fast_rtx = 0; in __sctp_outq_flush_rtx()
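The loop above triages every chunk on the retransmit queue before (re)sending. A condensed, hypothetical classifier of those branches: abandoned chunks move to the abandoned list, chunks that were gap-acked while waiting return to the transmitted list, and a fast-retransmit pass only sends chunks flagged SCTP_NEED_FRTX, which the send then demotes to SCTP_DONT_FRTX so a chunk is fast-retransmitted at most once:

enum rtx_action { RTX_ABANDON, RTX_BACK_TO_TRANSMITTED, RTX_SKIP, RTX_SEND };

static enum rtx_action rtx_triage(int abandoned, int tsn_gap_acked,
                                  int fast_rtx_pass, int need_frtx)
{
    if (abandoned)
        return RTX_ABANDON;
    if (tsn_gap_acked)
        return RTX_BACK_TO_TRANSMITTED;  /* acked meanwhile; nothing to send */
    if (fast_rtx_pass && !need_frtx)
        return RTX_SKIP;                 /* the timeout pass will handle it */
    return RTX_SEND;
}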
761 if (q->cork) in sctp_outq_uncork()
762 q->cork = 0; in sctp_outq_uncork()
770 const struct sctp_association *asoc = transport->asoc; in sctp_packet_singleton()
771 const __u16 sport = asoc->base.bind_addr.port; in sctp_packet_singleton()
772 const __u16 dport = asoc->peer.port; in sctp_packet_singleton()
773 const __u32 vtag = asoc->peer.i.init_tag; in sctp_packet_singleton()
779 list_del_init(&chunk->list); in sctp_packet_singleton()
781 return -ENOMEM; in sctp_packet_singleton()
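sctp_packet_singleton() exists because some control chunks must travel alone: RFC 4960 section 6.10 forbids bundling INIT, INIT ACK and SHUTDOWN COMPLETE with any other chunk. A sketch of that classification, using the chunk type values RFC 4960 assigns:

#include <stdint.h>

enum {
    CID_INIT              = 1,
    CID_INIT_ACK          = 2,
    CID_SHUTDOWN_COMPLETE = 14,
};

static int must_be_singleton(uint8_t chunk_type)
{
    return chunk_type == CID_INIT ||
           chunk_type == CID_INIT_ACK ||
           chunk_type == CID_SHUTDOWN_COMPLETE;
}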
803 struct sctp_transport *new_transport = chunk->transport; in sctp_outq_select_transport()
816 if (ctx->transport && sctp_cmp_addr_exact(&chunk->dest, in sctp_outq_select_transport()
817 &ctx->transport->ipaddr)) in sctp_outq_select_transport()
818 new_transport = ctx->transport; in sctp_outq_select_transport()
820 new_transport = sctp_assoc_lookup_paddr(ctx->asoc, in sctp_outq_select_transport()
821 &chunk->dest); in sctp_outq_select_transport()
828 new_transport = ctx->asoc->peer.active_path; in sctp_outq_select_transport()
832 switch (new_transport->state) { in sctp_outq_select_transport()
836 /* If the chunk is Heartbeat or Heartbeat Ack, in sctp_outq_select_transport()
837 * send it to chunk->transport, even if it's in sctp_outq_select_transport()
842 * A HEARTBEAT ACK is always sent to the source IP in sctp_outq_select_transport()
844 * HEARTBEAT chunk to which this ack is responding. in sctp_outq_select_transport()
849 type = chunk->chunk_hdr->type; in sctp_outq_select_transport()
853 new_transport = ctx->asoc->peer.active_path; in sctp_outq_select_transport()
861 if (new_transport != ctx->transport) { in sctp_outq_select_transport()
862 ctx->transport = new_transport; in sctp_outq_select_transport()
863 ctx->packet = &ctx->transport->packet; in sctp_outq_select_transport()
865 if (list_empty(&ctx->transport->send_ready)) in sctp_outq_select_transport()
866 list_add_tail(&ctx->transport->send_ready, in sctp_outq_select_transport()
867 &ctx->transport_list); in sctp_outq_select_transport()
869 sctp_packet_config(ctx->packet, in sctp_outq_select_transport()
870 ctx->asoc->peer.i.init_tag, in sctp_outq_select_transport()
871 ctx->asoc->peer.ecn_capable); in sctp_outq_select_transport()
875 sctp_transport_burst_limited(ctx->transport); in sctp_outq_select_transport()
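A condensed sketch of the fallback order the lines above implement: prefer the chunk's cached transport, then a lookup on the chunk's destination address, then the association's active path; and if the chosen path is inactive or unconfirmed, only heartbeat-class traffic (HEARTBEAT, HEARTBEAT ACK, ASCONF family) may still use it, while everything else is diverted to the active path. The flattened input struct is invented for illustration:

struct transport;   /* opaque here */

struct select_in {
    struct transport *chunk_transport;  /* chunk->transport, may be NULL */
    struct transport *addr_match;       /* lookup by chunk->dest, may be NULL */
    struct transport *active_path;
    int chosen_path_usable;             /* not INACTIVE/UNCONFIRMED */
    int heartbeat_class;                /* HB, HB ACK, ASCONF chunk types */
};

static struct transport *select_transport(const struct select_in *in)
{
    struct transport *t = in->chunk_transport;

    if (!t)
        t = in->addr_match ? in->addr_match : in->active_path;
    if (!in->chosen_path_usable && !in->heartbeat_class)
        t = in->active_path;   /* don't send data down an unusable path */
    return t;
}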
885 list_for_each_entry_safe(chunk, tmp, &ctx->q->control_chunk_list, list) { in sctp_outq_flush_ctrl()
894 if (ctx->asoc->src_out_of_asoc_ok && in sctp_outq_flush_ctrl()
895 chunk->chunk_hdr->type != SCTP_CID_ASCONF) in sctp_outq_flush_ctrl()
898 list_del_init(&chunk->list); in sctp_outq_flush_ctrl()
905 switch (chunk->chunk_hdr->type) { in sctp_outq_flush_ctrl()
908 * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN in sctp_outq_flush_ctrl()
914 error = sctp_packet_singleton(ctx->transport, chunk, in sctp_outq_flush_ctrl()
915 ctx->gfp); in sctp_outq_flush_ctrl()
917 ctx->asoc->base.sk->sk_err = -error; in sctp_outq_flush_ctrl()
920 ctx->asoc->stats.octrlchunks++; in sctp_outq_flush_ctrl()
925 ctx->packet->vtag = ctx->asoc->c.my_vtag; in sctp_outq_flush_ctrl()
944 if (chunk->pmtu_probe) { in sctp_outq_flush_ctrl()
945 error = sctp_packet_singleton(ctx->transport, in sctp_outq_flush_ctrl()
946 chunk, ctx->gfp); in sctp_outq_flush_ctrl()
948 ctx->asoc->stats.octrlchunks++; in sctp_outq_flush_ctrl()
959 status = sctp_packet_transmit_chunk(ctx->packet, chunk, in sctp_outq_flush_ctrl()
960 one_packet, ctx->gfp); in sctp_outq_flush_ctrl()
963 list_add(&chunk->list, &ctx->q->control_chunk_list); in sctp_outq_flush_ctrl()
967 ctx->asoc->stats.octrlchunks++; in sctp_outq_flush_ctrl()
968 /* PR-SCTP C5) If a FORWARD TSN is sent, the in sctp_outq_flush_ctrl()
969 * sender MUST assure that at least one T3-rtx in sctp_outq_flush_ctrl()
972 if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN || in sctp_outq_flush_ctrl()
973 chunk->chunk_hdr->type == SCTP_CID_I_FWD_TSN) { in sctp_outq_flush_ctrl()
974 sctp_transport_reset_t3_rtx(ctx->transport); in sctp_outq_flush_ctrl()
975 ctx->transport->last_time_sent = jiffies; in sctp_outq_flush_ctrl()
978 if (chunk == ctx->asoc->strreset_chunk) in sctp_outq_flush_ctrl()
979 sctp_transport_reset_reconf_timer(ctx->transport); in sctp_outq_flush_ctrl()
996 if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED) in sctp_outq_flush_rtx()
999 if (ctx->transport != ctx->asoc->peer.retran_path) { in sctp_outq_flush_rtx()
1001 ctx->transport = ctx->asoc->peer.retran_path; in sctp_outq_flush_rtx()
1002 ctx->packet = &ctx->transport->packet; in sctp_outq_flush_rtx()
1004 if (list_empty(&ctx->transport->send_ready)) in sctp_outq_flush_rtx()
1005 list_add_tail(&ctx->transport->send_ready, in sctp_outq_flush_rtx()
1006 &ctx->transport_list); in sctp_outq_flush_rtx()
1008 sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag, in sctp_outq_flush_rtx()
1009 ctx->asoc->peer.ecn_capable); in sctp_outq_flush_rtx()
1012 error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout, in sctp_outq_flush_rtx()
1013 &start_timer, ctx->gfp); in sctp_outq_flush_rtx()
1015 ctx->asoc->base.sk->sk_err = -error; in sctp_outq_flush_rtx()
1018 sctp_transport_reset_t3_rtx(ctx->transport); in sctp_outq_flush_rtx()
1019 ctx->transport->last_time_sent = jiffies; in sctp_outq_flush_rtx()
1022 /* This can happen on COOKIE-ECHO resend. Only in sctp_outq_flush_rtx()
1023 * one chunk can get bundled with a COOKIE-ECHO. in sctp_outq_flush_rtx()
1025 if (ctx->packet->has_cookie_echo) in sctp_outq_flush_rtx()
1031 if (!list_empty(&ctx->q->retransmit)) in sctp_outq_flush_rtx()
1044 switch (ctx->asoc->state) { in sctp_outq_flush_data()
1046 /* Only allow bundling when this packet has a COOKIE-ECHO in sctp_outq_flush_data()
1049 if (!ctx->packet || !ctx->packet->has_cookie_echo) in sctp_outq_flush_data()
1071 if (!list_empty(&ctx->q->retransmit) && in sctp_outq_flush_data()
1080 if (ctx->transport) in sctp_outq_flush_data()
1081 sctp_transport_burst_limited(ctx->transport); in sctp_outq_flush_data()
1084 while ((chunk = sctp_outq_dequeue_data(ctx->q)) != NULL) { in sctp_outq_flush_data()
1085 __u32 sid = ntohs(chunk->subh.data_hdr->stream); in sctp_outq_flush_data()
1086 __u8 stream_state = SCTP_SO(&ctx->asoc->stream, sid)->state; in sctp_outq_flush_data()
1090 sctp_sched_dequeue_done(ctx->q, chunk); in sctp_outq_flush_data()
1097 sctp_outq_head_data(ctx->q, chunk); in sctp_outq_flush_data()
1103 pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p skb->users:%d\n", in sctp_outq_flush_data()
1104 __func__, ctx->q, chunk, chunk && chunk->chunk_hdr ? in sctp_outq_flush_data()
1105 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) : in sctp_outq_flush_data()
1106 "illegal chunk", ntohl(chunk->subh.data_hdr->tsn), in sctp_outq_flush_data()
1107 chunk->skb ? chunk->skb->head : NULL, chunk->skb ? in sctp_outq_flush_data()
1108 refcount_read(&chunk->skb->users) : -1); in sctp_outq_flush_data()
1111 status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0, in sctp_outq_flush_data()
1112 ctx->gfp); in sctp_outq_flush_data()
1118 __func__, ntohl(chunk->subh.data_hdr->tsn), in sctp_outq_flush_data()
1121 sctp_outq_head_data(ctx->q, chunk); in sctp_outq_flush_data()
1125 /* The sender is in the SHUTDOWN-PENDING state; in sctp_outq_flush_data()
1126 * the sender MAY set the I-bit in the DATA in sctp_outq_flush_data()
1129 if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING) in sctp_outq_flush_data()
1130 chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM; in sctp_outq_flush_data()
1131 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) in sctp_outq_flush_data()
1132 ctx->asoc->stats.ouodchunks++; in sctp_outq_flush_data()
1134 ctx->asoc->stats.oodchunks++; in sctp_outq_flush_data()
1137 * chunk as sent, sched-wise. in sctp_outq_flush_data()
1139 sctp_sched_dequeue_done(ctx->q, chunk); in sctp_outq_flush_data()
1141 list_add_tail(&chunk->transmitted_list, in sctp_outq_flush_data()
1142 &ctx->transport->transmitted); in sctp_outq_flush_data()
1144 sctp_transport_reset_t3_rtx(ctx->transport); in sctp_outq_flush_data()
1145 ctx->transport->last_time_sent = jiffies; in sctp_outq_flush_data()
1148 * COOKIE-ECHO chunk. in sctp_outq_flush_data()
1150 if (ctx->packet->has_cookie_echo) in sctp_outq_flush_data()
1157 struct sock *sk = ctx->asoc->base.sk; in sctp_outq_flush_transports()
1163 while ((ltransport = sctp_list_dequeue(&ctx->transport_list)) != NULL) { in sctp_outq_flush_transports()
1165 packet = &t->packet; in sctp_outq_flush_transports()
1168 if (t->dst && __sk_dst_get(sk) != t->dst) { in sctp_outq_flush_transports()
1169 dst_hold(t->dst); in sctp_outq_flush_transports()
1170 sk_setup_caps(sk, t->dst); in sctp_outq_flush_transports()
1173 error = sctp_packet_transmit(packet, ctx->gfp); in sctp_outq_flush_transports()
1175 ctx->q->asoc->base.sk->sk_err = -error; in sctp_outq_flush_transports()
1198 .asoc = q->asoc, in sctp_outq_flush()
1214 if (q->asoc->src_out_of_asoc_ok) in sctp_outq_flush()
1232 unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; in sctp_sack_update_unack_data()
1235 for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { in sctp_sack_update_unack_data()
1236 unack_data -= ((ntohs(frags[i].gab.end) - in sctp_sack_update_unack_data()
1240 assoc->unack_data = unack_data; in sctp_sack_update_unack_data()
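A worked, standalone version of the computation above: every TSN between the cumulative ack point and next_tsn is unacked, minus the TSNs covered by the SACK's gap ack blocks, whose start/end are 16-bit offsets from the cumulative TSN ack:

#include <stdint.h>

struct gap_block { uint16_t start, end; };  /* inclusive offsets from ctsn */

static uint16_t count_unacked(uint32_t next_tsn, uint32_t ctsn_ack_point,
                              const struct gap_block *gabs, int n_gabs)
{
    uint16_t unack = (uint16_t)(next_tsn - ctsn_ack_point - 1);

    for (int i = 0; i < n_gabs; i++)
        unack -= gabs[i].end - gabs[i].start + 1;
    return unack;
}

/* e.g. next_tsn=110, ctsn=100: TSNs 101..109 are outstanding (9); one
 * block {2,4} acks 102..104, leaving 6 unacked. */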
1250 struct sctp_association *asoc = q->asoc; in sctp_outq_sack()
1251 struct sctp_sackhdr *sack = chunk->subh.sack_hdr; in sctp_outq_sack()
1259 struct sctp_transport *primary = asoc->peer.primary_path; in sctp_outq_sack()
1265 transport_list = &asoc->peer.transport_addr_list; in sctp_outq_sack()
1273 sack_ctsn = ntohl(sack->cum_tsn_ack); in sctp_outq_sack()
1274 gap_ack_blocks = ntohs(sack->num_gap_ack_blocks); in sctp_outq_sack()
1275 asoc->stats.gapcnt += gap_ack_blocks; in sctp_outq_sack()
1277 * SFR-CACC algorithm: in sctp_outq_sack()
1281 * 1) If the cumulative ack in the SACK passes next tsn_at_change in sctp_outq_sack()
1294 if (primary->cacc.changeover_active) { in sctp_outq_sack()
1297 if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { in sctp_outq_sack()
1298 primary->cacc.changeover_active = 0; in sctp_outq_sack()
1306 transport->cacc.cycling_changeover = 0; in sctp_outq_sack()
1308 transport->cacc.cacc_saw_newack = 0; in sctp_outq_sack()
1319 highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end); in sctp_outq_sack()
1322 if (TSN_lt(asoc->highest_sacked, highest_tsn)) in sctp_outq_sack()
1323 asoc->highest_sacked = highest_tsn; in sctp_outq_sack()
1330 sctp_check_transmitted(q, &q->retransmit, NULL, NULL, sack, &highest_new_tsn); in sctp_outq_sack()
1338 sctp_check_transmitted(q, &transport->transmitted, in sctp_outq_sack()
1339 transport, &chunk->source, sack, in sctp_outq_sack()
1342 * SFR-CACC algorithm: in sctp_outq_sack()
1346 if (transport->cacc.cacc_saw_newack) in sctp_outq_sack()
1350 /* Move the Cumulative TSN Ack Point if appropriate. */ in sctp_outq_sack()
1351 if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) { in sctp_outq_sack()
1352 asoc->ctsn_ack_point = sack_ctsn; in sctp_outq_sack()
1358 if (asoc->fast_recovery && accum_moved) in sctp_outq_sack()
1362 sctp_mark_missing(q, &transport->transmitted, transport, in sctp_outq_sack()
1369 ctsn = asoc->ctsn_ack_point; in sctp_outq_sack()
1372 list_for_each_safe(lchunk, temp, &q->sacked) { in sctp_outq_sack()
1375 tsn = ntohl(tchunk->subh.data_hdr->tsn); in sctp_outq_sack()
1377 list_del_init(&tchunk->transmitted_list); in sctp_outq_sack()
1378 if (asoc->peer.prsctp_capable && in sctp_outq_sack()
1379 SCTP_PR_PRIO_ENABLED(chunk->sinfo.sinfo_flags)) in sctp_outq_sack()
1380 asoc->sent_cnt_removable--; in sctp_outq_sack()
1387 * Cumulative TSN Ack and the Gap Ack Blocks. in sctp_outq_sack()
1390 sack_a_rwnd = ntohl(sack->a_rwnd); in sctp_outq_sack()
1391 asoc->peer.zero_window_announced = !sack_a_rwnd; in sctp_outq_sack()
1392 outstanding = q->outstanding_bytes; in sctp_outq_sack()
1395 sack_a_rwnd -= outstanding; in sctp_outq_sack()
1399 asoc->peer.rwnd = sack_a_rwnd; in sctp_outq_sack()
1401 asoc->stream.si->generate_ftsn(q, sack_ctsn); in sctp_outq_sack()
1403 pr_debug("%s: sack cumulative tsn ack:0x%x\n", __func__, sack_ctsn); in sctp_outq_sack()
1404 pr_debug("%s: cumulative tsn ack of assoc:%p is 0x%x, " in sctp_outq_sack()
1405 "advertised peer ack point:0x%x\n", __func__, asoc, ctsn, in sctp_outq_sack()
1406 asoc->adv_peer_ack_point); in sctp_outq_sack()
1412 * The queue is empty when there is no pending data, no in-flight data
1417 return q->out_qlen == 0 && q->outstanding_bytes == 0 && in sctp_outq_is_empty()
1418 list_empty(&q->retransmit); in sctp_outq_is_empty()
1426 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
1429 * I added coherent debug information output. --xguo
1432 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
1433 * KEPT TSN6-TSN7, etc.
1453 sack_ctsn = ntohl(sack->cum_tsn_ack); in sctp_check_transmitted()
1464 sctp_insert_list(&q->abandoned, lchunk); in sctp_check_transmitted()
1469 if (transmitted_queue != &q->retransmit && in sctp_check_transmitted()
1470 !tchunk->tsn_gap_acked) { in sctp_check_transmitted()
1471 if (tchunk->transport) in sctp_check_transmitted()
1472 tchunk->transport->flight_size -= in sctp_check_transmitted()
1474 q->outstanding_bytes -= sctp_data_size(tchunk); in sctp_check_transmitted()
1479 tsn = ntohl(tchunk->subh.data_hdr->tsn); in sctp_check_transmitted()
1486 if (transport && !tchunk->tsn_gap_acked) { in sctp_check_transmitted()
1499 tchunk->rtt_in_progress) { in sctp_check_transmitted()
1500 tchunk->rtt_in_progress = 0; in sctp_check_transmitted()
1501 rtt = jiffies - tchunk->sent_at; in sctp_check_transmitted()
1508 * SFR-CACC algorithm: in sctp_check_transmitted()
1520 if (sack->num_gap_ack_blocks && in sctp_check_transmitted()
1521 q->asoc->peer.primary_path->cacc. in sctp_check_transmitted()
1523 transport->cacc.cacc_saw_newack in sctp_check_transmitted()
1534 if (!tchunk->tsn_gap_acked) { in sctp_check_transmitted()
1535 tchunk->tsn_gap_acked = 1; in sctp_check_transmitted()
1539 if (!tchunk->transport) in sctp_check_transmitted()
1550 * for that address, restart T3-rtx in sctp_check_transmitted()
1557 list_add_tail(&tchunk->transmitted_list, in sctp_check_transmitted()
1558 &q->sacked); in sctp_check_transmitted()
1560 /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 in sctp_check_transmitted()
1578 if (tchunk->tsn_gap_acked) { in sctp_check_transmitted()
1582 tchunk->tsn_gap_acked = 0; in sctp_check_transmitted()
1584 if (tchunk->transport) in sctp_check_transmitted()
1585 bytes_acked -= sctp_data_size(tchunk); in sctp_check_transmitted()
1591 * Gap Ack Block, start T3-rtx for the in sctp_check_transmitted()
1605 struct sctp_association *asoc = transport->asoc; in sctp_check_transmitted()
1608 * to this transport due to DEL-IP operation. in sctp_check_transmitted()
1613 bytes_acked -= migrate_bytes; in sctp_check_transmitted()
1622 transport->error_count = 0; in sctp_check_transmitted()
1623 transport->asoc->overall_error_count = 0; in sctp_check_transmitted()
1627 * While in SHUTDOWN PENDING, we may have started in sctp_check_transmitted()
1628 * the T5 shutdown guard timer after reaching the in sctp_check_transmitted()
1632 if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING && in sctp_check_transmitted()
1633 del_timer(&asoc->timers in sctp_check_transmitted()
1640 if ((transport->state == SCTP_INACTIVE || in sctp_check_transmitted()
1641 transport->state == SCTP_UNCONFIRMED) && in sctp_check_transmitted()
1642 sctp_cmp_addr_exact(&transport->ipaddr, saddr)) { in sctp_check_transmitted()
1644 transport->asoc, in sctp_check_transmitted()
1653 transport->flight_size -= bytes_acked; in sctp_check_transmitted()
1654 if (transport->flight_size == 0) in sctp_check_transmitted()
1655 transport->partial_bytes_acked = 0; in sctp_check_transmitted()
1656 q->outstanding_bytes -= bytes_acked + migrate_bytes; in sctp_check_transmitted()
1658 /* RFC 2960 6.1, sctpimpguide-06 2.15.2 in sctp_check_transmitted()
1668 * Allow the association to timeout while in SHUTDOWN in sctp_check_transmitted()
1669 * PENDING or SHUTDOWN RECEIVED in case the receiver in sctp_check_transmitted()
1672 if (!q->asoc->peer.rwnd && in sctp_check_transmitted()
1674 (sack_ctsn+2 == q->asoc->next_tsn) && in sctp_check_transmitted()
1675 q->asoc->state < SCTP_STATE_SHUTDOWN_PENDING) { in sctp_check_transmitted()
1679 q->asoc->overall_error_count = 0; in sctp_check_transmitted()
1680 transport->error_count = 0; in sctp_check_transmitted()
1687 * been acknowledged, turn off the T3-rtx timer of that in sctp_check_transmitted()
1690 if (!transport->flight_size) { in sctp_check_transmitted()
1691 if (del_timer(&transport->T3_rtx_timer)) in sctp_check_transmitted()
1694 if (!mod_timer(&transport->T3_rtx_timer, in sctp_check_transmitted()
1695 jiffies + transport->rto)) in sctp_check_transmitted()
1700 if (transport->dst) in sctp_check_transmitted()
1718 struct sctp_association *asoc = q->asoc; in sctp_mark_missing()
1719 struct sctp_transport *primary = asoc->peer.primary_path; in sctp_mark_missing()
1723 tsn = ntohl(chunk->subh.data_hdr->tsn); in sctp_mark_missing()
1725 /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all in sctp_mark_missing()
1732 if (chunk->fast_retransmit == SCTP_CAN_FRTX && in sctp_mark_missing()
1733 !chunk->tsn_gap_acked && in sctp_mark_missing()
1736 /* SFR-CACC may require us to skip marking in sctp_mark_missing()
1740 chunk->transport, in sctp_mark_missing()
1742 chunk->tsn_missing_report++; in sctp_mark_missing()
1745 __func__, tsn, chunk->tsn_missing_report); in sctp_mark_missing()
1755 if (chunk->tsn_missing_report >= 3) { in sctp_mark_missing()
1756 chunk->fast_retransmit = SCTP_NEED_FRTX; in sctp_mark_missing()
1767 transport->cwnd, transport->ssthresh, in sctp_mark_missing()
1768 transport->flight_size, transport->partial_bytes_acked); in sctp_mark_missing()
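Sketch of the M3 rule applied above: a chunk still eligible for fast retransmit, not yet gap-acked, whose TSN is below the highest newly acked TSN has its missing report incremented unless SFR-CACC says to skip it, and the third report marks it SCTP_NEED_FRTX. Simplified signature, not the kernel's:

#include <stdint.h>

static int tsn_lt(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }

/* Returns nonzero when the caller should set fast_retransmit = NEED_FRTX. */
static int bump_missing_report(uint32_t tsn, uint32_t highest_new_tsn,
                               int gap_acked, int cacc_skip, uint8_t *report)
{
    if (gap_acked || cacc_skip || !tsn_lt(tsn, highest_new_tsn))
        return 0;
    return ++(*report) >= 3;
}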
1775 __u32 ctsn = ntohl(sack->cum_tsn_ack); in sctp_acked()
1785 * Gap Ack Blocks: in sctp_acked()
1786 * These fields contain the Gap Ack Blocks. They are repeated in sctp_acked()
1787 * for each Gap Ack Block up to the number of Gap Ack Blocks in sctp_acked()
1788 * defined in the Number of Gap Ack Blocks field. All DATA in sctp_acked()
1790 * Ack + Gap Ack Block Start) and less than or equal to in sctp_acked()
1791 * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack in sctp_acked()
1796 blocks = ntohs(sack->num_gap_ack_blocks); in sctp_acked()
1797 tsn_offset = tsn - ctsn; in sctp_acked()
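A standalone version of the acceptance test the comment above describes: a TSN is acked when it is at or below the cumulative TSN ack, or when it falls inside any gap ack block; block bounds are offsets from the cumulative ack, and the unsigned subtraction keeps the offset correct across 2^32 wraparound:

#include <stdint.h>

struct gap_block { uint16_t start, end; };

static int tsn_lte(uint32_t a, uint32_t b) { return (int32_t)(a - b) <= 0; }

static int sack_covers(uint32_t tsn, uint32_t ctsn,
                       const struct gap_block *gabs, int n_gabs)
{
    uint32_t off;

    if (tsn_lte(tsn, ctsn))
        return 1;                 /* cumulatively acked */
    off = tsn - ctsn;             /* serial-safe distance above ctsn */
    for (int i = 0; i < n_gabs; i++)
        if (off >= gabs[i].start && off <= gabs[i].end)
            return 1;             /* inside a gap ack block */
    return 0;
}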
1824 struct sctp_association *asoc = q->asoc; in sctp_generate_fwdtsn()
1833 if (!asoc->peer.prsctp_capable) in sctp_generate_fwdtsn()
1836 /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the in sctp_generate_fwdtsn()
1839 * If (Advanced.Peer.Ack.Point < SackCumAck), then update in sctp_generate_fwdtsn()
1840 * Advanced.Peer.Ack.Point to be equal to SackCumAck. in sctp_generate_fwdtsn()
1842 if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) in sctp_generate_fwdtsn()
1843 asoc->adv_peer_ack_point = ctsn; in sctp_generate_fwdtsn()
1845 /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" in sctp_generate_fwdtsn()
1846 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as in sctp_generate_fwdtsn()
1847 * the chunk next in the out-queue space is marked as "abandoned" as in sctp_generate_fwdtsn()
1850 * Assuming that a SACK arrived with the Cumulative TSN ACK 102 in sctp_generate_fwdtsn()
1851 * and the Advanced.Peer.Ack.Point is updated to this value: in sctp_generate_fwdtsn()
1853 * out-queue at the end of ==> out-queue after Adv.Ack.Point in sctp_generate_fwdtsn()
1856 * Adv.Ack.Pt-> 102 acked 102 acked in sctp_generate_fwdtsn()
1858 * 104 abandoned Adv.Ack.Pt-> 104 abandoned in sctp_generate_fwdtsn()
1864 * "Advanced.Peer.Ack.Point" from 102 to 104 locally. in sctp_generate_fwdtsn()
1866 list_for_each_safe(lchunk, temp, &q->abandoned) { in sctp_generate_fwdtsn()
1869 tsn = ntohl(chunk->subh.data_hdr->tsn); in sctp_generate_fwdtsn()
1878 if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { in sctp_generate_fwdtsn()
1879 asoc->adv_peer_ack_point = tsn; in sctp_generate_fwdtsn()
1880 if (chunk->chunk_hdr->flags & in sctp_generate_fwdtsn()
1885 chunk->subh.data_hdr->stream); in sctp_generate_fwdtsn()
1887 chunk->subh.data_hdr->stream; in sctp_generate_fwdtsn()
1889 chunk->subh.data_hdr->ssn; in sctp_generate_fwdtsn()
1899 /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" in sctp_generate_fwdtsn()
1900 * is greater than the Cumulative TSN ACK carried in the received in sctp_generate_fwdtsn()
1903 * "Advanced.Peer.Ack.Point". in sctp_generate_fwdtsn()
1914 * Advanced.Peer.Ack.Point to the last TSN that will fit in a in sctp_generate_fwdtsn()
1917 if (asoc->adv_peer_ack_point > ctsn) in sctp_generate_fwdtsn()
1918 ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point, in sctp_generate_fwdtsn()
1922 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); in sctp_generate_fwdtsn()
1923 SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS); in sctp_generate_fwdtsn()
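The C1/C2 walk above reduces to: clamp Advanced.Peer.Ack.Point up to the SACK's cumulative ack, then slide it forward over each consecutively numbered abandoned TSN at the head of the abandoned queue; step C3 then queues a FORWARD TSN whenever the point ended up past the cumulative ack. A sketch with a sorted array standing in for the abandoned list:

#include <stdint.h>

static int tsn_lt(uint32_t a, uint32_t b)  { return (int32_t)(a - b) < 0; }
static int tsn_lte(uint32_t a, uint32_t b) { return (int32_t)(a - b) <= 0; }

static uint32_t advance_peer_ack_point(uint32_t adv, uint32_t sack_ctsn,
                                       const uint32_t *abandoned, int n)
{
    if (tsn_lt(adv, sack_ctsn))
        adv = sack_ctsn;                    /* C1 */
    for (int i = 0; i < n; i++) {           /* C2: TSN-sorted abandoned queue */
        if (!tsn_lte(abandoned[i], adv + 1))
            break;                          /* gap: a TSN not abandoned/acked */
        adv = abandoned[i];
    }
    return adv;    /* C3: caller sends FWD TSN if adv moved past sack_ctsn */
}

/* With ctsn 102 and abandoned TSNs {103, 104} this returns 104, matching
 * the 102 -> 104 advance in the comment's example. */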