Lines matching "settling-time-us" (net/ipv4/tcp_recovery.c)

// SPDX-License-Identifier: GPL-2.0

static u32 tcp_rack_reo_wnd(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->reord_seen) {
		/* No reordering observed yet: stay aggressive */
		if (inet_csk(sk)->icsk_ca_state >= TCP_CA_Recovery)
			return 0;

		if (tp->sacked_out >= tp->reordering &&
		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
		      TCP_RACK_NO_DUPTHRESH))
			return 0;
	}

	/* To be more reordering resilient, allow min_rtt/4 settling delay,
	 * scaled by DSACK-driven reo_wnd_steps, capped at the smoothed RTT.
	 */
	return min((tcp_min_rtt(tp) >> 2) * tp->rack.reo_wnd_steps,
		   tp->srtt_us >> 3);
}
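As a rough illustration of the arithmetic above, here is a hypothetical userspace model (invented helper, plain microsecond values; not kernel code). In the kernel, tp->srtt_us is stored left-shifted by 3, which is why the original caps at srtt_us >> 3; this model caps at the plain smoothed RTT.

#include <stdio.h>

/* Hypothetical model: reo_wnd grows one min_rtt/4 step at a time,
 * upper-bounded by the smoothed RTT.
 */
static unsigned int reo_wnd_us(unsigned int min_rtt_us, unsigned int srtt_us,
			       unsigned int reo_wnd_steps)
{
	unsigned int wnd = (min_rtt_us >> 2) * reo_wnd_steps;

	return wnd < srtt_us ? wnd : srtt_us;
}

int main(void)
{
	unsigned int steps;

	/* min RTT 40ms, srtt 50ms: one step = 10ms, capped at 50ms */
	for (steps = 1; steps <= 8; steps++)
		printf("steps=%u reo_wnd=%u us\n",
		       steps, reo_wnd_us(40000, 50000, steps));
	return 0;
}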
static s32 tcp_rack_skb_timeout(struct tcp_sock *tp, struct sk_buff *skb,
				u32 reo_wnd)
{
	/* Time left before this skb is deemed lost; <= 0 means overdue */
	return tp->rack.rtt_us + reo_wnd -
	       tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(skb));
}
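A worked example with invented numbers pins down the sign convention: the expression stays positive while the skb may still be waiting out reordering, and goes non-positive once it has overstayed rack.rtt_us + reo_wnd.

#include <stdio.h>

int main(void)
{
	int rack_rtt_us = 40000;  /* RTT of the latest (s)acked packet */
	int reo_wnd_us  = 10000;  /* reordering settling window */
	int age_us      = 55000;  /* now minus the skb's send timestamp */
	int remaining   = rack_rtt_us + reo_wnd_us - age_us;

	/* -5000 here: the skb is 5ms past its deadline, so mark it lost */
	printf("remaining=%d us -> %s\n", remaining,
	       remaining <= 0 ? "lost" : "keep waiting");
	return 0;
}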
/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK,
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * RACK applies to both original and retransmitted skbs, and tolerates
 * reordering by simply allowing some "settling delay", instead of
 * tweaking the dupthresh. When losses are detected, the usual fast
 * recovery machinery will make us enter the CA_Recovery state.
 */
	/* In tcp_rack_detect_loss(): walk skbs in the order they were sent */
	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		/* Stop at skbs sent after the most recently (s)acked one */
		if (!tcp_skb_sent_after(tp->rack.mstamp,
					tcp_skb_timestamp_us(skb),
					tp->rack.end_seq, scb->end_seq))
			break;

		remaining = tcp_rack_skb_timeout(tp, skb, reo_wnd);
		if (remaining <= 0) {
			tcp_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
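To see the whole rule in one place, a toy userspace simulation (struct pkt and all values are invented; the kernel walks real skbs and also breaks send-time ties by sequence number): a packet is marked lost once a later-sent packet has been delivered and the packet's age exceeds rack.rtt_us plus the reordering window.

#include <stdio.h>
#include <stdbool.h>

struct pkt {
	long long sent_us;   /* transmit timestamp */
	bool delivered;      /* already (s)acked? */
	bool lost;
};

static void rack_detect(struct pkt *p, int n, long long now_us,
			long long latest_acked_sent_us,
			long long rtt_us, long long reo_wnd_us)
{
	int i;

	for (i = 0; i < n; i++) {
		if (p[i].delivered || p[i].lost)
			continue;
		/* Only packets sent before the latest delivered one qualify */
		if (p[i].sent_us >= latest_acked_sent_us)
			break;
		/* Overdue beyond RTT + reordering window: mark lost */
		if (now_us - p[i].sent_us >= rtt_us + reo_wnd_us)
			p[i].lost = true;
	}
}

int main(void)
{
	struct pkt q[] = {
		{     0, false, false },  /* old and undelivered: candidate */
		{ 10000, true,  false },  /* sent later, already acked */
	};

	rack_detect(q, 2, 60000, 10000, 40000, 10000);
	printf("pkt0 is %s\n", q[0].lost ? "lost" : "still outstanding");
	return 0;
}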
	/* In tcp_rack_mark_lost(): rescan only if RACK state has advanced */
	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + 1;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS))
		return;	/* likely an ambiguous (s)acked retransmission: skip */

	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_skb_sent_after(xmit_time, tp->rack.mstamp,
			       end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}
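A toy userspace model of this advance step, under the simplifying assumption that send timestamps alone order packets (the kernel's tcp_skb_sent_after() additionally breaks timestamp ties by end_seq); struct rack_state is invented.

#include <stdio.h>
#include <stdbool.h>

struct rack_state {
	long long mstamp;   /* send time of most recently sent (s)acked pkt */
	long long rtt_us;   /* RTT measured from that packet */
	unsigned int end_seq;
	bool advanced;
};

static void rack_advance(struct rack_state *r, long long now_us,
			 long long xmit_time, unsigned int end_seq)
{
	r->advanced = true;
	r->rtt_us = now_us - xmit_time;
	/* Only move the reference point forward in send time */
	if (xmit_time > r->mstamp) {
		r->mstamp = xmit_time;
		r->end_seq = end_seq;
	}
}

int main(void)
{
	struct rack_state r = { 0 };

	rack_advance(&r, 50000, 10000, 100);  /* ack for pkt sent at t=10ms */
	rack_advance(&r, 52000, 30000, 200);  /* later-sent pkt acked */
	printf("mstamp=%lld rtt=%lld end_seq=%u\n",
	       r.mstamp, r.rtt_us, r.end_seq);
	return 0;
}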
	/* In tcp_rack_reo_timeout(): the reordering timer has fired */
	u32 timeout, prior_inflight;
	u32 lost = tp->lost;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, tp->lost - lost, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
/* Update RACK's reo_wnd based on DSACKs and the number of recoveries.
 *
 * A DSACK hints that a retransmission was spurious, possibly because the
 * reordering delay exceeded reo_wnd, so widen reo_wnd by one min_rtt/4 step
 * (capped at srtt). Persist the widened value for TCP_RACK_RECOVERY_THRESH
 * no. of successful recoveries (accounts for full DSACK-based loss
 * recovery undo), then reset it to the default single step. reo_wnd grows
 * at most once per RTT, so the DSACK being reacted to arrived (approximately)
 * after the reo_wnd has been updated last time.
 */
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
	     TCP_RACK_STATIC_REO_WND) ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}
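The step logic can be sketched in userspace as follows (names invented; consolidated into a single per-recovery call for illustration, whereas the kernel decrements reo_wnd_persist on its own recovery-exit path):

#include <stdio.h>

#define RECOVERY_THRESH 16   /* mirrors TCP_RACK_RECOVERY_THRESH */

struct reo {
	unsigned int steps;    /* reo_wnd = steps * min_rtt/4, capped at srtt */
	unsigned int persist;  /* recoveries left before steps resets */
};

static void end_of_recovery(struct reo *r, int dsack_seen)
{
	if (dsack_seen) {
		if (r->steps < 0xFF)
			r->steps++;            /* widen by one min_rtt/4 step */
		r->persist = RECOVERY_THRESH;  /* keep the wider window */
	} else if (r->persist) {
		r->persist--;                  /* count down quiet recoveries */
	} else {
		r->steps = 1;                  /* back to the default window */
	}
}

int main(void)
{
	struct reo r = { .steps = 1, .persist = 0 };
	int i;

	end_of_recovery(&r, 1);                /* DSACK: steps -> 2 */
	for (i = 0; i < RECOVERY_THRESH; i++)  /* 16 clean recoveries */
		end_of_recovery(&r, 0);
	end_of_recovery(&r, 0);                /* persist exhausted: reset */
	printf("steps=%u persist=%u\n", r.steps, r.persist);  /* 1 0 */
	return 0;
}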
/* RFC6582 NewReno recovery for non-SACK connection. It simply retransmits
 * the next unacked packet upon receiving
 * a) three or more DUPACKs to start the fast recovery
 * b) an ACK acknowledging new data during the fast recovery.
 */
void tcp_newreno_mark_lost(struct sock *sk, bool snd_una_advanced)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const u8 state = inet_csk(sk)->icsk_ca_state;

	if ((state < TCP_CA_Recovery && tp->sacked_out >= tp->reordering) ||
	    (state == TCP_CA_Recovery && snd_una_advanced)) {
		struct sk_buff *skb = tcp_rtx_queue_head(sk);
		const u32 mss = tcp_skb_mss(skb);

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
			return;

		/* Fragment so that only one MSS gets marked lost */
		if (tcp_skb_pcount(skb) > 1 && skb->len > mss)
			tcp_fragment(sk, TCP_FRAG_IN_RTX_QUEUE, skb,
				     mss, mss, GFP_ATOMIC);

		tcp_mark_skb_lost(sk, skb);
	}
}
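A toy sketch of the trigger condition alone (invented names; dupacks stands in for tp->sacked_out, which counts DUPACKs on non-SACK connections):

#include <stdio.h>
#include <stdbool.h>

/* Should the head of the retransmit queue be marked lost? */
static bool newreno_mark_head_lost(bool in_recovery, unsigned int dupacks,
				   unsigned int dupthresh,
				   bool snd_una_advanced)
{
	if (!in_recovery && dupacks >= dupthresh)
		return true;   /* a) three or more DUPACKs: start recovery */
	if (in_recovery && snd_una_advanced)
		return true;   /* b) new data acked during recovery */
	return false;
}

int main(void)
{
	printf("%d\n", newreno_mark_head_lost(false, 3, 3, false)); /* 1 */
	printf("%d\n", newreno_mark_head_lost(true, 0, 3, true));   /* 1 */
	return 0;
}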