Lines matching refs:tp (the struct tcp_sock *tp pointer) in net/ipv4/tcp_input.c
334 static void tcp_ecn_queue_cwr(struct tcp_sock *tp) in tcp_ecn_queue_cwr() argument
336 if (tp->ecn_flags & TCP_ECN_OK) in tcp_ecn_queue_cwr()
337 tp->ecn_flags |= TCP_ECN_QUEUE_CWR; in tcp_ecn_queue_cwr()
354 static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) in tcp_ecn_withdraw_cwr() argument
356 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_withdraw_cwr()
361 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ecn_check_ce() local
369 if (tp->ecn_flags & TCP_ECN_SEEN) in __tcp_ecn_check_ce()
376 if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { in __tcp_ecn_check_ce()
379 tp->ecn_flags |= TCP_ECN_DEMAND_CWR; in __tcp_ecn_check_ce()
381 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
386 tp->ecn_flags |= TCP_ECN_SEEN; in __tcp_ecn_check_ce()
397 static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_synack() argument
399 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) in tcp_ecn_rcv_synack()
400 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_synack()
403 static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_syn() argument
405 if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) in tcp_ecn_rcv_syn()
406 tp->ecn_flags &= ~TCP_ECN_OK; in tcp_ecn_rcv_syn()
409 static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) in tcp_ecn_rcv_ecn_echo() argument
411 if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_rcv_ecn_echo()
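The ECN helpers above implement RFC 3168 negotiation: a SYN-ACK advertising ECN must carry ECE without CWR (line 399), a SYN must carry both ECE and CWR (line 405), and anything else clears TCP_ECN_OK for the connection. A minimal standalone sketch of the two negotiation checks; the helper and type names here are illustrative, not kernel API:

    #include <stdbool.h>
    #include <stdint.h>

    #define TCP_ECN_OK 1

    struct ecn_state { uint8_t ecn_flags; };

    /* RFC 3168: a valid ECN-setup SYN-ACK has ECE=1, CWR=0. */
    static void ecn_rcv_synack(struct ecn_state *s, bool ece, bool cwr)
    {
        if ((s->ecn_flags & TCP_ECN_OK) && (!ece || cwr))
            s->ecn_flags &= ~TCP_ECN_OK;   /* peer did not negotiate ECN */
    }

    /* RFC 3168: a valid ECN-setup SYN has ECE=1, CWR=1. */
    static void ecn_rcv_syn(struct ecn_state *s, bool ece, bool cwr)
    {
        if ((s->ecn_flags & TCP_ECN_OK) && (!ece || !cwr))
            s->ecn_flags &= ~TCP_ECN_OK;
    }
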
423 const struct tcp_sock *tp = tcp_sk(sk); in tcp_sndbuf_expand() local
431 per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + in tcp_sndbuf_expand()
438 nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp)); in tcp_sndbuf_expand()
439 nr_segs = max_t(u32, nr_segs, tp->reordering + 1); in tcp_sndbuf_expand()
482 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_grow_window() local
487 while (tp->rcv_ssthresh <= window) { in __tcp_grow_window()
519 struct tcp_sock *tp = tcp_sk(sk); in tcp_grow_window() local
522 room = min_t(int, tp->window_clamp, tcp_space(sk)) - tp->rcv_ssthresh; in tcp_grow_window()
536 incr = 2 * tp->advmss; in tcp_grow_window()
542 tp->rcv_ssthresh += min(room, incr); in tcp_grow_window()
559 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_buffer_space() local
565 tcp_mstamp_refresh(tp); in tcp_init_buffer_space()
566 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_init_buffer_space()
567 tp->rcvq_space.seq = tp->copied_seq; in tcp_init_buffer_space()
571 if (tp->window_clamp >= maxwin) { in tcp_init_buffer_space()
572 WRITE_ONCE(tp->window_clamp, maxwin); in tcp_init_buffer_space()
574 if (tcp_app_win && maxwin > 4 * tp->advmss) in tcp_init_buffer_space()
575 WRITE_ONCE(tp->window_clamp, in tcp_init_buffer_space()
577 4 * tp->advmss)); in tcp_init_buffer_space()
582 tp->window_clamp > 2 * tp->advmss && in tcp_init_buffer_space()
583 tp->window_clamp + tp->advmss > maxwin) in tcp_init_buffer_space()
584 WRITE_ONCE(tp->window_clamp, in tcp_init_buffer_space()
585 max(2 * tp->advmss, maxwin - tp->advmss)); in tcp_init_buffer_space()
587 tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); in tcp_init_buffer_space()
588 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_buffer_space()
589 tp->rcvq_space.space = min3(tp->rcv_ssthresh, tp->rcv_wnd, in tcp_init_buffer_space()
590 (u32)TCP_INIT_CWND * tp->advmss); in tcp_init_buffer_space()
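tcp_init_buffer_space() clamps the advertised window to the buffer (lines 571-585) and seeds the receive-autotuning state; the initial rcvq_space estimate on lines 589-590 is capped at one initial congestion window (TCP_INIT_CWND is 10 segments). A sketch of just that seeding arithmetic, with illustrative names:

    #include <stdint.h>

    #define TCP_INIT_CWND 10   /* RFC 6928 initial window, in segments */

    /* Start the autotuning estimator from at most one initial window
     * so the first per-RTT adjustment has a sane baseline. */
    static uint32_t init_rcvq_space(uint32_t rcv_ssthresh, uint32_t rcv_wnd,
                                    uint32_t advmss)
    {
        uint32_t m = rcv_ssthresh < rcv_wnd ? rcv_ssthresh : rcv_wnd;
        uint32_t w = TCP_INIT_CWND * advmss;

        return m < w ? m : w;   /* min3(), as on line 589 */
    }
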
596 struct tcp_sock *tp = tcp_sk(sk); in tcp_clamp_window() local
612 tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); in tcp_clamp_window()
624 const struct tcp_sock *tp = tcp_sk(sk); in tcp_initialize_rcv_mss() local
625 unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); in tcp_initialize_rcv_mss()
627 hint = min(hint, tp->rcv_wnd / 2); in tcp_initialize_rcv_mss()
646 static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) in tcp_rcv_rtt_update() argument
648 u32 new_sample = tp->rcv_rtt_est.rtt_us; in tcp_rcv_rtt_update()
675 tp->rcv_rtt_est.rtt_us = new_sample; in tcp_rcv_rtt_update()
678 static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) in tcp_rcv_rtt_measure() argument
682 if (tp->rcv_rtt_est.time == 0) in tcp_rcv_rtt_measure()
684 if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) in tcp_rcv_rtt_measure()
686 delta_us = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcv_rtt_est.time); in tcp_rcv_rtt_measure()
689 tcp_rcv_rtt_update(tp, delta_us, 1); in tcp_rcv_rtt_measure()
692 tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; in tcp_rcv_rtt_measure()
693 tp->rcv_rtt_est.time = tp->tcp_mstamp; in tcp_rcv_rtt_measure()
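Without a timestamp option the receiver can only bound the RTT: tcp_rcv_rtt_measure() marks rcv_nxt + rcv_wnd (line 692) and times how long a full window takes to arrive, which can never be shorter than one round trip. The filter body is elided from this listing; the sketch below reconstructs its usual shape from the calls on line 689 (win_dep=1) and line 727 (win_dep=0), so treat it as an approximation rather than a verbatim copy. The estimate is kept left-shifted by 3 (eighths of a microsecond):

    #include <stdint.h>

    static void rcv_rtt_update(uint32_t *rtt_est_x8, uint32_t sample_us,
                               int win_dep)
    {
        long m = sample_us;

        if (*rtt_est_x8 == 0) {            /* no previous measurement */
            *rtt_est_x8 = (uint32_t)(m << 3);
            return;
        }
        if (!win_dep) {                    /* timestamp sample: EWMA, gain 1/8 */
            m -= (*rtt_est_x8 >> 3);
            *rtt_est_x8 += (uint32_t)m;
        } else {                           /* windowed sample can only
                                            * overestimate: keep the minimum */
            m <<= 3;
            if (m < (long)*rtt_est_x8)
                *rtt_est_x8 = (uint32_t)m;
        }
    }
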
696 static s32 tcp_rtt_tsopt_us(const struct tcp_sock *tp) in tcp_rtt_tsopt_us() argument
700 delta = tcp_time_stamp_ts(tp) - tp->rx_opt.rcv_tsecr; in tcp_rtt_tsopt_us()
701 if (tp->tcp_usec_ts) in tcp_rtt_tsopt_us()
716 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_rtt_measure_ts() local
718 if (tp->rx_opt.rcv_tsecr == tp->rcv_rtt_last_tsecr) in tcp_rcv_rtt_measure_ts()
720 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_rtt_measure_ts()
724 s32 delta = tcp_rtt_tsopt_us(tp); in tcp_rcv_rtt_measure_ts()
727 tcp_rcv_rtt_update(tp, delta, 0); in tcp_rcv_rtt_measure_ts()
737 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_space_adjust() local
743 tcp_mstamp_refresh(tp); in tcp_rcv_space_adjust()
744 time = tcp_stamp_us_delta(tp->tcp_mstamp, tp->rcvq_space.time); in tcp_rcv_space_adjust()
745 if (time < (tp->rcv_rtt_est.rtt_us >> 3) || tp->rcv_rtt_est.rtt_us == 0) in tcp_rcv_space_adjust()
749 copied = tp->copied_seq - tp->rcvq_space.seq; in tcp_rcv_space_adjust()
750 if (copied <= tp->rcvq_space.space) in tcp_rcv_space_adjust()
770 rcvwin = ((u64)copied << 1) + 16 * tp->advmss; in tcp_rcv_space_adjust()
773 grow = rcvwin * (copied - tp->rcvq_space.space); in tcp_rcv_space_adjust()
774 do_div(grow, tp->rcvq_space.space); in tcp_rcv_space_adjust()
783 WRITE_ONCE(tp->window_clamp, in tcp_rcv_space_adjust()
787 tp->rcvq_space.space = copied; in tcp_rcv_space_adjust()
790 tp->rcvq_space.seq = tp->copied_seq; in tcp_rcv_space_adjust()
791 tp->rcvq_space.time = tp->tcp_mstamp; in tcp_rcv_space_adjust()
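tcp_rcv_space_adjust() is receive-buffer autotuning (dynamic right-sizing): once per estimated RTT (lines 744-745) it compares the bytes the application actually copied against the previous estimate and, if demand grew, sizes the next window at twice the consumption plus a growth extrapolation (lines 770-774). A sketch of that arithmetic; the byte-to-buffer overhead scaling and sysctl caps are omitted:

    #include <stdint.h>

    /* Caller guarantees copied > prev_space (line 750 bails out early
     * otherwise), so the division below is meaningful. */
    static uint64_t drs_rcvwin(uint32_t copied, uint32_t prev_space,
                               uint32_t advmss)
    {
        uint64_t rcvwin = ((uint64_t)copied << 1) + 16ULL * advmss;
        uint64_t grow = rcvwin * (copied - prev_space);

        grow /= prev_space;        /* do_div() on line 774 */
        rcvwin += grow << 1;       /* accommodate sender slow start */
        return rcvwin;             /* then converted to a buffer size */
    }
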
816 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_data_recv() local
824 tcp_rcv_rtt_measure(tp); in tcp_event_data_recv()
871 struct tcp_sock *tp = tcp_sk(sk); in tcp_rtt_estimator() local
873 u32 srtt = tp->srtt_us; in tcp_rtt_estimator()
896 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
908 m -= (tp->mdev_us >> 2); /* similar update on mdev */ in tcp_rtt_estimator()
910 tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ in tcp_rtt_estimator()
911 if (tp->mdev_us > tp->mdev_max_us) { in tcp_rtt_estimator()
912 tp->mdev_max_us = tp->mdev_us; in tcp_rtt_estimator()
913 if (tp->mdev_max_us > tp->rttvar_us) in tcp_rtt_estimator()
914 tp->rttvar_us = tp->mdev_max_us; in tcp_rtt_estimator()
916 if (after(tp->snd_una, tp->rtt_seq)) { in tcp_rtt_estimator()
917 if (tp->mdev_max_us < tp->rttvar_us) in tcp_rtt_estimator()
918 tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; in tcp_rtt_estimator()
919 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
920 tp->mdev_max_us = tcp_rto_min_us(sk); in tcp_rtt_estimator()
927 tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ in tcp_rtt_estimator()
928 tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); in tcp_rtt_estimator()
929 tp->mdev_max_us = tp->rttvar_us; in tcp_rtt_estimator()
930 tp->rtt_seq = tp->snd_nxt; in tcp_rtt_estimator()
934 tp->srtt_us = max(1U, srtt); in tcp_rtt_estimator()
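tcp_rtt_estimator() is the classic Jacobson/Karels smoother (RFC 6298) in fixed point: srtt_us holds 8*SRTT so a shift implements the 7/8 gain, mdev_us holds 4*mdev for the 3/4 gain, and mdev_max_us/rttvar_us track the worst deviation per round trip (lines 911-919). The retransmission timer then follows as srtt/8 + rttvar (__tcp_set_rto(), called on line 987). A self-contained sketch of the core update; names are illustrative and the per-flight rttvar decay is left out:

    #include <stdint.h>

    struct rtt_est { uint32_t srtt_x8, mdev_x4; };

    static void rtt_update(struct rtt_est *e, uint32_t mrtt_us)
    {
        long m = mrtt_us;

        if (e->srtt_x8) {
            m -= (e->srtt_x8 >> 3);     /* m is now error in rtt est */
            e->srtt_x8 += (uint32_t)m;  /* rtt = 7/8 rtt + 1/8 new */
            if (m < 0) {
                m = -m;
                m -= (e->mdev_x4 >> 2); /* similar update on mdev */
                if (m > 0)              /* damp mdev when RTT shrinks, */
                    m >>= 3;            /* the Eifel-inspired tweak    */
            } else {
                m -= (e->mdev_x4 >> 2); /* similar update on mdev */
            }
            e->mdev_x4 += (uint32_t)m;  /* mdev = 3/4 mdev + 1/4 new */
        } else {                        /* first measurement */
            e->srtt_x8 = (uint32_t)(m << 3);
            e->mdev_x4 = (uint32_t)(m << 1);  /* make sure rto = 3*rtt */
        }
    }

    static uint32_t rto_us(const struct rtt_est *e, uint32_t rttvar_us)
    {
        return (e->srtt_x8 >> 3) + rttvar_us;  /* shape of __tcp_set_rto() */
    }
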
939 const struct tcp_sock *tp = tcp_sk(sk); in tcp_update_pacing_rate() local
943 rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); in tcp_update_pacing_rate()
953 if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2) in tcp_update_pacing_rate()
958 rate *= max(tcp_snd_cwnd(tp), tp->packets_out); in tcp_update_pacing_rate()
960 if (likely(tp->srtt_us)) in tcp_update_pacing_rate()
961 do_div(rate, tp->srtt_us); in tcp_update_pacing_rate()
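tcp_update_pacing_rate() derives sk_pacing_rate as roughly ratio% of one cwnd per smoothed RTT. The odd constant on line 943 does two conversions at once: the <<3 cancels the 1/8-us fixed point of srtt_us, and the /100 lets the sysctl ratio be applied as a percentage; line 953 selects the slow-start ratio while cwnd is below half of ssthresh. A sketch, assuming the usual sysctl defaults of 200% (tcp_pacing_ss_ratio) and 120% (tcp_pacing_ca_ratio):

    #include <stdint.h>

    #define USEC_PER_SEC 1000000ULL

    static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd,
                                uint32_t srtt_x8_us, int in_slow_start)
    {
        uint64_t rate = (uint64_t)mss * ((USEC_PER_SEC / 100) << 3);

        rate *= in_slow_start ? 200 : 120;   /* sysctl ratios (defaults)  */
        rate *= cwnd;                        /* max(cwnd, packets_out) in */
        if (srtt_x8_us)                      /* the kernel, line 958      */
            rate /= srtt_x8_us;
        return rate;                         /* bytes per second */
    }
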
976 const struct tcp_sock *tp = tcp_sk(sk); in tcp_set_rto() local
987 inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); in tcp_set_rto()
1001 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) in tcp_init_cwnd() argument
1007 return min_t(__u32, cwnd, tp->snd_cwnd_clamp); in tcp_init_cwnd()
1030 static u32 tcp_dsack_seen(struct tcp_sock *tp, u32 start_seq, in tcp_dsack_seen() argument
1040 if (seq_len > tp->max_window) in tcp_dsack_seen()
1042 if (seq_len > tp->mss_cache) in tcp_dsack_seen()
1043 dup_segs = DIV_ROUND_UP(seq_len, tp->mss_cache); in tcp_dsack_seen()
1044 else if (tp->tlp_high_seq && tp->tlp_high_seq == end_seq) in tcp_dsack_seen()
1047 tp->dsack_dups += dup_segs; in tcp_dsack_seen()
1049 if (tp->dsack_dups > tp->total_retrans) in tcp_dsack_seen()
1052 tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; in tcp_dsack_seen()
1059 if (tp->reord_seen && !(state->flag & FLAG_DSACK_TLP)) in tcp_dsack_seen()
1060 tp->rack.dsack_seen = 1; in tcp_dsack_seen()
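tcp_dsack_seen() turns a D-SACK range into an estimated count of duplicate segments: ranges wider than max_window are rejected as implausible (line 1040), multi-MSS ranges are divided by the MSS rounding up (line 1043), and a range ending exactly at tlp_high_seq is credited to a tail-loss probe rather than to reordering (line 1044). The basic estimate, as a sketch:

    #include <stdint.h>

    /* Returns 0 for an implausible D-SACK range. */
    static uint32_t dsack_dup_segs(uint32_t seq_len, uint32_t mss,
                                   uint32_t max_window)
    {
        if (seq_len > max_window)
            return 0;                          /* reject, line 1040 */
        if (seq_len > mss)
            return (seq_len + mss - 1) / mss;  /* DIV_ROUND_UP, line 1043 */
        return 1;
    }
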
1076 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reordering() local
1077 const u32 mss = tp->mss_cache; in tcp_check_sack_reordering()
1080 fack = tcp_highest_sack_seq(tp); in tcp_check_sack_reordering()
1085 if ((metric > tp->reordering * mss) && mss) { in tcp_check_sack_reordering()
1088 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, in tcp_check_sack_reordering()
1089 tp->reordering, in tcp_check_sack_reordering()
1091 tp->sacked_out, in tcp_check_sack_reordering()
1092 tp->undo_marker ? tp->undo_retrans : 0); in tcp_check_sack_reordering()
1094 tp->reordering = min_t(u32, (metric + mss - 1) / mss, in tcp_check_sack_reordering()
1099 tp->reord_seen++; in tcp_check_sack_reordering()
1109 static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) in tcp_verify_retransmit_hint() argument
1111 if ((!tp->retransmit_skb_hint && tp->retrans_out >= tp->lost_out) || in tcp_verify_retransmit_hint()
1112 (tp->retransmit_skb_hint && in tcp_verify_retransmit_hint()
1114 TCP_SKB_CB(tp->retransmit_skb_hint)->seq))) in tcp_verify_retransmit_hint()
1115 tp->retransmit_skb_hint = skb; in tcp_verify_retransmit_hint()
1121 static void tcp_notify_skb_loss_event(struct tcp_sock *tp, const struct sk_buff *skb) in tcp_notify_skb_loss_event() argument
1123 tp->lost += tcp_skb_pcount(skb); in tcp_notify_skb_loss_event()
1129 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_skb_lost() local
1134 tcp_verify_retransmit_hint(tp, skb); in tcp_mark_skb_lost()
1139 tp->retrans_out -= tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1142 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1145 tp->lost_out += tcp_skb_pcount(skb); in tcp_mark_skb_lost()
1147 tcp_notify_skb_loss_event(tp, skb); in tcp_mark_skb_lost()
1152 static void tcp_count_delivered(struct tcp_sock *tp, u32 delivered, in tcp_count_delivered() argument
1155 tp->delivered += delivered; in tcp_count_delivered()
1157 tp->delivered_ce += delivered; in tcp_count_delivered()
1253 static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, in tcp_is_sackblock_valid() argument
1257 if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) in tcp_is_sackblock_valid()
1261 if (!before(start_seq, tp->snd_nxt)) in tcp_is_sackblock_valid()
1267 if (after(start_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1270 if (!is_dsack || !tp->undo_marker) in tcp_is_sackblock_valid()
1274 if (after(end_seq, tp->snd_una)) in tcp_is_sackblock_valid()
1277 if (!before(start_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1281 if (!after(end_seq, tp->undo_marker)) in tcp_is_sackblock_valid()
1287 return !before(start_seq, end_seq - tp->max_window); in tcp_is_sackblock_valid()
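Every comparison in tcp_is_sackblock_valid() (and throughout this file) uses the wrap-safe before()/after() helpers from include/net/tcp.h, which reduce to one signed 32-bit subtraction; that is what makes checks like line 1287's "within one max_window below snd_una" correct across sequence-number wraparound. The primitives, restated standalone:

    #include <stdbool.h>
    #include <stdint.h>

    /* The cast to int32_t makes 0xffffffff compare "before" 0, so the
     * test is correct across 2^32 wraparound for spans under 2^31. */
    static inline bool seq_before(uint32_t seq1, uint32_t seq2)
    {
        return (int32_t)(seq1 - seq2) < 0;
    }

    static inline bool seq_after(uint32_t seq1, uint32_t seq2)
    {
        return seq_before(seq2, seq1);
    }
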
1294 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_dsack() local
1312 dup_segs = tcp_dsack_seen(tp, start_seq_0, end_seq_0, state); in tcp_check_dsack()
1321 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_check_dsack()
1323 after(end_seq_0, tp->undo_marker)) in tcp_check_dsack()
1324 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - dup_segs); in tcp_check_dsack()
1392 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_one() local
1396 if (tp->undo_marker && tp->undo_retrans > 0 && in tcp_sacktag_one()
1397 after(end_seq, tp->undo_marker)) in tcp_sacktag_one()
1398 tp->undo_retrans = max_t(int, 0, tp->undo_retrans - pcount); in tcp_sacktag_one()
1405 if (!after(end_seq, tp->snd_una)) in tcp_sacktag_one()
1409 tcp_rack_advance(tp, sacked, end_seq, xmit_time); in tcp_sacktag_one()
1418 tp->lost_out -= pcount; in tcp_sacktag_one()
1419 tp->retrans_out -= pcount; in tcp_sacktag_one()
1427 tcp_highest_sack_seq(tp)) && in tcp_sacktag_one()
1431 if (!after(end_seq, tp->high_seq)) in tcp_sacktag_one()
1440 tp->lost_out -= pcount; in tcp_sacktag_one()
1446 tp->sacked_out += pcount; in tcp_sacktag_one()
1451 if (tp->lost_skb_hint && in tcp_sacktag_one()
1452 before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) in tcp_sacktag_one()
1453 tp->lost_cnt_hint += pcount; in tcp_sacktag_one()
1462 tp->retrans_out -= pcount; in tcp_sacktag_one()
1477 struct tcp_sock *tp = tcp_sk(sk); in tcp_shifted_skb() local
1494 if (skb == tp->lost_skb_hint) in tcp_shifted_skb()
1495 tp->lost_cnt_hint += pcount; in tcp_shifted_skb()
1527 if (skb == tp->retransmit_skb_hint) in tcp_shifted_skb()
1528 tp->retransmit_skb_hint = prev; in tcp_shifted_skb()
1529 if (skb == tp->lost_skb_hint) { in tcp_shifted_skb()
1530 tp->lost_skb_hint = prev; in tcp_shifted_skb()
1531 tp->lost_cnt_hint -= tcp_skb_pcount(prev); in tcp_shifted_skb()
1590 struct tcp_sock *tp = tcp_sk(sk); in tcp_shift_skb_data() local
1604 if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in tcp_shift_skb_data()
1684 if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) in tcp_shift_skb_data()
1729 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_walk() local
1788 tcp_highest_sack_seq(tp))) in tcp_sacktag_walk()
1844 static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) in tcp_sack_cache_ok() argument
1846 return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sack_cache_ok()
1853 struct tcp_sock *tp = tcp_sk(sk); in tcp_sacktag_write_queue() local
1867 state->reord = tp->snd_nxt; in tcp_sacktag_write_queue()
1869 if (!tp->sacked_out) in tcp_sacktag_write_queue()
1879 if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) in tcp_sacktag_write_queue()
1882 if (!tp->packets_out) in tcp_sacktag_write_queue()
1893 if (!tcp_is_sackblock_valid(tp, dup_sack, in tcp_sacktag_write_queue()
1899 if (!tp->undo_marker) in tcp_sacktag_write_queue()
1905 if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && in tcp_sacktag_write_queue()
1906 !after(sp[used_sacks].end_seq, tp->snd_una)) in tcp_sacktag_write_queue()
1944 if (!tp->sacked_out) { in tcp_sacktag_write_queue()
1946 cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); in tcp_sacktag_write_queue()
1948 cache = tp->recv_sack_cache; in tcp_sacktag_write_queue()
1950 while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && in tcp_sacktag_write_queue()
1965 while (tcp_sack_cache_ok(tp, cache) && in tcp_sacktag_write_queue()
1970 if (tcp_sack_cache_ok(tp, cache) && !dup_sack && in tcp_sacktag_write_queue()
1992 if (tcp_highest_sack_seq(tp) == cache->end_seq) { in tcp_sacktag_write_queue()
2007 if (!before(start_seq, tcp_highest_sack_seq(tp))) { in tcp_sacktag_write_queue()
2023 for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { in tcp_sacktag_write_queue()
2024 tp->recv_sack_cache[i].start_seq = 0; in tcp_sacktag_write_queue()
2025 tp->recv_sack_cache[i].end_seq = 0; in tcp_sacktag_write_queue()
2028 tp->recv_sack_cache[i++] = sp[j]; in tcp_sacktag_write_queue()
2030 if (inet_csk(sk)->icsk_ca_state != TCP_CA_Loss || tp->undo_marker) in tcp_sacktag_write_queue()
2033 tcp_verify_left_out(tp); in tcp_sacktag_write_queue()
2037 WARN_ON((int)tp->sacked_out < 0); in tcp_sacktag_write_queue()
2038 WARN_ON((int)tp->lost_out < 0); in tcp_sacktag_write_queue()
2039 WARN_ON((int)tp->retrans_out < 0); in tcp_sacktag_write_queue()
2040 WARN_ON((int)tcp_packets_in_flight(tp) < 0); in tcp_sacktag_write_queue()
2048 static bool tcp_limit_reno_sacked(struct tcp_sock *tp) in tcp_limit_reno_sacked() argument
2052 holes = max(tp->lost_out, 1U); in tcp_limit_reno_sacked()
2053 holes = min(holes, tp->packets_out); in tcp_limit_reno_sacked()
2055 if ((tp->sacked_out + holes) > tp->packets_out) { in tcp_limit_reno_sacked()
2056 tp->sacked_out = tp->packets_out - holes; in tcp_limit_reno_sacked()
2068 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_reno_reordering() local
2070 if (!tcp_limit_reno_sacked(tp)) in tcp_check_reno_reordering()
2073 tp->reordering = min_t(u32, tp->packets_out + addend, in tcp_check_reno_reordering()
2075 tp->reord_seen++; in tcp_check_reno_reordering()
2084 struct tcp_sock *tp = tcp_sk(sk); in tcp_add_reno_sack() local
2085 u32 prior_sacked = tp->sacked_out; in tcp_add_reno_sack()
2088 tp->sacked_out += num_dupack; in tcp_add_reno_sack()
2090 delivered = tp->sacked_out - prior_sacked; in tcp_add_reno_sack()
2092 tcp_count_delivered(tp, delivered, ece_ack); in tcp_add_reno_sack()
2093 tcp_verify_left_out(tp); in tcp_add_reno_sack()
2101 struct tcp_sock *tp = tcp_sk(sk); in tcp_remove_reno_sacks() local
2105 tcp_count_delivered(tp, max_t(int, acked - tp->sacked_out, 1), in tcp_remove_reno_sacks()
2107 if (acked - 1 >= tp->sacked_out) in tcp_remove_reno_sacks()
2108 tp->sacked_out = 0; in tcp_remove_reno_sacks()
2110 tp->sacked_out -= acked - 1; in tcp_remove_reno_sacks()
2113 tcp_verify_left_out(tp); in tcp_remove_reno_sacks()
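Without SACK, sacked_out is inferred from duplicate ACKs, and the inference can overshoot. tcp_limit_reno_sacked() caps it so that sacked_out plus the assumed holes never exceeds packets_out; a cap that fires is itself evidence of reordering (see tcp_check_reno_reordering() just below it). A sketch of the cap from lines 2052-2056, with illustrative names:

    #include <stdbool.h>
    #include <stdint.h>

    /* Returns true if the estimate had to be reduced. */
    static bool limit_reno_sacked(uint32_t *sacked_out, uint32_t lost_out,
                                  uint32_t packets_out)
    {
        uint32_t holes = lost_out > 1 ? lost_out : 1;  /* at least one hole */

        if (holes > packets_out)
            holes = packets_out;
        if (*sacked_out + holes > packets_out) {
            *sacked_out = packets_out - holes;
            return true;                               /* reordering seen */
        }
        return false;
    }
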
2116 static inline void tcp_reset_reno_sack(struct tcp_sock *tp) in tcp_reset_reno_sack() argument
2118 tp->sacked_out = 0; in tcp_reset_reno_sack()
2121 void tcp_clear_retrans(struct tcp_sock *tp) in tcp_clear_retrans() argument
2123 tp->retrans_out = 0; in tcp_clear_retrans()
2124 tp->lost_out = 0; in tcp_clear_retrans()
2125 tp->undo_marker = 0; in tcp_clear_retrans()
2126 tp->undo_retrans = -1; in tcp_clear_retrans()
2127 tp->sacked_out = 0; in tcp_clear_retrans()
2128 tp->rto_stamp = 0; in tcp_clear_retrans()
2129 tp->total_rto = 0; in tcp_clear_retrans()
2130 tp->total_rto_recoveries = 0; in tcp_clear_retrans()
2131 tp->total_rto_time = 0; in tcp_clear_retrans()
2134 static inline void tcp_init_undo(struct tcp_sock *tp) in tcp_init_undo() argument
2136 tp->undo_marker = tp->snd_una; in tcp_init_undo()
2140 tp->undo_retrans = tp->retrans_out; in tcp_init_undo()
2142 if (tp->tlp_high_seq && tp->tlp_retrans) in tcp_init_undo()
2143 tp->undo_retrans++; in tcp_init_undo()
2145 if (!tp->undo_retrans) in tcp_init_undo()
2146 tp->undo_retrans = -1; in tcp_init_undo()
2161 struct tcp_sock *tp = tcp_sk(sk); in tcp_timeout_mark_lost() local
2169 tp->sacked_out = 0; in tcp_timeout_mark_lost()
2171 tp->is_sack_reneg = 1; in tcp_timeout_mark_lost()
2172 } else if (tcp_is_reno(tp)) { in tcp_timeout_mark_lost()
2173 tcp_reset_reno_sack(tp); in tcp_timeout_mark_lost()
2181 tcp_rack_skb_timeout(tp, skb, 0) > 0) in tcp_timeout_mark_lost()
2185 tcp_verify_left_out(tp); in tcp_timeout_mark_lost()
2186 tcp_clear_all_retrans_hints(tp); in tcp_timeout_mark_lost()
2193 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_loss() local
2202 !after(tp->high_seq, tp->snd_una) || in tcp_enter_loss()
2204 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_loss()
2205 tp->prior_cwnd = tcp_snd_cwnd(tp); in tcp_enter_loss()
2206 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); in tcp_enter_loss()
2208 tcp_init_undo(tp); in tcp_enter_loss()
2210 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1); in tcp_enter_loss()
2211 tp->snd_cwnd_cnt = 0; in tcp_enter_loss()
2212 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_enter_loss()
2219 tp->sacked_out >= reordering) in tcp_enter_loss()
2220 tp->reordering = min_t(unsigned int, tp->reordering, in tcp_enter_loss()
2224 tp->high_seq = tp->snd_nxt; in tcp_enter_loss()
2225 tp->tlp_high_seq = 0; in tcp_enter_loss()
2226 tcp_ecn_queue_cwr(tp); in tcp_enter_loss()
2232 tp->frto = READ_ONCE(net->ipv4.sysctl_tcp_frto) && in tcp_enter_loss()
2251 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_sack_reneging() local
2252 unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), in tcp_check_sack_reneging()
2274 static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) in tcp_dupack_heuristics() argument
2276 return tp->sacked_out + 1; in tcp_dupack_heuristics()
2378 struct tcp_sock *tp = tcp_sk(sk); in tcp_time_to_recover() local
2381 if (tp->lost_out) in tcp_time_to_recover()
2385 if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering) in tcp_time_to_recover()
2398 struct tcp_sock *tp = tcp_sk(sk); in tcp_mark_head_lost() local
2402 const u32 loss_high = tp->snd_nxt; in tcp_mark_head_lost()
2404 WARN_ON(packets > tp->packets_out); in tcp_mark_head_lost()
2405 skb = tp->lost_skb_hint; in tcp_mark_head_lost()
2408 if (mark_head && after(TCP_SKB_CB(skb)->seq, tp->snd_una)) in tcp_mark_head_lost()
2410 cnt = tp->lost_cnt_hint; in tcp_mark_head_lost()
2419 tp->lost_skb_hint = skb; in tcp_mark_head_lost()
2420 tp->lost_cnt_hint = cnt; in tcp_mark_head_lost()
2437 tcp_verify_left_out(tp); in tcp_mark_head_lost()
2444 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_scoreboard() local
2446 if (tcp_is_sack(tp)) { in tcp_update_scoreboard()
2447 int sacked_upto = tp->sacked_out - tp->reordering; in tcp_update_scoreboard()
2455 static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) in tcp_tsopt_ecr_before() argument
2457 return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_tsopt_ecr_before()
2458 before(tp->rx_opt.rcv_tsecr, when); in tcp_tsopt_ecr_before()
2464 static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, in tcp_skb_spurious_retrans() argument
2468 tcp_tsopt_ecr_before(tp, tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb)); in tcp_skb_spurious_retrans()
2474 static inline bool tcp_packet_delayed(const struct tcp_sock *tp) in tcp_packet_delayed() argument
2476 const struct sock *sk = (const struct sock *)tp; in tcp_packet_delayed()
2478 if (tp->retrans_stamp && in tcp_packet_delayed()
2479 tcp_tsopt_ecr_before(tp, tp->retrans_stamp)) in tcp_packet_delayed()
2487 if (!tp->retrans_stamp && /* no record of a retransmit/SYN? */ in tcp_packet_delayed()
2512 const struct tcp_sock *tp = tcp_sk(sk); in tcp_any_retrans_done() local
2515 if (tp->retrans_out) in tcp_any_retrans_done()
2538 struct tcp_sock *tp = tcp_sk(sk); in DBGUNDO() local
2545 tcp_snd_cwnd(tp), tcp_left_out(tp), in DBGUNDO()
2546 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2547 tp->packets_out); in DBGUNDO()
2554 tcp_snd_cwnd(tp), tcp_left_out(tp), in DBGUNDO()
2555 tp->snd_ssthresh, tp->prior_ssthresh, in DBGUNDO()
2556 tp->packets_out); in DBGUNDO()
2564 struct tcp_sock *tp = tcp_sk(sk); in tcp_undo_cwnd_reduction() local
2572 tp->lost_out = 0; in tcp_undo_cwnd_reduction()
2573 tcp_clear_all_retrans_hints(tp); in tcp_undo_cwnd_reduction()
2576 if (tp->prior_ssthresh) { in tcp_undo_cwnd_reduction()
2579 tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk)); in tcp_undo_cwnd_reduction()
2581 if (tp->prior_ssthresh > tp->snd_ssthresh) { in tcp_undo_cwnd_reduction()
2582 tp->snd_ssthresh = tp->prior_ssthresh; in tcp_undo_cwnd_reduction()
2583 tcp_ecn_withdraw_cwr(tp); in tcp_undo_cwnd_reduction()
2586 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_undo_cwnd_reduction()
2587 tp->undo_marker = 0; in tcp_undo_cwnd_reduction()
2588 tp->rack.advanced = 1; /* Force RACK to re-exam losses */ in tcp_undo_cwnd_reduction()
2591 static inline bool tcp_may_undo(const struct tcp_sock *tp) in tcp_may_undo() argument
2593 return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); in tcp_may_undo()
2598 struct tcp_sock *tp = tcp_sk(sk); in tcp_is_non_sack_preventing_reopen() local
2600 if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { in tcp_is_non_sack_preventing_reopen()
2605 tp->retrans_stamp = 0; in tcp_is_non_sack_preventing_reopen()
2614 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_recovery() local
2616 if (tcp_may_undo(tp)) { in tcp_try_undo_recovery()
2630 } else if (tp->rack.reo_wnd_persist) { in tcp_try_undo_recovery()
2631 tp->rack.reo_wnd_persist--; in tcp_try_undo_recovery()
2636 tp->is_sack_reneg = 0; in tcp_try_undo_recovery()
2643 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_dsack() local
2645 if (tp->undo_marker && !tp->undo_retrans) { in tcp_try_undo_dsack()
2646 tp->rack.reo_wnd_persist = min(TCP_RACK_RECOVERY_THRESH, in tcp_try_undo_dsack()
2647 tp->rack.reo_wnd_persist + 1); in tcp_try_undo_dsack()
2659 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_loss() local
2661 if (frto_undo || tcp_may_undo(tp)) { in tcp_try_undo_loss()
2672 if (frto_undo || tcp_is_sack(tp)) { in tcp_try_undo_loss()
2674 tp->is_sack_reneg = 0; in tcp_try_undo_loss()
2692 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_cwnd_reduction() local
2694 tp->high_seq = tp->snd_nxt; in tcp_init_cwnd_reduction()
2695 tp->tlp_high_seq = 0; in tcp_init_cwnd_reduction()
2696 tp->snd_cwnd_cnt = 0; in tcp_init_cwnd_reduction()
2697 tp->prior_cwnd = tcp_snd_cwnd(tp); in tcp_init_cwnd_reduction()
2698 tp->prr_delivered = 0; in tcp_init_cwnd_reduction()
2699 tp->prr_out = 0; in tcp_init_cwnd_reduction()
2700 tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); in tcp_init_cwnd_reduction()
2701 tcp_ecn_queue_cwr(tp); in tcp_init_cwnd_reduction()
2706 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_reduction() local
2708 int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); in tcp_cwnd_reduction()
2710 if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) in tcp_cwnd_reduction()
2713 tp->prr_delivered += newly_acked_sacked; in tcp_cwnd_reduction()
2715 u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + in tcp_cwnd_reduction()
2716 tp->prior_cwnd - 1; in tcp_cwnd_reduction()
2717 sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; in tcp_cwnd_reduction()
2719 sndcnt = max_t(int, tp->prr_delivered - tp->prr_out, in tcp_cwnd_reduction()
2726 sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1)); in tcp_cwnd_reduction()
2727 tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt); in tcp_cwnd_reduction()
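tcp_cwnd_reduction() implements Proportional Rate Reduction (RFC 6937): while flight size is still above ssthresh, sending is metered to ssthresh/prior_cwnd of what was delivered (lines 2715-2717); once below, it catches up no faster than slow start (line 2719), bounded by the remaining deficit. A sketch of the sndcnt computation; the FLAG_SND_UNA_ADVANCED tweak and the state updates are omitted:

    #include <stdint.h>

    static int prr_sndcnt(uint32_t ssthresh, uint32_t prior_cwnd,
                          uint32_t prr_delivered, uint32_t prr_out,
                          uint32_t in_flight, int newly_acked_sacked)
    {
        int delta = (int)ssthresh - (int)in_flight;
        int sndcnt;

        if (delta < 0) {                 /* proportional phase */
            uint64_t dividend = (uint64_t)ssthresh * prr_delivered +
                                prior_cwnd - 1;              /* round up */
            sndcnt = (int)(dividend / prior_cwnd) - (int)prr_out;
        } else {                         /* catch-up phase */
            sndcnt = (int)(prr_delivered - prr_out);
            if (sndcnt < newly_acked_sacked)
                sndcnt = newly_acked_sacked;
            if (sndcnt > delta)
                sndcnt = delta;
        }
        /* Force one segment when entering recovery: the fast retransmit. */
        if (sndcnt < (prr_out ? 0 : 1))
            sndcnt = prr_out ? 0 : 1;
        return sndcnt;                   /* cwnd = in_flight + sndcnt */
    }
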
2732 struct tcp_sock *tp = tcp_sk(sk); in tcp_end_cwnd_reduction() local
2738 if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH && in tcp_end_cwnd_reduction()
2739 (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) { in tcp_end_cwnd_reduction()
2740 tcp_snd_cwnd_set(tp, tp->snd_ssthresh); in tcp_end_cwnd_reduction()
2741 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_end_cwnd_reduction()
2749 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_cwr() local
2751 tp->prior_ssthresh = 0; in tcp_enter_cwr()
2753 tp->undo_marker = 0; in tcp_enter_cwr()
2762 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_keep_open() local
2765 if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) in tcp_try_keep_open()
2770 tp->high_seq = tp->snd_nxt; in tcp_try_keep_open()
2776 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_to_open() local
2778 tcp_verify_left_out(tp); in tcp_try_to_open()
2781 tp->retrans_stamp = 0; in tcp_try_to_open()
2802 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_probe_success() local
2806 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2808 val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache); in tcp_mtup_probe_success()
2811 tcp_snd_cwnd_set(tp, max_t(u32, 1U, val)); in tcp_mtup_probe_success()
2813 tp->snd_cwnd_cnt = 0; in tcp_mtup_probe_success()
2814 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_mtup_probe_success()
2815 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_mtup_probe_success()
2836 struct tcp_sock *tp = tcp_sk(sk); in tcp_non_congestion_loss_retransmit() local
2839 tp->high_seq = tp->snd_nxt; in tcp_non_congestion_loss_retransmit()
2840 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_non_congestion_loss_retransmit()
2841 tp->prior_ssthresh = 0; in tcp_non_congestion_loss_retransmit()
2842 tp->undo_marker = 0; in tcp_non_congestion_loss_retransmit()
2854 struct tcp_sock *tp = tcp_sk(sk); in tcp_simple_retransmit() local
2868 if (tp->syn_data && sk->sk_state == TCP_SYN_SENT) in tcp_simple_retransmit()
2878 tcp_clear_retrans_hints_partial(tp); in tcp_simple_retransmit()
2880 if (!tp->lost_out) in tcp_simple_retransmit()
2883 if (tcp_is_reno(tp)) in tcp_simple_retransmit()
2884 tcp_limit_reno_sacked(tp); in tcp_simple_retransmit()
2886 tcp_verify_left_out(tp); in tcp_simple_retransmit()
2899 struct tcp_sock *tp = tcp_sk(sk); in tcp_enter_recovery() local
2905 if (tcp_is_reno(tp)) in tcp_enter_recovery()
2912 tp->prior_ssthresh = 0; in tcp_enter_recovery()
2913 tcp_init_undo(tp); in tcp_enter_recovery()
2917 tp->prior_ssthresh = tcp_current_ssthresh(sk); in tcp_enter_recovery()
2923 static void tcp_update_rto_time(struct tcp_sock *tp) in tcp_update_rto_time() argument
2925 if (tp->rto_stamp) { in tcp_update_rto_time()
2926 tp->total_rto_time += tcp_time_stamp_ms(tp) - tp->rto_stamp; in tcp_update_rto_time()
2927 tp->rto_stamp = 0; in tcp_update_rto_time()
2937 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_loss() local
2938 bool recovered = !before(tp->snd_una, tp->high_seq); in tcp_process_loss()
2940 if ((flag & FLAG_SND_UNA_ADVANCED || rcu_access_pointer(tp->fastopen_rsk)) && in tcp_process_loss()
2944 if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ in tcp_process_loss()
2952 if (after(tp->snd_nxt, tp->high_seq)) { in tcp_process_loss()
2954 tp->frto = 0; /* Step 3.a. loss was real */ in tcp_process_loss()
2956 tp->high_seq = tp->snd_nxt; in tcp_process_loss()
2962 after(tcp_wnd_end(tp), tp->snd_nxt)) { in tcp_process_loss()
2966 tp->frto = 0; in tcp_process_loss()
2975 if (tcp_is_reno(tp)) { in tcp_process_loss()
2979 if (after(tp->snd_nxt, tp->high_seq) && num_dupack) in tcp_process_loss()
2982 tcp_reset_reno_sack(tp); in tcp_process_loss()
2989 struct tcp_sock *tp = tcp_sk(sk); in tcp_force_fast_retransmit() local
2991 return after(tcp_highest_sack_seq(tp), in tcp_force_fast_retransmit()
2992 tp->snd_una + tp->reordering * tp->mss_cache); in tcp_force_fast_retransmit()
2999 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_partial() local
3001 if (tp->undo_marker && tcp_packet_delayed(tp)) { in tcp_try_undo_partial()
3012 if (tp->retrans_out) in tcp_try_undo_partial()
3016 tp->retrans_stamp = 0; in tcp_try_undo_partial()
3031 struct tcp_sock *tp = tcp_sk(sk); in tcp_identify_packet_loss() local
3036 if (unlikely(tcp_is_reno(tp))) { in tcp_identify_packet_loss()
3039 u32 prior_retrans = tp->retrans_out; in tcp_identify_packet_loss()
3043 if (prior_retrans > tp->retrans_out) in tcp_identify_packet_loss()
3064 struct tcp_sock *tp = tcp_sk(sk); in tcp_fastretrans_alert() local
3070 if (!tp->packets_out && tp->sacked_out) in tcp_fastretrans_alert()
3071 tp->sacked_out = 0; in tcp_fastretrans_alert()
3076 tp->prior_ssthresh = 0; in tcp_fastretrans_alert()
3083 tcp_verify_left_out(tp); in tcp_fastretrans_alert()
3088 WARN_ON(tp->retrans_out != 0 && !tp->syn_data); in tcp_fastretrans_alert()
3089 tp->retrans_stamp = 0; in tcp_fastretrans_alert()
3090 } else if (!before(tp->snd_una, tp->high_seq)) { in tcp_fastretrans_alert()
3095 if (tp->snd_una != tp->high_seq) { in tcp_fastretrans_alert()
3102 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
3103 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
3115 if (tcp_is_reno(tp)) in tcp_fastretrans_alert()
3136 tcp_update_rto_time(tp); in tcp_fastretrans_alert()
3144 if (tcp_is_reno(tp)) { in tcp_fastretrans_alert()
3146 tcp_reset_reno_sack(tp); in tcp_fastretrans_alert()
3162 tp->snd_una == tp->mtu_probe.probe_seq_start) { in tcp_fastretrans_alert()
3165 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1); in tcp_fastretrans_alert()
3183 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_rtt_min() local
3185 if ((flag & FLAG_ACK_MAYBE_DELAYED) && rtt_us > tcp_min_rtt(tp)) { in tcp_update_rtt_min()
3192 minmax_running_min(&tp->rtt_min, wlen, tcp_jiffies32, in tcp_update_rtt_min()
3200 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_rtt() local
3216 if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && in tcp_ack_update_rtt()
3217 tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED) in tcp_ack_update_rtt()
3218 seq_rtt_us = ca_rtt_us = tcp_rtt_tsopt_us(tp); in tcp_ack_update_rtt()
3264 struct tcp_sock *tp = tcp_sk(sk); in tcp_rearm_rto() local
3269 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_rearm_rto()
3272 if (!tp->packets_out) { in tcp_rearm_rto()
3300 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_acked() local
3303 BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); in tcp_tso_acked()
3306 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in tcp_tso_acked()
3346 struct tcp_sock *tp = tcp_sk(sk); in tcp_clean_rtx_queue() local
3347 u32 prior_sacked = tp->sacked_out; in tcp_clean_rtx_queue()
3348 u32 reord = tp->snd_nxt; /* lowest acked un-retx un-sacked seq */ in tcp_clean_rtx_queue()
3367 if (after(scb->end_seq, tp->snd_una)) { in tcp_clean_rtx_queue()
3369 !after(tp->snd_una, scb->seq)) in tcp_clean_rtx_queue()
3382 tp->retrans_out -= acked_pcount; in tcp_clean_rtx_queue()
3392 if (!after(scb->end_seq, tp->high_seq)) in tcp_clean_rtx_queue()
3397 tp->sacked_out -= acked_pcount; in tcp_clean_rtx_queue()
3398 } else if (tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3399 tcp_count_delivered(tp, acked_pcount, ece_ack); in tcp_clean_rtx_queue()
3400 if (!tcp_skb_spurious_retrans(tp, skb)) in tcp_clean_rtx_queue()
3401 tcp_rack_advance(tp, sacked, scb->end_seq, in tcp_clean_rtx_queue()
3405 tp->lost_out -= acked_pcount; in tcp_clean_rtx_queue()
3407 tp->packets_out -= acked_pcount; in tcp_clean_rtx_queue()
3422 tp->retrans_stamp = 0; in tcp_clean_rtx_queue()
3431 if (unlikely(skb == tp->retransmit_skb_hint)) in tcp_clean_rtx_queue()
3432 tp->retransmit_skb_hint = NULL; in tcp_clean_rtx_queue()
3433 if (unlikely(skb == tp->lost_skb_hint)) in tcp_clean_rtx_queue()
3434 tp->lost_skb_hint = NULL; in tcp_clean_rtx_queue()
3442 if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una))) in tcp_clean_rtx_queue()
3443 tp->snd_up = tp->snd_una; in tcp_clean_rtx_queue()
3452 seq_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, first_ackt); in tcp_clean_rtx_queue()
3453 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, last_ackt); in tcp_clean_rtx_queue()
3456 (tp->snd_una - prior_snd_una) < tp->mss_cache && in tcp_clean_rtx_queue()
3457 sack->rate->prior_delivered + 1 == tp->delivered && in tcp_clean_rtx_queue()
3467 sack_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->first_sackt); in tcp_clean_rtx_queue()
3468 ca_rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, sack->last_sackt); in tcp_clean_rtx_queue()
3476 !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) { in tcp_clean_rtx_queue()
3480 if (tcp_is_reno(tp)) { in tcp_clean_rtx_queue()
3498 delta = prior_sacked - tp->sacked_out; in tcp_clean_rtx_queue()
3499 tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta); in tcp_clean_rtx_queue()
3502 sack_rtt_us > tcp_stamp_us_delta(tp->tcp_mstamp, in tcp_clean_rtx_queue()
3515 sample.in_flight = tp->mss_cache * in tcp_clean_rtx_queue()
3516 (tp->delivered - sack->rate->prior_delivered); in tcp_clean_rtx_queue()
3521 WARN_ON((int)tp->sacked_out < 0); in tcp_clean_rtx_queue()
3522 WARN_ON((int)tp->lost_out < 0); in tcp_clean_rtx_queue()
3523 WARN_ON((int)tp->retrans_out < 0); in tcp_clean_rtx_queue()
3524 if (!tp->packets_out && tcp_is_sack(tp)) { in tcp_clean_rtx_queue()
3526 if (tp->lost_out) { in tcp_clean_rtx_queue()
3528 tp->lost_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3529 tp->lost_out = 0; in tcp_clean_rtx_queue()
3531 if (tp->sacked_out) { in tcp_clean_rtx_queue()
3533 tp->sacked_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3534 tp->sacked_out = 0; in tcp_clean_rtx_queue()
3536 if (tp->retrans_out) { in tcp_clean_rtx_queue()
3538 tp->retrans_out, icsk->icsk_ca_state); in tcp_clean_rtx_queue()
3539 tp->retrans_out = 0; in tcp_clean_rtx_queue()
3550 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_probe() local
3555 if (!after(TCP_SKB_CB(head)->end_seq, tcp_wnd_end(tp))) { in tcp_ack_probe()
3620 static inline bool tcp_may_update_window(const struct tcp_sock *tp, in tcp_may_update_window() argument
3624 return after(ack, tp->snd_una) || in tcp_may_update_window()
3625 after(ack_seq, tp->snd_wl1) || in tcp_may_update_window()
3626 (ack_seq == tp->snd_wl1 && (nwin > tp->snd_wnd || !nwin)); in tcp_may_update_window()
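tcp_may_update_window() is RFC 793's guard against stale window updates ("SND.WL1 < SEG.SEQ, or SND.WL1 = SEG.SEQ and SND.WL2 =< SEG.ACK"): take the window from any segment that ACKs new data or carries a newer sequence, and from a same-sequence segment only if it enlarges (or zeroes) the window. Restated standalone:

    #include <stdbool.h>
    #include <stdint.h>

    static bool may_update_window(uint32_t ack, uint32_t snd_una,
                                  uint32_t ack_seq, uint32_t snd_wl1,
                                  uint32_t nwin, uint32_t snd_wnd)
    {
        return (int32_t)(ack - snd_una) > 0 ||      /* ACKs new data   */
               (int32_t)(ack_seq - snd_wl1) > 0 ||  /* newer segment   */
               (ack_seq == snd_wl1 &&               /* same segment,   */
                (nwin > snd_wnd || !nwin));         /* bigger/zero win */
    }
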
3629 static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack) in tcp_snd_sne_update() argument
3637 ao = rcu_dereference_protected(tp->ao_info, in tcp_snd_sne_update()
3638 lockdep_sock_is_held((struct sock *)tp)); in tcp_snd_sne_update()
3639 if (ao && ack < tp->snd_una) { in tcp_snd_sne_update()
3641 trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne); in tcp_snd_sne_update()
3647 static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack) in tcp_snd_una_update() argument
3649 u32 delta = ack - tp->snd_una; in tcp_snd_una_update()
3651 sock_owned_by_me((struct sock *)tp); in tcp_snd_una_update()
3652 tp->bytes_acked += delta; in tcp_snd_una_update()
3653 tcp_snd_sne_update(tp, ack); in tcp_snd_una_update()
3654 tp->snd_una = ack; in tcp_snd_una_update()
3657 static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_sne_update() argument
3665 ao = rcu_dereference_protected(tp->ao_info, in tcp_rcv_sne_update()
3666 lockdep_sock_is_held((struct sock *)tp)); in tcp_rcv_sne_update()
3667 if (ao && seq < tp->rcv_nxt) { in tcp_rcv_sne_update()
3669 trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne); in tcp_rcv_sne_update()
3675 static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq) in tcp_rcv_nxt_update() argument
3677 u32 delta = seq - tp->rcv_nxt; in tcp_rcv_nxt_update()
3679 sock_owned_by_me((struct sock *)tp); in tcp_rcv_nxt_update()
3680 tp->bytes_received += delta; in tcp_rcv_nxt_update()
3681 tcp_rcv_sne_update(tp, seq); in tcp_rcv_nxt_update()
3682 WRITE_ONCE(tp->rcv_nxt, seq); in tcp_rcv_nxt_update()
3693 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack_update_window() local
3698 nwin <<= tp->rx_opt.snd_wscale; in tcp_ack_update_window()
3700 if (tcp_may_update_window(tp, ack, ack_seq, nwin)) { in tcp_ack_update_window()
3702 tcp_update_wl(tp, ack_seq); in tcp_ack_update_window()
3704 if (tp->snd_wnd != nwin) { in tcp_ack_update_window()
3705 tp->snd_wnd = nwin; in tcp_ack_update_window()
3710 tp->pred_flags = 0; in tcp_ack_update_window()
3716 if (nwin > tp->max_window) { in tcp_ack_update_window()
3717 tp->max_window = nwin; in tcp_ack_update_window()
3723 tcp_snd_una_update(tp, ack); in tcp_ack_update_window()
3773 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_challenge_ack() local
3780 &tp->last_oow_ack_time)) in tcp_send_challenge_ack()
3805 static void tcp_store_ts_recent(struct tcp_sock *tp) in tcp_store_ts_recent() argument
3807 tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; in tcp_store_ts_recent()
3808 tp->rx_opt.ts_recent_stamp = ktime_get_seconds(); in tcp_store_ts_recent()
3811 static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) in tcp_replace_ts_recent() argument
3813 if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { in tcp_replace_ts_recent()
3821 if (tcp_paws_check(&tp->rx_opt, 0)) in tcp_replace_ts_recent()
3822 tcp_store_ts_recent(tp); in tcp_replace_ts_recent()
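tcp_replace_ts_recent() refreshes the cached peer timestamp only for segments reaching the left edge of the window (line 3813 checks seq against rcv_wup), and only if tcp_paws_check() accepts the value. With a zero window the PAWS test is essentially one wrap-safe comparison plus a ~24-day staleness escape (TCP_PAWS_24DAYS); a sketch of that shape, as an approximation of the tcp.h helper:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAWS_24DAYS (60 * 60 * 24 * 24)   /* seconds */

    static bool paws_ok(uint32_t ts_recent, uint32_t rcv_tsval,
                        int64_t now_sec, int64_t ts_recent_stamp)
    {
        if ((int32_t)(ts_recent - rcv_tsval) <= 0)
            return true;           /* tsval is not older than the cache */
        /* A cache older than ~24 days may predate a tsval wrap: accept. */
        return now_sec >= ts_recent_stamp + PAWS_24DAYS;
    }
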
3831 struct tcp_sock *tp = tcp_sk(sk); in tcp_process_tlp_ack() local
3833 if (before(ack, tp->tlp_high_seq)) in tcp_process_tlp_ack()
3836 if (!tp->tlp_retrans) { in tcp_process_tlp_ack()
3838 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3841 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3842 } else if (after(ack, tp->tlp_high_seq)) { in tcp_process_tlp_ack()
3855 tp->tlp_high_seq = 0; in tcp_process_tlp_ack()
3873 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_recovery() local
3881 if (after(tp->snd_nxt, tp->high_seq)) in tcp_xmit_recovery()
3883 tp->frto = 0; in tcp_xmit_recovery()
3892 struct tcp_sock *tp = tcp_sk(sk); in tcp_newly_delivered() local
3895 delivered = tp->delivered - prior_delivered; in tcp_newly_delivered()
3907 struct tcp_sock *tp = tcp_sk(sk); in tcp_ack() local
3910 u32 prior_snd_una = tp->snd_una; in tcp_ack()
3911 bool is_sack_reneg = tp->is_sack_reneg; in tcp_ack()
3915 int prior_packets = tp->packets_out; in tcp_ack()
3916 u32 delivered = tp->delivered; in tcp_ack()
3917 u32 lost = tp->lost; in tcp_ack()
3935 max_window = min_t(u64, tp->max_window, tp->bytes_acked); in tcp_ack()
3948 if (after(ack, tp->snd_nxt)) in tcp_ack()
3962 prior_fack = tcp_is_sack(tp) ? tcp_highest_sack_seq(tp) : tp->snd_una; in tcp_ack()
3963 rs.prior_in_flight = tcp_packets_in_flight(tp); in tcp_ack()
3969 tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); in tcp_ack()
3977 tcp_update_wl(tp, ack_seq); in tcp_ack()
3978 tcp_snd_una_update(tp, ack); in tcp_ack()
3998 if (tcp_ecn_rcv_ecn_echo(tp, tcp_hdr(skb))) { in tcp_ack()
4004 tcp_count_delivered(tp, sack_state.sack_delivered, in tcp_ack()
4027 tp->rcv_tstamp = tcp_jiffies32; in tcp_ack()
4037 if (tp->tlp_high_seq) in tcp_ack()
4060 lost = tp->lost - lost; /* freshly marked lost */ in tcp_ack()
4080 if (tp->tlp_high_seq) in tcp_ack()
4314 static bool tcp_parse_aligned_timestamp(struct tcp_sock *tp, const struct tcphdr *th) in tcp_parse_aligned_timestamp() argument
4320 tp->rx_opt.saw_tstamp = 1; in tcp_parse_aligned_timestamp()
4322 tp->rx_opt.rcv_tsval = ntohl(*ptr); in tcp_parse_aligned_timestamp()
4325 tp->rx_opt.rcv_tsecr = ntohl(*ptr) - tp->tsoffset; in tcp_parse_aligned_timestamp()
4327 tp->rx_opt.rcv_tsecr = 0; in tcp_parse_aligned_timestamp()
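tcp_parse_aligned_timestamp() is a fast path for the overwhelmingly common option layout NOP, NOP, TIMESTAMP: those four bytes form one known 32-bit constant, so a single word compare replaces the full option-parsing loop before reading tsval/tsecr (lines 4322-4325). Restated standalone:

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htonl, ntohl */

    #define TCPOPT_NOP        1
    #define TCPOPT_TIMESTAMP  8
    #define TCPOLEN_TIMESTAMP 10

    static bool aligned_tsopt(const uint32_t *opt,
                              uint32_t *tsval, uint32_t *tsecr)
    {
        /* NOP(1) NOP(1) kind(8) len(10), packed big-endian as on the wire */
        if (*opt != htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                          (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
            return false;
        *tsval = ntohl(opt[1]);
        *tsecr = ntohl(opt[2]);
        return true;
    }
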
4338 const struct tcphdr *th, struct tcp_sock *tp) in tcp_fast_parse_options() argument
4344 tp->rx_opt.saw_tstamp = 0; in tcp_fast_parse_options()
4346 } else if (tp->rx_opt.tstamp_ok && in tcp_fast_parse_options()
4348 if (tcp_parse_aligned_timestamp(tp, th)) in tcp_fast_parse_options()
4352 tcp_parse_options(net, skb, &tp->rx_opt, 1, NULL); in tcp_fast_parse_options()
4353 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_fast_parse_options()
4354 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_fast_parse_options()
4455 const struct tcp_sock *tp = tcp_sk(sk); in tcp_disordered_ack() local
4461 (th->ack && seq == TCP_SKB_CB(skb)->end_seq && seq == tp->rcv_nxt) && in tcp_disordered_ack()
4464 ack == tp->snd_una && in tcp_disordered_ack()
4467 !tcp_may_update_window(tp, ack, seq, ntohs(th->window) << tp->rx_opt.snd_wscale) && in tcp_disordered_ack()
4470 (s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) <= in tcp_disordered_ack()
4477 const struct tcp_sock *tp = tcp_sk(sk); in tcp_paws_discard() local
4479 return !tcp_paws_check(&tp->rx_opt, TCP_PAWS_WINDOW) && in tcp_paws_discard()
4496 static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp, in tcp_sequence() argument
4499 if (before(end_seq, tp->rcv_wup)) in tcp_sequence()
4502 if (after(seq, tp->rcv_nxt + tcp_receive_window(tp))) in tcp_sequence()
4568 struct tcp_sock *tp = tcp_sk(sk); in tcp_fin() local
4618 skb_rbtree_purge(&tp->out_of_order_queue); in tcp_fin()
4619 if (tcp_is_sack(tp)) in tcp_fin()
4620 tcp_sack_reset(&tp->rx_opt); in tcp_fin()
4649 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_set() local
4651 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_dsack_set()
4654 if (before(seq, tp->rcv_nxt)) in tcp_dsack_set()
4661 tp->rx_opt.dsack = 1; in tcp_dsack_set()
4662 tp->duplicate_sack[0].start_seq = seq; in tcp_dsack_set()
4663 tp->duplicate_sack[0].end_seq = end_seq; in tcp_dsack_set()
4669 struct tcp_sock *tp = tcp_sk(sk); in tcp_dsack_extend() local
4671 if (!tp->rx_opt.dsack) in tcp_dsack_extend()
4674 tcp_sack_extend(tp->duplicate_sack, seq, end_seq); in tcp_dsack_extend()
4702 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_dupack() local
4705 before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_send_dupack()
4709 if (tcp_is_sack(tp) && READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_dsack)) { in tcp_send_dupack()
4713 if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) in tcp_send_dupack()
4714 end_seq = tp->rcv_nxt; in tcp_send_dupack()
4725 static void tcp_sack_maybe_coalesce(struct tcp_sock *tp) in tcp_sack_maybe_coalesce() argument
4728 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_maybe_coalesce()
4734 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks;) { in tcp_sack_maybe_coalesce()
4741 tp->rx_opt.num_sacks--; in tcp_sack_maybe_coalesce()
4742 for (i = this_sack; i < tp->rx_opt.num_sacks; i++) in tcp_sack_maybe_coalesce()
4753 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_compress_send_ack() local
4755 if (!tp->compressed_ack) in tcp_sack_compress_send_ack()
4758 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_sack_compress_send_ack()
4766 tp->compressed_ack - 1); in tcp_sack_compress_send_ack()
4768 tp->compressed_ack = 0; in tcp_sack_compress_send_ack()
4780 struct tcp_sock *tp = tcp_sk(sk); in tcp_sack_new_ofo_skb() local
4781 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_new_ofo_skb()
4782 int cur_sacks = tp->rx_opt.num_sacks; in tcp_sack_new_ofo_skb()
4796 tcp_sack_maybe_coalesce(tp); in tcp_sack_new_ofo_skb()
4812 tp->rx_opt.num_sacks--; in tcp_sack_new_ofo_skb()
4822 tp->rx_opt.num_sacks++; in tcp_sack_new_ofo_skb()
4827 static void tcp_sack_remove(struct tcp_sock *tp) in tcp_sack_remove() argument
4829 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_sack_remove()
4830 int num_sacks = tp->rx_opt.num_sacks; in tcp_sack_remove()
4834 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_sack_remove()
4835 tp->rx_opt.num_sacks = 0; in tcp_sack_remove()
4841 if (!before(tp->rcv_nxt, sp->start_seq)) { in tcp_sack_remove()
4845 WARN_ON(before(tp->rcv_nxt, sp->end_seq)); in tcp_sack_remove()
4849 tp->selective_acks[i-1] = tp->selective_acks[i]; in tcp_sack_remove()
4856 tp->rx_opt.num_sacks = num_sacks; in tcp_sack_remove()
4936 struct tcp_sock *tp = tcp_sk(sk); in tcp_ofo_queue() local
4937 __u32 dsack_high = tp->rcv_nxt; in tcp_ofo_queue()
4942 p = rb_first(&tp->out_of_order_queue); in tcp_ofo_queue()
4945 if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) in tcp_ofo_queue()
4955 rb_erase(&skb->rbnode, &tp->out_of_order_queue); in tcp_ofo_queue()
4957 if (unlikely(!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))) { in tcp_ofo_queue()
4964 tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq); in tcp_ofo_queue()
5003 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue_ofo() local
5020 tp->pred_flags = 0; in tcp_data_queue_ofo()
5023 tp->rcv_ooopack += max_t(u16, 1, skb_shinfo(skb)->gso_segs); in tcp_data_queue_ofo()
5028 p = &tp->out_of_order_queue.rb_node; in tcp_data_queue_ofo()
5029 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue_ofo()
5031 if (tcp_is_sack(tp)) { in tcp_data_queue_ofo()
5032 tp->rx_opt.num_sacks = 1; in tcp_data_queue_ofo()
5033 tp->selective_acks[0].start_seq = seq; in tcp_data_queue_ofo()
5034 tp->selective_acks[0].end_seq = end_seq; in tcp_data_queue_ofo()
5037 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
5038 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
5045 if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb, in tcp_data_queue_ofo()
5051 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
5058 if (!before(seq, TCP_SKB_CB(tp->ooo_last_skb)->end_seq)) { in tcp_data_queue_ofo()
5059 parent = &tp->ooo_last_skb->rbnode; in tcp_data_queue_ofo()
5092 &tp->out_of_order_queue); in tcp_data_queue_ofo()
5111 rb_insert_color(&skb->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
5123 rb_erase(&skb1->rbnode, &tp->out_of_order_queue); in tcp_data_queue_ofo()
5131 tp->ooo_last_skb = skb; in tcp_data_queue_ofo()
5134 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
5141 if (tcp_is_sack(tp)) in tcp_data_queue_ofo()
5225 struct tcp_sock *tp = tcp_sk(sk); in tcp_data_queue() local
5246 tp->rx_opt.dsack = 0; in tcp_data_queue()
5252 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) { in tcp_data_queue()
5253 if (tcp_receive_window(tp) == 0) { in tcp_data_queue()
5292 if (!RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in tcp_data_queue()
5298 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_data_queue()
5302 if (tp->rx_opt.num_sacks) in tcp_data_queue()
5303 tcp_sack_remove(tp); in tcp_data_queue()
5314 if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) { in tcp_data_queue()
5331 tp->rcv_nxt + tcp_receive_window(tp))) { in tcp_data_queue()
5336 if (before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_data_queue()
5338 tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, tp->rcv_nxt); in tcp_data_queue()
5343 if (!tcp_receive_window(tp)) { in tcp_data_queue()
5515 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_ofo_queue() local
5520 skb = skb_rb_first(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5523 tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue); in tcp_collapse_ofo_queue()
5542 tcp_collapse(sk, NULL, &tp->out_of_order_queue, in tcp_collapse_ofo_queue()
5575 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_ofo_queue() local
5580 if (RB_EMPTY_ROOT(&tp->out_of_order_queue)) in tcp_prune_ofo_queue()
5584 node = &tp->ooo_last_skb->rbnode; in tcp_prune_ofo_queue()
5594 rb_erase(node, &tp->out_of_order_queue); in tcp_prune_ofo_queue()
5597 tp->ooo_last_skb = rb_to_skb(prev); in tcp_prune_ofo_queue()
5614 if (tp->rx_opt.sack_ok) in tcp_prune_ofo_queue()
5615 tcp_sack_reset(&tp->rx_opt); in tcp_prune_ofo_queue()
5629 struct tcp_sock *tp = tcp_sk(sk); in tcp_prune_queue() local
5646 tp->copied_seq, tp->rcv_nxt); in tcp_prune_queue()
5666 tp->pred_flags = 0; in tcp_prune_queue()
5672 const struct tcp_sock *tp = tcp_sk(sk); in tcp_should_expand_sndbuf() local
5699 if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)) in tcp_should_expand_sndbuf()
5707 struct tcp_sock *tp = tcp_sk(sk); in tcp_new_space() local
5711 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_new_space()
5750 struct tcp_sock *tp = tcp_sk(sk); in __tcp_ack_snd_check() local
5754 if (((tp->rcv_nxt - tp->rcv_wup) > inet_csk(sk)->icsk_ack.rcv_mss && in __tcp_ack_snd_check()
5760 (tp->rcv_nxt - tp->copied_seq < sk->sk_rcvlowat || in __tcp_ack_snd_check()
5761 __tcp_select_window(sk) >= tp->rcv_wnd)) || in __tcp_ack_snd_check()
5779 if (!ofo_possible || RB_EMPTY_ROOT(&tp->out_of_order_queue)) { in __tcp_ack_snd_check()
5784 if (!tcp_is_sack(tp) || in __tcp_ack_snd_check()
5785 tp->compressed_ack >= READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_comp_sack_nr)) in __tcp_ack_snd_check()
5788 if (tp->compressed_ack_rcv_nxt != tp->rcv_nxt) { in __tcp_ack_snd_check()
5789 tp->compressed_ack_rcv_nxt = tp->rcv_nxt; in __tcp_ack_snd_check()
5790 tp->dup_ack_counter = 0; in __tcp_ack_snd_check()
5792 if (tp->dup_ack_counter < TCP_FASTRETRANS_THRESH) { in __tcp_ack_snd_check()
5793 tp->dup_ack_counter++; in __tcp_ack_snd_check()
5796 tp->compressed_ack++; in __tcp_ack_snd_check()
5797 if (hrtimer_is_queued(&tp->compressed_ack_timer)) in __tcp_ack_snd_check()
5802 rtt = tp->rcv_rtt_est.rtt_us; in __tcp_ack_snd_check()
5803 if (tp->srtt_us && tp->srtt_us < rtt) in __tcp_ack_snd_check()
5804 rtt = tp->srtt_us; in __tcp_ack_snd_check()
5810 hrtimer_start_range_ns(&tp->compressed_ack_timer, ns_to_ktime(delay), in __tcp_ack_snd_check()
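__tcp_ack_snd_check() sends an ACK immediately for window updates, out-of-order data, or a full MSS of new data (lines 5754-5761); otherwise, with SACK, it compresses ACKs behind an hrtimer once the first TCP_FASTRETRANS_THRESH duplicates have gone out normally (lines 5792-5796). The armed delay is about 5% of the smaller of the two RTT estimates (lines 5802-5804), capped by the tcp_comp_sack_delay_ns sysctl (1 ms by default, to my understanding). A sketch of the delay arithmetic, rtt in 1/8-us fixed point:

    #include <stdint.h>

    #define NSEC_PER_USEC 1000UL

    static unsigned long comp_sack_delay_ns(uint32_t rtt_x8_us,
                                            unsigned long sysctl_cap_ns)
    {
        /* (NSEC_PER_USEC >> 3) cancels the <<3 fixed point; /20 = 5% RTT */
        unsigned long delay = rtt_x8_us * (NSEC_PER_USEC >> 3) / 20;

        return delay < sysctl_cap_ns ? delay : sysctl_cap_ns;
    }
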
5836 struct tcp_sock *tp = tcp_sk(sk); in tcp_check_urg() local
5844 if (after(tp->copied_seq, ptr)) in tcp_check_urg()
5857 if (before(ptr, tp->rcv_nxt)) in tcp_check_urg()
5861 if (tp->urg_data && !after(ptr, tp->urg_seq)) in tcp_check_urg()
5882 if (tp->urg_seq == tp->copied_seq && tp->urg_data && in tcp_check_urg()
5883 !sock_flag(sk, SOCK_URGINLINE) && tp->copied_seq != tp->rcv_nxt) { in tcp_check_urg()
5885 tp->copied_seq++; in tcp_check_urg()
5886 if (skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq)) { in tcp_check_urg()
5892 WRITE_ONCE(tp->urg_data, TCP_URG_NOTYET); in tcp_check_urg()
5893 WRITE_ONCE(tp->urg_seq, ptr); in tcp_check_urg()
5896 tp->pred_flags = 0; in tcp_check_urg()
5902 struct tcp_sock *tp = tcp_sk(sk); in tcp_urg() local
5909 if (unlikely(tp->urg_data == TCP_URG_NOTYET)) { in tcp_urg()
5910 u32 ptr = tp->urg_seq - ntohl(th->seq) + (th->doff * 4) - in tcp_urg()
5918 WRITE_ONCE(tp->urg_data, TCP_URG_VALID | tmp); in tcp_urg()
5935 const struct tcp_sock *tp = tcp_sk(sk); in tcp_reset_check() local
5937 return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) && in tcp_reset_check()
5948 struct tcp_sock *tp = tcp_sk(sk); in tcp_validate_incoming() local
5952 if (tcp_fast_parse_options(sock_net(sk), skb, th, tp) && in tcp_validate_incoming()
5953 tp->rx_opt.saw_tstamp && in tcp_validate_incoming()
5961 &tp->last_oow_ack_time)) in tcp_validate_incoming()
5970 reason = tcp_sequence(tp, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq); in tcp_validate_incoming()
5983 &tp->last_oow_ack_time)) in tcp_validate_incoming()
6002 if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt || in tcp_validate_incoming()
6006 if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) { in tcp_validate_incoming()
6007 struct tcp_sack_block *sp = &tp->selective_acks[0]; in tcp_validate_incoming()
6011 for (this_sack = 1; this_sack < tp->rx_opt.num_sacks; in tcp_validate_incoming()
6026 if (tp->syn_fastopen && !tp->data_segs_in && in tcp_validate_incoming()
6042 TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt && in tcp_validate_incoming()
6043 TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt) in tcp_validate_incoming()
6096 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_established() local
6102 tcp_mstamp_refresh(tp); in tcp_rcv_established()
6120 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_established()
6131 if ((tcp_flag_word(th) & TCP_HP_BITS) == tp->pred_flags && in tcp_rcv_established()
6132 TCP_SKB_CB(skb)->seq == tp->rcv_nxt && in tcp_rcv_established()
6133 !after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_established()
6134 int tcp_header_len = tp->tcp_header_len; in tcp_rcv_established()
6144 if (!tcp_parse_aligned_timestamp(tp, th)) in tcp_rcv_established()
6148 if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) < 0) in tcp_rcv_established()
6167 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
6168 tcp_store_ts_recent(tp); in tcp_rcv_established()
6180 tp->rcv_rtt_last_tsecr = tp->rx_opt.rcv_tsecr; in tcp_rcv_established()
6203 tp->rcv_nxt == tp->rcv_wup) in tcp_rcv_established()
6204 tcp_store_ts_recent(tp); in tcp_rcv_established()
6217 if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) { in tcp_rcv_established()
6224 tcp_update_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_established()
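The fast path at line 6131 is Van Jacobson's header prediction: pred_flags is a precomputed copy of the fourth 32-bit word of the expected TCP header (data offset, ACK flag, unscaled window), so one masked compare decides whether a segment is the in-order, no-surprises case. The builder, __tcp_fast_path_on(), lives in include/net/tcp.h; its construction restated here as a sketch (tcp_header_len is in bytes, so <<26 places len/4 into the 4-bit doff field):

    #include <stdint.h>
    #include <arpa/inet.h>   /* htonl */

    #define TCP_FLAG_ACK 0x00100000U   /* host-order value of the ACK bit */

    static uint32_t make_pred_flags(uint32_t tcp_header_len_bytes,
                                    uint32_t snd_wnd_unscaled)
    {
        return htonl((tcp_header_len_bytes << 26) | TCP_FLAG_ACK |
                     snd_wnd_unscaled);
    }
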
6284 struct tcp_sock *tp = tcp_sk(sk); in tcp_init_transfer() local
6296 if (tp->total_retrans > 1 && tp->undo_marker) in tcp_init_transfer()
6297 tcp_snd_cwnd_set(tp, 1); in tcp_init_transfer()
6299 tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk))); in tcp_init_transfer()
6300 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_init_transfer()
6311 struct tcp_sock *tp = tcp_sk(sk); in tcp_finish_connect() local
6329 tp->lsndtime = tcp_jiffies32; in tcp_finish_connect()
6332 inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tp)); in tcp_finish_connect()
6334 if (!tp->rx_opt.snd_wscale) in tcp_finish_connect()
6335 __tcp_fast_path_on(tp, tp->snd_wnd); in tcp_finish_connect()
6337 tp->pred_flags = 0; in tcp_finish_connect()
6343 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_fastopen_synack() local
6344 struct sk_buff *data = tp->syn_data ? tcp_rtx_queue_head(sk) : NULL; in tcp_rcv_fastopen_synack()
6345 u16 mss = tp->rx_opt.mss_clamp, try_exp = 0; in tcp_rcv_fastopen_synack()
6348 if (mss == tp->rx_opt.user_mss) { in tcp_rcv_fastopen_synack()
6358 if (!tp->syn_fastopen) { in tcp_rcv_fastopen_synack()
6361 } else if (tp->total_retrans) { in tcp_rcv_fastopen_synack()
6368 } else if (cookie->len < 0 && !tp->syn_data) { in tcp_rcv_fastopen_synack()
6373 try_exp = tp->syn_fastopen_exp ? 2 : 1; in tcp_rcv_fastopen_synack()
6379 if (tp->total_retrans) in tcp_rcv_fastopen_synack()
6380 tp->fastopen_client_fail = TFO_SYN_RETRANSMITTED; in tcp_rcv_fastopen_synack()
6382 tp->fastopen_client_fail = TFO_DATA_NOT_ACKED; in tcp_rcv_fastopen_synack()
6390 tp->syn_data_acked = tp->syn_data; in tcp_rcv_fastopen_synack()
6391 if (tp->syn_data_acked) { in tcp_rcv_fastopen_synack()
6394 if (tp->delivered > 1) in tcp_rcv_fastopen_synack()
6395 --tp->delivered; in tcp_rcv_fastopen_synack()
6403 static void smc_check_reset_syn(struct tcp_sock *tp) in smc_check_reset_syn() argument
6407 if (tp->syn_smc && !tp->rx_opt.smc_ok) in smc_check_reset_syn()
6408 tp->syn_smc = 0; in smc_check_reset_syn()
6415 struct tcp_sock *tp = tcp_sk(sk); in tcp_try_undo_spurious_syn() local
6422 syn_stamp = tp->retrans_stamp; in tcp_try_undo_spurious_syn()
6423 if (tp->undo_marker && syn_stamp && tp->rx_opt.saw_tstamp && in tcp_try_undo_spurious_syn()
6424 syn_stamp == tp->rx_opt.rcv_tsecr) in tcp_try_undo_spurious_syn()
6425 tp->undo_marker = 0; in tcp_try_undo_spurious_syn()
6432 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synsent_state_process() local
6434 int saved_clamp = tp->rx_opt.mss_clamp; in tcp_rcv_synsent_state_process()
6438 tcp_parse_options(sock_net(sk), skb, &tp->rx_opt, 0, &foc); in tcp_rcv_synsent_state_process()
6439 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) in tcp_rcv_synsent_state_process()
6440 tp->rx_opt.rcv_tsecr -= tp->tsoffset; in tcp_rcv_synsent_state_process()
6451 if (!after(TCP_SKB_CB(skb)->ack_seq, tp->snd_una) || in tcp_rcv_synsent_state_process()
6452 after(TCP_SKB_CB(skb)->ack_seq, tp->snd_nxt)) { in tcp_rcv_synsent_state_process()
6462 if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && in tcp_rcv_synsent_state_process()
6463 !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp, in tcp_rcv_synsent_state_process()
6464 tcp_time_stamp_ts(tp))) { in tcp_rcv_synsent_state_process()
6504 tcp_ecn_rcv_synack(tp, th); in tcp_rcv_synsent_state_process()
6506 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_synsent_state_process()
6513 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6514 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6519 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6521 if (!tp->rx_opt.wscale_ok) { in tcp_rcv_synsent_state_process()
6522 tp->rx_opt.snd_wscale = tp->rx_opt.rcv_wscale = 0; in tcp_rcv_synsent_state_process()
6523 WRITE_ONCE(tp->window_clamp, in tcp_rcv_synsent_state_process()
6524 min(tp->window_clamp, 65535U)); in tcp_rcv_synsent_state_process()
6527 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6528 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6529 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6531 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_synsent_state_process()
6532 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6534 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6543 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6545 smc_check_reset_syn(tp); in tcp_rcv_synsent_state_process()
6551 fastopen_fail = (tp->syn_fastopen || tp->syn_data) && in tcp_rcv_synsent_state_process()
6593 if (tp->rx_opt.ts_recent_stamp && tp->rx_opt.saw_tstamp && in tcp_rcv_synsent_state_process()
6594 tcp_paws_reject(&tp->rx_opt, 0)) { in tcp_rcv_synsent_state_process()
6606 ao = rcu_dereference_protected(tp->ao_info, in tcp_rcv_synsent_state_process()
6615 if (tp->rx_opt.saw_tstamp) { in tcp_rcv_synsent_state_process()
6616 tp->rx_opt.tstamp_ok = 1; in tcp_rcv_synsent_state_process()
6617 tcp_store_ts_recent(tp); in tcp_rcv_synsent_state_process()
6618 tp->tcp_header_len = in tcp_rcv_synsent_state_process()
6621 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_rcv_synsent_state_process()
6624 WRITE_ONCE(tp->rcv_nxt, TCP_SKB_CB(skb)->seq + 1); in tcp_rcv_synsent_state_process()
6625 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_synsent_state_process()
6626 tp->rcv_wup = TCP_SKB_CB(skb)->seq + 1; in tcp_rcv_synsent_state_process()
6631 tp->snd_wnd = ntohs(th->window); in tcp_rcv_synsent_state_process()
6632 tp->snd_wl1 = TCP_SKB_CB(skb)->seq; in tcp_rcv_synsent_state_process()
6633 tp->max_window = tp->snd_wnd; in tcp_rcv_synsent_state_process()
6635 tcp_ecn_rcv_syn(tp, th); in tcp_rcv_synsent_state_process()
6664 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6665 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
6670 tcp_clear_options(&tp->rx_opt); in tcp_rcv_synsent_state_process()
6671 tp->rx_opt.mss_clamp = saved_clamp; in tcp_rcv_synsent_state_process()
6678 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_synrecv_state_fastopen() local
6684 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss && !tp->packets_out) in tcp_rcv_synrecv_state_fastopen()
6687 tcp_update_rto_time(tp); in tcp_rcv_synrecv_state_fastopen()
6702 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_rcv_synrecv_state_fastopen()
6727 struct tcp_sock *tp = tcp_sk(sk); in tcp_rcv_state_process() local
6768 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6769 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6781 tcp_mstamp_refresh(tp); in tcp_rcv_state_process()
6782 tp->rx_opt.saw_tstamp = 0; in tcp_rcv_state_process()
6783 req = rcu_dereference_protected(tp->fastopen_rsk, in tcp_rcv_state_process()
6826 tp->delivered++; /* SYN-ACK delivery isn't tracked in tcp_ack */ in tcp_rcv_state_process()
6827 if (!tp->srtt_us) in tcp_rcv_state_process()
6834 tp->retrans_stamp = 0; in tcp_rcv_state_process()
6837 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_rcv_state_process()
6851 tp->snd_una = TCP_SKB_CB(skb)->ack_seq; in tcp_rcv_state_process()
6852 tp->snd_wnd = ntohs(th->window) << tp->rx_opt.snd_wscale; in tcp_rcv_state_process()
6853 tcp_init_wl(tp, TCP_SKB_CB(skb)->seq); in tcp_rcv_state_process()
6855 if (tp->rx_opt.tstamp_ok) in tcp_rcv_state_process()
6856 tp->advmss -= TCPOLEN_TSTAMP_ALIGNED; in tcp_rcv_state_process()
6862 tp->lsndtime = tcp_jiffies32; in tcp_rcv_state_process()
6865 tcp_fast_path_on(tp); in tcp_rcv_state_process()
6876 if (tp->snd_una != tp->write_seq) in tcp_rcv_state_process()
6890 if (READ_ONCE(tp->linger2) < 0) { in tcp_rcv_state_process()
6896 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
6898 if (tp->syn_fastopen && th->fin) in tcp_rcv_state_process()
6924 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6931 if (tp->snd_una == tp->write_seq) { in tcp_rcv_state_process()
6947 if (!before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) { in tcp_rcv_state_process()
6964 after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) { in tcp_rcv_state_process()
7154 struct tcp_sock *tp = tcp_sk(sk); in tcp_get_syncookie_mss() local
7169 mss = tcp_parse_mss_option(th, tp->rx_opt.user_mss); in tcp_get_syncookie_mss()
7183 struct tcp_sock *tp = tcp_sk(sk); in tcp_conn_request() local
7234 tmp_opt.user_mss = tp->rx_opt.user_mss; in tcp_conn_request()