Lines matching references to tp (the struct tcp_sock * pointer) in net/ipv4/tcp_output.c
55 void tcp_mstamp_refresh(struct tcp_sock *tp) in tcp_mstamp_refresh() argument
59 tp->tcp_clock_cache = val; in tcp_mstamp_refresh()
60 tp->tcp_mstamp = div_u64(val, NSEC_PER_USEC); in tcp_mstamp_refresh()
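tcp_mstamp_refresh() samples the monotonic clock once per event, caches the raw nanosecond value in tp->tcp_clock_cache, and derives the microsecond stamp tp->tcp_mstamp by division (div_u64, since 64-bit division is not free in the kernel), so the rest of the call path reuses the cached values instead of re-reading the clock. A minimal userspace sketch of the same caching pattern, assuming POSIX clock_gettime(); the struct and function names below are illustrative, not the kernel's:

#include <stdint.h>
#include <time.h>

#define NSEC_PER_USEC 1000ULL

struct clock_cache {
	uint64_t clock_cache_ns;	/* last sampled monotonic time, ns   */
	uint64_t mstamp_us;		/* the same instant, in microseconds */
};

/* Sample the clock once, cache both granularities. */
static void mstamp_refresh(struct clock_cache *c)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	c->clock_cache_ns = (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
	c->mstamp_us = c->clock_cache_ns / NSEC_PER_USEC;
}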
70 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_new_data_sent() local
71 unsigned int prior_packets = tp->packets_out; in tcp_event_new_data_sent()
73 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(skb)->end_seq); in tcp_event_new_data_sent()
78 if (tp->highest_sack == NULL) in tcp_event_new_data_sent()
79 tp->highest_sack = skb; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
99 const struct tcp_sock *tp = tcp_sk(sk); in tcp_acceptable_seq() local
101 if (!before(tcp_wnd_end(tp), tp->snd_nxt) || in tcp_acceptable_seq()
102 (tp->rx_opt.wscale_ok && in tcp_acceptable_seq()
103 ((tp->snd_nxt - tcp_wnd_end(tp)) < (1 << tp->rx_opt.rcv_wscale)))) in tcp_acceptable_seq()
104 return tp->snd_nxt; in tcp_acceptable_seq()
106 return tcp_wnd_end(tp); in tcp_acceptable_seq()
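tcp_acceptable_seq() picks the sequence number to put in a bare ACK: tp->snd_nxt when it is not beyond the advertised window, or when it overshoots by less than one window-scale granule (an amount the peer's scaled window cannot express anyway); otherwise the right edge of the window. A standalone sketch using the kernel's wrap-safe before() idiom; names are illustrative:

#include <stdint.h>
#include <stdbool.h>

/* Wrap-safe sequence comparison, as in the kernel. */
static bool before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static uint32_t acceptable_seq(uint32_t snd_nxt, uint32_t wnd_end,
			       bool wscale_ok, uint8_t rcv_wscale)
{
	if (!before(wnd_end, snd_nxt) ||
	    (wscale_ok && (snd_nxt - wnd_end) < (1u << rcv_wscale)))
		return snd_nxt;	/* inside, or negligibly past, the window */
	return wnd_end;		/* clamp to the right edge */
}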
125 struct tcp_sock *tp = tcp_sk(sk); in tcp_advertise_mss() local
127 int mss = tp->advmss; in tcp_advertise_mss()
134 tp->advmss = mss; in tcp_advertise_mss()
146 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_restart() local
147 u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_restart()
148 u32 cwnd = tcp_snd_cwnd(tp); in tcp_cwnd_restart()
152 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_restart()
157 tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd)); in tcp_cwnd_restart()
158 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_restart()
159 tp->snd_cwnd_used = 0; in tcp_cwnd_restart()
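tcp_cwnd_restart() shrinks the congestion window after an idle period. The halving loop itself does not reference tp and so is absent from this listing, but the effect around lines 146-159 is: halve cwnd once per RTO of idle time, never ending below the restart window, then stamp the result. A sketch of that decay, with illustrative names:

#include <stdint.h>

/* Decay cwnd after 'idle' jiffies of silence: one halving per RTO,
 * clamped so the result is at least restart_cwnd. */
static uint32_t cwnd_after_idle(uint32_t cwnd, uint32_t restart_cwnd,
				uint32_t idle, uint32_t rto)
{
	int64_t delta = idle;

	while ((delta -= rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	return cwnd > restart_cwnd ? cwnd : restart_cwnd;
}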
163 static void tcp_event_data_sent(struct tcp_sock *tp, in tcp_event_data_sent() argument
169 if (tcp_packets_in_flight(tp) == 0) in tcp_event_data_sent()
172 tp->lsndtime = now; in tcp_event_data_sent()
184 struct tcp_sock *tp = tcp_sk(sk); in tcp_event_ack_sent() local
186 if (unlikely(tp->compressed_ack)) { in tcp_event_ack_sent()
188 tp->compressed_ack); in tcp_event_ack_sent()
189 tp->compressed_ack = 0; in tcp_event_ack_sent()
190 if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1) in tcp_event_ack_sent()
194 if (unlikely(rcv_nxt != tp->rcv_nxt)) in tcp_event_ack_sent()
262 struct tcp_sock *tp = tcp_sk(sk); in tcp_select_window() local
264 u32 old_win = tp->rcv_wnd; in tcp_select_window()
274 cur_win = tcp_receive_window(tp); in tcp_select_window()
284 if (!READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) || !tp->rx_opt.rcv_wscale) { in tcp_select_window()
288 new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale); in tcp_select_window()
292 tp->rcv_wnd = new_win; in tcp_select_window()
293 tp->rcv_wup = tp->rcv_nxt; in tcp_select_window()
298 if (!tp->rx_opt.rcv_wscale && in tcp_select_window()
302 new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale)); in tcp_select_window()
305 new_win >>= tp->rx_opt.rcv_wscale; in tcp_select_window()
309 tp->pred_flags = 0; in tcp_select_window()
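tcp_select_window() chooses the window field for an outgoing segment: by default it refuses to shrink a window the peer has already been promised (unless tcp_shrink_window is enabled and scaling is in use), aligns the value to a multiple of 1 << rcv_wscale, caps the scaled value at 65535, and shifts right by the scale factor, since the header carries the scaled number; advertising zero also clears tp->pred_flags to disable header prediction (line 309). A condensed sketch of the arithmetic, without the shrink-window path:

#include <stdint.h>

#define ALIGN_TO(x, a)	(((x) + (a) - 1) & ~((a) - 1))

/* Value to place in th->window (already scaled down). */
static uint16_t select_window(uint32_t new_win, uint32_t cur_win,
			      uint8_t rcv_wscale)
{
	/* Never advertise less than what was already committed. */
	if (new_win < cur_win)
		new_win = ALIGN_TO(cur_win, 1u << rcv_wscale);

	/* The 16-bit field caps the scaled window. */
	if (new_win > (65535u << rcv_wscale))
		new_win = 65535u << rcv_wscale;

	return (uint16_t)(new_win >> rcv_wscale);
}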
322 const struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_synack() local
325 if (!(tp->ecn_flags & TCP_ECN_OK)) in tcp_ecn_send_synack()
335 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send_syn() local
347 tp->ecn_flags = 0; in tcp_ecn_send_syn()
351 tp->ecn_flags = TCP_ECN_OK; in tcp_ecn_send_syn()
379 struct tcp_sock *tp = tcp_sk(sk); in tcp_ecn_send() local
381 if (tp->ecn_flags & TCP_ECN_OK) { in tcp_ecn_send()
384 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in tcp_ecn_send()
386 if (tp->ecn_flags & TCP_ECN_QUEUE_CWR) { in tcp_ecn_send()
387 tp->ecn_flags &= ~TCP_ECN_QUEUE_CWR; in tcp_ecn_send()
395 if (tp->ecn_flags & TCP_ECN_DEMAND_CWR) in tcp_ecn_send()
417 static inline bool tcp_urg_mode(const struct tcp_sock *tp) in tcp_urg_mode() argument
419 return tp->snd_una != tp->snd_up; in tcp_urg_mode()
460 struct tcp_sock *tp, in mptcp_options_write() argument
465 mptcp_write_options(th, ptr, tp, &opts->mptcp); in mptcp_options_write()
608 static __be32 *process_tcp_ao_options(struct tcp_sock *tp, in process_tcp_ao_options() argument
626 ao_info = rcu_dereference_check(tp->ao_info, in process_tcp_ao_options()
627 lockdep_sock_is_held(&tp->inet_conn.icsk_inet.sk)); in process_tcp_ao_options()
659 static void tcp_options_write(struct tcphdr *th, struct tcp_sock *tp, in tcp_options_write() argument
674 ptr = process_tcp_ao_options(tp, tcprsk, opts, key, ptr); in tcp_options_write()
714 struct tcp_sack_block *sp = tp->rx_opt.dsack ? in tcp_options_write()
715 tp->duplicate_sack : tp->selective_acks; in tcp_options_write()
730 tp->rx_opt.dsack = 0; in tcp_options_write()
759 mptcp_options_write(th, ptr, tp, opts); in tcp_options_write()
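tcp_options_write() emits the option block as 32-bit big-endian words following the header, each option packed kind/length/value and padded with NOPs to preserve 4-byte alignment; the SACK branch (lines 714-730) walks either the DSACK block or the selective-ack array the same way. This mirrors how the kernel packs the RFC 7323 timestamp option (NOP, NOP, kind 8, length 10, two 32-bit stamps); a standalone sketch:

#include <stdint.h>
#include <arpa/inet.h>	/* htonl() */

#define TCPOPT_NOP		1
#define TCPOPT_TIMESTAMP	8
#define TCPOLEN_TIMESTAMP	10

/* Write the 12-byte aligned timestamp option; return the next word. */
static uint32_t *write_tstamp_option(uint32_t *ptr, uint32_t tsval,
				     uint32_t tsecr)
{
	*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
		       (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
	*ptr++ = htonl(tsval);	/* our timestamp */
	*ptr++ = htonl(tsecr);	/* echo of the peer's timestamp */
	return ptr;
}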
762 static void smc_set_option(const struct tcp_sock *tp, in smc_set_option() argument
768 if (tp->syn_smc) { in smc_set_option()
778 static void smc_set_option_cond(const struct tcp_sock *tp, in smc_set_option_cond() argument
785 if (tp->syn_smc && ireq->smc_ok) { in smc_set_option_cond()
818 struct tcp_sock *tp = tcp_sk(sk); in tcp_syn_options() local
820 struct tcp_fastopen_request *fastopen = tp->fastopen_req; in tcp_syn_options()
850 opts->tsval = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + tp->tsoffset; in tcp_syn_options()
851 opts->tsecr = tp->rx_opt.ts_recent; in tcp_syn_options()
855 opts->ws = tp->rx_opt.rcv_wscale; in tcp_syn_options()
875 tp->syn_fastopen = 1; in tcp_syn_options()
876 tp->syn_fastopen_exp = fastopen->cookie.exp ? 1 : 0; in tcp_syn_options()
880 smc_set_option(tp, opts, &remaining); in tcp_syn_options()
977 struct tcp_sock *tp = tcp_sk(sk); in tcp_established_options() local
992 if (likely(tp->rx_opt.tstamp_ok)) { in tcp_established_options()
994 opts->tsval = skb ? tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb) + in tcp_established_options()
995 tp->tsoffset : 0; in tcp_established_options()
996 opts->tsecr = tp->rx_opt.ts_recent; in tcp_established_options()
1017 eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack; in tcp_established_options()
1033 if (unlikely(BPF_SOCK_OPS_TEST_FLAG(tp, in tcp_established_options()
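tcp_established_options() budgets the 40 bytes of option space: timestamps cost 12 bytes (10 rounded up to a 4-byte boundary), and whatever remains is handed to SACK blocks at 8 bytes apiece on top of a 4-byte aligned SACK header, which is why at most three SACK blocks fit alongside timestamps. A sketch of the accounting using the RFC option lengths:

#include <stdint.h>

#define MAX_TCP_OPTION_SPACE		40
#define TCPOLEN_TSTAMP_ALIGNED		12	/* 10, NOP-padded to 12     */
#define TCPOLEN_SACK_BASE_ALIGNED	4	/* 2-byte base, NOP-padded  */
#define TCPOLEN_SACK_PERBLOCK		8	/* one left/right edge pair */

/* How many SACK blocks fit next to (optional) timestamps? */
static unsigned int sack_blocks_that_fit(int tstamp_ok, unsigned int eff_sacks)
{
	unsigned int remaining = MAX_TCP_OPTION_SPACE;
	unsigned int max_blocks;

	if (tstamp_ok)
		remaining -= TCPOLEN_TSTAMP_ALIGNED;
	if (!eff_sacks ||
	    remaining < TCPOLEN_SACK_BASE_ALIGNED + TCPOLEN_SACK_PERBLOCK)
		return 0;
	max_blocks = (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
		     TCPOLEN_SACK_PERBLOCK;
	return eff_sacks < max_blocks ? eff_sacks : max_blocks;
}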
1071 struct tcp_sock *tp = tcp_sk(sk); in tcp_tsq_write() local
1073 if (tp->lost_out > tp->retrans_out && in tcp_tsq_write()
1074 tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) { in tcp_tsq_write()
1075 tcp_mstamp_refresh(tp); in tcp_tsq_write()
1079 tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle, in tcp_tsq_write()
1105 struct tcp_sock *tp; in tcp_tasklet_func() local
1113 tp = list_entry(q, struct tcp_sock, tsq_node); in tcp_tasklet_func()
1114 list_del(&tp->tsq_node); in tcp_tasklet_func()
1116 sk = (struct sock *)tp; in tcp_tasklet_func()
1191 struct tcp_sock *tp = tcp_sk(sk); in tcp_wfree() local
1223 list_add(&tp->tsq_node, &tsq->head); in tcp_wfree()
1237 struct tcp_sock *tp = container_of(timer, struct tcp_sock, pacing_timer); in tcp_pace_kick() local
1238 struct sock *sk = (struct sock *)tp; in tcp_pace_kick()
1249 struct tcp_sock *tp = tcp_sk(sk); in tcp_update_skb_after_send() local
1258 if (rate != ~0UL && rate && tp->data_segs_out >= 10) { in tcp_update_skb_after_send()
1260 u64 credit = tp->tcp_wstamp_ns - prior_wstamp; in tcp_update_skb_after_send()
1264 tp->tcp_wstamp_ns += len_ns; in tcp_update_skb_after_send()
1267 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_update_skb_after_send()
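tcp_update_skb_after_send() is where pacing happens: once at least ten data segments have gone out (line 1258), each transmission pushes the earliest-departure timestamp tp->tcp_wstamp_ns forward by len * NSEC_PER_SEC / rate, after first spending any "credit" the clock accumulated past the previous departure time. A simplified sketch of that arithmetic (the kernel additionally caps how much credit a burst may spend):

#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Push the earliest-departure stamp forward after sending 'len'
 * bytes at 'rate' bytes/sec; the credit is clock progress since
 * the previously scheduled departure. */
static uint64_t advance_wstamp(uint64_t prior_wstamp_ns, uint64_t wstamp_ns,
			       unsigned int len, unsigned long rate)
{
	uint64_t credit = wstamp_ns - prior_wstamp_ns;
	uint64_t len_ns = (uint64_t)len * NSEC_PER_SEC / rate;

	len_ns -= len_ns < credit ? len_ns : credit;	/* spend credit first */
	return wstamp_ns + len_ns;
}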
1290 struct tcp_sock *tp; in __tcp_transmit_skb() local
1301 tp = tcp_sk(sk); in __tcp_transmit_skb()
1302 prior_wstamp = tp->tcp_wstamp_ns; in __tcp_transmit_skb()
1303 tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); in __tcp_transmit_skb()
1304 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); in __tcp_transmit_skb()
1391 if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) { in __tcp_transmit_skb()
1392 if (before(tp->snd_up, tcb->seq + 0x10000)) { in __tcp_transmit_skb()
1393 th->urg_ptr = htons(tp->snd_up - tcb->seq); in __tcp_transmit_skb()
1395 } else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) { in __tcp_transmit_skb()
1409 th->window = htons(min(tp->rcv_wnd, 65535U)); in __tcp_transmit_skb()
1412 tcp_options_write(th, tp, NULL, &opts, &key); in __tcp_transmit_skb()
1418 tp->af_specific->calc_md5_hash(opts.hash_location, in __tcp_transmit_skb()
1443 tcp_event_data_sent(tp, sk); in __tcp_transmit_skb()
1444 tp->data_segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1445 tp->bytes_sent += skb->len - tcp_header_size; in __tcp_transmit_skb()
1448 if (after(tcb->end_seq, tp->snd_nxt) || tcb->seq == tcb->end_seq) in __tcp_transmit_skb()
1452 tp->segs_out += tcp_skb_pcount(skb); in __tcp_transmit_skb()
1464 tcp_add_tx_delay(skb, tp); in __tcp_transmit_skb()
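The urgent-pointer block in __tcp_transmit_skb() (lines 1391-1395) works around th->urg_ptr being only 16 bits: URG is set with the real offset when tp->snd_up lies within 0x10000 of the segment's starting sequence, and with a clamped 0xFFFF when the urgent mark is further out but still ahead of snd_nxt. A sketch of the clamp, with the wrap-safe comparisons spelled out:

#include <stdint.h>
#include <stdbool.h>

static bool before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
static bool after(uint32_t s1, uint32_t s2)  { return before(s2, s1); }

/* Fill the 16-bit urgent pointer for a segment starting at 'seq';
 * returns true if the URG flag should be set. */
static bool fill_urg(uint32_t snd_up, uint32_t seq, uint32_t snd_nxt,
		     uint16_t *urg_ptr)
{
	if (!before(seq, snd_up))
		return false;			/* no urgent data ahead */
	if (before(snd_up, seq + 0x10000)) {
		*urg_ptr = (uint16_t)(snd_up - seq);
		return true;
	}
	if (after(seq + 0xFFFF, snd_nxt)) {
		*urg_ptr = 0xFFFF;		/* real mark is further out */
		return true;
	}
	return false;
}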
1495 struct tcp_sock *tp = tcp_sk(sk); in tcp_queue_skb() local
1498 WRITE_ONCE(tp->write_seq, TCP_SKB_CB(skb)->end_seq); in tcp_queue_skb()
1529 struct tcp_sock *tp = tcp_sk(sk); in tcp_adjust_pcount() local
1531 tp->packets_out -= decr; in tcp_adjust_pcount()
1534 tp->sacked_out -= decr; in tcp_adjust_pcount()
1536 tp->retrans_out -= decr; in tcp_adjust_pcount()
1538 tp->lost_out -= decr; in tcp_adjust_pcount()
1541 if (tcp_is_reno(tp) && decr > 0) in tcp_adjust_pcount()
1542 tp->sacked_out -= min_t(u32, tp->sacked_out, decr); in tcp_adjust_pcount()
1544 if (tp->lost_skb_hint && in tcp_adjust_pcount()
1545 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1547 tp->lost_cnt_hint -= decr; in tcp_adjust_pcount()
1549 tcp_verify_left_out(tp); in tcp_adjust_pcount()
1602 struct tcp_sock *tp = tcp_sk(sk); in tcp_fragment() local
1673 if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) { in tcp_fragment()
1752 const struct tcp_sock *tp = tcp_sk(sk); in __tcp_mtu_to_mss() local
1762 if (mss_now > tp->rx_opt.mss_clamp) in __tcp_mtu_to_mss()
1763 mss_now = tp->rx_opt.mss_clamp; in __tcp_mtu_to_mss()
1786 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_to_mtu() local
1790 tp->tcp_header_len + in tcp_mss_to_mtu()
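__tcp_mtu_to_mss() and tcp_mss_to_mtu() convert between path MTU and MSS by subtracting or re-adding the network header and the TCP header (including current options, per tp->tcp_header_len), clamping the result to the peer's advertised mss_clamp. A worked sketch for plain IPv4 with no TCP options, where an MTU of 1500 yields the familiar MSS of 1460:

#include <stdint.h>

#define IPV4_HDR_LEN	20
#define TCP_BASE_HDR	20

static int mtu_to_mss(int mtu, int tcp_opt_len, int mss_clamp)
{
	int mss = mtu - IPV4_HDR_LEN - TCP_BASE_HDR - tcp_opt_len;

	return mss > mss_clamp ? mss_clamp : mss;
}

static int mss_to_mtu(int mss, int tcp_opt_len)
{
	return mss + TCP_BASE_HDR + tcp_opt_len + IPV4_HDR_LEN;
}

/* mtu_to_mss(1500, 0, 65535) == 1460; mss_to_mtu(1460, 0) == 1500 */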
1799 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtup_init() local
1804 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) + in tcp_mtup_init()
1837 struct tcp_sock *tp = tcp_sk(sk); in tcp_sync_mss() local
1845 mss_now = tcp_bound_to_half_wnd(tp, mss_now); in tcp_sync_mss()
1851 tp->mss_cache = mss_now; in tcp_sync_mss()
1862 const struct tcp_sock *tp = tcp_sk(sk); in tcp_current_mss() local
1869 mss_now = tp->mss_cache; in tcp_current_mss()
1883 if (header_len != tp->tcp_header_len) { in tcp_current_mss()
1884 int delta = (int) header_len - tp->tcp_header_len; in tcp_current_mss()
1897 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_application_limited() local
1902 u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk)); in tcp_cwnd_application_limited()
1903 u32 win_used = max(tp->snd_cwnd_used, init_win); in tcp_cwnd_application_limited()
1904 if (win_used < tcp_snd_cwnd(tp)) { in tcp_cwnd_application_limited()
1905 tp->snd_ssthresh = tcp_current_ssthresh(sk); in tcp_cwnd_application_limited()
1906 tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1); in tcp_cwnd_application_limited()
1908 tp->snd_cwnd_used = 0; in tcp_cwnd_application_limited()
1910 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_application_limited()
1916 struct tcp_sock *tp = tcp_sk(sk); in tcp_cwnd_validate() local
1925 if (!before(tp->snd_una, tp->cwnd_usage_seq) || in tcp_cwnd_validate()
1927 (!tp->is_cwnd_limited && in tcp_cwnd_validate()
1928 tp->packets_out > tp->max_packets_out)) { in tcp_cwnd_validate()
1929 tp->is_cwnd_limited = is_cwnd_limited; in tcp_cwnd_validate()
1930 tp->max_packets_out = tp->packets_out; in tcp_cwnd_validate()
1931 tp->cwnd_usage_seq = tp->snd_nxt; in tcp_cwnd_validate()
1936 tp->snd_cwnd_used = 0; in tcp_cwnd_validate()
1937 tp->snd_cwnd_stamp = tcp_jiffies32; in tcp_cwnd_validate()
1940 if (tp->packets_out > tp->snd_cwnd_used) in tcp_cwnd_validate()
1941 tp->snd_cwnd_used = tp->packets_out; in tcp_cwnd_validate()
1944 (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto && in tcp_cwnd_validate()
1963 static bool tcp_minshall_check(const struct tcp_sock *tp) in tcp_minshall_check() argument
1965 return after(tp->snd_sml, tp->snd_una) && in tcp_minshall_check()
1966 !after(tp->snd_sml, tp->snd_nxt); in tcp_minshall_check()
1977 static void tcp_minshall_update(struct tcp_sock *tp, unsigned int mss_now, in tcp_minshall_update() argument
1981 tp->snd_sml = TCP_SKB_CB(skb)->end_seq; in tcp_minshall_update()
1991 static bool tcp_nagle_check(bool partial, const struct tcp_sock *tp, in tcp_nagle_check() argument
1996 (!nonagle && tp->packets_out && tcp_minshall_check(tp))); in tcp_nagle_check()
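tcp_minshall_check() implements Minshall's refinement of Nagle: a new sub-MSS segment may go out only if the last small segment sent (ending at tp->snd_sml) has already been acknowledged, and tcp_nagle_check() combines that with the classic rule that a partial segment waits while other data is in flight unless TCP_NODELAY applies. A standalone sketch of both tests (TCP_CORK handling omitted):

#include <stdint.h>
#include <stdbool.h>

static bool before(uint32_t s1, uint32_t s2) { return (int32_t)(s1 - s2) < 0; }
static bool after(uint32_t s1, uint32_t s2)  { return before(s2, s1); }

/* Is a previously sent small segment still unacknowledged? */
static bool minshall_check(uint32_t snd_sml, uint32_t snd_una,
			   uint32_t snd_nxt)
{
	return after(snd_sml, snd_una) && !after(snd_sml, snd_nxt);
}

/* Should a sub-MSS ("partial") segment be held back? */
static bool nagle_defers(bool partial, bool nodelay, uint32_t packets_out,
			 uint32_t snd_sml, uint32_t snd_una, uint32_t snd_nxt)
{
	return partial && !nodelay && packets_out &&
	       minshall_check(snd_sml, snd_una, snd_nxt);
}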
2053 const struct tcp_sock *tp = tcp_sk(sk); in tcp_mss_split_point() local
2056 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
2072 if (tcp_nagle_check(partial != 0, tp, nonagle)) in tcp_mss_split_point()
2081 static u32 tcp_cwnd_test(const struct tcp_sock *tp) in tcp_cwnd_test() argument
2085 in_flight = tcp_packets_in_flight(tp); in tcp_cwnd_test()
2086 cwnd = tcp_snd_cwnd(tp); in tcp_cwnd_test()
2115 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
2128 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
2131 if (!tcp_nagle_check(skb->len < cur_mss, tp, nonagle)) in tcp_nagle_test()
2138 static bool tcp_snd_wnd_test(const struct tcp_sock *tp, in tcp_snd_wnd_test() argument
2147 return !after(end_seq, tcp_wnd_end(tp)); in tcp_snd_wnd_test()
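tcp_cwnd_test() and tcp_snd_wnd_test() are the two per-segment gates on the transmit path: the first yields how many more packets the congestion window permits (cwnd minus packets in flight), the second checks that the segment's end sequence stays within the receiver's advertised window. A compact sketch of both:

#include <stdint.h>
#include <stdbool.h>

static bool after(uint32_t s1, uint32_t s2) { return (int32_t)(s2 - s1) < 0; }

/* How many more packets may the congestion window put in flight? */
static uint32_t cwnd_quota(uint32_t cwnd, uint32_t in_flight)
{
	return in_flight >= cwnd ? 0 : cwnd - in_flight;
}

/* Does the segment [seq, end_seq) fit inside the peer's window? */
static bool fits_send_window(uint32_t end_seq, uint32_t wnd_end)
{
	return !after(end_seq, wnd_end);
}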
2216 struct tcp_sock *tp = tcp_sk(sk); in tcp_tso_should_defer() local
2229 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC; in tcp_tso_should_defer()
2233 in_flight = tcp_packets_in_flight(tp); in tcp_tso_should_defer()
2236 BUG_ON(tcp_snd_cwnd(tp) <= in_flight); in tcp_tso_should_defer()
2238 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
2241 cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache; in tcp_tso_should_defer()
2246 if (limit >= max_segs * tp->mss_cache) in tcp_tso_should_defer()
2255 u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache); in tcp_tso_should_defer()
2269 if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache) in tcp_tso_should_defer()
2277 delta = tp->tcp_clock_cache - head->tstamp; in tcp_tso_should_defer()
2279 if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0) in tcp_tso_should_defer()
2314 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_check_reprobe() local
2326 icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + in tcp_mtu_check_reprobe()
2432 struct tcp_sock *tp = tcp_sk(sk); in tcp_mtu_probe() local
2449 tcp_snd_cwnd(tp) < 11 || in tcp_mtu_probe()
2450 tp->rx_opt.num_sacks || tp->rx_opt.dsack)) in tcp_mtu_probe()
2460 size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; in tcp_mtu_probe()
2476 if (tp->write_seq - tp->snd_nxt < size_needed) in tcp_mtu_probe()
2479 if (tp->snd_wnd < size_needed) in tcp_mtu_probe()
2481 if (after(tp->snd_nxt + size_needed, tcp_wnd_end(tp))) in tcp_mtu_probe()
2485 if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) { in tcp_mtu_probe()
2486 if (!tcp_packets_in_flight(tp)) in tcp_mtu_probe()
2547 tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1); in tcp_mtu_probe()
2551 tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; in tcp_mtu_probe()
2552 tp->mtu_probe.probe_seq_end = TCP_SKB_CB(nskb)->end_seq; in tcp_mtu_probe()
2562 struct tcp_sock *tp = tcp_sk(sk); in tcp_pacing_check() local
2567 if (tp->tcp_wstamp_ns <= tp->tcp_clock_cache) in tcp_pacing_check()
2570 if (!hrtimer_is_queued(&tp->pacing_timer)) { in tcp_pacing_check()
2571 hrtimer_start(&tp->pacing_timer, in tcp_pacing_check()
2572 ns_to_ktime(tp->tcp_wstamp_ns), in tcp_pacing_check()
2649 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new) in tcp_chrono_set() argument
2652 enum tcp_chrono old = tp->chrono_type; in tcp_chrono_set()
2655 tp->chrono_stat[old - 1] += now - tp->chrono_start; in tcp_chrono_set()
2656 tp->chrono_start = now; in tcp_chrono_set()
2657 tp->chrono_type = new; in tcp_chrono_set()
2662 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_start() local
2669 if (type > tp->chrono_type) in tcp_chrono_start()
2670 tcp_chrono_set(tp, type); in tcp_chrono_start()
2675 struct tcp_sock *tp = tcp_sk(sk); in tcp_chrono_stop() local
2686 tcp_chrono_set(tp, TCP_CHRONO_UNSPEC); in tcp_chrono_stop()
2687 else if (type == tp->chrono_type) in tcp_chrono_stop()
2688 tcp_chrono_set(tp, TCP_CHRONO_BUSY); in tcp_chrono_stop()
2737 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_xmit() local
2746 tcp_mstamp_refresh(tp); in tcp_write_xmit()
2762 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { in tcp_write_xmit()
2764 tp->tcp_wstamp_ns = tp->tcp_clock_cache; in tcp_write_xmit()
2765 skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); in tcp_write_xmit()
2766 list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); in tcp_write_xmit()
2774 cwnd_quota = tcp_cwnd_test(tp); in tcp_write_xmit()
2789 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) { in tcp_write_xmit()
2795 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
2807 if (tso_segs > 1 && !tcp_urg_mode(tp)) in tcp_write_xmit()
2836 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
2848 is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp)); in tcp_write_xmit()
2854 tp->prr_out += sent_pkts; in tcp_write_xmit()
2861 return !tp->packets_out && !tcp_write_queue_empty(sk); in tcp_write_xmit()
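tcp_write_xmit() chains those gates for every queued skb: refresh the clock, respect pacing, take a cwnd quota, verify the receive window, apply Nagle to sub-MSS tails, then transmit, advance snd_nxt, and account for PRR. A self-contained toy showing how the congestion and receive-window limits combine into a send budget; all names and the scenario are illustrative:

#include <stdint.h>
#include <stdio.h>

struct txstate {
	uint32_t snd_nxt, wnd_end;	/* sequence state            */
	uint32_t cwnd, in_flight;	/* congestion state, packets */
};

/* Full-MSS segments sendable right now: min of both limits. */
static uint32_t sendable_segs(const struct txstate *s, uint32_t mss)
{
	uint32_t quota = s->in_flight >= s->cwnd ? 0 : s->cwnd - s->in_flight;
	uint32_t win_segs = (s->wnd_end - s->snd_nxt) / mss;

	return quota < win_segs ? quota : win_segs;
}

int main(void)
{
	struct txstate s = { .snd_nxt = 3000, .wnd_end = 16000,
			     .cwnd = 10, .in_flight = 4 };

	/* cwnd allows 6 more, the window allows 13: prints 6 */
	printf("can send %u segments of 1000B\n", sendable_segs(&s, 1000));
	return 0;
}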
2867 struct tcp_sock *tp = tcp_sk(sk); in tcp_schedule_loss_probe() local
2874 if (rcu_access_pointer(tp->fastopen_rsk)) in tcp_schedule_loss_probe()
2882 !tp->packets_out || !tcp_is_sack(tp) || in tcp_schedule_loss_probe()
2891 if (tp->srtt_us) { in tcp_schedule_loss_probe()
2892 timeout_us = tp->srtt_us >> 2; in tcp_schedule_loss_probe()
2893 if (tp->packets_out == 1) in tcp_schedule_loss_probe()
2937 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_loss_probe() local
2943 if (tp->tlp_high_seq) in tcp_send_loss_probe()
2946 tp->tlp_retrans = 0; in tcp_send_loss_probe()
2948 if (skb && tcp_snd_wnd_test(tp, skb, mss)) { in tcp_send_loss_probe()
2949 pcount = tp->packets_out; in tcp_send_loss_probe()
2951 if (tp->packets_out > pcount) in tcp_send_loss_probe()
2957 WARN_ONCE(tp->packets_out, in tcp_send_loss_probe()
2959 tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss); in tcp_send_loss_probe()
2985 tp->tlp_retrans = 1; in tcp_send_loss_probe()
2989 tp->tlp_high_seq = tp->snd_nxt; in tcp_send_loss_probe()
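tcp_schedule_loss_probe() derives the tail-loss-probe timeout from the smoothed RTT: tp->srtt_us holds 8x SRTT in microseconds, so srtt_us >> 2 is 2*SRTT, and with exactly one packet in flight extra headroom is added so a delayed ACK can still arrive first. A sketch of the computation; the padding constant is illustrative, the kernel's exact floor differs by version:

#include <stdint.h>

#define TLP_PAD_US	5000	/* illustrative headroom for a delayed ACK */

/* srtt_us is 8 * SRTT in microseconds, as in struct tcp_sock. */
static uint64_t tlp_timeout_us(uint32_t srtt_us, uint32_t packets_out)
{
	uint64_t timeout = srtt_us >> 2;	/* 2 * SRTT */

	if (packets_out == 1)
		timeout += TLP_PAD_US;
	return timeout;
}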
3084 struct tcp_sock *tp = tcp_sk(sk); in __tcp_select_window() local
3100 full_space = min_t(int, tp->window_clamp, allowed_space); in __tcp_select_window()
3111 if (READ_ONCE(net->ipv4.sysctl_tcp_shrink_window) && tp->rx_opt.rcv_wscale) in __tcp_select_window()
3125 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3138 if (free_space > tp->rcv_ssthresh) in __tcp_select_window()
3139 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3144 if (tp->rx_opt.rcv_wscale) { in __tcp_select_window()
3151 window = ALIGN(window, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3153 window = tp->rcv_wnd; in __tcp_select_window()
3173 free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale); in __tcp_select_window()
3183 free_space < (1 << tp->rx_opt.rcv_wscale)) in __tcp_select_window()
3187 if (free_space > tp->rcv_ssthresh) { in __tcp_select_window()
3188 free_space = tp->rcv_ssthresh; in __tcp_select_window()
3196 free_space = ALIGN(free_space, (1 << tp->rx_opt.rcv_wscale)); in __tcp_select_window()
3220 struct tcp_sock *tp = tcp_sk(sk); in tcp_collapse_retrans() local
3246 tcp_clear_retrans_hints_partial(tp); in tcp_collapse_retrans()
3247 if (next_skb == tp->retransmit_skb_hint) in tcp_collapse_retrans()
3248 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
3280 struct tcp_sock *tp = tcp_sk(sk); in tcp_retrans_try_collapse() local
3306 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
3321 struct tcp_sock *tp = tcp_sk(sk); in __tcp_retransmit_skb() local
3334 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
3340 if (unlikely(before(TCP_SKB_CB(skb)->end_seq, tp->snd_una))) { in __tcp_retransmit_skb()
3344 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
3352 avail_wnd = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in __tcp_retransmit_skb()
3360 if (TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
3398 tp->total_retrans += segs; in __tcp_retransmit_skb()
3399 tp->bytes_retrans += skb->len; in __tcp_retransmit_skb()
3420 tcp_update_skb_after_send(sk, skb, tp->tcp_wstamp_ns); in __tcp_retransmit_skb()
3427 if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RETRANS_CB_FLAG)) in __tcp_retransmit_skb()
3447 struct tcp_sock *tp = tcp_sk(sk); in tcp_retransmit_skb() local
3457 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3461 if (!tp->retrans_stamp) in tcp_retransmit_skb()
3462 tp->retrans_stamp = tcp_skb_timestamp_ts(tp->tcp_usec_ts, skb); in tcp_retransmit_skb()
3464 if (tp->undo_retrans < 0) in tcp_retransmit_skb()
3465 tp->undo_retrans = 0; in tcp_retransmit_skb()
3466 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
3479 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_retransmit_queue() local
3484 if (!tp->packets_out) in tcp_xmit_retransmit_queue()
3488 skb = tp->retransmit_skb_hint ?: rtx_head; in tcp_xmit_retransmit_queue()
3499 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
3501 segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp); in tcp_xmit_retransmit_queue()
3510 if (tp->retrans_out >= tp->lost_out) { in tcp_xmit_retransmit_queue()
3536 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
3578 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_fin() local
3592 tp->write_seq++; in tcp_send_fin()
3600 WRITE_ONCE(tp->snd_nxt, tp->snd_nxt + 1); in tcp_send_fin()
3614 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
3712 const struct tcp_sock *tp = tcp_sk(sk); in tcp_make_synack() local
3748 mss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_make_synack()
3849 tcp_add_tx_delay(skb, tp); in tcp_make_synack()
3878 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_init() local
3885 tp->tcp_header_len = sizeof(struct tcphdr); in tcp_connect_init()
3887 tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED; in tcp_connect_init()
3892 if (tp->rx_opt.user_mss) in tcp_connect_init()
3893 tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; in tcp_connect_init()
3894 tp->max_window = 0; in tcp_connect_init()
3900 if (!tp->window_clamp) in tcp_connect_init()
3901 WRITE_ONCE(tp->window_clamp, dst_metric(dst, RTAX_WINDOW)); in tcp_connect_init()
3902 tp->advmss = tcp_mss_clamp(tp, dst_metric_advmss(dst)); in tcp_connect_init()
3908 (tp->window_clamp > tcp_full_space(sk) || tp->window_clamp == 0)) in tcp_connect_init()
3909 WRITE_ONCE(tp->window_clamp, tcp_full_space(sk)); in tcp_connect_init()
3916 tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), in tcp_connect_init()
3917 &tp->rcv_wnd, in tcp_connect_init()
3918 &tp->window_clamp, in tcp_connect_init()
3923 tp->rx_opt.rcv_wscale = rcv_wscale; in tcp_connect_init()
3924 tp->rcv_ssthresh = tp->rcv_wnd; in tcp_connect_init()
3928 tp->snd_wnd = 0; in tcp_connect_init()
3929 tcp_init_wl(tp, 0); in tcp_connect_init()
3931 tp->snd_una = tp->write_seq; in tcp_connect_init()
3932 tp->snd_sml = tp->write_seq; in tcp_connect_init()
3933 tp->snd_up = tp->write_seq; in tcp_connect_init()
3934 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect_init()
3936 if (likely(!tp->repair)) in tcp_connect_init()
3937 tp->rcv_nxt = 0; in tcp_connect_init()
3939 tp->rcv_tstamp = tcp_jiffies32; in tcp_connect_init()
3940 tp->rcv_wup = tp->rcv_nxt; in tcp_connect_init()
3941 WRITE_ONCE(tp->copied_seq, tp->rcv_nxt); in tcp_connect_init()
3945 tcp_clear_retrans(tp); in tcp_connect_init()
3950 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect_queue_skb() local
3957 WRITE_ONCE(tp->write_seq, tcb->end_seq); in tcp_connect_queue_skb()
3958 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3971 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_syn_data() local
3972 struct tcp_fastopen_request *fo = tp->fastopen_req; in tcp_send_syn_data()
3977 tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */ in tcp_send_syn_data()
3978 if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie)) in tcp_send_syn_data()
3985 tp->rx_opt.mss_clamp = tcp_mss_clamp(tp, tp->rx_opt.mss_clamp); in tcp_send_syn_data()
4042 tp->syn_data = (fo->copied > 0); in tcp_send_syn_data()
4050 tp->packets_out -= tcp_skb_pcount(syn_data); in tcp_send_syn_data()
4058 tp->syn_fastopen = 0; in tcp_send_syn_data()
4067 struct tcp_sock *tp = tcp_sk(sk); in tcp_connect() local
4078 if (unlikely(rcu_dereference_protected(tp->md5sig_info, in tcp_connect()
4080 bool needs_ao = !!tp->af_specific->ao_lookup(sk, sk, -1, -1); in tcp_connect()
4081 bool needs_md5 = !!tp->af_specific->md5_lookup(sk, sk); in tcp_connect()
4084 ao_info = rcu_dereference_check(tp->ao_info, in tcp_connect()
4104 kfree(rcu_replace_pointer(tp->md5sig_info, NULL, in tcp_connect()
4110 if (unlikely(rcu_dereference_protected(tp->ao_info, in tcp_connect()
4115 if (!tp->af_specific->ao_lookup(sk, sk, -1, -1)) in tcp_connect()
4125 if (unlikely(tp->repair)) { in tcp_connect()
4134 tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN); in tcp_connect()
4135 tcp_mstamp_refresh(tp); in tcp_connect()
4136 tp->retrans_stamp = tcp_time_stamp_ts(tp); in tcp_connect()
4142 err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) : in tcp_connect()
4150 WRITE_ONCE(tp->snd_nxt, tp->write_seq); in tcp_connect()
4151 tp->pushed_seq = tp->write_seq; in tcp_connect()
4154 WRITE_ONCE(tp->snd_nxt, TCP_SKB_CB(buff)->seq); in tcp_connect()
4155 tp->pushed_seq = TCP_SKB_CB(buff)->seq; in tcp_connect()
4184 const struct tcp_sock *tp = tcp_sk(sk); in tcp_send_delayed_ack() local
4197 if (tp->srtt_us) { in tcp_send_delayed_ack()
4198 int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3), in tcp_send_delayed_ack()
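tcp_send_delayed_ack() bounds the delayed-ACK timer by the measured round trip: when an RTT estimate exists, the timeout may not exceed max(SRTT, the minimum delayed-ACK tick), so an ACK is never held much longer than one RTT; srtt_us >> 3 unscales the 8x field. A sketch in microseconds (the kernel works in jiffies; the min/max constants here are illustrative stand-ins for TCP_DELACK_MIN/MAX):

#include <stdint.h>

#define DELACK_MIN_US	 40000	/* illustrative TCP_DELACK_MIN (~40 ms)  */
#define DELACK_MAX_US	200000	/* illustrative TCP_DELACK_MAX (~200 ms) */

/* Clamp a proposed delayed-ACK timeout 'ato_us' using SRTT. */
static uint32_t delack_timeout_us(uint32_t ato_us, uint32_t srtt_us)
{
	if (srtt_us) {
		uint32_t rtt = srtt_us >> 3;	/* unscale to SRTT */

		if (rtt < DELACK_MIN_US)
			rtt = DELACK_MIN_US;
		if (ato_us > rtt)
			ato_us = rtt;
	}
	return ato_us < DELACK_MAX_US ? ato_us : DELACK_MAX_US;
}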
4290 struct tcp_sock *tp = tcp_sk(sk); in tcp_xmit_probe_skb() local
4305 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
4323 struct tcp_sock *tp = tcp_sk(sk); in tcp_write_wakeup() local
4330 if (skb && before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
4333 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
4335 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
4336 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
4358 if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF)) in tcp_write_wakeup()
4370 struct tcp_sock *tp = tcp_sk(sk); in tcp_send_probe0() local
4377 if (tp->packets_out || tcp_write_queue_empty(sk)) { in tcp_send_probe0()