Lines Matching refs:subflow

70 struct mptcp_subflow_context *subflow; in __mptcp_socket_create() local
81 subflow = mptcp_subflow_ctx(ssock->sk); in __mptcp_socket_create()
82 list_add(&subflow->node, &msk->conn_list); in __mptcp_socket_create()
84 subflow->request_mptcp = 1; in __mptcp_socket_create()
85 subflow->subflow_id = msk->subflow_id++; in __mptcp_socket_create()
88 WRITE_ONCE(subflow->local_id, 0); in __mptcp_socket_create()
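
The __mptcp_socket_create() hits above show the first subflow being wired up: the subflow context is derived from the kernel-internal TCP socket, linked onto msk->conn_list, and flagged as the one requesting MPTCP. Below is a minimal user-space C sketch of that ownership model; the struct members are simplified stand-ins for illustration, not the kernel definitions. The WRITE_ONCE() on local_id in the listing suggests the field may be read concurrently; the sketch ignores that.

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel structures. */
struct subflow_ctx {
	struct subflow_ctx *next;	/* models the list_head node on conn_list */
	int request_mptcp;		/* the initial subflow asks for MPTCP */
	unsigned int subflow_id;	/* per-msk monotonically increasing id */
	unsigned int local_id;		/* address id; 0 for the initial subflow */
};

struct mptcp_sock {
	struct subflow_ctx *conn_list;	/* all subflows owned by this msk */
	unsigned int subflow_id;	/* next id to hand out */
};

/* Mirrors the shape of __mptcp_socket_create(): allocate the first
 * subflow, link it onto conn_list, and mark it as MPTCP-requesting. */
static struct subflow_ctx *first_subflow_create(struct mptcp_sock *msk)
{
	struct subflow_ctx *subflow = calloc(1, sizeof(*subflow));

	if (!subflow)
		return NULL;
	subflow->next = msk->conn_list;		/* list_add(&subflow->node, ...) */
	msk->conn_list = subflow;
	subflow->request_mptcp = 1;
	subflow->subflow_id = msk->subflow_id++;
	subflow->local_id = 0;			/* WRITE_ONCE(subflow->local_id, 0) */
	return subflow;
}

int main(void)
{
	struct mptcp_sock msk = { .conn_list = NULL, .subflow_id = 1 };
	struct subflow_ctx *sf = first_subflow_create(&msk);

	if (sf)
		printf("subflow_id=%u request_mptcp=%d\n",
		       sf->subflow_id, sf->request_mptcp);
	free(sf);
	return 0;
}
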
342 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skb() local
364 MPTCP_SKB_CB(skb)->map_seq = mptcp_subflow_get_mapped_dsn(subflow); in __mptcp_move_skb()
484 static long mptcp_timeout_from_subflow(const struct mptcp_subflow_context *subflow) in mptcp_timeout_from_subflow() argument
486 const struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_timeout_from_subflow()
488 return inet_csk(ssk)->icsk_pending && !subflow->stale_count ? in mptcp_timeout_from_subflow()
494 struct mptcp_subflow_context *subflow; in mptcp_set_timeout() local
497 mptcp_for_each_subflow(mptcp_sk(sk), subflow) in mptcp_set_timeout()
498 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_set_timeout()
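
mptcp_set_timeout() is the canonical mptcp_for_each_subflow() walk: every subflow proposes a timeout and the msk keeps the maximum. A hedged sketch of the same reduction follows, using a plain singly-linked list in place of the kernel's list_head machinery and a stand-in field for mptcp_timeout_from_subflow():

#include <stdio.h>

struct subflow_ctx {
	struct subflow_ctx *next;
	long proposed_timeout;	/* stand-in for mptcp_timeout_from_subflow() */
};

/* Walk every subflow and keep the largest proposed timeout,
 * mirroring the loop in mptcp_set_timeout(). */
static long set_timeout(struct subflow_ctx *conn_list)
{
	long tout = 0;

	for (struct subflow_ctx *sf = conn_list; sf; sf = sf->next)
		tout = tout > sf->proposed_timeout ? tout : sf->proposed_timeout;
	return tout;
}

int main(void)
{
	struct subflow_ctx c = { NULL, 200 }, b = { &c, 500 }, a = { &b, 300 };

	printf("tout=%ld\n", set_timeout(&a));	/* prints 500 */
	return 0;
}
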
525 struct mptcp_subflow_context *subflow; in mptcp_send_ack() local
527 mptcp_for_each_subflow(msk, subflow) in mptcp_send_ack()
528 mptcp_subflow_send_ack(mptcp_subflow_tcp_sock(subflow)); in mptcp_send_ack()
557 struct mptcp_subflow_context *subflow; in mptcp_cleanup_rbuf() local
565 mptcp_for_each_subflow(msk, subflow) { in mptcp_cleanup_rbuf()
566 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_cleanup_rbuf()
639 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_move_skbs_from_subflow() local
667 map_remaining = subflow->map_data_len - in __mptcp_move_skbs_from_subflow()
668 mptcp_subflow_get_map_offset(subflow); in __mptcp_move_skbs_from_subflow()
687 subflow->map_data_len = skb->len; in __mptcp_move_skbs_from_subflow()
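
The 667-668 hits compute how much of the current DSS mapping is still unread on this subflow: the mapping's total length minus the offset already consumed. A simplified model of that arithmetic (the field names are illustrative, not the exact kernel layout):

#include <stdio.h>

/* Simplified view of a DSS mapping: map_data_len bytes of the MPTCP
 * data stream are mapped onto this subflow starting at map_subflow_seq. */
struct mapping {
	unsigned int map_subflow_seq;	/* subflow seq where the mapping starts */
	unsigned int map_data_len;	/* length of the mapped region */
};

/* Bytes of the current mapping not yet consumed, given how far into
 * the subflow stream we have read; mirrors the map_remaining math in
 * __mptcp_move_skbs_from_subflow(). */
static unsigned int map_remaining(const struct mapping *m,
				  unsigned int copied_seq)
{
	unsigned int offset = copied_seq - m->map_subflow_seq;

	return m->map_data_len - offset;
}

int main(void)
{
	struct mapping m = { .map_subflow_seq = 1000, .map_data_len = 1460 };

	/* 400 bytes already consumed: 1060 bytes of the mapping remain. */
	printf("%u\n", map_remaining(&m, 1400));
	return 0;
}
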
813 struct mptcp_subflow_context *subflow; in __mptcp_error_report() local
816 mptcp_for_each_subflow(msk, subflow) in __mptcp_error_report()
817 if (__mptcp_subflow_error_report(sk, mptcp_subflow_tcp_sock(subflow))) in __mptcp_error_report()
850 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_data_ready() local
858 if (unlikely(subflow->disposable)) in mptcp_data_ready()
907 struct mptcp_subflow_context *tmp, *subflow; in __mptcp_flush_join_list() local
910 list_for_each_entry_safe(subflow, tmp, join_list, node) { in __mptcp_flush_join_list()
911 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_flush_join_list()
914 list_move_tail(&subflow->node, &msk->conn_list); in __mptcp_flush_join_list()
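
__mptcp_flush_join_list() migrates subflows that completed the MP_JOIN handshake from the pending join_list onto the main conn_list, using the _safe list iterator because each entry is re-linked mid-walk. A user-space model of that migration, with singly-linked lists standing in for the kernel's list_head and list_move_tail():

#include <stdio.h>

struct subflow_ctx {
	struct subflow_ctx *next;
	const char *name;
};

/* Append a subflow at the tail of a list; the kernel uses
 * list_move_tail() on doubly-linked list_heads instead. */
static void move_tail(struct subflow_ctx **dst, struct subflow_ctx *sf)
{
	sf->next = NULL;
	while (*dst)
		dst = &(*dst)->next;
	*dst = sf;
}

/* Mirrors __mptcp_flush_join_list(): subflows parked on join_list
 * while the msk was busy are migrated onto the main conn_list. */
static void flush_join_list(struct subflow_ctx **join_list,
			    struct subflow_ctx **conn_list)
{
	struct subflow_ctx *sf, *tmp;

	for (sf = *join_list; sf; sf = tmp) {
		tmp = sf->next;		/* _safe variant: sf is re-linked */
		move_tail(conn_list, sf);
	}
	*join_list = NULL;
}

int main(void)
{
	struct subflow_ctx j = { NULL, "joined" };
	struct subflow_ctx *join_list = &j, *conn_list = NULL;

	flush_join_list(&join_list, &conn_list);
	printf("%s now on conn_list\n", conn_list->name);
	return 0;
}
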
954 struct mptcp_subflow_context *subflow; in mptcp_subflow_recv_lookup() local
958 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_recv_lookup()
959 if (READ_ONCE(subflow->data_avail)) in mptcp_subflow_recv_lookup()
960 return mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_recv_lookup()
1087 struct mptcp_subflow_context *subflow; in mptcp_enter_memory_pressure() local
1091 mptcp_for_each_subflow(msk, subflow) { in mptcp_enter_memory_pressure()
1092 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_enter_memory_pressure()
1395 void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_set_active() argument
1397 if (!subflow->stale) in mptcp_subflow_set_active()
1400 subflow->stale = 0; in mptcp_subflow_set_active()
1401 MPTCP_INC_STATS(sock_net(mptcp_subflow_tcp_sock(subflow)), MPTCP_MIB_SUBFLOWRECOVER); in mptcp_subflow_set_active()
1404 bool mptcp_subflow_active(struct mptcp_subflow_context *subflow) in mptcp_subflow_active() argument
1406 if (unlikely(subflow->stale)) { in mptcp_subflow_active()
1407 u32 rcv_tstamp = READ_ONCE(tcp_sk(mptcp_subflow_tcp_sock(subflow))->rcv_tstamp); in mptcp_subflow_active()
1409 if (subflow->stale_rcv_tstamp == rcv_tstamp) in mptcp_subflow_active()
1412 mptcp_subflow_set_active(subflow); in mptcp_subflow_active()
1414 return __mptcp_subflow_active(subflow); in mptcp_subflow_active()
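
mptcp_subflow_active() encodes the stale-recovery heuristic: a subflow marked stale stays unusable while its TCP receive timestamp is frozen, and is revived via mptcp_subflow_set_active() (which also bumps MPTCP_MIB_SUBFLOWRECOVER, per the 1401 hit) as soon as the timestamp moves, i.e. the peer is responding again. A compact model with simplified field names:

#include <stdbool.h>
#include <stdio.h>

struct subflow_ctx {
	bool stale;
	unsigned int stale_rcv_tstamp;	/* rcv_tstamp seen when marked stale */
	unsigned int rcv_tstamp;	/* stand-in for tcp_sk(ssk)->rcv_tstamp */
	bool alive;			/* stand-in for __mptcp_subflow_active() */
};

/* Mirrors mptcp_subflow_active(): a stale subflow whose receive
 * timestamp has not advanced stays unusable; any progress clears
 * the stale mark before the regular liveness check runs. */
static bool subflow_active(struct subflow_ctx *sf)
{
	if (sf->stale) {
		if (sf->stale_rcv_tstamp == sf->rcv_tstamp)
			return false;	/* still silent: keep it stale */
		sf->stale = false;	/* mptcp_subflow_set_active() */
	}
	return sf->alive;
}

int main(void)
{
	struct subflow_ctx sf = { .stale = true, .stale_rcv_tstamp = 10,
				  .rcv_tstamp = 11, .alive = true };

	printf("%d\n", subflow_active(&sf));	/* 1: timestamp moved, recovered */
	return 0;
}
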
1428 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_send() local
1442 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_send()
1443 bool backup = subflow->backup || subflow->request_bkup; in mptcp_subflow_get_send()
1445 trace_mptcp_subflow_get_send(subflow); in mptcp_subflow_get_send()
1446 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_send()
1447 if (!mptcp_subflow_active(subflow)) in mptcp_subflow_get_send()
1450 tout = max(tout, mptcp_timeout_from_subflow(subflow)); in mptcp_subflow_get_send()
1452 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1455 subflow->avg_pacing_rate = READ_ONCE(ssk->sk_pacing_rate); in mptcp_subflow_get_send()
1456 pace = subflow->avg_pacing_rate; in mptcp_subflow_get_send()
1493 subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_get_send()
1494 subflow->avg_pacing_rate = div_u64((u64)subflow->avg_pacing_rate * wmem + in mptcp_subflow_get_send()
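
The mptcp_subflow_get_send() hits sketch the default packet scheduler: subflows are split into backup and non-backup classes, and within each class the one with the shortest estimated drain time (queued bytes over pacing rate) wins, with non-backup preferred. The listing also shows avg_pacing_rate being maintained as a weighted average; the sketch below instead uses the instantaneous rate, an arbitrary fixed-point shift, and simplified types:

#include <stdint.h>
#include <stdio.h>

struct subflow_ctx {
	struct subflow_ctx *next;
	int backup;		/* subflow->backup || subflow->request_bkup */
	int active;		/* stand-in for mptcp_subflow_active() */
	uint64_t wmem_queued;	/* bytes already queued on this subflow */
	uint64_t pacing_rate;	/* bytes per second */
};

/* Model of the default send scheduler in mptcp_subflow_get_send():
 * among active subflows, prefer non-backup ones and pick the subflow
 * with the shortest estimated drain time. */
static struct subflow_ctx *get_send(struct subflow_ctx *conn_list)
{
	struct subflow_ctx *best[2] = { NULL, NULL };	/* [0]=non-backup, [1]=backup */
	uint64_t best_linger[2] = { UINT64_MAX, UINT64_MAX };

	for (struct subflow_ctx *sf = conn_list; sf; sf = sf->next) {
		uint64_t linger;

		if (!sf->active || !sf->pacing_rate)
			continue;
		linger = (sf->wmem_queued << 16) / sf->pacing_rate;
		if (linger < best_linger[sf->backup]) {
			best_linger[sf->backup] = linger;
			best[sf->backup] = sf;
		}
	}
	/* Non-backup subflows win whenever one is available. */
	return best[0] ? best[0] : best[1];
}

int main(void)
{
	struct subflow_ctx slow = { NULL, 0, 1, 64000, 1000 };
	struct subflow_ctx fast = { &slow, 0, 1, 64000, 8000 };

	printf("picked pace=%llu\n",
	       (unsigned long long)get_send(&fast)->pacing_rate);	/* 8000 */
	return 0;
}
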
1598 struct mptcp_subflow_context *subflow; in __mptcp_push_pending() local
1606 mptcp_for_each_subflow(msk, subflow) { in __mptcp_push_pending()
1607 if (READ_ONCE(subflow->scheduled)) { in __mptcp_push_pending()
1608 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_push_pending()
1611 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_push_pending()
1664 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in __mptcp_subflow_push_pending() local
1671 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_subflow_push_pending()
1683 if (READ_ONCE(subflow->scheduled)) { in __mptcp_subflow_push_pending()
1684 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_subflow_push_pending()
1691 mptcp_for_each_subflow(msk, subflow) { in __mptcp_subflow_push_pending()
1692 if (READ_ONCE(subflow->scheduled)) { in __mptcp_subflow_push_pending()
1693 xmit_ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_subflow_push_pending()
1695 mptcp_subflow_delegate(subflow, in __mptcp_subflow_push_pending()
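
Both push paths above follow the same contract with the packet scheduler: the scheduler sets subflow->scheduled, and the push loop consumes the mark, clearing it with mptcp_subflow_set_scheduled(subflow, false) before transmitting. A user-space model of that producer/consumer flag, using C11 atomics in place of the kernel's READ_ONCE/WRITE_ONCE and simplified names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct subflow_ctx {
	struct subflow_ctx *next;
	atomic_bool scheduled;	/* set by the scheduler, cleared by the pusher */
	const char *name;
};

/* Scheduler side: mark one subflow as the next transmit target. */
static void set_scheduled(struct subflow_ctx *sf, bool val)
{
	atomic_store(&sf->scheduled, val);
}

/* Push side: walk all subflows, transmit on the marked ones and
 * clear the mark, mirroring the loops in __mptcp_push_pending(). */
static void push_pending(struct subflow_ctx *conn_list)
{
	for (struct subflow_ctx *sf = conn_list; sf; sf = sf->next) {
		if (atomic_load(&sf->scheduled)) {
			set_scheduled(sf, false);
			printf("xmit on %s\n", sf->name);
		}
	}
}

int main(void)
{
	struct subflow_ctx b = { NULL, false, "subflow-b" };
	struct subflow_ctx a = { &b, false, "subflow-a" };

	set_scheduled(&b, true);
	push_pending(&a);	/* prints: xmit on subflow-b */
	return 0;
}
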
2004 struct mptcp_subflow_context *subflow; in mptcp_rcv_space_adjust() local
2028 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2033 tp = tcp_sk(mptcp_subflow_tcp_sock(subflow)); in mptcp_rcv_space_adjust()
2077 mptcp_for_each_subflow(msk, subflow) { in mptcp_rcv_space_adjust()
2081 ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_rcv_space_adjust()
2337 struct mptcp_subflow_context *subflow; in mptcp_subflow_get_retrans() local
2340 mptcp_for_each_subflow(msk, subflow) { in mptcp_subflow_get_retrans()
2341 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_subflow_get_retrans()
2343 if (!__mptcp_subflow_active(subflow)) in mptcp_subflow_get_retrans()
2349 min_stale_count = min_t(int, min_stale_count, subflow->stale_count); in mptcp_subflow_get_retrans()
2353 if (subflow->backup || subflow->request_bkup) { in mptcp_subflow_get_retrans()
2416 struct mptcp_subflow_context *subflow, in __mptcp_subflow_disconnect() argument
2425 mptcp_subflow_ctx_reset(subflow); in __mptcp_subflow_disconnect()
2440 struct mptcp_subflow_context *subflow, in __mptcp_close_ssk() argument
2463 list_del(&subflow->node); in __mptcp_close_ssk()
2473 subflow->send_fastclose = 1; in __mptcp_close_ssk()
2478 __mptcp_subflow_disconnect(ssk, subflow, flags); in __mptcp_close_ssk()
2484 subflow->disposable = 1; in __mptcp_close_ssk()
2492 kfree_rcu(subflow, rcu); in __mptcp_close_ssk()
2533 struct mptcp_subflow_context *subflow) in mptcp_close_ssk() argument
2536 if (subflow->close_event_done) in mptcp_close_ssk()
2539 subflow->close_event_done = true; in mptcp_close_ssk()
2547 mptcp_pm_subflow_check_next(mptcp_sk(sk), subflow); in mptcp_close_ssk()
2549 __mptcp_close_ssk(sk, ssk, subflow, MPTCP_CF_PUSH); in mptcp_close_ssk()
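
The teardown cluster above shows two guards worth noting: mptcp_close_ssk() uses close_event_done so the close event fires at most once however many paths reach it, and __mptcp_close_ssk() marks the context disposable and frees it via kfree_rcu() so concurrent readers can finish. A minimal single-threaded model of the idempotence guard:

#include <stdbool.h>
#include <stdio.h>

struct subflow_ctx {
	bool close_event_done;	/* guards against running the event twice */
	bool disposable;	/* context may be freed once the ssk is gone */
};

/* Mirrors the guard in mptcp_close_ssk(): the close event for one
 * subflow must fire at most once, no matter how many paths reach it. */
static void close_ssk(struct subflow_ctx *sf)
{
	if (sf->close_event_done)
		return;
	sf->close_event_done = true;

	/* ... notify the path manager, then tear the subflow down ... */
	sf->disposable = true;	/* the kernel defers the free via kfree_rcu() */
	printf("subflow closed\n");
}

int main(void)
{
	struct subflow_ctx sf = { false, false };

	close_ssk(&sf);
	close_ssk(&sf);		/* second call is a no-op */
	return 0;
}
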
2559 struct mptcp_subflow_context *subflow, *tmp; in __mptcp_close_subflow() local
2564 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in __mptcp_close_subflow()
2565 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close_subflow()
2577 mptcp_close_ssk(sk, ssk, subflow); in __mptcp_close_subflow()
2594 struct mptcp_subflow_context *subflow, *tmp; in mptcp_check_fastclose() local
2602 mptcp_for_each_subflow_safe(msk, subflow, tmp) { in mptcp_check_fastclose()
2603 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in mptcp_check_fastclose()
2644 struct mptcp_subflow_context *subflow; in __mptcp_retrans() local
2676 mptcp_for_each_subflow(msk, subflow) { in __mptcp_retrans()
2677 if (READ_ONCE(subflow->scheduled)) { in __mptcp_retrans()
2680 mptcp_subflow_set_scheduled(subflow, false); in __mptcp_retrans()
2682 ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_retrans()
2760 struct mptcp_subflow_context *subflow, *tmp; in mptcp_do_fastclose() local
2764 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_do_fastclose()
2765 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), in mptcp_do_fastclose()
2766 subflow, MPTCP_CF_FASTCLOSE); in mptcp_do_fastclose()
2999 struct mptcp_subflow_context *subflow; in mptcp_check_send_data_fin() local
3015 mptcp_for_each_subflow(msk, subflow) { in mptcp_check_send_data_fin()
3016 struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); in mptcp_check_send_data_fin()
3094 struct mptcp_subflow_context *subflow; in __mptcp_close() local
3121 mptcp_for_each_subflow(msk, subflow) { in __mptcp_close()
3122 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in __mptcp_close()
3131 subflow->fail_tout = 0; in __mptcp_close()
3306 struct mptcp_subflow_context *subflow; in mptcp_sk_clone_init() local
3354 subflow = mptcp_subflow_ctx(ssk); in mptcp_sk_clone_init()
3355 list_add(&subflow->node, &msk->conn_list); in mptcp_sk_clone_init()
3372 __mptcp_subflow_fully_established(msk, subflow, mp_opt); in mptcp_sk_clone_init()
3398 struct mptcp_subflow_context *subflow, *tmp; in mptcp_destroy_common() local
3404 mptcp_for_each_subflow_safe(msk, subflow, tmp) in mptcp_destroy_common()
3405 __mptcp_close_ssk(sk, mptcp_subflow_tcp_sock(subflow), subflow, flags); in mptcp_destroy_common()
3540 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_subflow_process_delegated() local
3541 struct sock *sk = subflow->conn; in mptcp_subflow_process_delegated()
3590 struct mptcp_subflow_context *subflow; in mptcp_finish_connect() local
3594 subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_connect()
3595 sk = subflow->conn; in mptcp_finish_connect()
3598 pr_debug("msk=%p, token=%u\n", sk, subflow->token); in mptcp_finish_connect()
3600 subflow->map_seq = subflow->iasn; in mptcp_finish_connect()
3601 subflow->map_subflow_seq = 1; in mptcp_finish_connect()
3606 WRITE_ONCE(msk->local_key, subflow->local_key); in mptcp_finish_connect()
3622 struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); in mptcp_finish_join() local
3623 struct mptcp_sock *msk = mptcp_sk(subflow->conn); in mptcp_finish_join()
3627 pr_debug("msk=%p, subflow=%p\n", msk, subflow); in mptcp_finish_join()
3631 subflow->reset_reason = MPTCP_RST_EMPTCP; in mptcp_finish_join()
3636 if (!list_empty(&subflow->node)) { in mptcp_finish_join()
3653 list_add_tail(&subflow->node, &msk->conn_list); in mptcp_finish_join()
3657 list_add_tail(&subflow->node, &msk->join_list); in mptcp_finish_join()
3664 subflow->reset_reason = MPTCP_RST_EPROHIBIT; in mptcp_finish_join()
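
mptcp_finish_join() shows where a freshly joined subflow lands: straight onto conn_list when the msk can be taken, otherwise parked on join_list for __mptcp_flush_join_list() to pick up later (see the sketch after the flush cluster above). On failure it stamps a reset_reason such as MPTCP_RST_EPROHIBIT. A deliberately rough model of just the list choice, with counters standing in for the lists and a bool standing in for the socket lock:

#include <stdbool.h>
#include <stdio.h>

struct mptcp_sock {
	bool owned_by_user;	/* stand-in for the msk socket lock state */
	int conn_subflows;	/* counts entries on conn_list */
	int join_subflows;	/* counts entries on join_list */
};

/* Mirrors the branch in mptcp_finish_join(): when the msk is free the
 * new subflow goes straight onto conn_list; otherwise it is parked on
 * join_list and migrated later by __mptcp_flush_join_list(). */
static bool finish_join(struct mptcp_sock *msk)
{
	if (!msk->owned_by_user)
		msk->conn_subflows++;	/* list_add_tail(..., &msk->conn_list) */
	else
		msk->join_subflows++;	/* list_add_tail(..., &msk->join_list) */
	return true;
}

int main(void)
{
	struct mptcp_sock msk = { .owned_by_user = true };

	finish_join(&msk);
	printf("conn=%d join=%d\n", msk.conn_subflows, msk.join_subflows);
	return 0;
}
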
3748 struct mptcp_subflow_context *subflow; in mptcp_connect() local
3758 subflow = mptcp_subflow_ctx(ssk); in mptcp_connect()
3764 mptcp_subflow_early_fallback(msk, subflow); in mptcp_connect()
3766 if (subflow->request_mptcp) { in mptcp_connect()
3769 mptcp_subflow_early_fallback(msk, subflow); in mptcp_connect()
3772 mptcp_subflow_early_fallback(msk, subflow); in mptcp_connect()
3776 WRITE_ONCE(msk->write_seq, subflow->idsn); in mptcp_connect()
3777 WRITE_ONCE(msk->snd_nxt, subflow->idsn); in mptcp_connect()
3778 WRITE_ONCE(msk->snd_una, subflow->idsn); in mptcp_connect()
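
The tail of mptcp_connect() seeds the msk-level send state from the initial subflow: write_seq, snd_nxt and snd_una all start at the subflow's initial data sequence number (idsn), while the preceding hits show mptcp_subflow_early_fallback() demoting the connection to plain TCP when MPTCP cannot be negotiated. A sketch of the seeding step with simplified types:

#include <stdint.h>
#include <stdio.h>

struct subflow_ctx {
	int request_mptcp;
	uint64_t idsn;		/* initial data sequence number for this flow */
};

struct mptcp_sock {
	uint64_t write_seq;	/* next data sequence to queue */
	uint64_t snd_nxt;	/* next data sequence to send */
	uint64_t snd_una;	/* oldest unacknowledged data sequence */
};

/* Mirrors the tail of mptcp_connect(): whether or not MPTCP was
 * negotiated, the msk-level send window starts at the first subflow's
 * idsn (WRITE_ONCE() in the kernel, plain stores here). */
static void seed_msk_seq(struct mptcp_sock *msk, const struct subflow_ctx *sf)
{
	msk->write_seq = sf->idsn;
	msk->snd_nxt = sf->idsn;
	msk->snd_una = sf->idsn;
}

int main(void)
{
	struct subflow_ctx sf = { .request_mptcp = 1, .idsn = 0x1234 };
	struct mptcp_sock msk = { 0 };

	seed_msk_seq(&msk, &sf);
	printf("write_seq=%llx\n", (unsigned long long)msk.write_seq);
	return 0;
}
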
3947 struct mptcp_subflow_context *subflow; in mptcp_stream_accept() local
3950 subflow = mptcp_subflow_ctx(newsk); in mptcp_stream_accept()
3951 new_mptcp_sock = subflow->conn; in mptcp_stream_accept()
3975 mptcp_for_each_subflow(msk, subflow) { in mptcp_stream_accept()
3976 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_stream_accept()
4107 struct mptcp_subflow_context *subflow; in mptcp_napi_poll() local
4111 while ((subflow = mptcp_subflow_delegated_next(delegated)) != NULL) { in mptcp_napi_poll()
4112 struct sock *ssk = mptcp_subflow_tcp_sock(subflow); in mptcp_napi_poll()
4116 mptcp_subflow_process_delegated(ssk, xchg(&subflow->delegated_status, 0)); in mptcp_napi_poll()
4124 clear_bit(MPTCP_DELEGATE_SCHEDULED, &subflow->delegated_status); in mptcp_napi_poll()
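
mptcp_napi_poll() drains delegated work: for each delegated subflow it atomically grabs and clears delegated_status, processes the pending actions, then drops the SCHEDULED bit so the subflow can be delegated again. A user-space model using C11 atomics in place of xchg()/clear_bit(); the action names below are illustrative, not the kernel's MPTCP_DELEGATE_* constants:

#include <stdatomic.h>
#include <stdio.h>

static const unsigned long DELEGATE_SCHEDULED = 1UL << 0;
static const unsigned long DELEGATE_SEND = 1UL << 1;

struct subflow_ctx {
	struct subflow_ctx *next_delegated;
	atomic_ulong delegated_status;	/* bitmask of pending actions */
};

/* Mirrors the mptcp_napi_poll() loop: atomically grab-and-clear the
 * pending action mask for each delegated subflow, act on it, then
 * clear the SCHEDULED bit so the subflow can be delegated again. */
static void napi_poll(struct subflow_ctx *delegated)
{
	for (struct subflow_ctx *sf = delegated; sf; sf = sf->next_delegated) {
		unsigned long status = atomic_exchange(&sf->delegated_status, 0);

		if (status & DELEGATE_SEND)
			printf("process delegated send\n");
		/* clear_bit(MPTCP_DELEGATE_SCHEDULED, ...) equivalent */
		atomic_fetch_and(&sf->delegated_status, ~DELEGATE_SCHEDULED);
	}
}

int main(void)
{
	struct subflow_ctx sf = { NULL, DELEGATE_SCHEDULED | DELEGATE_SEND };

	napi_poll(&sf);
	return 0;
}
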