Lines Matching +full:t +full:- +full:head
1 // SPDX-License-Identifier: GPL-2.0-or-later
4 * Copyright (c) 1999-2000 Cisco, Inc.
5 * Copyright (c) 1999-2001 Motorola, Inc.
13 * lksctp developers <linux-sctp@vger.kernel.org>
60 packet->size = packet->overhead; in sctp_packet_reset()
62 packet->has_cookie_echo = 0; in sctp_packet_reset()
63 packet->has_sack = 0; in sctp_packet_reset()
64 packet->has_data = 0; in sctp_packet_reset()
65 packet->has_auth = 0; in sctp_packet_reset()
66 packet->ipfragok = 0; in sctp_packet_reset()
67 packet->auth = NULL; in sctp_packet_reset()
76 struct sctp_transport *tp = packet->transport; in sctp_packet_config()
77 struct sctp_association *asoc = tp->asoc; in sctp_packet_config()
82 packet->vtag = vtag; in sctp_packet_config()
89 packet->max_size = tp->pathmtu; in sctp_packet_config()
92 sk = asoc->base.sk; in sctp_packet_config()
95 packet->overhead = sctp_mtu_payload(sp, 0, 0); in sctp_packet_config()
96 packet->size = packet->overhead; in sctp_packet_config()
104 if (asoc->param_flags & SPP_PMTUD_ENABLE) in sctp_packet_config()
107 asoc->param_flags & SPP_PMTUD_ENABLE) { in sctp_packet_config()
112 if (asoc->pmtu_pending) { in sctp_packet_config()
113 if (asoc->param_flags & SPP_PMTUD_ENABLE) in sctp_packet_config()
115 asoc->pmtu_pending = 0; in sctp_packet_config()
128 if (!tp->dst) in sctp_packet_config()
133 if (__sk_dst_get(sk) != tp->dst) { in sctp_packet_config()
134 dst_hold(tp->dst); in sctp_packet_config()
135 sk_setup_caps(sk, tp->dst); in sctp_packet_config()
137 packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size), in sctp_packet_config()
139 : asoc->pathmtu; in sctp_packet_config()
150 packet->transport = transport; in sctp_packet_init()
151 packet->source_port = sport; in sctp_packet_init()
152 packet->destination_port = dport; in sctp_packet_init()
153 INIT_LIST_HEAD(&packet->chunk_list); in sctp_packet_init()
155 packet->overhead = 0; in sctp_packet_init()
157 packet->vtag = 0; in sctp_packet_init()
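
The fields touched by the fragments above all live in struct sctp_packet (include/net/sctp/structs.h). A simplified user-space model of that state, assuming only the members visible in these matches (types and order are illustrative, not the exact kernel layout):

    #include <stdint.h>
    #include <stddef.h>

    /* Simplified model of struct sctp_packet; names follow the kernel. */
    struct sctp_packet_model {
        uint16_t source_port;        /* set once in sctp_packet_init() */
        uint16_t destination_port;
        uint32_t vtag;               /* verification tag, set in sctp_packet_config() */
        void *chunk_list;            /* stands in for the chunk list_head */
        void *transport;             /* owning sctp_transport */
        void *auth;                  /* AUTH chunk bundled into this packet, if any */
        size_t overhead;             /* IP + SCTP header bytes, from sctp_mtu_payload() */
        size_t size;                 /* overhead + all appended chunk bytes */
        size_t max_size;             /* pathmtu, or gso_max_size when GSO is usable */
        unsigned int has_cookie_echo:1, has_sack:1, has_auth:1,
                     has_data:1, ipfragok:1;   /* bundling state, cleared by sctp_packet_reset() */
    };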
167 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { in sctp_packet_free()
168 list_del_init(&chunk->list); in sctp_packet_free()
187 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1); in sctp_packet_transmit_chunk()
191 if (!packet->has_cookie_echo) { in sctp_packet_transmit_chunk()
196 chunk->skb->sk->sk_err = -error; in sctp_packet_transmit_chunk()
219 struct sctp_transport *t = pkt->transport; in sctp_packet_bundle_pad() local
223 if (!chunk->pmtu_probe) in sctp_packet_bundle_pad()
229 pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead); in sctp_packet_bundle_pad()
233 list_add_tail(&pad->list, &pkt->chunk_list); in sctp_packet_bundle_pad()
234 pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length)); in sctp_packet_bundle_pad()
235 chunk->transport = t; in sctp_packet_bundle_pad()
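
Chunk lengths are accounted after rounding up to a 4-byte boundary (SCTP_PAD4), and a PLPMTUD probe is padded out to the probe size minus the bytes already consumed by headers and the probe chunk. A minimal sketch of that arithmetic, using the kernel's SCTP_PAD4 definition and hypothetical sizes:

    #include <stdio.h>

    /* Same rounding as the kernel's SCTP_PAD4(): round up to 4 bytes. */
    #define SCTP_PAD4(s) (((s) + 3) & ~3)

    int main(void)
    {
        unsigned int probe_size = 1400;  /* tp->pl.probe_size (example value) */
        unsigned int overhead   = 48;    /* headers + HEARTBEAT probe so far (example value) */
        unsigned int chunk_len  = 17;    /* raw chunk length */

        /* The PAD chunk fills the probe packet up to probe_size. */
        printf("pad chunk data: %u bytes\n", probe_size - overhead);
        /* Every appended chunk is accounted with 4-byte alignment. */
        printf("accounted length: %u bytes\n", SCTP_PAD4(chunk_len));  /* -> 20 */
        return 0;
    }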
244 struct sctp_association *asoc = pkt->transport->asoc; in sctp_packet_bundle_auth()
248 /* if we don't have an association, we can't do authentication */ in sctp_packet_bundle_auth()
255 if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth) in sctp_packet_bundle_auth()
259 * don't do it in sctp_packet_bundle_auth()
261 if (!chunk->auth) in sctp_packet_bundle_auth()
264 auth = sctp_make_auth(asoc, chunk->shkey->key_id); in sctp_packet_bundle_auth()
268 auth->shkey = chunk->shkey; in sctp_packet_bundle_auth()
269 sctp_auth_shkey_hold(auth->shkey); in sctp_packet_bundle_auth()
285 /* If sending DATA and haven't already bundled a SACK, try to in sctp_packet_bundle_sack()
288 if (sctp_chunk_is_data(chunk) && !pkt->has_sack && in sctp_packet_bundle_sack()
289 !pkt->has_cookie_echo) { in sctp_packet_bundle_sack()
292 asoc = pkt->transport->asoc; in sctp_packet_bundle_sack()
293 timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; in sctp_packet_bundle_sack()
299 if (pkt->transport->sack_generation != in sctp_packet_bundle_sack()
300 pkt->transport->asoc->peer.sack_generation) in sctp_packet_bundle_sack()
303 asoc->a_rwnd = asoc->rwnd; in sctp_packet_bundle_sack()
311 SCTP_INC_STATS(asoc->base.net, in sctp_packet_bundle_sack()
313 asoc->stats.octrlchunks++; in sctp_packet_bundle_sack()
314 asoc->peer.sack_needed = 0; in sctp_packet_bundle_sack()
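
Pieced together, the bundling rule above is: only when sending DATA, only if no SACK or COOKIE ECHO is already in the packet, only while the association's SACK timer is pending, and only if the transport's sack_generation still matches the peer's. A simplified stand-alone predicate (illustrative condensation, not the kernel's exact control flow):

    #include <stdbool.h>

    /* Illustrative model of the checks in sctp_packet_bundle_sack(). */
    static bool should_bundle_sack(bool chunk_is_data, bool pkt_has_sack,
                                   bool pkt_has_cookie_echo, bool sack_timer_pending,
                                   unsigned int transport_sack_gen,
                                   unsigned int peer_sack_gen)
    {
        if (!chunk_is_data || pkt_has_sack || pkt_has_cookie_echo)
            return false;
        if (!sack_timer_pending)
            return false;
        /* A stale generation means this transport's view of the SACK
         * state is out of date, so do not bundle one here.
         */
        return transport_sack_gen == peer_sack_gen;
    }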
331 __u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length)); in __sctp_packet_append_chunk()
340 switch (chunk->chunk_hdr->type) { in __sctp_packet_append_chunk()
346 packet->has_sack = 1; in __sctp_packet_append_chunk()
348 packet->has_auth = 1; in __sctp_packet_append_chunk()
350 packet->has_data = 1; in __sctp_packet_append_chunk()
352 chunk->sent_at = jiffies; in __sctp_packet_append_chunk()
354 chunk->sent_count++; in __sctp_packet_append_chunk()
357 packet->has_cookie_echo = 1; in __sctp_packet_append_chunk()
361 packet->has_sack = 1; in __sctp_packet_append_chunk()
362 if (chunk->asoc) in __sctp_packet_append_chunk()
363 chunk->asoc->stats.osacks++; in __sctp_packet_append_chunk()
367 packet->has_auth = 1; in __sctp_packet_append_chunk()
368 packet->auth = chunk; in __sctp_packet_append_chunk()
373 list_add_tail(&chunk->list, &packet->chunk_list); in __sctp_packet_append_chunk()
374 packet->size += chunk_len; in __sctp_packet_append_chunk()
375 chunk->transport = packet->transport; in __sctp_packet_append_chunk()
420 static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb) in sctp_packet_gso_append() argument
422 if (SCTP_OUTPUT_CB(head)->last == head) in sctp_packet_gso_append()
423 skb_shinfo(head)->frag_list = skb; in sctp_packet_gso_append()
425 SCTP_OUTPUT_CB(head)->last->next = skb; in sctp_packet_gso_append()
426 SCTP_OUTPUT_CB(head)->last = skb; in sctp_packet_gso_append()
428 head->truesize += skb->truesize; in sctp_packet_gso_append()
429 head->data_len += skb->len; in sctp_packet_gso_append()
430 head->len += skb->len; in sctp_packet_gso_append()
431 refcount_add(skb->truesize, &head->sk->sk_wmem_alloc); in sctp_packet_gso_append()
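
sctp_packet_gso_append() chains each per-MTU segment onto the head skb: the first segment goes on skb_shinfo(head)->frag_list, later ones onto the previous segment's ->next, while head's len, data_len and truesize grow by the appended segment's sizes. A small user-space analogue of that bookkeeping (assumed simplification, no real sk_buffs):

    #include <stddef.h>

    struct seg {
        struct seg *next;
        size_t len;
        size_t truesize;
    };

    struct gso_head {
        struct seg *frag_list;   /* first chained segment */
        struct seg *last;        /* tail of the chain; NULL stands in for "the head itself" */
        size_t len, data_len, truesize;
    };

    /* Mirror of the append logic: link the new segment, then grow the totals. */
    static void gso_append(struct gso_head *head, struct seg *skb)
    {
        if (!head->last)          /* kernel: SCTP_OUTPUT_CB(head)->last == head */
            head->frag_list = skb;
        else
            head->last->next = skb;
        head->last = skb;

        head->truesize += skb->truesize;
        head->data_len += skb->len;
        head->len      += skb->len;
        /* The kernel also charges skb->truesize to sk->sk_wmem_alloc here. */
    }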
437 struct sk_buff *head, int gso, gfp_t gfp) in sctp_packet_pack() argument
439 struct sctp_transport *tp = packet->transport; in sctp_packet_pack()
443 struct sock *sk = head->sk; in sctp_packet_pack()
448 skb_shinfo(head)->gso_type = sk->sk_gso_type; in sctp_packet_pack()
449 SCTP_OUTPUT_CB(head)->last = head; in sctp_packet_pack()
451 nskb = head; in sctp_packet_pack()
452 pkt_size = packet->size; in sctp_packet_pack()
458 pkt_size = packet->overhead; in sctp_packet_pack()
459 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, in sctp_packet_pack()
461 int padded = SCTP_PAD4(chunk->skb->len); in sctp_packet_pack()
463 if (chunk == packet->auth) in sctp_packet_pack()
465 else if (auth_len + padded + packet->overhead > in sctp_packet_pack()
466 tp->pathmtu) in sctp_packet_pack()
468 else if (pkt_size + padded > tp->pathmtu) in sctp_packet_pack()
475 skb_reserve(nskb, packet->overhead + MAX_HEADER); in sctp_packet_pack()
478 /* merge chunks into nskb and append nskb into head list */ in sctp_packet_pack()
479 pkt_size -= packet->overhead; in sctp_packet_pack()
480 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { in sctp_packet_pack()
483 list_del_init(&chunk->list); in sctp_packet_pack()
486 !tp->rto_pending) { in sctp_packet_pack()
487 chunk->rtt_in_progress = 1; in sctp_packet_pack()
488 tp->rto_pending = 1; in sctp_packet_pack()
492 padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len; in sctp_packet_pack()
494 skb_put_zero(chunk->skb, padding); in sctp_packet_pack()
496 if (chunk == packet->auth) in sctp_packet_pack()
500 skb_put_data(nskb, chunk->skb->data, chunk->skb->len); in sctp_packet_pack()
502 pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n", in sctp_packet_pack()
504 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), in sctp_packet_pack()
505 chunk->has_tsn ? "TSN" : "No TSN", in sctp_packet_pack()
506 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0, in sctp_packet_pack()
507 ntohs(chunk->chunk_hdr->length), chunk->skb->len, in sctp_packet_pack()
508 chunk->rtt_in_progress); in sctp_packet_pack()
510 pkt_size -= SCTP_PAD4(chunk->skb->len); in sctp_packet_pack()
512 if (!sctp_chunk_is_data(chunk) && chunk != packet->auth) in sctp_packet_pack()
520 sctp_auth_calculate_hmac(tp->asoc, nskb, auth, in sctp_packet_pack()
521 packet->auth->shkey, gfp); in sctp_packet_pack()
523 if (list_empty(&packet->chunk_list)) in sctp_packet_pack()
524 sctp_chunk_free(packet->auth); in sctp_packet_pack()
526 list_add(&packet->auth->list, in sctp_packet_pack()
527 &packet->chunk_list); in sctp_packet_pack()
531 sctp_packet_gso_append(head, nskb); in sctp_packet_pack()
534 } while (!list_empty(&packet->chunk_list)); in sctp_packet_pack()
537 memset(head->cb, 0, max(sizeof(struct inet_skb_parm), in sctp_packet_pack()
539 skb_shinfo(head)->gso_segs = pkt_count; in sctp_packet_pack()
540 skb_shinfo(head)->gso_size = GSO_BY_FRAGS; in sctp_packet_pack()
547 if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) || in sctp_packet_pack()
548 dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) { in sctp_packet_pack()
550 (struct sctphdr *)skb_transport_header(head); in sctp_packet_pack()
552 sh->checksum = sctp_compute_cksum(head, 0); in sctp_packet_pack()
555 head->ip_summed = CHECKSUM_PARTIAL; in sctp_packet_pack()
556 head->csum_not_inet = 1; in sctp_packet_pack()
557 head->csum_start = skb_transport_header(head) - head->head; in sctp_packet_pack()
558 head->csum_offset = offsetof(struct sctphdr, checksum); in sctp_packet_pack()
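
The tail of sctp_packet_pack() picks between two checksum strategies: if the device cannot offload SCTP CRC, or xfrm/UDP encapsulation/IP fragmentation is in play, the CRC32c is computed in software over the whole packet; otherwise the skb goes down with CHECKSUM_PARTIAL and csum_start/csum_offset pointing at the checksum field of the SCTP common header. A self-contained sketch of the software path, using the standard bitwise CRC32c (RFC 3309/4960); byte-order handling of the stored value is omitted, and the 12-byte common header layout is modeled locally:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct sctphdr_model {           /* 12-byte SCTP common header */
        uint16_t source;
        uint16_t dest;
        uint32_t vtag;
        uint32_t checksum;
    };

    /* Bitwise CRC32c (Castagnoli), reflected polynomial 0x82F63B78. */
    static uint32_t crc32c(const uint8_t *buf, size_t len)
    {
        uint32_t crc = 0xFFFFFFFFu;
        while (len--) {
            crc ^= *buf++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0x82F63B78u & (0u - (crc & 1)));
        }
        return ~crc;
    }

    /* The checksum is computed with the checksum field zeroed first,
     * just as the kernel zeroes sh->checksum before sctp_compute_cksum().
     */
    static uint32_t sctp_sw_cksum(uint8_t *pkt, size_t len)
    {
        memset(pkt + offsetof(struct sctphdr_model, checksum), 0, 4);
        return crc32c(pkt, len);
    }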
571 struct sctp_transport *tp = packet->transport; in sctp_packet_transmit()
572 struct sctp_association *asoc = tp->asoc; in sctp_packet_transmit()
575 struct sk_buff *head; in sctp_packet_transmit() local
580 if (list_empty(&packet->chunk_list)) in sctp_packet_transmit()
582 chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list); in sctp_packet_transmit()
583 sk = chunk->skb->sk; in sctp_packet_transmit()
585 if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) { in sctp_packet_transmit()
586 if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */ in sctp_packet_transmit()
587 packet->ipfragok = 1; in sctp_packet_transmit()
590 pr_err_once("Trying to GSO but underlying device doesn't support it."); in sctp_packet_transmit()
597 /* alloc head skb */ in sctp_packet_transmit()
598 head = alloc_skb((gso ? packet->overhead : packet->size) + in sctp_packet_transmit()
600 if (!head) in sctp_packet_transmit()
602 skb_reserve(head, packet->overhead + MAX_HEADER); in sctp_packet_transmit()
603 skb_set_owner_w(head, sk); in sctp_packet_transmit()
606 sh = skb_push(head, sizeof(struct sctphdr)); in sctp_packet_transmit()
607 skb_reset_transport_header(head); in sctp_packet_transmit()
608 sh->source = htons(packet->source_port); in sctp_packet_transmit()
609 sh->dest = htons(packet->destination_port); in sctp_packet_transmit()
610 sh->vtag = htonl(packet->vtag); in sctp_packet_transmit()
611 sh->checksum = 0; in sctp_packet_transmit()
614 if (!tp->dst) { in sctp_packet_transmit()
616 kfree_skb(head); in sctp_packet_transmit()
621 pkt_count = sctp_packet_pack(packet, head, gso, gfp); in sctp_packet_transmit()
623 kfree_skb(head); in sctp_packet_transmit()
626 pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len); in sctp_packet_transmit()
629 if (packet->has_data && sctp_state(asoc, ESTABLISHED) && in sctp_packet_transmit()
630 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) { in sctp_packet_transmit()
632 &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; in sctp_packet_transmit()
634 asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; in sctp_packet_transmit()
641 tp->af_specific->ecn_capable(sk); in sctp_packet_transmit()
643 asoc->stats.opackets += pkt_count; in sctp_packet_transmit()
644 if (asoc->peer.last_sent_to != tp) in sctp_packet_transmit()
645 asoc->peer.last_sent_to = tp; in sctp_packet_transmit()
647 head->ignore_df = packet->ipfragok; in sctp_packet_transmit()
648 if (tp->dst_pending_confirm) in sctp_packet_transmit()
649 skb_set_dst_pending_confirm(head, 1); in sctp_packet_transmit()
653 if (tp->af_specific->sctp_xmit(head, tp) >= 0 && in sctp_packet_transmit()
654 tp->dst_pending_confirm) in sctp_packet_transmit()
655 tp->dst_pending_confirm = 0; in sctp_packet_transmit()
658 list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) { in sctp_packet_transmit()
659 list_del_init(&chunk->list); in sctp_packet_transmit()
676 struct sctp_transport *transport = packet->transport; in sctp_packet_can_append_data()
677 struct sctp_association *asoc = transport->asoc; in sctp_packet_can_append_data()
678 struct sctp_outq *q = &asoc->outqueue; in sctp_packet_can_append_data()
693 rwnd = asoc->peer.rwnd; in sctp_packet_can_append_data()
694 inflight = q->outstanding_bytes; in sctp_packet_can_append_data()
695 flight_size = transport->flight_size; in sctp_packet_can_append_data()
701 * so we can't fall back to rule 6.1 B). in sctp_packet_can_append_data()
717 if (chunk->fast_retransmit != SCTP_NEED_FRTX && in sctp_packet_can_append_data()
718 flight_size >= transport->cwnd) in sctp_packet_can_append_data()
721 /* Nagle's algorithm to solve small-packet problem: in sctp_packet_can_append_data()
727 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && in sctp_packet_can_append_data()
728 !asoc->force_delay) in sctp_packet_can_append_data()
742 if (chunk->skb->len + q->out_qlen > transport->pathmtu - in sctp_packet_can_append_data()
743 packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4) in sctp_packet_can_append_data()
747 /* Don't delay large message writes that may have been fragmented */ in sctp_packet_can_append_data()
748 if (!chunk->msg->can_delay) in sctp_packet_can_append_data()
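
The checks above implement the can-append decision: the congestion window is consulted first (unless the chunk is marked for fast retransmit), then a Nagle-style delay that is skipped when the socket is nodelay, when nothing is in flight, when the queued data already fills a full packet, or when the message was fragmented and cannot be delayed. A simplified stand-alone version of that decision, with return values modeled loosely on the kernel's enum sctp_xmit (the real function also covers retransmission and zero-window probing cases):

    #include <stdbool.h>

    enum xmit_verdict { XMIT_OK, XMIT_RWND_FULL, XMIT_DELAY };

    /* Illustrative condensation of sctp_packet_can_append_data(). */
    static enum xmit_verdict can_append_data(bool need_fast_rtx, unsigned int flight_size,
                                             unsigned int cwnd, bool nodelay,
                                             unsigned int inflight, bool force_delay,
                                             bool fills_packet, bool msg_can_delay)
    {
        if (!need_fast_rtx && flight_size >= cwnd)
            return XMIT_RWND_FULL;

        /* Nagle: send small chunks right away only if allowed to. */
        if ((nodelay || inflight == 0) && !force_delay)
            return XMIT_OK;
        if (fills_packet)
            return XMIT_OK;
        if (!msg_can_delay)
            return XMIT_OK;

        return XMIT_DELAY;
    }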
759 struct sctp_transport *transport = packet->transport; in sctp_packet_append_data()
761 struct sctp_association *asoc = transport->asoc; in sctp_packet_append_data()
762 u32 rwnd = asoc->peer.rwnd; in sctp_packet_append_data()
765 transport->flight_size += datasize; in sctp_packet_append_data()
768 asoc->outqueue.outstanding_bytes += datasize; in sctp_packet_append_data()
772 rwnd -= datasize; in sctp_packet_append_data()
776 asoc->peer.rwnd = rwnd; in sctp_packet_append_data()
778 asoc->stream.si->assign_number(chunk); in sctp_packet_append_data()
788 /* Don't bundle in this packet if this chunk's auth key doesn't in sctp_packet_will_fit()
790 * don't bundle the chunk with auth key if other chunks in this in sctp_packet_will_fit()
791 * packet don't have auth key. in sctp_packet_will_fit()
793 if ((packet->auth && chunk->shkey != packet->auth->shkey) || in sctp_packet_will_fit()
794 (!packet->auth && chunk->shkey && in sctp_packet_will_fit()
795 chunk->chunk_hdr->type != SCTP_CID_AUTH)) in sctp_packet_will_fit()
798 psize = packet->size; in sctp_packet_will_fit()
799 if (packet->transport->asoc) in sctp_packet_will_fit()
800 pmtu = packet->transport->asoc->pathmtu; in sctp_packet_will_fit()
802 pmtu = packet->transport->pathmtu; in sctp_packet_will_fit()
810 * 2. The packet doesn't have any data in it yet and data in sctp_packet_will_fit()
814 (!packet->has_data && chunk->auth)) { in sctp_packet_will_fit()
815 /* We no longer do re-fragmentation. in sctp_packet_will_fit()
819 packet->ipfragok = 1; in sctp_packet_will_fit()
828 maxsize = pmtu - packet->overhead; in sctp_packet_will_fit()
829 if (packet->auth) in sctp_packet_will_fit()
830 maxsize -= SCTP_PAD4(packet->auth->skb->len); in sctp_packet_will_fit()
840 if (!sctp_chunk_is_data(chunk) && packet->has_data) in sctp_packet_will_fit()
843 if (psize + chunk_len > packet->max_size) in sctp_packet_will_fit()
847 if (!packet->transport->burst_limited && in sctp_packet_will_fit()
848 psize + chunk_len > (packet->transport->cwnd >> 1)) in sctp_packet_will_fit()
854 if (packet->transport->burst_limited && in sctp_packet_will_fit()
855 psize + chunk_len > (packet->transport->burst_limited >> 1)) in sctp_packet_will_fit()
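
With GSO, a single sctp_packet may span more than one MTU, so sctp_packet_will_fit() also caps the running total against max_size and against half of the congestion window (or half of the saved burst-limited cwnd). A tiny illustration of those caps with hypothetical sizes:

    /* Illustrative: would an extra chunk still fit in this GSO packet? */
    static int chunk_fits(unsigned long psize, unsigned long chunk_len,
                          unsigned long max_size, unsigned long cwnd,
                          unsigned long burst_limited)
    {
        if (psize + chunk_len > max_size)
            return 0;               /* hit the GSO/PMTU limit, flush first */
        if (!burst_limited && psize + chunk_len > (cwnd >> 1))
            return 0;               /* one packet must not use more than half of cwnd */
        if (burst_limited && psize + chunk_len > (burst_limited >> 1))
            return 0;               /* same rule against the pre-burst cwnd */
        return 1;
    }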