Lines matching +full:link +full:-loss +full:-low (net/sched/sch_netem.c)

1 // SPDX-License-Identifier: GPL-2.0-only
34 Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
38 ----------------------------------------------------------------
45 normal, Pareto, or experimental curves. Packet loss,
53 Correlated Loss Generator models
55 Added generation of correlated loss according to the
56 "Gilbert-Elliot" model, a 4-state markov model.
61 and intuitive loss model for packet networks and its implementation
74 /* internal t(ime)fifo qdisc uses t_root and sch->limit */
77 /* a linear queue; reduces rbtree rebalancing when jitter is low */
89 u32 loss; member
133 /* Correlated Loss Generation models */
138 /* 4-states and Gilbert-Elliot models */
139 u32 a1; /* p13 for 4-states or p for GE */
140 u32 a2; /* p31 for 4-states or r for GE */
141 u32 a3; /* p32 for 4-states or h for GE */
142 u32 a4; /* p14 for 4-states or 1-k for GE */
143 u32 a5; /* p23 used only in 4-states */
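
The a1..a5 fields above are compared directly against 32-bit PRNG output in loss_4state() and loss_gilb_ell(), so each probability is stored as a fraction of the full u32 range. A minimal userspace sketch of that scaling, assuming probabilities given as doubles in [0, 1]; prob_to_u32() is an illustrative helper, not a kernel function.

#include <stdint.h>
#include <stdio.h>

/* Scale a probability in [0.0, 1.0] to a u32 threshold, so that
 * "rnd < threshold" holds with roughly that probability for a uniform
 * 32-bit rnd.  Illustrative helper only.
 */
static uint32_t prob_to_u32(double p)
{
	if (p <= 0.0)
		return 0;
	if (p >= 1.0)
		return UINT32_MAX;
	return (uint32_t)(p * (double)UINT32_MAX);
}

int main(void)
{
	/* e.g. a 1% p13 for the 4-state model, or a 1% p for Gilbert-Elliott */
	uint32_t a1 = prob_to_u32(0.01);

	printf("a1 threshold: %u (%.2f%% of UINT32_MAX)\n",
	       (unsigned int)a1, 100.0 * a1 / (double)UINT32_MAX);
	return 0;
}
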
159 * As skb->rbnode uses same storage than skb->next, skb->prev and skb->tstamp,
160 * and skb->next & skb->prev are scratch space for a qdisc,
161 * we save skb->tstamp value in skb->cb[] before destroying it.
171 return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; in netem_skb_cb()
174 /* init_crandom - initialize correlated random number generator
179 state->rho = rho; in init_crandom()
180 state->last = get_random_u32(); in init_crandom()
183 /* get_crandom - correlated random number generator
191 struct rnd_state *s = &p->prng_state; in get_crandom()
193 if (!state || state->rho == 0) /* no correlation */ in get_crandom()
197 rho = (u64)state->rho + 1; in get_crandom()
198 answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32; in get_crandom()
199 state->last = answer; in get_crandom()
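
get_crandom() blends a fresh uniform 32-bit draw with the previous output in 32.32 fixed point, weighted by rho, so consecutive values are correlated (rho = 0 disables the blend). A minimal userspace sketch of the same arithmetic; xorshift32() stands in for the kernel PRNG and all names here are illustrative.

#include <stdint.h>
#include <stdio.h>

struct crnd_sketch {
	uint32_t last;
	uint32_t rho;
};

/* Plain xorshift32 as a stand-in uniform 32-bit source. */
static uint32_t xorshift32(uint32_t *s)
{
	uint32_t x = *s;

	x ^= x << 13;
	x ^= x >> 17;
	x ^= x << 5;
	return *s = x;
}

/* Same blend as get_crandom(): next = value*(1-rho) + last*rho, in 32.32. */
static uint32_t get_crandom_sketch(struct crnd_sketch *st, uint32_t *prng)
{
	uint64_t value = xorshift32(prng);
	uint64_t rho, answer;

	if (st->rho == 0)			/* no correlation */
		return (uint32_t)value;

	rho = (uint64_t)st->rho + 1;
	answer = (value * ((1ULL << 32) - rho) + (uint64_t)st->last * rho) >> 32;
	st->last = (uint32_t)answer;
	return (uint32_t)answer;
}

int main(void)
{
	struct crnd_sketch st = { .last = 0, .rho = 0xc0000000u }; /* ~75% */
	uint32_t seed = 42;

	for (int i = 0; i < 5; i++)
		printf("%u\n", (unsigned int)get_crandom_sketch(&st, &seed));
	return 0;
}
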
203 /* loss_4state - 4-state model loss generator
204 * Generates losses according to the 4-state Markov chain adopted in
205 * the GI (General and Intuitive) loss model.
209 struct clgstate *clg = &q->clg; in loss_4state()
210 u32 rnd = prandom_u32_state(&q->prng.prng_state); in loss_4state()
222 switch (clg->state) { in loss_4state()
224 if (rnd < clg->a4) { in loss_4state()
225 clg->state = LOST_IN_GAP_PERIOD; in loss_4state()
227 } else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) { in loss_4state()
228 clg->state = LOST_IN_BURST_PERIOD; in loss_4state()
230 } else if (clg->a1 + clg->a4 < rnd) { in loss_4state()
231 clg->state = TX_IN_GAP_PERIOD; in loss_4state()
236 if (rnd < clg->a5) { in loss_4state()
237 clg->state = LOST_IN_BURST_PERIOD; in loss_4state()
240 clg->state = TX_IN_BURST_PERIOD; in loss_4state()
245 if (rnd < clg->a3) in loss_4state()
246 clg->state = TX_IN_BURST_PERIOD; in loss_4state()
247 else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) { in loss_4state()
248 clg->state = TX_IN_GAP_PERIOD; in loss_4state()
249 } else if (clg->a2 + clg->a3 < rnd) { in loss_4state()
250 clg->state = LOST_IN_BURST_PERIOD; in loss_4state()
255 clg->state = TX_IN_GAP_PERIOD; in loss_4state()
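
loss_4state() advances the chain with a single uniform draw per packet compared against cumulative thresholds: from the good-reception-in-gap state, rnd below a4 (p14) is an isolated loss, rnd below a1 + a4 enters a loss burst, and larger values stay in the gap state. A partial userspace sketch of that one case, under the assumption that the "lost" states correspond to a dropped packet; names are illustrative and the enum does not mirror the kernel definitions.

#include <stdbool.h>
#include <stdint.h>

enum state_sketch { TX_GAP, TX_BURST, LOST_GAP, LOST_BURST };

struct clg_gap_sketch {
	enum state_sketch state;
	uint32_t a1;	/* p13, scaled to the u32 range */
	uint32_t a4;	/* p14, scaled to the u32 range */
};

/* One step from the good-in-gap state; returns true when the packet is
 * treated as lost.  Simplified: the boundary case rnd == a4 is folded into
 * the burst branch rather than left untouched as in the kernel.
 */
static bool step_tx_in_gap(struct clg_gap_sketch *clg, uint32_t rnd)
{
	if (rnd < clg->a4) {			/* p14: isolated loss in gap */
		clg->state = LOST_GAP;
		return true;
	} else if (rnd < clg->a1 + clg->a4) {	/* p13: enter a loss burst */
		clg->state = LOST_BURST;
		return true;
	}
	clg->state = TX_GAP;			/* otherwise stay put */
	return false;
}
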
262 /* loss_gilb_ell - Gilbert-Elliot model loss generator
263 * Generates losses according to the Gilbert-Elliot loss model or
269 * with the loss probability of the current state decides if the next
274 struct clgstate *clg = &q->clg; in loss_gilb_ell()
275 struct rnd_state *s = &q->prng.prng_state; in loss_gilb_ell()
277 switch (clg->state) { in loss_gilb_ell()
279 if (prandom_u32_state(s) < clg->a1) in loss_gilb_ell()
280 clg->state = BAD_STATE; in loss_gilb_ell()
281 if (prandom_u32_state(s) < clg->a4) in loss_gilb_ell()
285 if (prandom_u32_state(s) < clg->a2) in loss_gilb_ell()
286 clg->state = GOOD_STATE; in loss_gilb_ell()
287 if (prandom_u32_state(s) > clg->a3) in loss_gilb_ell()
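
In loss_gilb_ell() the good state drops a packet with probability a4 (1-k) and switches to the bad state with probability a1 (p); the bad state transmits with probability a3 (h), i.e. drops with 1-h, and recovers with probability a2 (r). A Monte-Carlo sketch of that two-state chain with double probabilities instead of u32 thresholds; parameter values and names are illustrative.

#include <stdio.h>
#include <stdlib.h>

static double frand(void)
{
	return (double)rand() / RAND_MAX;
}

/* Estimate the long-run loss rate of a Gilbert-Elliott chain:
 * p = good->bad, r = bad->good, (1-k) = loss while good, h = transmission
 * probability while bad.
 */
int main(void)
{
	double p = 0.01, r = 0.30, one_minus_k = 0.001, h = 0.70;
	enum { GOOD, BAD } state = GOOD;
	long lost = 0, n = 1000000;

	srand(1);
	for (long i = 0; i < n; i++) {
		if (state == GOOD) {
			if (frand() < p)
				state = BAD;
			if (frand() < one_minus_k)
				lost++;
		} else {
			if (frand() < r)
				state = GOOD;
			if (frand() > h)
				lost++;
		}
	}
	printf("estimated loss rate: %.4f\n", (double)lost / n);
	return 0;
}
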
296 switch (q->loss_model) { in loss_event()
299 return q->loss && q->loss >= get_crandom(&q->loss_cor, &q->prng); in loss_event()
302 /* 4state loss model algorithm (used also for GI model) in loss_event()
303 * Extracts a value from the markov 4 state loss generator, in loss_event()
310 /* Gilbert-Elliot loss model algorithm in loss_event()
311 * Extracts a value from the Gilbert-Elliot loss generator, in loss_event()
322 /* tabledist - return a pseudo-randomly distributed value with mean mu and
324 * distribution, and a uniformly-distributed pseudo-random source.
342 return ((rnd % (2 * (u32)sigma)) + mu) - sigma; in tabledist()
344 t = dist->table[rnd % dist->size]; in tabledist()
349 x -= NETEM_DIST_SCALE/2; in tabledist()
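
tabledist() returns mu when sigma is 0, falls back to a uniform value in [mu - sigma, mu + sigma) when no table is loaded, and otherwise picks a signed table entry t scaled by NETEM_DIST_SCALE (8192 in the uapi headers) and returns roughly mu + t * sigma / NETEM_DIST_SCALE. A userspace sketch of both paths; the table contents in main() are made-up placeholders and the function name is not the kernel's.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DIST_SCALE 8192	/* mirrors NETEM_DIST_SCALE */

/* Map a uniform 32-bit rnd either to a uniform value in
 * [mu - sigma, mu + sigma) or, through a table of DIST_SCALE-scaled
 * samples, to a value distributed like the table with mean mu and
 * deviation sigma.
 */
static int64_t tabledist_sketch(int64_t mu, int32_t sigma, uint32_t rnd,
				const int16_t *table, int size)
{
	int64_t x;
	long t;

	if (sigma == 0)
		return mu;

	if (!table)	/* uniform fallback, same formula as the kernel */
		return ((rnd % (2 * (uint32_t)sigma)) + mu) - sigma;

	t = table[rnd % size];
	x = (sigma % DIST_SCALE) * t;
	if (x >= 0)
		x += DIST_SCALE / 2;
	else
		x -= DIST_SCALE / 2;

	return x / DIST_SCALE + (sigma / DIST_SCALE) * t + mu;
}

int main(void)
{
	/* placeholder "distribution": three scaled samples around zero */
	static const int16_t table[] = { -DIST_SCALE, 0, DIST_SCALE };

	printf("%lld\n", (long long)tabledist_sketch(100000, 10000,
						     12345u, NULL, 0));
	printf("%lld\n", (long long)tabledist_sketch(100000, 10000,
						     12345u, table, 3));
	return 0;
}
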
356 len += q->packet_overhead; in packet_time_ns()
358 if (q->cell_size) { in packet_time_ns()
359 u32 cells = reciprocal_divide(len, q->cell_size_reciprocal); in packet_time_ns()
361 if (len > cells * q->cell_size) /* extra cell needed for remainder */ in packet_time_ns()
363 len = cells * (q->cell_size + q->cell_overhead); in packet_time_ns()
366 return div64_u64(len * NSEC_PER_SEC, q->rate); in packet_time_ns()
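
packet_time_ns() adds the configured per-packet overhead, rounds up to whole cells of cell_size bytes (each also carrying cell_overhead bytes), and converts the padded length to nanoseconds at the configured rate, taken here as bytes per second. A worked userspace sketch with illustrative ATM-like numbers (48-byte cells, 5 bytes of cell overhead); plain division stands in for reciprocal_divide() and div64_u64(), and the function name is not from the kernel.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Pad to whole cells, then convert bytes to nanoseconds at 'rate' B/s. */
static uint64_t packet_time_ns_sketch(uint64_t len, uint32_t packet_overhead,
				      uint32_t cell_size, uint32_t cell_overhead,
				      uint64_t rate)
{
	len += packet_overhead;

	if (cell_size) {
		uint32_t cells = len / cell_size;

		if (len > (uint64_t)cells * cell_size)	/* partial last cell */
			cells++;
		len = (uint64_t)cells * (cell_size + cell_overhead);
	}

	return len * NSEC_PER_SEC / rate;
}

int main(void)
{
	/* 1000-byte packet, ATM-like framing, 125000 B/s (1 Mbit/s):
	 * 1000 bytes -> 21 cells of 48 -> 21 * 53 = 1113 bytes on the wire,
	 * 1113 / 125000 s = ~8.9 ms.
	 */
	printf("%llu ns\n",
	       (unsigned long long)packet_time_ns_sketch(1000, 0, 48, 5, 125000));
	return 0;
}
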
372 struct rb_node *p = rb_first(&q->t_root); in tfifo_reset()
378 rb_erase(&skb->rbnode, &q->t_root); in tfifo_reset()
382 rtnl_kfree_skbs(q->t_head, q->t_tail); in tfifo_reset()
383 q->t_head = NULL; in tfifo_reset()
384 q->t_tail = NULL; in tfifo_reset()
390 u64 tnext = netem_skb_cb(nskb)->time_to_send; in tfifo_enqueue()
392 if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) { in tfifo_enqueue()
393 if (q->t_tail) in tfifo_enqueue()
394 q->t_tail->next = nskb; in tfifo_enqueue()
396 q->t_head = nskb; in tfifo_enqueue()
397 q->t_tail = nskb; in tfifo_enqueue()
399 struct rb_node **p = &q->t_root.rb_node, *parent = NULL; in tfifo_enqueue()
406 if (tnext >= netem_skb_cb(skb)->time_to_send) in tfifo_enqueue()
407 p = &parent->rb_right; in tfifo_enqueue()
409 p = &parent->rb_left; in tfifo_enqueue()
411 rb_link_node(&nskb->rbnode, parent, p); in tfifo_enqueue()
412 rb_insert_color(&nskb->rbnode, &q->t_root); in tfifo_enqueue()
414 sch->q.qlen++; in tfifo_enqueue()
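
tfifo_enqueue() appends to a plain linked list whenever the new packet's time_to_send is not earlier than the current tail's, and only falls back to the rbtree when a packet must be inserted out of order; with jitter small relative to the packet spacing almost everything takes the O(1) append path, which is what the "reduces rbtree rebalancing when jitter is low" comment refers to. A small userspace sketch that just measures how often the fast path would be taken; all names and numbers are illustrative.

#include <stdio.h>
#include <stdlib.h>

/* Fraction of packets whose send time is monotonically non-decreasing,
 * i.e. would take the tail-list fast path rather than an rbtree insert.
 */
static double fast_path_fraction(double gap_ns, double jitter_ns, long n)
{
	double now = 0, tail = 0;
	long fast = 0;

	for (long i = 0; i < n; i++) {
		double j = ((double)rand() / RAND_MAX * 2.0 - 1.0) * jitter_ns;
		double tnext = now + 10e6 + j;	/* 10 ms base delay + jitter */

		if (tnext >= tail) {		/* in order: append to tail */
			fast++;
			tail = tnext;
		}				/* else: rbtree insertion */
		now += gap_ns;
	}
	return (double)fast / n;
}

int main(void)
{
	const double gap_ns = 12e3;	/* ~12 us between packets */

	srand(1);
	printf("low jitter  (1 us): %.3f\n",
	       fast_path_fraction(gap_ns, 1e3, 100000));
	printf("high jitter (1 ms): %.3f\n",
	       fast_path_fraction(gap_ns, 1e6, 100000));
	return 0;
}
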
419 * the first packet to be corrupted, and re-enqueue the remaining frames
455 skb->prev = NULL; in netem_enqueue()
458 if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor, &q->prng)) in netem_enqueue()
463 if (q->ecn && INET_ECN_set_ce(skb)) in netem_enqueue()
466 --count; in netem_enqueue()
475 * place at TX completion time, so _before_ the link transit delay) in netem_enqueue()
477 if (q->latency || q->jitter || q->rate) in netem_enqueue()
493 if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor, &q->prng)) { in netem_enqueue()
499 segs = skb->next; in netem_enqueue()
501 qdisc_skb_cb(skb)->pkt_len = skb->len; in netem_enqueue()
509 if (skb->ip_summed == CHECKSUM_PARTIAL && in netem_enqueue()
516 skb->data[get_random_u32_below(skb_headlen(skb))] ^= in netem_enqueue()
520 if (unlikely(sch->q.qlen >= sch->limit)) { in netem_enqueue()
521 /* re-link segs, so that qdisc_drop_all() frees them all */ in netem_enqueue()
522 skb->next = segs; in netem_enqueue()
530 * If doing duplication then re-insert at top of the in netem_enqueue()
536 u32 dupsave = q->duplicate; /* prevent duplicating a dup... */ in netem_enqueue()
538 q->duplicate = 0; in netem_enqueue()
539 rootq->enqueue(skb2, rootq, to_free); in netem_enqueue()
540 q->duplicate = dupsave; in netem_enqueue()
547 if (q->gap == 0 || /* not doing reordering */ in netem_enqueue()
548 q->counter < q->gap - 1 || /* inside last reordering gap */ in netem_enqueue()
549 q->reorder < get_crandom(&q->reorder_cor, &q->prng)) { in netem_enqueue()
553 delay = tabledist(q->latency, q->jitter, in netem_enqueue()
554 &q->delay_cor, &q->prng, q->delay_dist); in netem_enqueue()
558 if (q->rate) { in netem_enqueue()
561 if (sch->q.tail) in netem_enqueue()
562 last = netem_skb_cb(sch->q.tail); in netem_enqueue()
563 if (q->t_root.rb_node) { in netem_enqueue()
567 t_skb = skb_rb_last(&q->t_root); in netem_enqueue()
570 t_last->time_to_send > last->time_to_send) in netem_enqueue()
573 if (q->t_tail) { in netem_enqueue()
575 netem_skb_cb(q->t_tail); in netem_enqueue()
578 t_last->time_to_send > last->time_to_send) in netem_enqueue()
588 delay -= last->time_to_send - now; in netem_enqueue()
590 now = last->time_to_send; in netem_enqueue()
596 cb->time_to_send = now + delay; in netem_enqueue()
597 ++q->counter; in netem_enqueue()
601 * Do re-ordering by putting one out of N packets at the front in netem_enqueue()
604 cb->time_to_send = ktime_get_ns(); in netem_enqueue()
605 q->counter = 0; in netem_enqueue()
607 __qdisc_enqueue_head(skb, &sch->q); in netem_enqueue()
608 sch->qstats.requeues++; in netem_enqueue()
619 len = skb ? skb->len : 0; in netem_enqueue()
623 skb2 = segs->next; in netem_enqueue()
625 qdisc_skb_cb(segs)->pkt_len = segs->len; in netem_enqueue()
626 last_len = segs->len; in netem_enqueue()
638 qdisc_tree_reduce_backlog(sch, -(nb - 1), -(len - prev_len)); in netem_enqueue()
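
The reordering decision above (the gap/counter/reorder branch) works as follows: after a reordered packet, the next gap - 1 packets always take the delayed path, and each packet after that is sent immediately with probability reorder, which lets it overtake the still-delayed ones and resets the counter. A userspace sketch of just that decision, assuming the probability is expressed as a double; names are illustrative.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct reorder_sketch {
	unsigned int gap;	/* 0 disables reordering */
	double reorder;		/* probability for a candidate packet */
	unsigned int counter;
};

/* Returns true when the packet bypasses the delay (is "reordered"). */
static bool send_immediately(struct reorder_sketch *q)
{
	if (q->gap == 0 ||			/* not doing reordering */
	    q->counter < q->gap - 1 ||		/* inside the gap */
	    (double)rand() / RAND_MAX > q->reorder) {
		q->counter++;			/* delayed, in-order path */
		return false;
	}
	q->counter = 0;				/* reordered: reset the gap */
	return true;
}

int main(void)
{
	struct reorder_sketch q = { .gap = 5, .reorder = 0.25, .counter = 0 };
	long reordered = 0, n = 100000;

	srand(1);
	for (long i = 0; i < n; i++)
		reordered += send_immediately(&q);
	printf("immediately sent: %ld of %ld\n", reordered, n);
	return 0;
}
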
653 if (!q->slot_dist) in get_slot_next()
654 next_delay = q->slot_config.min_delay + in get_slot_next()
656 (q->slot_config.max_delay - in get_slot_next()
657 q->slot_config.min_delay) >> 32); in get_slot_next()
659 next_delay = tabledist(q->slot_config.dist_delay, in get_slot_next()
660 (s32)(q->slot_config.dist_jitter), in get_slot_next()
661 NULL, &q->prng, q->slot_dist); in get_slot_next()
663 q->slot.slot_next = now + next_delay; in get_slot_next()
664 q->slot.packets_left = q->slot_config.max_packets; in get_slot_next()
665 q->slot.bytes_left = q->slot_config.max_bytes; in get_slot_next()
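
Without a slot distribution table, get_slot_next() picks the next slot length uniformly between min_delay and max_delay by multiplying a 32-bit random value by the range and shifting right by 32, i.e. treating the random value as a 0.32 fixed-point fraction. A userspace sketch of that scaling trick; the helper name and example values are illustrative.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Scale a 32-bit random value into [min_delay, max_delay) without
 * floating point, as in the uniform slot path.
 */
static int64_t uniform_slot_ns(int64_t min_delay, int64_t max_delay,
			       uint32_t rnd)
{
	return min_delay + (int64_t)(((uint64_t)rnd *
				      (uint64_t)(max_delay - min_delay)) >> 32);
}

int main(void)
{
	srand(1);
	for (int i = 0; i < 5; i++) {
		/* crude 32-bit value built from two rand() calls */
		uint32_t rnd = ((uint32_t)rand() << 16) ^ (uint32_t)rand();

		/* slots between 1 ms and 5 ms */
		printf("%lld ns\n",
		       (long long)uniform_slot_ns(1000000, 5000000, rnd));
	}
	return 0;
}
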
670 struct sk_buff *skb = skb_rb_first(&q->t_root); in netem_peek()
674 return q->t_head; in netem_peek()
675 if (!q->t_head) in netem_peek()
678 t1 = netem_skb_cb(skb)->time_to_send; in netem_peek()
679 t2 = netem_skb_cb(q->t_head)->time_to_send; in netem_peek()
682 return q->t_head; in netem_peek()
687 if (skb == q->t_head) { in netem_erase_head()
688 q->t_head = skb->next; in netem_erase_head()
689 if (!q->t_head) in netem_erase_head()
690 q->t_tail = NULL; in netem_erase_head()
692 rb_erase(&skb->rbnode, &q->t_root); in netem_erase_head()
702 skb = __qdisc_dequeue_head(&sch->q); in netem_dequeue()
715 time_to_send = netem_skb_cb(skb)->time_to_send; in netem_dequeue()
716 if (q->slot.slot_next && q->slot.slot_next < time_to_send) in netem_dequeue()
719 if (time_to_send <= now && q->slot.slot_next <= now) { in netem_dequeue()
721 sch->q.qlen--; in netem_dequeue()
723 skb->next = NULL; in netem_dequeue()
724 skb->prev = NULL; in netem_dequeue()
725 /* skb->dev shares skb->rbnode area, in netem_dequeue()
728 skb->dev = qdisc_dev(sch); in netem_dequeue()
730 if (q->slot.slot_next) { in netem_dequeue()
731 q->slot.packets_left--; in netem_dequeue()
732 q->slot.bytes_left -= qdisc_pkt_len(skb); in netem_dequeue()
733 if (q->slot.packets_left <= 0 || in netem_dequeue()
734 q->slot.bytes_left <= 0) in netem_dequeue()
738 if (q->qdisc) { in netem_dequeue()
743 err = qdisc_enqueue(skb, q->qdisc, &to_free); in netem_dequeue()
755 if (q->qdisc) { in netem_dequeue()
756 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
761 qdisc_watchdog_schedule_ns(&q->watchdog, in netem_dequeue()
763 q->slot.slot_next)); in netem_dequeue()
766 if (q->qdisc) { in netem_dequeue()
767 skb = q->qdisc->ops->dequeue(q->qdisc); in netem_dequeue()
780 if (q->qdisc) in netem_reset()
781 qdisc_reset(q->qdisc); in netem_reset()
782 qdisc_watchdog_cancel(&q->watchdog); in netem_reset()
803 return -EINVAL; in get_dist_table()
807 return -ENOMEM; in get_dist_table()
809 d->size = n; in get_dist_table()
811 d->table[i] = data[i]; in get_dist_table()
821 q->slot_config = *c; in get_slot()
822 if (q->slot_config.max_packets == 0) in get_slot()
823 q->slot_config.max_packets = INT_MAX; in get_slot()
824 if (q->slot_config.max_bytes == 0) in get_slot()
825 q->slot_config.max_bytes = INT_MAX; in get_slot()
828 q->slot_config.dist_jitter = min_t(__s64, INT_MAX, abs(q->slot_config.dist_jitter)); in get_slot()
830 q->slot.packets_left = q->slot_config.max_packets; in get_slot()
831 q->slot.bytes_left = q->slot_config.max_bytes; in get_slot()
832 if (q->slot_config.min_delay | q->slot_config.max_delay | in get_slot()
833 q->slot_config.dist_jitter) in get_slot()
834 q->slot.slot_next = ktime_get_ns(); in get_slot()
836 q->slot.slot_next = 0; in get_slot()
843 init_crandom(&q->delay_cor, c->delay_corr); in get_correlation()
844 init_crandom(&q->loss_cor, c->loss_corr); in get_correlation()
845 init_crandom(&q->dup_cor, c->dup_corr); in get_correlation()
852 q->reorder = r->probability; in get_reorder()
853 init_crandom(&q->reorder_cor, r->correlation); in get_reorder()
860 q->corrupt = r->probability; in get_corrupt()
861 init_crandom(&q->corrupt_cor, r->correlation); in get_corrupt()
868 q->rate = r->rate; in get_rate()
869 q->packet_overhead = r->packet_overhead; in get_rate()
870 q->cell_size = r->cell_size; in get_rate()
871 q->cell_overhead = r->cell_overhead; in get_rate()
872 if (q->cell_size) in get_rate()
873 q->cell_size_reciprocal = reciprocal_value(q->cell_size); in get_rate()
875 q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; in get_rate()
892 return -EINVAL; in get_loss_clg()
895 q->loss_model = CLG_4_STATES; in get_loss_clg()
897 q->clg.state = TX_IN_GAP_PERIOD; in get_loss_clg()
898 q->clg.a1 = gi->p13; in get_loss_clg()
899 q->clg.a2 = gi->p31; in get_loss_clg()
900 q->clg.a3 = gi->p32; in get_loss_clg()
901 q->clg.a4 = gi->p14; in get_loss_clg()
902 q->clg.a5 = gi->p23; in get_loss_clg()
911 return -EINVAL; in get_loss_clg()
914 q->loss_model = CLG_GILB_ELL; in get_loss_clg()
915 q->clg.state = GOOD_STATE; in get_loss_clg()
916 q->clg.a1 = ge->p; in get_loss_clg()
917 q->clg.a2 = ge->r; in get_loss_clg()
918 q->clg.a3 = ge->h; in get_loss_clg()
919 q->clg.a4 = ge->k1; in get_loss_clg()
924 pr_info("netem: unknown loss type %u\n", type); in get_loss_clg()
925 return -EINVAL; in get_loss_clg()
949 int nested_len = nla_len(nla) - NLA_ALIGN(len); in parse_attr()
953 return -EINVAL; in parse_attr()
996 /* backup q->clg and q->loss_model */ in netem_change()
997 old_clg = q->clg; in netem_change()
998 old_loss_model = q->loss_model; in netem_change()
1003 q->loss_model = old_loss_model; in netem_change()
1004 q->clg = old_clg; in netem_change()
1008 q->loss_model = CLG_RANDOM; in netem_change()
1012 swap(q->delay_dist, delay_dist); in netem_change()
1014 swap(q->slot_dist, slot_dist); in netem_change()
1015 sch->limit = qopt->limit; in netem_change()
1017 q->latency = PSCHED_TICKS2NS(qopt->latency); in netem_change()
1018 q->jitter = PSCHED_TICKS2NS(qopt->jitter); in netem_change()
1019 q->limit = qopt->limit; in netem_change()
1020 q->gap = qopt->gap; in netem_change()
1021 q->counter = 0; in netem_change()
1022 q->loss = qopt->loss; in netem_change()
1023 q->duplicate = qopt->duplicate; in netem_change()
1028 if (q->gap) in netem_change()
1029 q->reorder = ~0; in netem_change()
1044 q->rate = max_t(u64, q->rate, in netem_change()
1048 q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]); in netem_change()
1051 q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]); in netem_change()
1054 q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); in netem_change()
1060 q->jitter = min_t(s64, abs(q->jitter), INT_MAX); in netem_change()
1063 q->prng.seed = nla_get_u64(tb[TCA_NETEM_PRNG_SEED]); in netem_change()
1065 q->prng.seed = get_random_u64(); in netem_change()
1066 prandom_seed_state(&q->prng.prng_state, q->prng.seed); in netem_change()
1083 qdisc_watchdog_init(&q->watchdog, sch); in netem_init()
1086 return -EINVAL; in netem_init()
1088 q->loss_model = CLG_RANDOM; in netem_init()
1099 qdisc_watchdog_cancel(&q->watchdog); in netem_destroy()
1100 if (q->qdisc) in netem_destroy()
1101 qdisc_put(q->qdisc); in netem_destroy()
1102 dist_free(q->delay_dist); in netem_destroy()
1103 dist_free(q->slot_dist); in netem_destroy()
1115 switch (q->loss_model) { in dump_loss_model()
1117 /* legacy loss model */ in dump_loss_model()
1123 .p13 = q->clg.a1, in dump_loss_model()
1124 .p31 = q->clg.a2, in dump_loss_model()
1125 .p32 = q->clg.a3, in dump_loss_model()
1126 .p14 = q->clg.a4, in dump_loss_model()
1127 .p23 = q->clg.a5, in dump_loss_model()
1136 .p = q->clg.a1, in dump_loss_model()
1137 .r = q->clg.a2, in dump_loss_model()
1138 .h = q->clg.a3, in dump_loss_model()
1139 .k1 = q->clg.a4, in dump_loss_model()
1153 return -1; in dump_loss_model()
1167 qopt.latency = min_t(psched_time_t, PSCHED_NS2TICKS(q->latency), in netem_dump()
1169 qopt.jitter = min_t(psched_time_t, PSCHED_NS2TICKS(q->jitter), in netem_dump()
1171 qopt.limit = q->limit; in netem_dump()
1172 qopt.loss = q->loss; in netem_dump()
1173 qopt.gap = q->gap; in netem_dump()
1174 qopt.duplicate = q->duplicate; in netem_dump()
1178 if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency)) in netem_dump()
1181 if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter)) in netem_dump()
1184 cor.delay_corr = q->delay_cor.rho; in netem_dump()
1185 cor.loss_corr = q->loss_cor.rho; in netem_dump()
1186 cor.dup_corr = q->dup_cor.rho; in netem_dump()
1190 reorder.probability = q->reorder; in netem_dump()
1191 reorder.correlation = q->reorder_cor.rho; in netem_dump()
1195 corrupt.probability = q->corrupt; in netem_dump()
1196 corrupt.correlation = q->corrupt_cor.rho; in netem_dump()
1200 if (q->rate >= (1ULL << 32)) { in netem_dump()
1201 if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, in netem_dump()
1206 rate.rate = q->rate; in netem_dump()
1208 rate.packet_overhead = q->packet_overhead; in netem_dump()
1209 rate.cell_size = q->cell_size; in netem_dump()
1210 rate.cell_overhead = q->cell_overhead; in netem_dump()
1214 if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) in netem_dump()
1220 if (q->slot_config.min_delay | q->slot_config.max_delay | in netem_dump()
1221 q->slot_config.dist_jitter) { in netem_dump()
1222 slot = q->slot_config; in netem_dump()
1231 if (nla_put_u64_64bit(skb, TCA_NETEM_PRNG_SEED, q->prng.seed, in netem_dump()
1239 return -1; in netem_dump()
1247 if (cl != 1 || !q->qdisc) /* only one class */ in netem_dump_class()
1248 return -ENOENT; in netem_dump_class()
1250 tcm->tcm_handle |= TC_H_MIN(1); in netem_dump_class()
1251 tcm->tcm_info = q->qdisc->handle; in netem_dump_class()
1261 *old = qdisc_replace(sch, new, &q->qdisc); in netem_graft()
1268 return q->qdisc; in netem_leaf()
1278 if (!walker->stop) { in netem_walk()