Lines Matching +full:row +full:- +full:hold
1 // SPDX-License-Identifier: GPL-2.0-or-later
50 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have level
78 struct rb_root row; member
82 /* When class changes from state 1->2 and disconnects from
101 int quantum; /* but stored for parent-to-leaf return */
172 /* time of nearest event per level (row) */
191 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
202 #define HTB_DIRECT ((struct htb_class *)-1L)
205 * htb_classify - classify a packet into class
210 * It returns NULL if the packet should be dropped or -1 if the packet
228 /* allow selecting the class by setting skb->priority to a valid classid; in htb_classify()
232 if (skb->priority == sch->handle) in htb_classify()
234 cl = htb_find(skb->priority, sch); in htb_classify()
236 if (cl->level == 0) in htb_classify()
238 /* Start with inner filter chain if a non-leaf class is selected */ in htb_classify()
239 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
241 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
259 if (res.classid == sch->handle) in htb_classify()
265 if (!cl->level) in htb_classify()
269 tcf = rcu_dereference_bh(cl->filter_list); in htb_classify()
272 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
273 if (!cl || cl->level) in htb_classify()
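Taken together, the htb_classify() fragments above define a three-way contract: NULL means drop, the HTB_DIRECT sentinel defined above means bypass the class tree, and any other value is a leaf class. A minimal caller-side sketch, reconstructed from these fragments rather than copied from the file:

        int ret;
        struct htb_class *cl = htb_classify(skb, sch, &ret);

        if (cl == HTB_DIRECT) {
                /* bypass the class tree; tail the skb onto q->direct_queue */
        } else if (!cl) {
                /* no match and no usable default class: drop/reshape */
        } else {
                /* hand the skb to the leaf qdisc, cl->leaf.q */
        }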
279 * htb_add_to_id_tree - adds class to the round robin list
290 struct rb_node **p = &root->rb_node, *parent = NULL; in htb_add_to_id_tree()
297 if (cl->common.classid > c->common.classid) in htb_add_to_id_tree()
298 p = &parent->rb_right; in htb_add_to_id_tree()
300 p = &parent->rb_left; in htb_add_to_id_tree()
302 rb_link_node(&cl->node[prio], parent, p); in htb_add_to_id_tree()
303 rb_insert_color(&cl->node[prio], root); in htb_add_to_id_tree()
307 * htb_add_to_wait_tree - adds class to the event queue with delay
313 * change its mode in cl->pq_key microseconds. Make sure that class is not
319 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
321 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
322 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
323 cl->pq_key++; in htb_add_to_wait_tree()
326 if (q->near_ev_cache[cl->level] > cl->pq_key) in htb_add_to_wait_tree()
327 q->near_ev_cache[cl->level] = cl->pq_key; in htb_add_to_wait_tree()
333 if (cl->pq_key >= c->pq_key) in htb_add_to_wait_tree()
334 p = &parent->rb_right; in htb_add_to_wait_tree()
336 p = &parent->rb_left; in htb_add_to_wait_tree()
338 rb_link_node(&cl->pq_node, parent, p); in htb_add_to_wait_tree()
339 rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_add_to_wait_tree()
343 * htb_next_rb_node - finds next node in binary tree
355 * htb_add_class_to_row - add class to its row
360 * The class is added to row at priorities marked in mask.
366 q->row_mask[cl->level] |= mask; in htb_add_class_to_row()
370 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio); in htb_add_class_to_row()
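As a concrete example of the mask handling here: a leaf activated at priority 2 has prio_activity == (1 << 2), so htb_add_class_to_row() sets bit 2 of q->row_mask[cl->level] and links the class into that level's hprio[2].row tree; htb_remove_class_from_row() below clears the bit again only once that per-priority tree becomes empty.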
387 * htb_remove_class_from_row - removes class from its row
392 * The class is removed from row at priorities marked in mask.
399 struct htb_level *hlevel = &q->hlevel[cl->level]; in htb_remove_class_from_row()
403 struct htb_prio *hprio = &hlevel->hprio[prio]; in htb_remove_class_from_row()
406 if (hprio->ptr == cl->node + prio) in htb_remove_class_from_row()
407 htb_next_rb_node(&hprio->ptr); in htb_remove_class_from_row()
409 htb_safe_rb_erase(cl->node + prio, &hprio->row); in htb_remove_class_from_row()
410 if (!hprio->row.rb_node) in htb_remove_class_from_row()
413 q->row_mask[cl->level] &= ~m; in htb_remove_class_from_row()
417 * htb_activate_prios - creates active class's feed chain
422 * for priorities it is participating on. cl->cmode must be new
423 * (activated) mode. It does nothing if cl->prio_activity == 0.
427 struct htb_class *p = cl->parent; in htb_activate_prios()
428 long m, mask = cl->prio_activity; in htb_activate_prios()
430 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_activate_prios()
435 if (WARN_ON_ONCE(prio >= ARRAY_SIZE(p->inner.clprio))) in htb_activate_prios()
439 if (p->inner.clprio[prio].feed.rb_node) in htb_activate_prios()
445 htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio); in htb_activate_prios()
447 p->prio_activity |= mask; in htb_activate_prios()
449 p = cl->parent; in htb_activate_prios()
452 if (cl->cmode == HTB_CAN_SEND && mask) in htb_activate_prios()
457 * htb_deactivate_prios - remove class from feed chain
461 * cl->cmode must represent old mode (before deactivation). It does
462 * nothing if cl->prio_activity == 0. Class is removed from all feed
467 struct htb_class *p = cl->parent; in htb_deactivate_prios()
468 long m, mask = cl->prio_activity; in htb_deactivate_prios()
470 while (cl->cmode == HTB_MAY_BORROW && p && mask) { in htb_deactivate_prios()
477 if (p->inner.clprio[prio].ptr == cl->node + prio) { in htb_deactivate_prios()
479 * parent feed - forget the pointer but remember in htb_deactivate_prios()
482 p->inner.clprio[prio].last_ptr_id = cl->common.classid; in htb_deactivate_prios()
483 p->inner.clprio[prio].ptr = NULL; in htb_deactivate_prios()
486 htb_safe_rb_erase(cl->node + prio, in htb_deactivate_prios()
487 &p->inner.clprio[prio].feed); in htb_deactivate_prios()
489 if (!p->inner.clprio[prio].feed.rb_node) in htb_deactivate_prios()
493 p->prio_activity &= ~mask; in htb_deactivate_prios()
495 p = cl->parent; in htb_deactivate_prios()
498 if (cl->cmode == HTB_CAN_SEND && mask) in htb_deactivate_prios()
505 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0; in htb_lowater()
512 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0; in htb_hiwater()
519 * htb_class_mode - computes and returns current class mode
523 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
524 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
527 * at cl->{c,}tokens == 0 but rather there can be hysteresis in the in htb_class_mode()
528 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of in htb_class_mode()
536 if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) { in htb_class_mode()
537 *diff = -toks; in htb_class_mode()
541 if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl)) in htb_class_mode()
544 *diff = -toks; in htb_class_mode()
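Read together with htb_lowater()/htb_hiwater() just above, the htb_class_mode() fragments can be assembled into the complete decision. The sketch below is such a reconstruction for readability (identifiers follow the fragments; it is not a substitute for the file itself):

        static inline enum htb_cmode htb_class_mode(struct htb_class *cl, s64 *diff)
        {
                s64 toks;

                /* ceil bucket drained past the hysteresis point: throttled */
                if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
                        *diff = -toks;
                        return HTB_CANT_SEND;
                }

                /* rate bucket still (hysteretically) high enough: send at own rate */
                if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
                        return HTB_CAN_SEND;

                /* over rate but under ceil: may borrow from ancestors */
                *diff = -toks;
                return HTB_MAY_BORROW;
        }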
549 * htb_change_class_mode - changes class's mode
557 * be different from old one and cl->pq_key has to be valid if changing
565 if (new_mode == cl->cmode) in htb_change_class_mode()
569 cl->overlimits++; in htb_change_class_mode()
570 q->overlimits++; in htb_change_class_mode()
573 if (cl->prio_activity) { /* not necessary: speed optimization */ in htb_change_class_mode()
574 if (cl->cmode != HTB_CANT_SEND) in htb_change_class_mode()
576 cl->cmode = new_mode; in htb_change_class_mode()
580 cl->cmode = new_mode; in htb_change_class_mode()
584 * htb_activate - inserts leaf cl into appropriate active feeds
594 WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen); in htb_activate()
596 if (!cl->prio_activity) { in htb_activate()
597 cl->prio_activity = 1 << cl->prio; in htb_activate()
603 * htb_deactivate - remove leaf cl from active feeds
608 * with non-active leaf. It also removes class from the drop list.
612 WARN_ON(!cl->prio_activity); in htb_deactivate()
615 cl->prio_activity = 0; in htb_deactivate()
628 if (q->direct_queue.qlen < q->direct_qlen) { in htb_enqueue()
629 __qdisc_enqueue_tail(skb, &q->direct_queue); in htb_enqueue()
630 q->direct_pkts++; in htb_enqueue()
641 } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, in htb_enqueue()
645 cl->drops++; in htb_enqueue()
652 sch->qstats.backlog += len; in htb_enqueue()
653 sch->q.qlen++; in htb_enqueue()
659 s64 toks = diff + cl->tokens; in htb_accnt_tokens()
661 if (toks > cl->buffer) in htb_accnt_tokens()
662 toks = cl->buffer; in htb_accnt_tokens()
663 toks -= (s64) psched_l2t_ns(&cl->rate, bytes); in htb_accnt_tokens()
664 if (toks <= -cl->mbuffer) in htb_accnt_tokens()
665 toks = 1 - cl->mbuffer; in htb_accnt_tokens()
667 cl->tokens = toks; in htb_accnt_tokens()
672 s64 toks = diff + cl->ctokens; in htb_accnt_ctokens()
674 if (toks > cl->cbuffer) in htb_accnt_ctokens()
675 toks = cl->cbuffer; in htb_accnt_ctokens()
676 toks -= (s64) psched_l2t_ns(&cl->ceil, bytes); in htb_accnt_ctokens()
677 if (toks <= -cl->mbuffer) in htb_accnt_ctokens()
678 toks = 1 - cl->mbuffer; in htb_accnt_ctokens()
680 cl->ctokens = toks; in htb_accnt_ctokens()
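In both accounting helpers above, psched_l2t_ns() converts a byte count into the nanoseconds needed to transmit it at the class's configured rate or ceil, so each charge drains the bucket by the packet's transmit time; the result is then capped at cl->buffer / cl->cbuffer on the way up and held just above -cl->mbuffer on the way down.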
684 * htb_charge_class - charges amount "bytes" to leaf and ancestors
706 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_charge_class()
707 if (cl->level >= level) { in htb_charge_class()
708 if (cl->level == level) in htb_charge_class()
709 cl->xstats.lends++; in htb_charge_class()
712 cl->xstats.borrows++; in htb_charge_class()
713 cl->tokens += diff; /* we moved t_c; update tokens */ in htb_charge_class()
716 cl->t_c = q->now; in htb_charge_class()
718 old_mode = cl->cmode; in htb_charge_class()
721 if (old_mode != cl->cmode) { in htb_charge_class()
723 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq); in htb_charge_class()
724 if (cl->cmode != HTB_CAN_SEND) in htb_charge_class()
729 if (cl->level) in htb_charge_class()
730 bstats_update(&cl->bstats, skb); in htb_charge_class()
732 cl = cl->parent; in htb_charge_class()
737 * htb_do_events - make mode changes to classes at the level
739 * @level: which wait_pq in 'q->hlevel'
743 * next pending event (0 for no event in pq, q->now for too many events).
744 * Note: Only events with cl->pq_key <= q->now are applied.
754 struct rb_root *wait_pq = &q->hlevel[level].wait_pq; in htb_do_events()
765 if (cl->pq_key > q->now) in htb_do_events()
766 return cl->pq_key; in htb_do_events()
769 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer); in htb_do_events()
771 if (cl->cmode != HTB_CAN_SEND) in htb_do_events()
775 /* too much load - let's continue after a break for scheduling */ in htb_do_events()
776 if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) { in htb_do_events()
778 q->warned |= HTB_WARN_TOOMANYEVENTS; in htb_do_events()
781 return q->now; in htb_do_events()
784 /* Returns class->node+prio from id-tree where class's id is >= id. NULL
795 if (id > cl->common.classid) { in htb_id_find_next_upper()
796 n = n->rb_right; in htb_id_find_next_upper()
797 } else if (id < cl->common.classid) { in htb_id_find_next_upper()
799 n = n->rb_left; in htb_id_find_next_upper()
808 * htb_lookup_leaf - returns next leaf class in DRR order
823 BUG_ON(!hprio->row.rb_node); in htb_lookup_leaf()
824 sp->root = hprio->row.rb_node; in htb_lookup_leaf()
825 sp->pptr = &hprio->ptr; in htb_lookup_leaf()
826 sp->pid = &hprio->last_ptr_id; in htb_lookup_leaf()
829 if (!*sp->pptr && *sp->pid) { in htb_lookup_leaf()
830 /* ptr was invalidated but id is valid - try to recover in htb_lookup_leaf()
833 *sp->pptr = in htb_lookup_leaf()
834 htb_id_find_next_upper(prio, sp->root, *sp->pid); in htb_lookup_leaf()
836 *sp->pid = 0; /* ptr is valid now, so remove this hint as it in htb_lookup_leaf()
839 if (!*sp->pptr) { /* we are at right end; rewind & go up */ in htb_lookup_leaf()
840 *sp->pptr = sp->root; in htb_lookup_leaf()
841 while ((*sp->pptr)->rb_left) in htb_lookup_leaf()
842 *sp->pptr = (*sp->pptr)->rb_left; in htb_lookup_leaf()
844 sp--; in htb_lookup_leaf()
845 if (!*sp->pptr) { in htb_lookup_leaf()
849 htb_next_rb_node(sp->pptr); in htb_lookup_leaf()
855 cl = rb_entry(*sp->pptr, struct htb_class, node[prio]); in htb_lookup_leaf()
856 if (!cl->level) in htb_lookup_leaf()
858 clp = &cl->inner.clprio[prio]; in htb_lookup_leaf()
859 (++sp)->root = clp->feed.rb_node; in htb_lookup_leaf()
860 sp->pptr = &clp->ptr; in htb_lookup_leaf()
861 sp->pid = &clp->last_ptr_id; in htb_lookup_leaf()
876 struct htb_level *hlevel = &q->hlevel[level]; in htb_dequeue_tree()
877 struct htb_prio *hprio = &hlevel->hprio[prio]; in htb_dequeue_tree()
879 /* look initial class up in the row */ in htb_dequeue_tree()
887 /* class can be empty - it is unlikely but can be true if leaf in htb_dequeue_tree()
892 if (unlikely(cl->leaf.q->q.qlen == 0)) { in htb_dequeue_tree()
896 /* row/level might become empty */ in htb_dequeue_tree()
897 if ((q->row_mask[level] & (1 << prio)) == 0) in htb_dequeue_tree()
908 skb = cl->leaf.q->dequeue(cl->leaf.q); in htb_dequeue_tree()
912 qdisc_warn_nonwc("htb", cl->leaf.q); in htb_dequeue_tree()
913 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr: in htb_dequeue_tree()
914 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
920 bstats_update(&cl->bstats, skb); in htb_dequeue_tree()
921 cl->leaf.deficit[level] -= qdisc_pkt_len(skb); in htb_dequeue_tree()
922 if (cl->leaf.deficit[level] < 0) { in htb_dequeue_tree()
923 cl->leaf.deficit[level] += cl->quantum; in htb_dequeue_tree()
924 htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr : in htb_dequeue_tree()
925 &q->hlevel[0].hprio[prio].ptr); in htb_dequeue_tree()
930 if (!cl->leaf.q->q.qlen) in htb_dequeue_tree()
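The deficit lines above are the deficit-round-robin step: the dequeued packet's length is subtracted from the per-level deficit, and only when the deficit goes negative is it topped up by cl->quantum and the row pointer advanced. As a worked illustration (not from the file, deficit assumed to start at zero): with quantum = 1500, a class dequeuing 1500-byte packets has its pointer advanced after every packet, while one dequeuing 300-byte packets keeps the pointer for five packets per advance in steady state.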
946 skb = __qdisc_dequeue_head(&q->direct_queue); in htb_dequeue()
951 sch->q.qlen--; in htb_dequeue()
955 if (!sch->q.qlen) in htb_dequeue()
957 q->now = ktime_get_ns(); in htb_dequeue()
960 next_event = q->now + 5LLU * NSEC_PER_SEC; in htb_dequeue()
963 /* common case optimization - skip event handler quickly */ in htb_dequeue()
965 s64 event = q->near_ev_cache[level]; in htb_dequeue()
967 if (q->now >= event) { in htb_dequeue()
970 event = q->now + NSEC_PER_SEC; in htb_dequeue()
971 q->near_ev_cache[level] = event; in htb_dequeue()
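These lines are the event fast path: near_ev_cache[] (the per-level "time of nearest event" noted near the top of this listing) lets htb_dequeue() skip htb_do_events() entirely until the cached time is reached, and when a level reports nothing pending the cache is simply pushed one second into the future so the check stays cheap.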
977 m = ~q->row_mask[level]; in htb_dequeue()
978 while (m != (int)(-1)) { in htb_dequeue()
987 if (likely(next_event > q->now)) in htb_dequeue()
988 qdisc_watchdog_schedule_ns(&q->watchdog, next_event); in htb_dequeue()
990 schedule_work(&q->work); in htb_dequeue()
1003 for (i = 0; i < q->clhash.hashsize; i++) { in htb_reset()
1004 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_reset()
1005 if (cl->level) in htb_reset()
1006 memset(&cl->inner, 0, sizeof(cl->inner)); in htb_reset()
1008 if (cl->leaf.q && !q->offload) in htb_reset()
1009 qdisc_reset(cl->leaf.q); in htb_reset()
1011 cl->prio_activity = 0; in htb_reset()
1012 cl->cmode = HTB_CAN_SEND; in htb_reset()
1015 qdisc_watchdog_cancel(&q->watchdog); in htb_reset()
1016 __qdisc_reset_queue(&q->direct_queue); in htb_reset()
1017 memset(q->hlevel, 0, sizeof(q->hlevel)); in htb_reset()
1018 memset(q->row_mask, 0, sizeof(q->row_mask)); in htb_reset()
1035 struct Qdisc *sch = q->watchdog.qdisc; in htb_work_func()
1044 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt); in htb_offload()
1059 qdisc_watchdog_init(&q->watchdog, sch); in htb_init()
1060 INIT_WORK(&q->work, htb_work_func); in htb_init()
1063 return -EINVAL; in htb_init()
1065 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in htb_init()
1075 return -EINVAL; in htb_init()
1078 if (gopt->version != HTB_VER >> 16) in htb_init()
1079 return -EINVAL; in htb_init()
1084 if (sch->parent != TC_H_ROOT) { in htb_init()
1086 return -EOPNOTSUPP; in htb_init()
1089 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) { in htb_init()
1090 NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on"); in htb_init()
1091 return -EOPNOTSUPP; in htb_init()
1094 q->num_direct_qdiscs = dev->real_num_tx_queues; in htb_init()
1095 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs, in htb_init()
1096 sizeof(*q->direct_qdiscs), in htb_init()
1098 if (!q->direct_qdiscs) in htb_init()
1099 return -ENOMEM; in htb_init()
1102 err = qdisc_class_hash_init(&q->clhash); in htb_init()
1107 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]); in htb_init()
1109 q->direct_qlen = qdisc_dev(sch)->tx_queue_len; in htb_init()
1111 if ((q->rate2quantum = gopt->rate2quantum) < 1) in htb_init()
1112 q->rate2quantum = 1; in htb_init()
1113 q->defcls = gopt->defcls; in htb_init()
1118 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_init()
1123 TC_H_MAKE(sch->handle, 0), extack); in htb_init()
1125 return -ENOMEM; in htb_init()
1128 q->direct_qdiscs[ntx] = qdisc; in htb_init()
1129 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in htb_init()
1132 sch->flags |= TCQ_F_MQROOT; in htb_init()
1136 .parent_classid = TC_H_MAJ(sch->handle) >> 16, in htb_init()
1137 .classid = TC_H_MIN(q->defcls), in htb_init()
1144 /* Defer this assignment, so that htb_destroy skips offload-related in htb_init()
1147 q->offload = true; in htb_init()
1158 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) { in htb_attach_offload()
1159 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx]; in htb_attach_offload()
1161 old = dev_graft_qdisc(qdisc->dev_queue, qdisc); in htb_attach_offload()
1165 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) { in htb_attach_offload()
1172 kfree(q->direct_qdiscs); in htb_attach_offload()
1173 q->direct_qdiscs = NULL; in htb_attach_offload()
1182 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in htb_attach_software()
1196 if (q->offload) in htb_attach()
1208 if (q->offload) in htb_dump()
1209 sch->flags |= TCQ_F_OFFLOADED; in htb_dump()
1211 sch->flags &= ~TCQ_F_OFFLOADED; in htb_dump()
1213 sch->qstats.overlimits = q->overlimits; in htb_dump()
1214 /* It's safe to not acquire the qdisc lock. As we hold RTNL, in htb_dump()
1218 gopt.direct_pkts = q->direct_pkts; in htb_dump()
1220 gopt.rate2quantum = q->rate2quantum; in htb_dump()
1221 gopt.defcls = q->defcls; in htb_dump()
1228 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen)) in htb_dump()
1230 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump()
1237 return -1; in htb_dump()
1248 /* It's safe to not acquire the qdisc lock. As we hold RTNL, in htb_dump_class()
1251 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT; in htb_dump_class()
1252 tcm->tcm_handle = cl->common.classid; in htb_dump_class()
1253 if (!cl->level && cl->leaf.q) in htb_dump_class()
1254 tcm->tcm_info = cl->leaf.q->handle; in htb_dump_class()
1262 psched_ratecfg_getrate(&opt.rate, &cl->rate); in htb_dump_class()
1263 opt.buffer = PSCHED_NS2TICKS(cl->buffer); in htb_dump_class()
1264 psched_ratecfg_getrate(&opt.ceil, &cl->ceil); in htb_dump_class()
1265 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer); in htb_dump_class()
1266 opt.quantum = cl->quantum; in htb_dump_class()
1267 opt.prio = cl->prio; in htb_dump_class()
1268 opt.level = cl->level; in htb_dump_class()
1271 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD)) in htb_dump_class()
1273 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1274 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps, in htb_dump_class()
1277 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) && in htb_dump_class()
1278 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps, in htb_dump_class()
1286 return -1; in htb_dump_class()
1296 gnet_stats_basic_sync_init(&cl->bstats); in htb_offload_aggregate_stats()
1298 for (i = 0; i < q->clhash.hashsize; i++) { in htb_offload_aggregate_stats()
1299 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) { in htb_offload_aggregate_stats()
1302 while (p && p->level < cl->level) in htb_offload_aggregate_stats()
1303 p = p->parent; in htb_offload_aggregate_stats()
1308 bytes += u64_stats_read(&c->bstats_bias.bytes); in htb_offload_aggregate_stats()
1309 packets += u64_stats_read(&c->bstats_bias.packets); in htb_offload_aggregate_stats()
1310 if (c->level == 0) { in htb_offload_aggregate_stats()
1311 bytes += u64_stats_read(&c->leaf.q->bstats.bytes); in htb_offload_aggregate_stats()
1312 packets += u64_stats_read(&c->leaf.q->bstats.packets); in htb_offload_aggregate_stats()
1316 _bstats_update(&cl->bstats, bytes, packets); in htb_offload_aggregate_stats()
1325 .drops = cl->drops, in htb_dump_class_stats()
1326 .overlimits = cl->overlimits, in htb_dump_class_stats()
1330 if (!cl->level && cl->leaf.q) in htb_dump_class_stats()
1331 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog); in htb_dump_class_stats()
1333 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens), in htb_dump_class_stats()
1335 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens), in htb_dump_class_stats()
1338 if (q->offload) { in htb_dump_class_stats()
1339 if (!cl->level) { in htb_dump_class_stats()
1340 if (cl->leaf.q) in htb_dump_class_stats()
1341 cl->bstats = cl->leaf.q->bstats; in htb_dump_class_stats()
1343 gnet_stats_basic_sync_init(&cl->bstats); in htb_dump_class_stats()
1344 _bstats_update(&cl->bstats, in htb_dump_class_stats()
1345 u64_stats_read(&cl->bstats_bias.bytes), in htb_dump_class_stats()
1346 u64_stats_read(&cl->bstats_bias.packets)); in htb_dump_class_stats()
1352 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 || in htb_dump_class_stats()
1353 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 || in htb_dump_class_stats()
1355 return -1; in htb_dump_class_stats()
1357 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats)); in htb_dump_class_stats()
1368 if (!q->offload) in htb_select_queue()
1369 return sch->dev_queue; in htb_select_queue()
1373 .classid = TC_H_MIN(tcm->tcm_parent), in htb_select_queue()
1376 if (err || offload_opt.qid >= dev->num_tx_queues) in htb_select_queue()
1384 struct net_device *dev = dev_queue->dev; in htb_graft_helper()
1387 if (dev->flags & IFF_UP) in htb_graft_helper()
1391 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT; in htb_graft_helper()
1392 if (dev->flags & IFF_UP) in htb_graft_helper()
1402 queue = cl->leaf.offload_queue; in htb_offload_get_queue()
1403 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_get_queue()
1404 WARN_ON(cl->leaf.q->dev_queue != queue); in htb_offload_get_queue()
1421 if (dev->flags & IFF_UP) in htb_offload_move_qdisc()
1424 WARN_ON(qdisc != cl_old->leaf.q); in htb_offload_move_qdisc()
1427 if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN)) in htb_offload_move_qdisc()
1428 cl_old->leaf.q->dev_queue = queue_new; in htb_offload_move_qdisc()
1429 cl_old->leaf.offload_queue = queue_new; in htb_offload_move_qdisc()
1434 qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q); in htb_offload_move_qdisc()
1435 if (dev->flags & IFF_UP) in htb_offload_move_qdisc()
1437 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN)); in htb_offload_move_qdisc()
1444 struct netdev_queue *dev_queue = sch->dev_queue; in htb_graft()
1449 if (cl->level) in htb_graft()
1450 return -EINVAL; in htb_graft()
1452 if (q->offload) in htb_graft()
1457 cl->common.classid, extack); in htb_graft()
1459 return -ENOBUFS; in htb_graft()
1462 if (q->offload) { in htb_graft()
1463 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_graft()
1468 *old = qdisc_replace(sch, new, &cl->leaf.q); in htb_graft()
1470 if (q->offload) { in htb_graft()
1481 return !cl->level ? cl->leaf.q : NULL; in htb_leaf()
1493 if (!cl->parent) in htb_parent_last_child()
1496 if (cl->parent->children > 1) in htb_parent_last_child()
1506 struct htb_class *parent = cl->parent; in htb_parent_to_leaf()
1508 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity); in htb_parent_to_leaf()
1510 if (parent->cmode != HTB_CAN_SEND) in htb_parent_to_leaf()
1511 htb_safe_rb_erase(&parent->pq_node, in htb_parent_to_leaf()
1512 &q->hlevel[parent->level].wait_pq); in htb_parent_to_leaf()
1514 parent->level = 0; in htb_parent_to_leaf()
1515 memset(&parent->inner, 0, sizeof(parent->inner)); in htb_parent_to_leaf()
1516 parent->leaf.q = new_q ? new_q : &noop_qdisc; in htb_parent_to_leaf()
1517 parent->tokens = parent->buffer; in htb_parent_to_leaf()
1518 parent->ctokens = parent->cbuffer; in htb_parent_to_leaf()
1519 parent->t_c = ktime_get_ns(); in htb_parent_to_leaf()
1520 parent->cmode = HTB_CAN_SEND; in htb_parent_to_leaf()
1521 if (q->offload) in htb_parent_to_leaf()
1522 parent->leaf.offload_queue = cl->leaf.offload_queue; in htb_parent_to_leaf()
1531 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_parent_to_leaf_offload()
1535 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); in htb_parent_to_leaf_offload()
1544 struct Qdisc *q = cl->leaf.q; in htb_destroy_class_offload()
1548 if (cl->level) in htb_destroy_class_offload()
1549 return -EINVAL; in htb_destroy_class_offload()
1559 /* Last qdisc grafted should be the same as cl->leaf.q when in htb_destroy_class_offload()
1565 if (cl->parent) { in htb_destroy_class_offload()
1566 _bstats_update(&cl->parent->bstats_bias, in htb_destroy_class_offload()
1567 u64_stats_read(&q->bstats.bytes), in htb_destroy_class_offload()
1568 u64_stats_read(&q->bstats.packets)); in htb_destroy_class_offload()
1575 .classid = cl->common.classid, in htb_destroy_class_offload()
1590 if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) { in htb_destroy_class_offload()
1591 u32 classid = TC_H_MAJ(sch->handle) | in htb_destroy_class_offload()
1603 if (!cl->level) { in htb_destroy_class()
1604 WARN_ON(!cl->leaf.q); in htb_destroy_class()
1605 qdisc_put(cl->leaf.q); in htb_destroy_class()
1607 gen_kill_estimator(&cl->rate_est); in htb_destroy_class()
1608 tcf_block_put(cl->block); in htb_destroy_class()
1622 cancel_work_sync(&q->work); in htb_destroy()
1623 qdisc_watchdog_cancel(&q->watchdog); in htb_destroy()
1629 tcf_block_put(q->block); in htb_destroy()
1631 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1632 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_destroy()
1633 tcf_block_put(cl->block); in htb_destroy()
1634 cl->block = NULL; in htb_destroy()
1641 for (i = 0; i < q->clhash.hashsize; i++) { in htb_destroy()
1642 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i], in htb_destroy()
1646 if (!q->offload) { in htb_destroy()
1653 if (cl->level) in htb_destroy()
1661 qdisc_class_hash_remove(&q->clhash, in htb_destroy()
1662 &cl->common); in htb_destroy()
1663 if (cl->parent) in htb_destroy()
1664 cl->parent->children--; in htb_destroy()
1673 qdisc_class_hash_destroy(&q->clhash); in htb_destroy()
1674 __qdisc_reset_queue(&q->direct_queue); in htb_destroy()
1676 if (q->offload) { in htb_destroy()
1683 if (!q->direct_qdiscs) in htb_destroy()
1685 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++) in htb_destroy()
1686 qdisc_put(q->direct_qdiscs[i]); in htb_destroy()
1687 kfree(q->direct_qdiscs); in htb_destroy()
1703 if (cl->children || qdisc_class_in_use(&cl->common)) { in htb_delete()
1705 return -EBUSY; in htb_delete()
1708 if (!cl->level && htb_parent_last_child(cl)) in htb_delete()
1711 if (q->offload) { in htb_delete()
1719 struct netdev_queue *dev_queue = sch->dev_queue; in htb_delete()
1721 if (q->offload) in htb_delete()
1725 cl->parent->common.classid, in htb_delete()
1727 if (q->offload) in htb_delete()
1733 if (!cl->level) in htb_delete()
1734 qdisc_purge_queue(cl->leaf.q); in htb_delete()
1737 qdisc_class_hash_remove(&q->clhash, &cl->common); in htb_delete()
1738 if (cl->parent) in htb_delete()
1739 cl->parent->children--; in htb_delete()
1741 if (cl->prio_activity) in htb_delete()
1744 if (cl->cmode != HTB_CAN_SEND) in htb_delete()
1745 htb_safe_rb_erase(&cl->pq_node, in htb_delete()
1746 &q->hlevel[cl->level].wait_pq); in htb_delete()
1761 int err = -EINVAL; in htb_change_class()
1782 err = -EINVAL; in htb_change_class()
1789 if (!hopt->rate.rate || !hopt->ceil.rate) in htb_change_class()
1792 if (q->offload) { in htb_change_class()
1794 if (hopt->rate.overhead || hopt->ceil.overhead) { in htb_change_class()
1798 if (hopt->rate.mpu || hopt->ceil.mpu) { in htb_change_class()
1805 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE) in htb_change_class()
1806 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB], in htb_change_class()
1809 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE) in htb_change_class()
1810 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB], in htb_change_class()
1836 if (!classid || TC_H_MAJ(classid ^ sch->handle) || in htb_change_class()
1841 if (parent && parent->parent && parent->parent->level < 2) { in htb_change_class()
1845 err = -ENOBUFS; in htb_change_class()
1850 gnet_stats_basic_sync_init(&cl->bstats); in htb_change_class()
1851 gnet_stats_basic_sync_init(&cl->bstats_bias); in htb_change_class()
1853 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack); in htb_change_class()
1859 err = gen_new_estimator(&cl->bstats, NULL, in htb_change_class()
1860 &cl->rate_est, in htb_change_class()
1868 cl->children = 0; in htb_change_class()
1869 RB_CLEAR_NODE(&cl->pq_node); in htb_change_class()
1872 RB_CLEAR_NODE(&cl->node[prio]); in htb_change_class()
1874 cl->common.classid = classid; in htb_change_class()
1883 * -- thanks to Karlis Peisenieks in htb_change_class()
1885 if (!q->offload) { in htb_change_class()
1886 dev_queue = sch->dev_queue; in htb_change_class()
1887 } else if (!(parent && !parent->level)) { in htb_change_class()
1891 .classid = cl->common.classid, in htb_change_class()
1893 TC_H_MIN(parent->common.classid) : in htb_change_class()
1895 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
1896 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
1897 .prio = hopt->prio, in htb_change_class()
1898 .quantum = hopt->quantum, in htb_change_class()
1911 WARN_ON(old_q != parent->leaf.q); in htb_change_class()
1914 .classid = cl->common.classid, in htb_change_class()
1916 TC_H_MIN(parent->common.classid), in htb_change_class()
1917 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
1918 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
1919 .prio = hopt->prio, in htb_change_class()
1920 .quantum = hopt->quantum, in htb_change_class()
1930 _bstats_update(&parent->bstats_bias, in htb_change_class()
1931 u64_stats_read(&old_q->bstats.bytes), in htb_change_class()
1932 u64_stats_read(&old_q->bstats.packets)); in htb_change_class()
1937 if (q->offload) { in htb_change_class()
1938 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */ in htb_change_class()
1943 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN)); in htb_change_class()
1946 if (parent && !parent->level) { in htb_change_class()
1948 qdisc_purge_queue(parent->leaf.q); in htb_change_class()
1949 parent_qdisc = parent->leaf.q; in htb_change_class()
1950 if (parent->prio_activity) in htb_change_class()
1954 if (parent->cmode != HTB_CAN_SEND) { in htb_change_class()
1955 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq); in htb_change_class()
1956 parent->cmode = HTB_CAN_SEND; in htb_change_class()
1958 parent->level = (parent->parent ? parent->parent->level in htb_change_class()
1959 : TC_HTB_MAXDEPTH) - 1; in htb_change_class()
1960 memset(&parent->inner, 0, sizeof(parent->inner)); in htb_change_class()
1964 cl->leaf.q = new_q ? new_q : &noop_qdisc; in htb_change_class()
1965 if (q->offload) in htb_change_class()
1966 cl->leaf.offload_queue = dev_queue; in htb_change_class()
1968 cl->parent = parent; in htb_change_class()
1971 cl->tokens = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
1972 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
1973 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */ in htb_change_class()
1974 cl->t_c = ktime_get_ns(); in htb_change_class()
1975 cl->cmode = HTB_CAN_SEND; in htb_change_class()
1978 qdisc_class_hash_insert(&q->clhash, &cl->common); in htb_change_class()
1980 parent->children++; in htb_change_class()
1981 if (cl->leaf.q != &noop_qdisc) in htb_change_class()
1982 qdisc_hash_add(cl->leaf.q, true); in htb_change_class()
1985 err = gen_replace_estimator(&cl->bstats, NULL, in htb_change_class()
1986 &cl->rate_est, in htb_change_class()
1994 if (q->offload) { in htb_change_class()
1999 .classid = cl->common.classid, in htb_change_class()
2000 .rate = max_t(u64, hopt->rate.rate, rate64), in htb_change_class()
2001 .ceil = max_t(u64, hopt->ceil.rate, ceil64), in htb_change_class()
2002 .prio = hopt->prio, in htb_change_class()
2003 .quantum = hopt->quantum, in htb_change_class()
2020 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64); in htb_change_class()
2021 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64); in htb_change_class()
2024 * is really leaf before changing cl->leaf ! in htb_change_class()
2026 if (!cl->level) { in htb_change_class()
2027 u64 quantum = cl->rate.rate_bytes_ps; in htb_change_class()
2029 do_div(quantum, q->rate2quantum); in htb_change_class()
2030 cl->quantum = min_t(u64, quantum, INT_MAX); in htb_change_class()
2032 if (!hopt->quantum && cl->quantum < 1000) { in htb_change_class()
2033 warn = -1; in htb_change_class()
2034 cl->quantum = 1000; in htb_change_class()
2036 if (!hopt->quantum && cl->quantum > 200000) { in htb_change_class()
2038 cl->quantum = 200000; in htb_change_class()
2040 if (hopt->quantum) in htb_change_class()
2041 cl->quantum = hopt->quantum; in htb_change_class()
2042 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO) in htb_change_class()
2043 cl->prio = TC_HTB_NUMPRIO - 1; in htb_change_class()
2046 cl->buffer = PSCHED_TICKS2NS(hopt->buffer); in htb_change_class()
2047 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer); in htb_change_class()
2055 cl->common.classid, (warn == -1 ? "small" : "big")); in htb_change_class()
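A quick worked example for the quantum derivation above, assuming rate2quantum is 10 (the usual tc default): a class with a 100 Mbit/s rate has rate_bytes_ps = 12,500,000, so the computed quantum is 1,250,000; without an explicit quantum in hopt it is clamped to 200000 and the "is big" form of the warning above is emitted.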
2057 qdisc_class_hash_grow(sch, &q->clhash); in htb_change_class()
2063 gen_kill_estimator(&cl->rate_est); in htb_change_class()
2065 tcf_block_put(cl->block); in htb_change_class()
2077 return cl ? cl->block : q->block; in htb_tcf_block()
2085 /*if (cl && !cl->level) return 0; in htb_bind_filter()
2089 * ---- in htb_bind_filter()
2090 * 19.6.2002 As Werner explained it is ok - bind filter is just in htb_bind_filter()
2091 * another way to "lock" the class - unlike "get" this lock can in htb_bind_filter()
2095 qdisc_class_get(&cl->common); in htb_bind_filter()
2103 qdisc_class_put(&cl->common); in htb_unbind_filter()
2112 if (arg->stop) in htb_walk()
2115 for (i = 0; i < q->clhash.hashsize; i++) { in htb_walk()
2116 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) { in htb_walk()