Lines Matching +full:1 +full:q  (all matches below are from net/sched/sch_cake.c, the CAKE queue discipline; the leading number on each entry is the source line)

101  * @rec_inv_sqrt:	reciprocal value of sqrt(count) >> 1
261 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
300 static u16 quantum_div[CAKE_QUEUES + 1] = {0};
306 1, 1, 1, 1, 1, 1, 1, 1,
316 2, 0, 1, 2, 4, 2, 2, 2,
317 1, 2, 1, 2, 1, 2, 1, 2,
327 0, 1, 0, 0, 2, 0, 0, 0,
328 1, 0, 0, 0, 0, 0, 0, 0,
338 0, 1, 0, 0, 2, 0, 0, 0,
339 1, 0, 0, 0, 0, 0, 0, 0,
361 static const u8 normal_order[] = {0, 1, 2, 3, 4, 5, 6, 7};
362 static const u8 bulk_order[] = {1, 0, 2, 3};
366 * values, particularly when stepping from count 1 to 2 or vice versa. Hence,
399 val = (val * invsqrt) >> (32 - 2 + 1); in cobalt_newton_step()
448 vars->count = 1; in cobalt_queue_full()
508 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close in cobalt_should_drop()
529 vars->count = 1; in cobalt_should_drop()
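
The cobalt_newton_step() hits above (lines 366-529) all revolve around a Q0.32 fixed-point Newton iteration for 1/sqrt(count): the cached '0' and '1' entries sit at ~1.0, and each step of count by one keeps the previous estimate a good seed. A minimal userspace sketch of that iteration, assuming nothing beyond the standard C library (function and variable names here are illustrative, not the kernel's):

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    /* One Newton step for y = 1/sqrt(count), with y held in Q0.32 fixed point:
     * y' = y * (3 - count * y^2) / 2.  The shifts mirror the matched line 399:
     * >>2 keeps the following 64-bit multiply from overflowing, and the final
     * >>(32 - 2 + 1) returns to Q0.32, undoes the pre-shift and divides by 2.
     */
    static uint32_t newton_step(uint32_t invsqrt, uint32_t count)
    {
        uint64_t invsqrt2 = ((uint64_t)invsqrt * invsqrt) >> 32;   /* y^2, Q0.32     */
        uint64_t val = (3ULL << 32) - (uint64_t)count * invsqrt2;  /* 3 - count*y^2  */

        val >>= 2;                              /* pre-shift to keep the multiply safe */
        val = (val * invsqrt) >> (32 - 2 + 1);  /* *y, undo pre-shift, divide by 2     */
        return (uint32_t)val;
    }

    int main(void)
    {
        uint32_t y = 0xFFFFFFFFu;   /* seed close to 1.0, as for count == 1 */

        /* stepping count one at a time keeps the previous value a good seed,
         * which is why the kernel caches the small-count entries
         */
        for (uint32_t count = 1; count <= 8; count++) {
            for (int i = 0; i < 4; i++)
                y = newton_step(y, count);
            printf("count=%u  fixed=%.6f  libm=%.6f\n",
                   (unsigned)count, y / 4294967296.0, 1.0 / sqrt((double)count));
        }
        return 0;
    }
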
630 static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb, in cake_hash() argument
706 flow_hash = flow_override - 1; in cake_hash()
710 dsthost_hash = host_override - 1; in cake_hash()
711 srchost_hash = host_override - 1; in cake_hash()
726 if (likely(q->tags[reduced_hash] == flow_hash && in cake_hash()
727 q->flows[reduced_hash].set)) { in cake_hash()
728 q->way_directs++; in cake_hash()
740 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
741 if (q->tags[outer_hash + k] == flow_hash) { in cake_hash()
743 q->way_hits++; in cake_hash()
745 if (!q->flows[outer_hash + k].set) { in cake_hash()
759 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
760 if (!q->flows[outer_hash + k].set) { in cake_hash()
761 q->way_misses++; in cake_hash()
771 q->way_collisions++; in cake_hash()
775 if (q->flows[outer_hash + k].set == CAKE_SET_BULK) { in cake_hash()
777 q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--; in cake_hash()
779 q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--; in cake_hash()
784 q->tags[reduced_hash] = flow_hash; in cake_hash()
791 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
792 if (q->hosts[outer_hash + k].srchost_tag == in cake_hash()
797 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
798 if (!q->hosts[outer_hash + k].srchost_bulk_flow_count) in cake_hash()
801 q->hosts[outer_hash + k].srchost_tag = srchost_hash; in cake_hash()
804 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
805 q->hosts[srchost_idx].srchost_bulk_flow_count++; in cake_hash()
806 q->flows[reduced_hash].srchost = srchost_idx; in cake_hash()
814 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
815 if (q->hosts[outer_hash + k].dsthost_tag == in cake_hash()
820 i++, k = (k + 1) % CAKE_SET_WAYS) { in cake_hash()
821 if (!q->hosts[outer_hash + k].dsthost_bulk_flow_count) in cake_hash()
824 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash; in cake_hash()
827 if (q->flows[reduced_hash].set == CAKE_SET_BULK) in cake_hash()
828 q->hosts[dsthost_idx].dsthost_bulk_flow_count++; in cake_hash()
829 q->flows[reduced_hash].dsthost = dsthost_idx; in cake_hash()
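
The cake_hash() matches (lines 630-829) all deal with one structure: a flow table split into small "ways", where a flow's tag is searched within its set before a free slot or a victim is chosen, and per-host bulk-flow counters are fixed up on eviction. A stand-alone sketch of the set-associative probe only, with illustrative sizes (QUEUES and SET_WAYS stand in for CAKE_QUEUES and CAKE_SET_WAYS) and without the host accounting:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define QUEUES    1024     /* must be a multiple of SET_WAYS */
    #define SET_WAYS  8

    static uint32_t tags[QUEUES];   /* flow tag stored per slot */
    static bool     used[QUEUES];   /* slot occupancy           */

    /* Look up (or insert) flow_hash.  reduced_hash picks a slot; its set is
     * [outer_hash, outer_hash + SET_WAYS).  Probe for a tag match first, then
     * for a free slot, and otherwise evict the slot we originally landed on.
     */
    static uint32_t set_assoc_hash(uint32_t flow_hash)
    {
        uint32_t reduced_hash = flow_hash % QUEUES;
        uint32_t outer_hash = reduced_hash - (reduced_hash % SET_WAYS);
        uint32_t i, k;

        /* fast path: direct hit */
        if (used[reduced_hash] && tags[reduced_hash] == flow_hash)
            return reduced_hash;

        /* probe the set for an existing entry with the same tag */
        for (i = 0, k = reduced_hash % SET_WAYS; i < SET_WAYS;
             i++, k = (k + 1) % SET_WAYS)
            if (used[outer_hash + k] && tags[outer_hash + k] == flow_hash)
                return outer_hash + k;

        /* probe the set for a free slot */
        for (i = 0, k = reduced_hash % SET_WAYS; i < SET_WAYS;
             i++, k = (k + 1) % SET_WAYS)
            if (!used[outer_hash + k]) {
                reduced_hash = outer_hash + k;
                break;
            }

        /* collision: reuse the original slot (the kernel additionally adjusts
         * the per-host bulk flow counters here, as in lines 775-829 above)
         */
        tags[reduced_hash] = flow_hash;
        used[reduced_hash] = true;
        return reduced_hash;
    }

    int main(void)
    {
        printf("%u\n", set_assoc_hash(0xdeadbeef));
        printf("%u\n", set_assoc_hash(0xdeadbeef));   /* same slot second time */
        return 0;
    }
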
945 const u8 *ptr = (const u8 *)(tcph + 1); in cake_get_tcpopt()
980 * @return -1, 0 or 1 as normal compare functions
1000 return -1; in cake_tcph_sack_compare()
1003 return 1; in cake_tcph_sack_compare()
1017 return -1; in cake_tcph_sack_compare()
1039 return -1; in cake_tcph_sack_compare()
1049 return bytes_b > bytes_a ? 1 : 0; in cake_tcph_sack_compare()
1071 const u8 *ptr = (const u8 *)(tcph + 1); in cake_tcph_may_drop()
1078 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero) in cake_tcph_may_drop()
1138 static struct sk_buff *cake_ack_filter(struct cake_sched_data *q, in cake_ack_filter() argument
1141 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE; in cake_ack_filter()
1211 WARN_ON(1); /* shouldn't happen */ in cake_ack_filter()
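
The ACK-filter helpers matched above (lines 945-1211) start from the same idiom: ptr = (const u8 *)(tcph + 1) points just past the fixed TCP header, and the option area up to doff * 4 bytes is walked kind/length-wise. A self-contained sketch of that walk over a hand-built option block; the struct below is a minimal stand-in, not the kernel's struct tcphdr:

    #include <stdint.h>
    #include <stdio.h>

    #define OPT_EOL  0
    #define OPT_NOP  1
    #define OPT_SACK 5

    /* Minimal stand-in for the fixed 20-byte TCP header; only doff matters,
     * and here it simply holds header-length / 4 as a full byte.
     */
    struct tcp_hdr_min {
        uint8_t fixed[12];
        uint8_t doff;
        uint8_t rest[7];
    };

    static void walk_options(const struct tcp_hdr_min *tcph)
    {
        const uint8_t *ptr = (const uint8_t *)(tcph + 1);   /* first option byte */
        int len = tcph->doff * 4 - (int)sizeof(*tcph);      /* bytes of options  */

        while (len > 0) {
            uint8_t kind = *ptr;

            if (kind == OPT_EOL)
                break;
            if (kind == OPT_NOP) {              /* single-byte padding */
                ptr++;
                len--;
                continue;
            }
            if (len < 2 || ptr[1] < 2 || ptr[1] > len)
                break;                          /* malformed option, stop */

            uint8_t opsize = ptr[1];
            printf("option kind=%u len=%u\n", kind, opsize);
            ptr += opsize;
            len -= opsize;
        }
    }

    int main(void)
    {
        /* 20-byte header + 12 bytes of options: NOP, NOP, SACK (10 bytes) */
        uint8_t pkt[32] = {0};
        struct tcp_hdr_min *tcph = (struct tcp_hdr_min *)pkt;

        tcph->doff = 32 / 4;
        pkt[20] = OPT_NOP;
        pkt[21] = OPT_NOP;
        pkt[22] = OPT_SACK;
        pkt[23] = 10;           /* one SACK block: kind + len + 8 bytes */

        walk_options(tcph);
        return 0;
    }
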
1301 static u32 cake_calc_overhead(struct cake_sched_data *q, u32 len, u32 off) in cake_calc_overhead() argument
1303 if (q->rate_flags & CAKE_FLAG_OVERHEAD) in cake_calc_overhead()
1306 if (q->max_netlen < len) in cake_calc_overhead()
1307 q->max_netlen = len; in cake_calc_overhead()
1308 if (q->min_netlen > len) in cake_calc_overhead()
1309 q->min_netlen = len; in cake_calc_overhead()
1311 len += q->rate_overhead; in cake_calc_overhead()
1313 if (len < q->rate_mpu) in cake_calc_overhead()
1314 len = q->rate_mpu; in cake_calc_overhead()
1316 if (q->atm_mode == CAKE_ATM_ATM) { in cake_calc_overhead()
1320 } else if (q->atm_mode == CAKE_ATM_PTM) { in cake_calc_overhead()
1328 if (q->max_adjlen < len) in cake_calc_overhead()
1329 q->max_adjlen = len; in cake_calc_overhead()
1330 if (q->min_adjlen > len) in cake_calc_overhead()
1331 q->min_adjlen = len; in cake_calc_overhead()
1336 static u32 cake_overhead(struct cake_sched_data *q, const struct sk_buff *skb) in cake_overhead() argument
1342 u16 segs = 1; in cake_overhead()
1344 q->avg_netoff = cake_ewma(q->avg_netoff, off << 16, 8); in cake_overhead()
1347 return cake_calc_overhead(q, len, off); in cake_overhead()
1377 last_len = skb->len - shinfo->gso_size * (segs - 1); in cake_overhead()
1379 return (cake_calc_overhead(q, len, off) * (segs - 1) + in cake_overhead()
1380 cake_calc_overhead(q, last_len, off)); in cake_overhead()
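
The cake_calc_overhead() matches (lines 1301-1331) end in two framing compensations: ATM pads every 48 payload bytes out to a 53-byte cell, and PTM conservatively adds one byte per started 64-byte block. The arithmetic as a tiny sketch (function names here are illustrative):

    #include <stdio.h>

    /* AAL5/ATM: round up to whole 48-byte cell payloads, then account
     * 53 bytes on the wire per cell.
     */
    static unsigned int atm_wire_len(unsigned int len)
    {
        return ((len + 47) / 48) * 53;
    }

    /* PTM (64b/65b encoding): one extra byte per 64 bytes or part thereof. */
    static unsigned int ptm_wire_len(unsigned int len)
    {
        return len + (len + 63) / 64;
    }

    int main(void)
    {
        for (unsigned int len = 40; len <= 1500; len += 730)
            printf("net %4u  atm %4u  ptm %4u\n",
                   len, atm_wire_len(len), ptm_wire_len(len));
        return 0;
    }
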
1383 static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j) in cake_heap_swap() argument
1385 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_swap()
1386 struct cake_heap_entry jj = q->overflow_heap[j]; in cake_heap_swap()
1388 q->overflow_heap[i] = jj; in cake_heap_swap()
1389 q->overflow_heap[j] = ii; in cake_heap_swap()
1391 q->tins[ii.t].overflow_idx[ii.b] = j; in cake_heap_swap()
1392 q->tins[jj.t].overflow_idx[jj.b] = i; in cake_heap_swap()
1395 static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i) in cake_heap_get_backlog() argument
1397 struct cake_heap_entry ii = q->overflow_heap[i]; in cake_heap_get_backlog()
1399 return q->tins[ii.t].backlogs[ii.b]; in cake_heap_get_backlog()
1402 static void cake_heapify(struct cake_sched_data *q, u16 i) in cake_heapify() argument
1405 u32 mb = cake_heap_get_backlog(q, i); in cake_heapify()
1409 u32 l = m + m + 1; in cake_heapify()
1410 u32 r = l + 1; in cake_heapify()
1413 u32 lb = cake_heap_get_backlog(q, l); in cake_heapify()
1422 u32 rb = cake_heap_get_backlog(q, r); in cake_heapify()
1431 cake_heap_swap(q, i, m); in cake_heapify()
1439 static void cake_heapify_up(struct cake_sched_data *q, u16 i) in cake_heapify_up() argument
1442 u16 p = (i - 1) >> 1; in cake_heapify_up()
1443 u32 ib = cake_heap_get_backlog(q, i); in cake_heapify_up()
1444 u32 pb = cake_heap_get_backlog(q, p); in cake_heapify_up()
1447 cake_heap_swap(q, i, p); in cake_heapify_up()
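
cake_heapify() and cake_heapify_up() (lines 1383-1447) are a textbook implicit binary max-heap over per-queue backlogs: children of node i live at 2i+1 and 2i+2, the parent at (i-1)>>1, and the heaviest entry ends up at slot 0 so cake_drop() can pick the fattest queue. A generic sift-down sketch over a plain array, without the kernel's two-level tin/flow indexing:

    #include <stdio.h>

    static void swap_u32(unsigned int *a, unsigned int *b)
    {
        unsigned int t = *a; *a = *b; *b = t;
    }

    /* Sift the entry at index i down until neither child is larger,
     * keeping the maximum element at index 0.
     */
    static void sift_down(unsigned int *backlog, unsigned int n, unsigned int i)
    {
        while (1) {
            unsigned int m = i;
            unsigned int l = 2 * i + 1;
            unsigned int r = l + 1;

            if (l < n && backlog[l] > backlog[m])
                m = l;
            if (r < n && backlog[r] > backlog[m])
                m = r;
            if (m == i)
                break;
            swap_u32(&backlog[i], &backlog[m]);
            i = m;
        }
    }

    int main(void)
    {
        unsigned int backlog[] = { 10, 400, 25, 7, 300, 3, 1 };
        unsigned int n = sizeof(backlog) / sizeof(backlog[0]);

        /* build the heap bottom-up, as cake_drop() does on first use
         * (lines 1498-1502 above)
         */
        for (unsigned int i = n / 2; i > 0; i--)
            sift_down(backlog, n, i - 1);

        printf("largest backlog: %u\n", backlog[0]);   /* 400 */
        return 0;
    }
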
1455 static int cake_advance_shaper(struct cake_sched_data *q, in cake_advance_shaper() argument
1465 if (q->rate_ns) { in cake_advance_shaper()
1467 u64 global_dur = (len * q->rate_ns) >> q->rate_shft; in cake_advance_shaper()
1468 u64 failsafe_dur = global_dur + (global_dur >> 1); in cake_advance_shaper()
1478 q->time_next_packet = ktime_add_ns(q->time_next_packet, in cake_advance_shaper()
1481 q->failsafe_next_packet = \ in cake_advance_shaper()
1482 ktime_add_ns(q->failsafe_next_packet, in cake_advance_shaper()
1490 struct cake_sched_data *q = qdisc_priv(sch); in cake_drop() local
1498 if (!q->overflow_timeout) { in cake_drop()
1501 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2 - 1; i >= 0; i--) in cake_drop()
1502 cake_heapify(q, i); in cake_drop()
1504 q->overflow_timeout = 65535; in cake_drop()
1507 qq = q->overflow_heap[0]; in cake_drop()
1511 b = &q->tins[tin]; in cake_drop()
1516 q->overflow_timeout = 0; in cake_drop()
1524 q->buffer_used -= skb->truesize; in cake_drop()
1528 qdisc_tree_reduce_backlog(sch, 1, len); in cake_drop()
1534 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_drop()
1535 cake_advance_shaper(q, b, skb, now, true); in cake_drop()
1538 sch->q.qlen--; in cake_drop()
1540 cake_heapify(q, 0); in cake_drop()
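
cake_advance_shaper() (lines 1455-1482) charges each packet's serialization time against two virtual clocks: the main one by len * rate_ns >> rate_shft, and a failsafe one by 1.5x that (global_dur + (global_dur >> 1)). A sketch of that virtual-clock idea with simplified units, holding ns-per-byte as a plain integer rather than cake's shifted fixed point:

    #include <stdint.h>
    #include <stdio.h>

    struct shaper {
        uint64_t time_next_packet;      /* ns: earliest departure at the shaped rate */
        uint64_t failsafe_next_packet;  /* ns: hard bound advanced at 1.5x the cost  */
        uint64_t ns_per_byte;
    };

    /* Charge one packet of 'len' bytes against both virtual clocks. */
    static void advance_shaper(struct shaper *s, uint32_t len)
    {
        uint64_t dur = (uint64_t)len * s->ns_per_byte;

        s->time_next_packet += dur;                    /* main clock              */
        s->failsafe_next_packet += dur + (dur >> 1);   /* failsafe: 1.5x duration */
    }

    int main(void)
    {
        struct shaper s = { 0, 0, 80 };   /* 80 ns/byte == 100 Mbit/s */

        /* A packet may leave once "now" has caught up with time_next_packet;
         * the kernel resets both clocks at enqueue when the queue was empty
         * (lines 1709-1712 above), so idle time never accumulates credit.
         */
        for (int i = 0; i < 3; i++) {
            advance_shaper(&s, 1500);
            printf("next=%llu ns  failsafe=%llu ns\n",
                   (unsigned long long)s.time_next_packet,
                   (unsigned long long)s.failsafe_next_packet);
        }
        return 0;
    }
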
1604 struct cake_sched_data *q = qdisc_priv(sch); in cake_select_tin() local
1613 mark = (skb->mark & q->fwmark_mask) >> q->fwmark_shft; in cake_select_tin()
1614 wash = !!(q->rate_flags & CAKE_FLAG_WASH); in cake_select_tin()
1618 if (q->tin_mode == CAKE_DIFFSERV_BESTEFFORT) in cake_select_tin()
1621 else if (mark && mark <= q->tin_cnt) in cake_select_tin()
1622 tin = q->tin_order[mark - 1]; in cake_select_tin()
1626 TC_H_MIN(skb->priority) <= q->tin_cnt) in cake_select_tin()
1627 tin = q->tin_order[TC_H_MIN(skb->priority) - 1]; in cake_select_tin()
1632 tin = q->tin_index[dscp]; in cake_select_tin()
1634 if (unlikely(tin >= q->tin_cnt)) in cake_select_tin()
1638 return &q->tins[tin]; in cake_select_tin()
1644 struct cake_sched_data *q = qdisc_priv(sch); in cake_classify() local
1650 filter = rcu_dereference_bh(q->filter_list); in cake_classify()
1676 return cake_hash(*t, skb, flow_mode, flow, host) + 1; in cake_classify()
1684 struct cake_sched_data *q = qdisc_priv(sch); in cake_enqueue() local
1694 idx = cake_classify(sch, &b, skb, q->flow_mode, &ret); in cake_enqueue()
1709 if (!sch->q.qlen) { in cake_enqueue()
1710 if (ktime_before(q->time_next_packet, now)) { in cake_enqueue()
1711 q->failsafe_next_packet = now; in cake_enqueue()
1712 q->time_next_packet = now; in cake_enqueue()
1713 } else if (ktime_after(q->time_next_packet, now) && in cake_enqueue()
1714 ktime_after(q->failsafe_next_packet, now)) { in cake_enqueue()
1716 min(ktime_to_ns(q->time_next_packet), in cake_enqueue()
1718 q->failsafe_next_packet)); in cake_enqueue()
1720 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_enqueue()
1728 if (skb_is_gso(skb) && q->rate_flags & CAKE_FLAG_SPLIT_GSO) { in cake_enqueue()
1741 get_cobalt_cb(segs)->adjusted_len = cake_overhead(q, in cake_enqueue()
1745 sch->q.qlen++; in cake_enqueue()
1748 q->buffer_used += segs->truesize; in cake_enqueue()
1757 q->avg_window_bytes += slen; in cake_enqueue()
1759 qdisc_tree_reduce_backlog(sch, 1-numsegs, len-slen); in cake_enqueue()
1764 get_cobalt_cb(skb)->adjusted_len = cake_overhead(q, skb); in cake_enqueue()
1767 if (q->ack_filter) in cake_enqueue()
1768 ack = cake_ack_filter(q, flow); in cake_enqueue()
1775 q->buffer_used += skb->truesize - ack->truesize; in cake_enqueue()
1776 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_enqueue()
1777 cake_advance_shaper(q, b, ack, now, true); in cake_enqueue()
1779 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack)); in cake_enqueue()
1782 sch->q.qlen++; in cake_enqueue()
1783 q->buffer_used += skb->truesize; in cake_enqueue()
1792 q->avg_window_bytes += len; in cake_enqueue()
1795 if (q->overflow_timeout) in cake_enqueue()
1796 cake_heapify_up(q, b->overflow_idx[idx]); in cake_enqueue()
1799 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) { in cake_enqueue()
1801 ktime_to_ns(ktime_sub(now, q->last_packet_time)); in cake_enqueue()
1807 q->avg_packet_interval = \ in cake_enqueue()
1808 cake_ewma(q->avg_packet_interval, in cake_enqueue()
1810 (packet_interval > q->avg_packet_interval ? in cake_enqueue()
1813 q->last_packet_time = now; in cake_enqueue()
1815 if (packet_interval > q->avg_packet_interval) { in cake_enqueue()
1818 q->avg_window_begin)); in cake_enqueue()
1819 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC; in cake_enqueue()
1822 q->avg_peak_bandwidth = in cake_enqueue()
1823 cake_ewma(q->avg_peak_bandwidth, b, in cake_enqueue()
1824 b > q->avg_peak_bandwidth ? 2 : 8); in cake_enqueue()
1825 q->avg_window_bytes = 0; in cake_enqueue()
1826 q->avg_window_begin = now; in cake_enqueue()
1829 ktime_add_ms(q->last_reconfig_time, in cake_enqueue()
1831 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4; in cake_enqueue()
1836 q->avg_window_bytes = 0; in cake_enqueue()
1837 q->last_packet_time = now; in cake_enqueue()
1844 u16 host_load = 1; in cake_enqueue()
1855 if (cake_dsrc(q->flow_mode)) in cake_enqueue()
1858 if (cake_ddst(q->flow_mode)) in cake_enqueue()
1874 if (cake_dsrc(q->flow_mode)) in cake_enqueue()
1877 if (cake_ddst(q->flow_mode)) in cake_enqueue()
1882 if (q->buffer_used > q->buffer_max_used) in cake_enqueue()
1883 q->buffer_max_used = q->buffer_used; in cake_enqueue()
1885 if (q->buffer_used > q->buffer_limit) { in cake_enqueue()
1888 while (q->buffer_used > q->buffer_limit) { in cake_enqueue()
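
The autorate-ingress block in cake_enqueue() (lines 1799-1837) maintains shift-based EWMAs of the packet interarrival time and of the peak bandwidth estimate, with an asymmetric gain: shift 2 when the new sample exceeds the average, shift 8 otherwise (lines 1822-1824). A sketch of that EWMA form, in the style of the driver's cake_ewma() but with illustrative naming and sample values:

    #include <stdint.h>
    #include <stdio.h>

    /* Shift-based EWMA: approximately avg += (sample - avg) / 2^shift, done
     * without signed arithmetic by subtracting avg>>shift and adding
     * sample>>shift.
     */
    static uint64_t ewma(uint64_t avg, uint64_t sample, unsigned int shift)
    {
        avg -= avg >> shift;
        avg += sample >> shift;
        return avg;
    }

    int main(void)
    {
        /* track a bandwidth estimate: react fast (shift 2) when the sample is
         * above the current average, slowly (shift 8) when it is below
         */
        uint64_t est = 0;
        uint64_t samples[] = { 48000000, 52000000, 95000000, 94000000, 10000000 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
            est = ewma(est, samples[i], samples[i] > est ? 2 : 8);
            printf("sample %9llu -> estimate %9llu\n",
                   (unsigned long long)samples[i], (unsigned long long)est);
        }
        return 0;
    }
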
1899 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue_one() local
1900 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue_one()
1901 struct cake_flow *flow = &b->flows[q->cur_flow]; in cake_dequeue_one()
1908 b->backlogs[q->cur_flow] -= len; in cake_dequeue_one()
1911 q->buffer_used -= skb->truesize; in cake_dequeue_one()
1912 sch->q.qlen--; in cake_dequeue_one()
1914 if (q->overflow_timeout) in cake_dequeue_one()
1915 cake_heapify(q, b->overflow_idx[q->cur_flow]); in cake_dequeue_one()
1923 struct cake_sched_data *q = qdisc_priv(sch); in cake_clear_tin() local
1926 q->cur_tin = tin; in cake_clear_tin()
1927 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++) in cake_clear_tin()
1934 struct cake_sched_data *q = qdisc_priv(sch); in cake_dequeue() local
1935 struct cake_tin_data *b = &q->tins[q->cur_tin]; in cake_dequeue()
1947 if (!sch->q.qlen) in cake_dequeue()
1951 if (ktime_after(q->time_next_packet, now) && in cake_dequeue()
1952 ktime_after(q->failsafe_next_packet, now)) { in cake_dequeue()
1953 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
1954 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
1957 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
1962 if (!q->rate_ns) { in cake_dequeue()
1975 q->cur_tin++; in cake_dequeue()
1977 if (q->cur_tin >= q->tin_cnt) { in cake_dequeue()
1978 q->cur_tin = 0; in cake_dequeue()
1979 b = q->tins; in cake_dequeue()
1982 /* It's possible for q->qlen to be in cake_dequeue()
2001 for (tin = 0; tin < q->tin_cnt; tin++) { in cake_dequeue()
2002 b = q->tins + tin; in cake_dequeue()
2016 q->cur_tin = best_tin; in cake_dequeue()
2017 b = q->tins + best_tin; in cake_dequeue()
2039 q->cur_flow = flow - b->flows; in cake_dequeue()
2045 host_load = 1; in cake_dequeue()
2058 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2061 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2074 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2077 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2093 while (1) { in cake_dequeue()
2110 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2113 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2132 if (cake_dsrc(q->flow_mode)) in cake_dequeue()
2135 if (cake_ddst(q->flow_mode)) in cake_dequeue()
2149 !!(q->rate_flags & in cake_dequeue()
2155 if (q->rate_flags & CAKE_FLAG_INGRESS) { in cake_dequeue()
2156 len = cake_advance_shaper(q, b, skb, in cake_dequeue()
2163 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb)); in cake_dequeue()
2166 if (q->rate_flags & CAKE_FLAG_INGRESS) in cake_dequeue()
2181 len = cake_advance_shaper(q, b, skb, now, false); in cake_dequeue()
2185 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) { in cake_dequeue()
2186 u64 next = min(ktime_to_ns(q->time_next_packet), in cake_dequeue()
2187 ktime_to_ns(q->failsafe_next_packet)); in cake_dequeue()
2189 qdisc_watchdog_schedule_ns(&q->watchdog, next); in cake_dequeue()
2190 } else if (!sch->q.qlen) { in cake_dequeue()
2193 for (i = 0; i < q->tin_cnt; i++) { in cake_dequeue()
2194 if (q->tins[i].decaying_flow_count) { in cake_dequeue()
2197 q->tins[i].cparams.target); in cake_dequeue()
2199 qdisc_watchdog_schedule_ns(&q->watchdog, in cake_dequeue()
2206 if (q->overflow_timeout) in cake_dequeue()
2207 q->overflow_timeout--; in cake_dequeue()
2214 struct cake_sched_data *q = qdisc_priv(sch); in cake_reset() local
2217 if (!q->tins) in cake_reset()
2224 static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
2263 rate_ns >>= 1; in cake_set_rate()
2279 b->cparams.p_inc = 1 << 24; /* 1/256 */ in cake_set_rate()
2280 b->cparams.p_dec = 1 << 20; /* 1/4096 */ in cake_set_rate()
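
cake_set_rate() (lines 2263-2280) turns the configured rate into a time-per-unit in nanoseconds held as fixed point with an explicit shift; the matched rate_ns >>= 1 is its normalization step. The sketch below shows that style of conversion only: the 34-bit mantissa budget and the surrounding loop are illustrative, only the halving step appears among the matched lines, and whether the rate is counted in bits or bytes per second is left aside.

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Convert a rate (units per second) into ns-per-unit fixed point such that
     * time_ns(len) == (len * rate_ns) >> rate_shft.  Start with a generous
     * shift, then halve rate_ns (and drop the shift) until the mantissa fits
     * the chosen 34-bit budget.
     */
    static void rate_to_ns(uint64_t rate, uint64_t *rate_ns, unsigned int *rate_shft)
    {
        unsigned int shft = 34;
        uint64_t ns = (NSEC_PER_SEC << shft) / (rate ? rate : 1);

        while (ns >> 34) {
            ns >>= 1;
            shft--;
        }
        *rate_ns = ns;
        *rate_shft = shft;
    }

    int main(void)
    {
        uint64_t rate_ns, rate = 12500000;   /* e.g. 12.5 million units per second */
        unsigned int rate_shft;
        uint64_t len = 1500;

        rate_to_ns(rate, &rate_ns, &rate_shft);
        printf("rate_ns=%llu shift=%u -> %llu ns for %llu units (exact %llu)\n",
               (unsigned long long)rate_ns, rate_shft,
               (unsigned long long)((len * rate_ns) >> rate_shft),
               (unsigned long long)len,
               (unsigned long long)(len * NSEC_PER_SEC / rate));
        return 0;
    }
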
2285 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_besteffort() local
2286 struct cake_tin_data *b = &q->tins[0]; in cake_config_besteffort()
2288 u64 rate = q->rate_bps; in cake_config_besteffort()
2290 q->tin_cnt = 1; in cake_config_besteffort()
2292 q->tin_index = besteffort; in cake_config_besteffort()
2293 q->tin_order = normal_order; in cake_config_besteffort()
2296 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_besteffort()
2305 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_precedence() local
2307 u64 rate = q->rate_bps; in cake_config_precedence()
2311 q->tin_cnt = 8; in cake_config_precedence()
2312 q->tin_index = precedence; in cake_config_precedence()
2313 q->tin_order = normal_order; in cake_config_precedence()
2315 for (i = 0; i < q->tin_cnt; i++) { in cake_config_precedence()
2316 struct cake_tin_data *b = &q->tins[i]; in cake_config_precedence()
2318 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_precedence()
2319 us_to_ns(q->interval)); in cake_config_precedence()
2321 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_precedence()
2340 * Assured Forwarding 1 (AF1x) - x3
2344 * Precedence Class 1 (CS1)
2394 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv8() local
2396 u64 rate = q->rate_bps; in cake_config_diffserv8()
2400 q->tin_cnt = 8; in cake_config_diffserv8()
2403 q->tin_index = diffserv8; in cake_config_diffserv8()
2404 q->tin_order = normal_order; in cake_config_diffserv8()
2407 for (i = 0; i < q->tin_cnt; i++) { in cake_config_diffserv8()
2408 struct cake_tin_data *b = &q->tins[i]; in cake_config_diffserv8()
2410 cake_set_rate(b, rate, mtu, us_to_ns(q->target), in cake_config_diffserv8()
2411 us_to_ns(q->interval)); in cake_config_diffserv8()
2413 b->tin_quantum = max_t(u16, 1U, quantum); in cake_config_diffserv8()
2438 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv4() local
2440 u64 rate = q->rate_bps; in cake_config_diffserv4()
2443 q->tin_cnt = 4; in cake_config_diffserv4()
2446 q->tin_index = diffserv4; in cake_config_diffserv4()
2447 q->tin_order = bulk_order; in cake_config_diffserv4()
2450 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv4()
2451 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2452 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv4()
2453 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2454 cake_set_rate(&q->tins[2], rate >> 1, mtu, in cake_config_diffserv4()
2455 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2456 cake_set_rate(&q->tins[3], rate >> 2, mtu, in cake_config_diffserv4()
2457 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv4()
2460 q->tins[0].tin_quantum = quantum; in cake_config_diffserv4()
2461 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv4()
2462 q->tins[2].tin_quantum = quantum >> 1; in cake_config_diffserv4()
2463 q->tins[3].tin_quantum = quantum >> 2; in cake_config_diffserv4()
2475 struct cake_sched_data *q = qdisc_priv(sch); in cake_config_diffserv3() local
2477 u64 rate = q->rate_bps; in cake_config_diffserv3()
2480 q->tin_cnt = 3; in cake_config_diffserv3()
2483 q->tin_index = diffserv3; in cake_config_diffserv3()
2484 q->tin_order = bulk_order; in cake_config_diffserv3()
2487 cake_set_rate(&q->tins[0], rate, mtu, in cake_config_diffserv3()
2488 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2489 cake_set_rate(&q->tins[1], rate >> 4, mtu, in cake_config_diffserv3()
2490 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2491 cake_set_rate(&q->tins[2], rate >> 2, mtu, in cake_config_diffserv3()
2492 us_to_ns(q->target), us_to_ns(q->interval)); in cake_config_diffserv3()
2495 q->tins[0].tin_quantum = quantum; in cake_config_diffserv3()
2496 q->tins[1].tin_quantum = quantum >> 4; in cake_config_diffserv3()
2497 q->tins[2].tin_quantum = quantum >> 2; in cake_config_diffserv3()
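
The diffserv4/diffserv3 configuration matches (lines 2438-2497) size each tin's rate and scheduling quantum as power-of-two fractions of the base: >>4 is 1/16, >>2 is 1/4, >>1 is 1/2. A worked printout of just those splits; whether the per-tin values act as caps or as sharing thresholds is not modeled here:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long rate = 64000000;        /* arbitrary base rate, any unit */
        unsigned int shift4[4] = { 0, 4, 1, 2 };   /* diffserv4 tins 0..3 (lines 2450-2463) */
        unsigned int shift3[3] = { 0, 4, 2 };      /* diffserv3 tins 0..2 (lines 2487-2497) */

        for (unsigned int i = 0; i < 4; i++)
            printf("diffserv4 tin %u: rate >> %u = %llu (1/%u of base)\n",
                   i, shift4[i], rate >> shift4[i], 1u << shift4[i]);
        for (unsigned int i = 0; i < 3; i++)
            printf("diffserv3 tin %u: rate >> %u = %llu (1/%u of base)\n",
                   i, shift3[i], rate >> shift3[i], 1u << shift3[i]);
        return 0;
    }
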
2504 struct cake_sched_data *q = qdisc_priv(sch); in cake_reconfigure() local
2507 switch (q->tin_mode) { in cake_reconfigure()
2530 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) { in cake_reconfigure()
2532 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time; in cake_reconfigure()
2535 q->rate_ns = q->tins[ft].tin_rate_ns; in cake_reconfigure()
2536 q->rate_shft = q->tins[ft].tin_rate_shft; in cake_reconfigure()
2538 if (q->buffer_config_limit) { in cake_reconfigure()
2539 q->buffer_limit = q->buffer_config_limit; in cake_reconfigure()
2540 } else if (q->rate_bps) { in cake_reconfigure()
2541 u64 t = q->rate_bps * q->interval; in cake_reconfigure()
2544 q->buffer_limit = max_t(u32, t, 4U << 20); in cake_reconfigure()
2546 q->buffer_limit = ~0; in cake_reconfigure()
2551 q->buffer_limit = min(q->buffer_limit, in cake_reconfigure()
2553 q->buffer_config_limit)); in cake_reconfigure()
2559 struct cake_sched_data *q = qdisc_priv(sch); in cake_change() local
2560 struct nlattr *tb[TCA_CAKE_MAX + 1]; in cake_change()
2570 flow_mode = q->flow_mode; in cake_change()
2584 WRITE_ONCE(q->rate_bps, in cake_change()
2588 WRITE_ONCE(q->tin_mode, in cake_change()
2591 rate_flags = q->rate_flags; in cake_change()
2605 WRITE_ONCE(q->atm_mode, in cake_change()
2609 WRITE_ONCE(q->rate_overhead, in cake_change()
2613 q->max_netlen = 0; in cake_change()
2614 q->max_adjlen = 0; in cake_change()
2615 q->min_netlen = ~0; in cake_change()
2616 q->min_adjlen = ~0; in cake_change()
2622 q->max_netlen = 0; in cake_change()
2623 q->max_adjlen = 0; in cake_change()
2624 q->min_netlen = ~0; in cake_change()
2625 q->min_adjlen = ~0; in cake_change()
2629 WRITE_ONCE(q->rate_mpu, in cake_change()
2635 WRITE_ONCE(q->interval, max(interval, 1U)); in cake_change()
2641 WRITE_ONCE(q->target, max(target, 1U)); in cake_change()
2659 WRITE_ONCE(q->ack_filter, in cake_change()
2663 WRITE_ONCE(q->buffer_config_limit, in cake_change()
2674 WRITE_ONCE(q->fwmark_mask, nla_get_u32(tb[TCA_CAKE_FWMARK])); in cake_change()
2675 WRITE_ONCE(q->fwmark_shft, in cake_change()
2676 q->fwmark_mask ? __ffs(q->fwmark_mask) : 0); in cake_change()
2679 WRITE_ONCE(q->rate_flags, rate_flags); in cake_change()
2680 WRITE_ONCE(q->flow_mode, flow_mode); in cake_change()
2681 if (q->tins) { in cake_change()
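
The fwmark handling in cake_change() (lines 2674-2676, applied per packet at line 1613) extracts a tin index from skb->mark by masking and then shifting right by the position of the mask's lowest set bit (__ffs). A userspace sketch using __builtin_ctz as a stand-in for __ffs:

    #include <stdint.h>
    #include <stdio.h>

    /* Pull a bitfield out of a packet mark: keep the masked bits, then shift
     * them down so the field starts at bit 0.
     */
    static uint32_t mark_to_field(uint32_t mark, uint32_t mask)
    {
        unsigned int shift = mask ? (unsigned int)__builtin_ctz(mask) : 0;

        return (mark & mask) >> shift;
    }

    int main(void)
    {
        uint32_t mask = 0x0000ff00;   /* field lives in bits 8..15 */

        printf("%u\n", mark_to_field(0x00002a00, mask));   /* -> 42          */
        printf("%u\n", mark_to_field(0x12345678, mask));   /* -> 0x56 == 86  */
        return 0;
    }
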
2692 struct cake_sched_data *q = qdisc_priv(sch); in cake_destroy() local
2694 qdisc_watchdog_cancel(&q->watchdog); in cake_destroy()
2695 tcf_block_put(q->block); in cake_destroy()
2696 kvfree(q->tins); in cake_destroy()
2702 struct cake_sched_data *q = qdisc_priv(sch); in cake_init() local
2706 q->tin_mode = CAKE_DIFFSERV_DIFFSERV3; in cake_init()
2707 q->flow_mode = CAKE_FLOW_TRIPLE; in cake_init()
2709 q->rate_bps = 0; /* unlimited by default */ in cake_init()
2711 q->interval = 100000; /* 100ms default */ in cake_init()
2712 q->target = 5000; /* 5ms: codel RFC argues in cake_init()
2715 q->rate_flags |= CAKE_FLAG_SPLIT_GSO; in cake_init()
2716 q->cur_tin = 0; in cake_init()
2717 q->cur_flow = 0; in cake_init()
2719 qdisc_watchdog_init(&q->watchdog, sch); in cake_init()
2728 err = tcf_block_get(&q->block, &q->filter_list, sch, extack); in cake_init()
2733 for (i = 1; i <= CAKE_QUEUES; i++) in cake_init()
2736 q->tins = kvcalloc(CAKE_MAX_TINS, sizeof(struct cake_tin_data), in cake_init()
2738 if (!q->tins) in cake_init()
2742 struct cake_tin_data *b = q->tins + i; in cake_init()
2758 q->overflow_heap[k].t = i; in cake_init()
2759 q->overflow_heap[k].b = j; in cake_init()
2765 q->avg_peak_bandwidth = q->rate_bps; in cake_init()
2766 q->min_netlen = ~0; in cake_init()
2767 q->min_adjlen = ~0; in cake_init()
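
cake_init() (line 2733 above) fills the quantum_div[] table of 16-bit reciprocals so later per-packet code can scale a flow's quantum down by a host's active-flow count with a multiply and a shift instead of a divide. The 65535 / i fill below is an assumption (the loop body itself is not among the matched lines), and the kernel's surrounding deficit and dithering arithmetic is not reproduced:

    #include <stdint.h>
    #include <stdio.h>

    #define QUEUES 1024   /* stands in for CAKE_QUEUES */

    static uint16_t quantum_div[QUEUES + 1];

    /* Approximate quantum / host_load with one multiply and a shift, using a
     * precomputed 16-bit reciprocal; good enough for scheduler quanta and
     * avoids a per-packet integer division.
     */
    static uint32_t scaled_quantum(uint32_t quantum, uint16_t host_load)
    {
        return (quantum * quantum_div[host_load]) >> 16;
    }

    int main(void)
    {
        for (uint32_t i = 1; i <= QUEUES; i++)
            quantum_div[i] = 65535 / i;

        for (uint32_t i = 1; i <= 5; i++)
            printf("quantum 1514 across %u flows -> %u (exact %u)\n",
                   i, scaled_quantum(1514, (uint16_t)i), 1514 / i);
        return 0;
    }
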
2773 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump() local
2783 READ_ONCE(q->rate_bps), TCA_CAKE_PAD)) in cake_dump()
2786 flow_mode = READ_ONCE(q->flow_mode); in cake_dump()
2790 if (nla_put_u32(skb, TCA_CAKE_RTT, READ_ONCE(q->interval))) in cake_dump()
2793 if (nla_put_u32(skb, TCA_CAKE_TARGET, READ_ONCE(q->target))) in cake_dump()
2797 READ_ONCE(q->buffer_config_limit))) in cake_dump()
2800 rate_flags = READ_ONCE(q->rate_flags); in cake_dump()
2809 if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, READ_ONCE(q->ack_filter))) in cake_dump()
2816 if (nla_put_u32(skb, TCA_CAKE_DIFFSERV_MODE, READ_ONCE(q->tin_mode))) in cake_dump()
2823 if (nla_put_u32(skb, TCA_CAKE_OVERHEAD, READ_ONCE(q->rate_overhead))) in cake_dump()
2830 if (nla_put_u32(skb, TCA_CAKE_ATM, READ_ONCE(q->atm_mode))) in cake_dump()
2833 if (nla_put_u32(skb, TCA_CAKE_MPU, READ_ONCE(q->rate_mpu))) in cake_dump()
2840 if (nla_put_u32(skb, TCA_CAKE_FWMARK, READ_ONCE(q->fwmark_mask))) in cake_dump()
2846 return -1; in cake_dump()
2852 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_stats() local
2857 return -1; in cake_dump_stats()
2869 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth); in cake_dump_stats()
2870 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit); in cake_dump_stats()
2871 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used); in cake_dump_stats()
2872 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16)); in cake_dump_stats()
2873 PUT_STAT_U32(MAX_NETLEN, q->max_netlen); in cake_dump_stats()
2874 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen); in cake_dump_stats()
2875 PUT_STAT_U32(MIN_NETLEN, q->min_netlen); in cake_dump_stats()
2876 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen); in cake_dump_stats()
2895 for (i = 0; i < q->tin_cnt; i++) { in cake_dump_stats()
2896 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_dump_stats()
2898 ts = nla_nest_start_noflag(d->skb, i + 1); in cake_dump_stats()
2945 return -1; in cake_dump_stats()
2964 static void cake_unbind(struct Qdisc *q, unsigned long cl) in cake_unbind() argument
2971 struct cake_sched_data *q = qdisc_priv(sch); in cake_tcf_block() local
2975 return q->block; in cake_tcf_block()
2988 struct cake_sched_data *q = qdisc_priv(sch); in cake_dump_class_stats() local
2992 u32 idx = cl - 1; in cake_dump_class_stats()
2994 if (idx < CAKE_QUEUES * q->tin_cnt) { in cake_dump_class_stats()
2996 &q->tins[q->tin_order[idx / CAKE_QUEUES]]; in cake_dump_class_stats()
3014 return -1; in cake_dump_class_stats()
3020 return -1; in cake_dump_class_stats()
3049 return -1; in cake_dump_class_stats()
3056 return -1; in cake_dump_class_stats()
3061 struct cake_sched_data *q = qdisc_priv(sch); in cake_walk() local
3067 for (i = 0; i < q->tin_cnt; i++) { in cake_walk()
3068 struct cake_tin_data *b = &q->tins[q->tin_order[i]]; in cake_walk()
3075 if (!tc_qdisc_stats_dump(sch, i * CAKE_QUEUES + j + 1, in cake_walk()
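
The class-walk and per-class stats code (lines 2988-3075) encodes a qdisc class handle as tin * CAKE_QUEUES + flow + 1, with 0 reserved, and decodes it back with a divide (plus, in the kernel, the tin_order[] indirection). A sketch of the round trip with an illustrative queue count:

    #include <stdio.h>

    #define QUEUES 1024   /* flows per tin; stands in for CAKE_QUEUES */

    /* Encode (tin, flow) as a non-zero class id, and decode it again. */
    static unsigned long encode_cl(unsigned int tin, unsigned int flow)
    {
        return (unsigned long)tin * QUEUES + flow + 1;
    }

    static void decode_cl(unsigned long cl, unsigned int *tin, unsigned int *flow)
    {
        unsigned long idx = cl - 1;

        *tin = (unsigned int)(idx / QUEUES);
        *flow = (unsigned int)(idx % QUEUES);
    }

    int main(void)
    {
        unsigned int tin, flow;
        unsigned long cl = encode_cl(2, 37);

        decode_cl(cl, &tin, &flow);
        printf("cl=%lu -> tin=%u flow=%u\n", cl, tin, flow);   /* tin=2 flow=37 */
        return 0;
    }
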