/linux-6.12.1/samples/bpf/ |
D | hbm.c |
    163  struct hbm_queue_stats qstats = {0};  in run_bpf_prog()  local
    189  qstats.rate = rate;  in run_bpf_prog()
    190  qstats.stats = stats_flag ? 1 : 0;  in run_bpf_prog()
    191  qstats.loopback = loopback_flag ? 1 : 0;  in run_bpf_prog()
    192  qstats.no_cn = no_cn_flag ? 1 : 0;  in run_bpf_prog()
    193  if (bpf_map_update_elem(queue_stats_fd, &key, &qstats, BPF_ANY)) {  in run_bpf_prog()
    224  bpf_map_lookup_elem(queue_stats_fd, &key, &qstats);  in run_bpf_prog()
    232  last_cg_tx_bytes = qstats.bytes_total;  in run_bpf_prog()
    253  bpf_map_lookup_elem(queue_stats_fd, &key, &qstats);  in run_bpf_prog()
    254  new_cg_tx_bytes = qstats.bytes_total;  in run_bpf_prog()
    [all …]
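
The run_bpf_prog() lines above configure the shared queue_stats map entry and later read bytes_total back to see how much the cgroup sent. Below is a hedged userspace sketch of that map round-trip using plain libbpf calls; the pinned-map path, the struct layout beyond the excerpted fields, and the rate units are assumptions, not the sample's actual setup (the real sample shares hbm.h with its BPF program and takes the fd from the loaded object).

/*
 * Sketch only: update a qstats map entry, then read the byte counter twice
 * and report the delta, mirroring the run_bpf_prog() excerpt above.
 */
#include <stdio.h>
#include <bpf/bpf.h>            /* bpf_obj_get, bpf_map_update_elem, ... */
#include <linux/bpf.h>          /* BPF_ANY */

struct hbm_queue_stats {        /* simplified stand-in for the sample's struct */
    unsigned long long rate, stats, loopback, no_cn;
    unsigned long long bytes_total;
};

int main(void)
{
    struct hbm_queue_stats qstats = {0};
    unsigned long long last_cg_tx_bytes, new_cg_tx_bytes;
    unsigned int key = 0;
    int fd = bpf_obj_get("/sys/fs/bpf/queue_stats");   /* assumed pin path */

    if (fd < 0)
        return 1;

    qstats.rate = 1000;          /* target rate; units follow the sample's convention */
    qstats.stats = 1;            /* ask the BPF side to keep statistics */
    if (bpf_map_update_elem(fd, &key, &qstats, BPF_ANY))
        return 1;

    if (bpf_map_lookup_elem(fd, &key, &qstats))
        return 1;
    last_cg_tx_bytes = qstats.bytes_total;

    /* ... traffic runs here ... */

    if (bpf_map_lookup_elem(fd, &key, &qstats))
        return 1;
    new_cg_tx_bytes = qstats.bytes_total;
    printf("cgroup sent %llu bytes\n", new_cg_tx_bytes - last_cg_tx_bytes);
    return 0;
}
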
|
/linux-6.12.1/net/core/ |
D | gen_stats.c |
    340  static void gnet_stats_add_queue_cpu(struct gnet_stats_queue *qstats,  in gnet_stats_add_queue_cpu()  argument
    348  qstats->qlen += qcpu->qlen;  in gnet_stats_add_queue_cpu()
    349  qstats->backlog += qcpu->backlog;  in gnet_stats_add_queue_cpu()
    350  qstats->drops += qcpu->drops;  in gnet_stats_add_queue_cpu()
    351  qstats->requeues += qcpu->requeues;  in gnet_stats_add_queue_cpu()
    352  qstats->overlimits += qcpu->overlimits;  in gnet_stats_add_queue_cpu()
    356  void gnet_stats_add_queue(struct gnet_stats_queue *qstats,  in gnet_stats_add_queue()  argument
    361  gnet_stats_add_queue_cpu(qstats, cpu);  in gnet_stats_add_queue()
    363  qstats->qlen += q->qlen;  in gnet_stats_add_queue()
    364  qstats->backlog += q->backlog;  in gnet_stats_add_queue()
    [all …]
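
gnet_stats_add_queue() is the helper that qdiscs such as sch_mq.c and sch_mqprio.c (further down this listing) call to fold a child's queue counters into a parent total, using per-CPU copies when they exist and the shared copy otherwise. The following is a minimal userspace sketch of that fold under simplified assumptions: a fixed array stands in for the kernel's per-CPU allocation, and the struct is trimmed to the fields the excerpt touches.

#include <stdio.h>

struct gnet_stats_queue {       /* trimmed stand-in, not the kernel struct */
    unsigned int qlen, backlog, drops, requeues, overlimits;
};

static void add_queue_cpu(struct gnet_stats_queue *qstats,
                          const struct gnet_stats_queue *cpu, int ncpus)
{
    for (int i = 0; i < ncpus; i++) {
        qstats->qlen       += cpu[i].qlen;
        qstats->backlog    += cpu[i].backlog;
        qstats->drops      += cpu[i].drops;
        qstats->requeues   += cpu[i].requeues;
        qstats->overlimits += cpu[i].overlimits;
    }
}

static void add_queue(struct gnet_stats_queue *qstats,
                      const struct gnet_stats_queue *cpu, int ncpus,
                      const struct gnet_stats_queue *q)
{
    if (cpu) {                  /* per-CPU counters available */
        add_queue_cpu(qstats, cpu, ncpus);
    } else {                    /* fall back to the single shared copy */
        qstats->qlen       += q->qlen;
        qstats->backlog    += q->backlog;
        qstats->drops      += q->drops;
        qstats->requeues   += q->requeues;
        qstats->overlimits += q->overlimits;
    }
}

int main(void)
{
    struct gnet_stats_queue percpu[2] = {
        { .qlen = 1, .backlog = 1500, .drops = 2 },
        { .qlen = 2, .backlog = 3000, .drops = 1 },
    };
    struct gnet_stats_queue total = { 0 };

    add_queue(&total, percpu, 2, NULL);
    printf("qlen=%u backlog=%u drops=%u\n",
           total.qlen, total.backlog, total.drops);
    return 0;
}
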
|
/linux-6.12.1/net/sched/ |
D | sch_skbprio.c |
    35   struct gnet_stats_queue qstats[SKBPRIO_MAX_PRIORITY];  member
    87   q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    103  q->qstats[prio].drops++;  in skbprio_enqueue()
    104  q->qstats[prio].overlimits++;  in skbprio_enqueue()
    110  q->qstats[prio].backlog += qdisc_pkt_len(skb);  in skbprio_enqueue()
    119  q->qstats[lp].backlog -= qdisc_pkt_len(to_drop);  in skbprio_enqueue()
    120  q->qstats[lp].drops++;  in skbprio_enqueue()
    121  q->qstats[lp].overlimits++;  in skbprio_enqueue()
    154  q->qstats[q->highest_prio].backlog -= qdisc_pkt_len(skb);  in skbprio_dequeue()
    191  memset(&q->qstats, 0, sizeof(q->qstats));  in skbprio_init()
    [all …]
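
sch_skbprio keeps one gnet_stats_queue per priority and charges overflow to the lowest priority: on a full queue, either the new low-priority packet is dropped or a queued lowest-priority packet is evicted, and that priority's drops/overlimits are bumped. A rough, self-contained sketch of that accounting follows; the queue contents, sizes, and the externally supplied lowest_prio are simplifications, not the qdisc's real bookkeeping.

#include <stdio.h>

#define MAX_PRIO 4

struct qstats { unsigned int backlog, drops, overlimits; };

static struct qstats qstats[MAX_PRIO];
static unsigned int qlen, limit = 2;

/* lowest_prio is tracked internally by the real qdisc; passed in for brevity */
static void enqueue(unsigned int prio, unsigned int pkt_len, unsigned int lowest_prio)
{
    if (qlen >= limit) {
        if (prio <= lowest_prio) {          /* new packet loses: drop it */
            qstats[prio].drops++;
            qstats[prio].overlimits++;
            return;
        }
        /* otherwise evict one lowest-priority packet (its real length is
         * used in the kernel; assumed equal to pkt_len here) */
        qstats[lowest_prio].backlog -= pkt_len;
        qstats[lowest_prio].drops++;
        qstats[lowest_prio].overlimits++;
        qlen--;
    }
    qstats[prio].backlog += pkt_len;
    qlen++;
}

int main(void)
{
    enqueue(1, 100, 1);
    enqueue(3, 100, 1);
    enqueue(3, 100, 1);                     /* queue full: prio 1 pays */
    printf("prio1 drops=%u, prio3 backlog=%u\n",
           qstats[1].drops, qstats[3].backlog);
    return 0;
}
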
|
D | sch_mq.c |
    45   .qstats = &sch->qstats,  in mq_offload_stats()
    136  memset(&sch->qstats, 0, sizeof(sch->qstats));  in mq_dump()
    149  gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,  in mq_dump()
    150  &qdisc->qstats);  in mq_dump()
|
D | sch_gred.c |
    118  return sch->qstats.backlog;  in gred_backlog()
    182  if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=  in gred_enqueue()
    279  if (!sch->qstats.backlog)  in gred_dequeue()
    348  opt->set.qstats = &sch->qstats;  in gred_offload()
    386  table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;  in gred_offload_dump_stats()
    390  sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;  in gred_offload_dump_stats()
    391  sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;  in gred_offload_dump_stats()
    392  sch->qstats.drops += hw_stats->stats.qstats[i].drops;  in gred_offload_dump_stats()
    393  sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;  in gred_offload_dump_stats()
    394  sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;  in gred_offload_dump_stats()
|
D | sch_fifo.c |
    22   if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=  in bfifo_enqueue()
    46   prev_backlog = sch->qstats.backlog;  in pfifo_tail_enqueue()
    52   qdisc_tree_reduce_backlog(sch, 0, prev_backlog - sch->qstats.backlog);  in pfifo_tail_enqueue()
    92   qopt.stats.qstats = &sch->qstats;  in fifo_offload_dump()
|
D | sch_mqprio.c |
    564  memset(&sch->qstats, 0, sizeof(sch->qstats));  in mqprio_dump()
    577  gnet_stats_add_queue(&sch->qstats, qdisc->cpu_qstats,  in mqprio_dump()
    578  &qdisc->qstats);  in mqprio_dump()
    669  struct gnet_stats_queue qstats = {0};  in mqprio_dump_class_stats()  local
    691  gnet_stats_add_queue(&qstats, qdisc->cpu_qstats,  in mqprio_dump_class_stats()
    692  &qdisc->qstats);  in mqprio_dump_class_stats()
    697  qlen = qdisc_qlen(sch) + qstats.qlen;  in mqprio_dump_class_stats()
    703  gnet_stats_copy_queue(d, NULL, &qstats, qlen) < 0)  in mqprio_dump_class_stats()
|
D | sch_ets.c |
    45   struct gnet_stats_queue qstats;  member
    125  qopt.replace_params.qstats = &sch->qstats;  in ets_offload_change()
    185  qopt.stats.qstats = &sch->qstats;  in ets_offload_dump()
    431  cl->qstats.drops++;  in ets_qdisc_enqueue()
    442  sch->qstats.backlog += len;  in ets_qdisc_enqueue()
    682  memset(&q->classes[i].qstats, 0, sizeof(q->classes[i].qstats));  in ets_qdisc_change()
|
D | sch_prio.c |
    88   sch->qstats.backlog += len;  in prio_enqueue()
    156  opt.replace_params.qstats = &sch->qstats;  in prio_offload()
    254  .qstats = &sch->qstats,  in prio_dump_offload()
|
D | sch_red.c |
    80   child->qstats.backlog);  in red_enqueue()
    133  sch->qstats.backlog += len;  in red_enqueue()
    205  opt.set.qstats = &sch->qstats;  in red_offload()
    402  .stats.qstats = &sch->qstats,  in red_dump_offload_stats()
|
D | sch_tbf.c |
    156  qopt.replace_params.qstats = &sch->qstats;  in tbf_offload_change()
    183  qopt.stats.qstats = &sch->qstats;  in tbf_offload_dump()
    259  sch->qstats.backlog += len;  in tbf_enqueue()
|
D | sch_fq_codel.c |
    178  sch->qstats.drops += i;  in fq_codel_drop()
    179  sch->qstats.backlog -= len;  in fq_codel_drop()
    220  prev_backlog = sch->qstats.backlog;  in fq_codel_enqueue()
    233  prev_backlog -= sch->qstats.backlog;  in fq_codel_enqueue()
    268  sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
    303  skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,  in fq_codel_dequeue()
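
fq_codel_enqueue() snapshots sch->qstats.backlog before a batch drop and then reports only the difference upward; the kernel does that with qdisc_tree_reduce_backlog(), and the same idiom appears in the sch_fifo.c and sch_hhf.c entries in this listing. A small standalone sketch of that snapshot-and-delta pattern, with the queue reduced to a pair of counters:

#include <stdio.h>

struct qstats { unsigned int backlog, drops; };

/* Drop n packets of pkt_len bytes each and account for them. */
static unsigned int batch_drop(struct qstats *q, unsigned int n, unsigned int pkt_len)
{
    q->drops   += n;
    q->backlog -= n * pkt_len;
    return n;
}

int main(void)
{
    struct qstats q = { .backlog = 10 * 1500 };
    unsigned int prev_backlog, dropped;

    prev_backlog = q.backlog;               /* snapshot before dropping */
    dropped = batch_drop(&q, 3, 1500);

    /* report only the delta, the way qdisc_tree_reduce_backlog() would */
    printf("tell ancestors: %u packets, %u bytes gone\n",
           dropped, prev_backlog - q.backlog);
    return 0;
}
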
|
D | sch_drr.c |
    22   struct gnet_stats_queue qstats;  member
    271  gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)  in drr_dump_class_stats()
    353  cl->qstats.drops++;  in drr_enqueue()
    364  sch->qstats.backlog += len;  in drr_enqueue()
|
D | sch_hhf.c |
    405  prev_backlog = sch->qstats.backlog;  in hhf_enqueue()
    414  qdisc_tree_reduce_backlog(sch, 1, prev_backlog - sch->qstats.backlog);  in hhf_enqueue()
    565  prev_backlog = sch->qstats.backlog;  in hhf_change()
    572  prev_backlog - sch->qstats.backlog);  in hhf_change()
|
D | sch_codel.c |
    45   sch->qstats.backlog -= qdisc_pkt_len(skb);  in dequeue_func()
    64   skb = codel_dequeue(sch, &sch->qstats.backlog, &q->params, &q->vars,  in codel_qdisc_dequeue()
|
D | sch_pie.c |
    96   if (!pie_drop_early(sch, &q->params, &q->vars, sch->qstats.backlog,  in pie_qdisc_enqueue()
    431  pie_calculate_probability(&q->params, &q->vars, sch->qstats.backlog);  in pie_timer()
    528  pie_process_dequeue(skb, &q->params, &q->vars, sch->qstats.backlog);  in pie_qdisc_dequeue()
|
/linux-6.12.1/drivers/net/ethernet/broadcom/bnx2x/ |
D | bnx2x_stats.c |
    952  struct bnx2x_eth_q_stats *qstats =  in bnx2x_storm_stats_update()  local
    976  qstats->total_bytes_received_hi =  in bnx2x_storm_stats_update()
    977  qstats->total_broadcast_bytes_received_hi;  in bnx2x_storm_stats_update()
    978  qstats->total_bytes_received_lo =  in bnx2x_storm_stats_update()
    979  qstats->total_broadcast_bytes_received_lo;  in bnx2x_storm_stats_update()
    981  ADD_64(qstats->total_bytes_received_hi,  in bnx2x_storm_stats_update()
    982  qstats->total_multicast_bytes_received_hi,  in bnx2x_storm_stats_update()
    983  qstats->total_bytes_received_lo,  in bnx2x_storm_stats_update()
    984  qstats->total_multicast_bytes_received_lo);  in bnx2x_storm_stats_update()
    986  ADD_64(qstats->total_bytes_received_hi,  in bnx2x_storm_stats_update()
    [all …]
|
D | bnx2x_stats.h |
    437  ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
    452  ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
    465  ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
    470  qstats->t##_lo = qstats_old->t##_lo + le32_to_cpu(s.lo); \
    471  qstats->t##_hi = qstats_old->t##_hi + le32_to_cpu(s.hi) \
    472  + ((qstats->t##_lo < qstats_old->t##_lo) ? 1 : 0); \
    477  qstats_old->f = qstats->f; \
    482  ADD_64(estats->s##_hi, qstats->s##_hi, \
    483  estats->s##_lo, qstats->s##_lo); \
    486  qstats_old->s##_hi_old = qstats->s##_hi; \
    [all …]
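
These bnx2x macros keep 64-bit totals as separate _hi/_lo 32-bit halves and detect a carry by checking whether the low half wrapped (lines 470-472 above). A compact standalone illustration of that carry handling, using generic names rather than the driver's macros:

#include <stdio.h>
#include <stdint.h>

struct split64 { uint32_t hi, lo; };

/* Add a 32-bit delta to a hi/lo pair, propagating the carry. */
static void add_extend_64(struct split64 *c, uint32_t diff)
{
    uint32_t old_lo = c->lo;

    c->lo += diff;
    c->hi += (c->lo < old_lo);      /* low half wrapped -> carry into hi */
}

int main(void)
{
    struct split64 bytes = { .hi = 0, .lo = 0xfffffff0u };

    add_extend_64(&bytes, 0x20);    /* forces the low word to wrap */
    printf("hi=%u lo=0x%x (== %llu)\n",
           (unsigned)bytes.hi, (unsigned)bytes.lo,
           ((unsigned long long)bytes.hi << 32) | bytes.lo);
    return 0;
}
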
|
/linux-6.12.1/drivers/infiniband/hw/hfi1/ |
D | vnic_main.c |
    24   #define SUM_GRP_COUNTERS(stats, qstats, x_grp) do { \  argument
    26   for (src64 = &qstats->x_grp.unicast, \
    45   struct opa_vnic_stats *qstats = &vinfo->stats[i];  in hfi1_vnic_update_stats()  local
    50   stats->tx_drop_state += qstats->tx_drop_state;  in hfi1_vnic_update_stats()
    51   stats->tx_dlid_zero += qstats->tx_dlid_zero;  in hfi1_vnic_update_stats()
    53   SUM_GRP_COUNTERS(stats, qstats, tx_grp);  in hfi1_vnic_update_stats()
    60   struct opa_vnic_stats *qstats = &vinfo->stats[i];  in hfi1_vnic_update_stats()  local
    65   stats->rx_drop_state += qstats->rx_drop_state;  in hfi1_vnic_update_stats()
    66   stats->rx_oversize += qstats->rx_oversize;  in hfi1_vnic_update_stats()
    67   stats->rx_runt += qstats->rx_runt;  in hfi1_vnic_update_stats()
    [all …]
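
SUM_GRP_COUNTERS() walks a block of u64 counters with two pointers, starting at the group's first member (unicast, per line 26) and adding each per-queue value into the aggregate. Below is a hedged sketch of that pointer-walk idiom; apart from unicast, the group member names are invented stand-ins, not the opa_vnic field names, and the group is assumed to contain only u64 counters so the walk is well defined in practice.

#include <stdio.h>
#include <stdint.h>

struct cntr_grp {                 /* u64-only group, like the kernel's grp stats */
    uint64_t unicast;             /* first member: loop start */
    uint64_t mcastbcast;          /* invented */
    uint64_t s_64;                /* invented */
    uint64_t s_1519_max;          /* assumed last member: loop bound */
};

#define SUM_GRP(dst, src) do {                                \
    uint64_t *d = &(dst)->unicast, *s = &(src)->unicast;      \
    while (d <= &(dst)->s_1519_max)                           \
        *d++ += *s++;                                         \
} while (0)

int main(void)
{
    struct cntr_grp total = {0};
    struct cntr_grp per_queue[2] = {
        { .unicast = 10, .s_64 = 3 },
        { .unicast = 5,  .s_1519_max = 1 },
    };

    for (int i = 0; i < 2; i++)
        SUM_GRP(&total, &per_queue[i]);

    printf("unicast=%llu s_64=%llu s_1519_max=%llu\n",
           (unsigned long long)total.unicast,
           (unsigned long long)total.s_64,
           (unsigned long long)total.s_1519_max);
    return 0;
}
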
|
/linux-6.12.1/include/net/ |
D | sch_generic.h |
    119  struct gnet_stats_queue qstats;  member
    525  __u32 qlen = q->qstats.qlen;  in qdisc_qlen_sum()
    886  sch->qstats.backlog -= qdisc_pkt_len(skb);  in qdisc_qstats_backlog_dec()
    898  sch->qstats.backlog += qdisc_pkt_len(skb);  in qdisc_qstats_backlog_inc()
    924  sch->qstats.drops += count;  in __qdisc_qstats_drop()
    927  static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)  in qstats_drop_inc()  argument
    929  qstats->drops++;  in qstats_drop_inc()
    932  static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)  in qstats_overlimit_inc()  argument
    934  qstats->overlimits++;  in qstats_overlimit_inc()
    939  qstats_drop_inc(&sch->qstats);  in qdisc_qstats_drop()
    [all …]
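
sch_generic.h layers small static inline helpers: some operate directly on a gnet_stats_queue (qstats_drop_inc(), qstats_overlimit_inc()), while wrappers take the qdisc and touch its embedded qstats (backlog inc/dec, drop). A simplified userspace sketch of that layering; the qdisc structure and wrapper names here are stand-ins, not the kernel's struct Qdisc or its exact helpers.

#include <stdio.h>

struct gnet_stats_queue { unsigned int qlen, backlog, drops, overlimits; };

struct qdisc {                           /* stand-in for struct Qdisc */
    struct gnet_stats_queue qstats;      /* embedded, like member line 119 */
};

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
    qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
    qstats->overlimits++;
}

/* object-level wrappers, mirroring qdisc_qstats_backlog_inc()/_dec()/_drop() */
static inline void qdisc_backlog_inc(struct qdisc *sch, unsigned int pkt_len)
{
    sch->qstats.backlog += pkt_len;
}

static inline void qdisc_backlog_dec(struct qdisc *sch, unsigned int pkt_len)
{
    sch->qstats.backlog -= pkt_len;
}

static inline void qdisc_drop_one(struct qdisc *sch)
{
    qstats_drop_inc(&sch->qstats);
}

int main(void)
{
    struct qdisc sch = {0};

    qdisc_backlog_inc(&sch, 1500);
    qdisc_backlog_dec(&sch, 1500);
    qdisc_drop_one(&sch);
    printf("backlog=%u drops=%u\n", sch.qstats.backlog, sch.qstats.drops);
    return 0;
}
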
|
D | pkt_cls.h |
    814  struct gnet_stats_queue *qstats;  member
    891  struct gnet_stats_queue *qstats;  member
    930  struct gnet_stats_queue *qstats;  member
    936  struct gnet_stats_queue qstats[MAX_DPs];  member
    963  struct gnet_stats_queue *qstats;  member
    1004 struct gnet_stats_queue *qstats;  member
    1033 struct gnet_stats_queue *qstats;  member
|
D | gen_stats.h | 63 void gnet_stats_add_queue(struct gnet_stats_queue *qstats,
|
/linux-6.12.1/Documentation/networking/ |
D | gen_stats.rst |
    26   struct gnet_stats_queue qstats;
    33   mystruct->qstats.backlog += skb->pkt_len;
    50   gnet_stats_copy_queue(&dump, &mystruct->qstats) < 0 ||
|
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/abm/ |
D | qdisc.c |
    462  struct gnet_stats_queue *qstats)  in nfp_abm_stats_calculate()  argument
    466  qstats->qlen += new->backlog_pkts - old->backlog_pkts;  in nfp_abm_stats_calculate()
    467  qstats->backlog += new->backlog_bytes - old->backlog_bytes;  in nfp_abm_stats_calculate()
    468  qstats->overlimits += new->overlimits - old->overlimits;  in nfp_abm_stats_calculate()
    469  qstats->drops += new->drops - old->drops;  in nfp_abm_stats_calculate()
    503  &stats->bstats[i], &stats->qstats[i]);  in nfp_abm_gred_stats()
    654  stats->bstats, stats->qstats);  in nfp_abm_red_stats()
    811  stats->bstats, stats->qstats);  in nfp_abm_mq_stats()
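
nfp_abm_stats_calculate() turns free-running hardware counters into qstats increments by subtracting the previously seen snapshot. A standalone sketch of that delta accounting; the hw_stats struct is a trimmed stand-in that borrows the field names visible in the excerpt, and everything around it is simplified.

#include <stdio.h>
#include <stdint.h>

struct gnet_stats_queue { unsigned int qlen, backlog, drops, overlimits; };

struct hw_stats {                 /* free-running counters read from hardware */
    uint64_t backlog_pkts, backlog_bytes, overlimits, drops;
};

static void stats_calculate(const struct hw_stats *new, const struct hw_stats *old,
                            struct gnet_stats_queue *qstats)
{
    qstats->qlen       += new->backlog_pkts  - old->backlog_pkts;
    qstats->backlog    += new->backlog_bytes - old->backlog_bytes;
    qstats->overlimits += new->overlimits    - old->overlimits;
    qstats->drops      += new->drops         - old->drops;
}

int main(void)
{
    struct hw_stats old = { .backlog_pkts = 2, .backlog_bytes = 3000, .drops = 10 };
    struct hw_stats new = { .backlog_pkts = 3, .backlog_bytes = 4500, .drops = 14 };
    struct gnet_stats_queue qstats = {0};

    stats_calculate(&new, &old, &qstats);
    old = new;                    /* roll the snapshot forward for next time */
    printf("drops+=%u backlog+=%u\n", qstats.drops, qstats.backlog);
    return 0;
}
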
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlxsw/ |
D | spectrum_qdisc.c |
    572  stats_ptr->qstats->drops += drops;  in mlxsw_sp_qdisc_update_stats()
    573  stats_ptr->qstats->backlog += mlxsw_sp_cells_bytes(mlxsw_sp, backlog);  in mlxsw_sp_qdisc_update_stats()
    749  struct gnet_stats_queue *qstats)  in mlxsw_sp_qdisc_leaf_unoffload()  argument
    755  qstats->backlog -= backlog;  in mlxsw_sp_qdisc_leaf_unoffload()
    766  mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);  in mlxsw_sp_qdisc_red_unoffload()
    818  stats_ptr->qstats->overlimits += overlimits;  in mlxsw_sp_qdisc_get_red_stats()
    1059 mlxsw_sp_qdisc_leaf_unoffload(mlxsw_sp_port, mlxsw_sp_qdisc, p->qstats);  in mlxsw_sp_qdisc_tbf_unoffload()
    1420 struct gnet_stats_queue *qstats)  in __mlxsw_sp_qdisc_ets_unoffload()  argument
    1426 qstats->backlog -= backlog;  in __mlxsw_sp_qdisc_ets_unoffload()
    1437 p->qstats);  in mlxsw_sp_qdisc_prio_unoffload()
    [all …]
|