/linux-6.12.1/tools/testing/selftests/tc-testing/tc-tests/qdiscs/ |
D | fq_codel.json |
  17 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms …
  38 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 1000p flows 1024 quantum.*target 5ms i…
  59 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms …
  80 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 2ms …
  101 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms …
  122 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum 9000 target …
  143 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms …
  164 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms …
  185 …"matchPattern": "qdisc fq_codel 1: root refcnt [0-9]+ limit 10240p flows 1024 quantum.*target 5ms …
  203 …"cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_codel limit 1000 flows 256 drop_batch …
  [all …]
|
D | fq_pie.json |
  4 "name": "Create FQ-PIE with invalid number of flows",
  14 "cmdUnderTest": "$TC qdisc add dev $DUMMY handle 1: root fq_pie flows 65536",
  17 "matchPattern": "qdisc fq_pie 1: root refcnt 2 limit 10240p flows 65536",
|
/linux-6.12.1/samples/pktgen/ |
D | pktgen_sample04_many_flows.sh |
  4 # Script example for many flows testing
  6 # Number of simultaneous flows limited by variable $FLOWS
  37 # Limiting the number of concurrent flows ($FLOWS)
  40 [ -z "$FLOWS" ] && FLOWS="8000"
  88 # Limit number of flows (max 65535)
  89 pg_set $dev "flows $FLOWS"
|
/linux-6.12.1/include/net/ |
D | fq_impl.h |
  36 idx = flow - fq->flows; in __fq_adjust_removal()
  152 flow = &fq->flows[idx]; in fq_flow_classify()
  160 tin->flows++; in fq_flow_classify()
  173 struct fq_flow *cur = &fq->flows[i]; in fq_find_fattest_flow()
  361 fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL); in fq_init()
  362 if (!fq->flows) in fq_init()
  367 kvfree(fq->flows); in fq_init()
  368 fq->flows = NULL; in fq_init()
  373 fq_flow_init(&fq->flows[i]); in fq_init()
  384 fq_flow_reset(fq, &fq->flows[i], free_func); in fq_reset()
  [all …]
|
D | fq.h |
  43 * pull interleaved packets out of the associated flows.
  57 u32 flows; member
  65 * @limit: max number of packets that can be queued across all flows
  66 * @backlog: number of packets queued across all flows
  69 struct fq_flow *flows; member
|
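The fq_impl.h and fq.h hits above show the common pattern behind these queues: a flat flows table of flows_cnt entries is allocated once, and each packet is mapped to one entry by hashing. Below is a minimal user-space C sketch of that idea, assuming plain calloc in place of the kernel's kvcalloc and a made-up hash32() helper standing in for the packet hash; it only illustrates how a hash indexes the flows table.

    #include <stdint.h>
    #include <stdlib.h>

    struct flow {
        unsigned int backlog;   /* packets currently queued on this flow */
    };

    struct fq {
        struct flow *flows;     /* flows table [flows_cnt] */
        uint32_t flows_cnt;     /* number of flows */
    };

    /* Hypothetical mixer standing in for the kernel's packet hash. */
    static uint32_t hash32(uint32_t x)
    {
        x ^= x >> 16;
        x *= 0x7feb352dU;
        x ^= x >> 15;
        return x;
    }

    static int fq_init(struct fq *fq, uint32_t flows_cnt)
    {
        fq->flows_cnt = flows_cnt;
        fq->flows = calloc(flows_cnt, sizeof(fq->flows[0]));
        return fq->flows ? 0 : -1;
    }

    /* Map a packet (identified here by its flow hash) to one flow slot. */
    static struct flow *fq_classify(struct fq *fq, uint32_t pkt_hash)
    {
        uint32_t idx = hash32(pkt_hash) % fq->flows_cnt;

        return &fq->flows[idx];
    }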
/linux-6.12.1/net/sched/ |
D | sch_fq_codel.c |
  29 * Packets are classified (internal classifier or external) on flows.
  30 * This is a Stochastic model (as we use a hash, several flows
  33 * Flows are linked onto two (Round Robin) lists,
  34 * so that new flows have priority on old ones.
  53 struct fq_codel_flow *flows; /* Flows table [flows_cnt] */ member
  55 u32 flows_cnt; /* number of flows */
  66 struct list_head new_flows; /* list of new flows */
  67 struct list_head old_flows; /* list of old flows */
  148 * This might sound expensive, but with 1024 flows, we scan in fq_codel_drop()
  164 flow = &q->flows[idx]; in fq_codel_drop()
  [all …]
|
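The sch_fq_codel.c comments above describe the two round-robin lists: an active flow starts on the new-flows list and, once its quantum is spent, is moved to the tail of the old-flows list, which is what gives new flows priority over old ones. The fragment below is a simplified user-space C sketch of that scheduling rule (plain singly linked FIFOs stand in for the kernel's list_head; names are illustrative), not the qdisc code itself.

    #include <stddef.h>

    struct flow {
        struct flow *next;      /* next flow on the new or old list */
        int deficit;            /* bytes this flow may still send this round */
    };

    struct fifo {
        struct flow *head, *tail;
    };

    static void fifo_push(struct fifo *q, struct flow *f)
    {
        f->next = NULL;
        if (q->tail)
            q->tail->next = f;
        else
            q->head = f;
        q->tail = f;
    }

    static struct flow *fifo_pop(struct fifo *q)
    {
        struct flow *f = q->head;

        if (f) {
            q->head = f->next;
            if (!q->head)
                q->tail = NULL;
        }
        return f;
    }

    /*
     * Pick the next flow to serve: new flows first, then old flows.
     * A flow that has exhausted its deficit is refilled and rotated to
     * the tail of the old list, which is what gives new flows priority.
     */
    static struct flow *pick_flow(struct fifo *new_flows, struct fifo *old_flows,
                                  int quantum)
    {
        for (;;) {
            struct fifo *list = new_flows->head ? new_flows : old_flows;
            struct flow *f = fifo_pop(list);

            if (!f)
                return NULL;             /* nothing queued at all */

            if (f->deficit <= 0) {
                f->deficit += quantum;   /* give it a fresh quantum ... */
                fifo_push(old_flows, f); /* ... and demote it to the old list */
                continue;
            }
            return f;                    /* this flow may transmit now */
        }
    }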
D | sch_fq_pie.c |
  22 * - Packets are classified on flows.
  23 * - This is a Stochastic model (as we use a hash, several flows might
  26 * - Flows are linked onto two (Round Robin) lists,
  27 * so that new flows have priority on old ones.
  58 struct fq_pie_flow *flows; member
  151 sel_flow = &q->flows[idx]; in fq_pie_qdisc_enqueue()
  306 if (q->flows) { in fq_pie_change()
  308 "Number of flows cannot be changed"); in fq_pie_change()
  314 "Number of flows must range in [1..65536]"); in fq_pie_change()
  395 /* Limit this expensive loop to 2048 flows per round. */ in fq_pie_timer()
  [all …]
|
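The fq_pie_change() hits show two constraints on the flows parameter: it must lie in [1..65536] and it cannot be changed once the flows table exists. A hedged C sketch of that validation, with a plain error string instead of the kernel's extack reporting (the surrounding struct and function name are illustrative):

    struct fq_pie_sched {
        void *flows;            /* NULL until the flows table is allocated */
        unsigned int flows_cnt; /* configured number of flows */
    };

    /* Returns 0 on success, -1 with an error message otherwise. */
    int fq_pie_set_flows(struct fq_pie_sched *q, unsigned int flows_cnt,
                         const char **err)
    {
        if (q->flows) {
            *err = "Number of flows cannot be changed";
            return -1;
        }
        if (flows_cnt < 1 || flows_cnt > 65536) {
            *err = "Number of flows must range in [1..65536]";
            return -1;
        }
        q->flows_cnt = flows_cnt;
        return 0;
    }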
D | sch_sfq.c |
  53 When hash collisions occur, several flows are considered as one.
  69 - max 65408 flows,
  76 #define SFQ_MAX_FLOWS (0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
  127 struct sfq_slot *slots; /* Flows table ('maxflows' entries) */
  135 * dep[0] : list of unused flows
  136 * dep[1] : list of flows with 1 packet
  137 * dep[X] : list of flows with X packets
  140 unsigned int maxflows; /* number of flows in flows array */
  455 * but we could endup servicing new flows only, and freeze old ones. in sfq_enqueue()
  458 /* We could use a bigger initial quantum for new flows */ in sfq_enqueue()
  [all …]
|
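sch_sfq.c keeps its slots bucketed by queue depth: dep[0] lists unused flows, dep[1] flows holding one packet, dep[X] flows holding X packets, so the fattest flow (the one to drop from when the qdisc is full) is simply the head of the highest non-empty bucket. A small self-contained C sketch of that bookkeeping, assuming a fixed illustrative maximum depth and invented helper names:

    #include <stddef.h>

    #define MAX_DEPTH 127              /* illustrative, not SFQ_MAX_DEPTH */

    struct slot {
        struct slot *prev, *next;      /* neighbours in the dep[qlen] list */
        int qlen;                      /* packets queued on this flow */
    };

    /* dep[x]: flows currently holding exactly x packets (NULL-terminated list). */
    static struct slot *dep[MAX_DEPTH + 1];

    static void dep_unlink(struct slot *s)
    {
        if (s->prev)
            s->prev->next = s->next;
        else
            dep[s->qlen] = s->next;
        if (s->next)
            s->next->prev = s->prev;
        s->prev = s->next = NULL;
    }

    static void dep_link(struct slot *s)
    {
        s->prev = NULL;
        s->next = dep[s->qlen];
        if (s->next)
            s->next->prev = s;
        dep[s->qlen] = s;
    }

    /* Move a slot to the right bucket after queueing one more packet.
     * The caller links new slots with dep_link() and keeps qlen <= MAX_DEPTH. */
    static void slot_inc(struct slot *s)
    {
        dep_unlink(s);
        s->qlen++;
        dep_link(s);
    }

    /* The fattest flow is the head of the highest non-empty bucket. */
    static struct slot *find_fattest(void)
    {
        for (int d = MAX_DEPTH; d > 0; d--)
            if (dep[d])
                return dep[d];
        return NULL;
    }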
D | sch_fq.c |
  12 * Flows are dynamically allocated and stored in a hash table of RB trees
  13 * They are also part of one Round Robin 'queues' (new or old flows)
  27 * dequeue() : serves flows in Round Robin
  137 struct rb_root delayed; /* for rate limited flows */
  142 u32 flows; member
  143 u32 inactive_flows; /* Flows with no packet to send. */
  247 /* limit number of collected flows per round */
  294 q->flows -= fcnt; in fq_gc()
  323 * scheduled in the future (ie no flows are eligible) in fq_fastpath_check()
  326 if (q->flows != q->inactive_flows + q->throttled_flows) in fq_fastpath_check()
  [all …]
|
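The fq_fastpath_check() hit relies on simple accounting: sch_fq tracks the total number of flows, the flows with nothing to send, and the throttled (rate-limited) flows scheduled in the future. If those counts do not add up, some flow already has work queued and the fast path must be skipped. A plain C sketch of that check (field names follow the snippet; the surrounding struct is illustrative):

    #include <stdbool.h>
    #include <stdint.h>

    struct fq_sched {
        uint32_t flows;            /* all flows currently known to the qdisc */
        uint32_t inactive_flows;   /* flows with no packet to send */
        uint32_t throttled_flows;  /* rate-limited flows scheduled in the future */
    };

    /*
     * The fast path may only be used when every known flow is either idle or
     * throttled; otherwise a backlogged flow exists and normal round-robin
     * dequeueing has to run.
     */
    static bool fq_fastpath_possible(const struct fq_sched *q)
    {
        return q->flows == q->inactive_flows + q->throttled_flows;
    }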
/linux-6.12.1/drivers/crypto/allwinner/sun8i-ss/ |
D | sun8i-ss-core.c |
  76 ss->flows[flow].stat_req++; in sun8i_ss_run_task()
  132 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_run_task()
  133 ss->flows[flow].status = 0; in sun8i_ss_run_task()
  138 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_run_task()
  140 if (ss->flows[flow].status == 0) { in sun8i_ss_run_task()
  159 ss->flows[flow].status = 1; in ss_irq_handler()
  160 complete(&ss->flows[flow].complete); in ss_irq_handler()
  478 ss->flows[i].stat_req); in sun8i_ss_debugfs_show()
  536 crypto_engine_exit(ss->flows[i].engine); in sun8i_ss_free_flows()
  548 ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow), in allocate_flows()
  [all …]
|
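The sun8i-ss driver gives every hardware flow its own completion and status word: the submitter re-initialises the completion and clears the status, the IRQ handler sets the status and completes it, and a still-zero status after the timed wait is treated as a timeout. The kernel completion API cannot be used in a stand-alone example, so below is a hedged user-space analogue of the same handshake built on POSIX threads; all names are illustrative.

    #include <pthread.h>
    #include <time.h>

    /* Per-flow state, loosely modelled on struct sun8i_ss_flow. */
    struct flow_ctx {
        pthread_mutex_t lock;
        pthread_cond_t  done;
        int             status;   /* 0 = pending, 1 = completed by the "IRQ" */
    };

    /* Submitter side: arm the flow, kick the hardware, then wait with a timeout. */
    static int run_task(struct flow_ctx *f, void (*start_hw)(void), int timeout_s)
    {
        struct timespec ts;
        int ret = 0;

        pthread_mutex_lock(&f->lock);
        f->status = 0;                       /* reinit_completion() analogue */
        pthread_mutex_unlock(&f->lock);

        start_hw();                          /* submit the descriptor */

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += timeout_s;

        pthread_mutex_lock(&f->lock);
        while (f->status == 0 && ret == 0)
            ret = pthread_cond_timedwait(&f->done, &f->lock, &ts);
        ret = (f->status == 0) ? -1 : 0;     /* still 0: hardware never answered */
        pthread_mutex_unlock(&f->lock);
        return ret;
    }

    /* "IRQ handler" side: mark the flow done and wake the waiter. */
    static void flow_complete(struct flow_ctx *f)
    {
        pthread_mutex_lock(&f->lock);
        f->status = 1;
        pthread_cond_signal(&f->done);
        pthread_mutex_unlock(&f->lock);
    }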
D | sun8i-ss-prng.c |
  134 reinit_completion(&ss->flows[flow].complete); in sun8i_ss_prng_generate()
  135 ss->flows[flow].status = 0; in sun8i_ss_prng_generate()
  141 wait_for_completion_interruptible_timeout(&ss->flows[flow].complete, in sun8i_ss_prng_generate()
  143 if (ss->flows[flow].status == 0) { in sun8i_ss_prng_generate()
|
/linux-6.12.1/samples/bpf/ |
D | do_hbm_test.sh |
  18 echo " [-f=<#flows>|--flows=<#flows>] [-h] [-i=<id>|--id=<id >]"
  34 echo " -f or --flows number of concurrent flows (default=1)"
  38 echo " -l do not limit flows using loopback"
  78 flows=1
  150 -f=*|--flows=*)
  151 flows="${i#*=}"
  278 while [ $flow_cnt -le $flows ] ; do
  320 while [ $flow_cnt -le $flows ] ; do
  346 iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
  366 while [ $flow_cnt -le $flows ] ; do
  [all …]
|
/linux-6.12.1/drivers/net/wireless/intel/iwlwifi/ |
D | iwl-drv.h |
  30 * DOC: Driver system flows - drv component
  32 * This component implements the system flows such as bus enumeration, bus
  33 * removal. Bus dependent parts of system flows (such as iwl_pci_probe) are in
  38 * the wifi flows: it will allow to have several fw API implementation. These
  63 * specific system flows implementations. For example, the bus specific probe
  74 * Stop the driver. This should be called by bus specific system flows
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/en/ |
D | tc_priv.h |
  83 /* flows sharing the same reformat object - currently mpls decap */
  87 /* flows sharing same route entry */
  98 struct list_head hairpin; /* flows sharing the same hairpin */
  99 struct list_head peer[MLX5_MAX_PORTS]; /* flows with peer flow */
  100 struct list_head unready; /* flows not ready to be offloaded (e.g
  103 struct list_head peer_flows; /* flows on peer */
|
/linux-6.12.1/Documentation/networking/ |
D | scaling.rst |
  31 of logical flows. Packets for each flow are steered to a separate receive
  50 applications that monitor TCP/IP flows (IDS, firewalls, ...etc) and need
  245 to the same CPU is CPU load imbalance if flows vary in packet rate.
  251 Flow Limit is an optional RPS feature that prioritizes small flows
  252 during CPU contention by dropping packets from large flows slightly
  253 ahead of those from small flows. It is active only when an RPS or RFS
  259 new packet is dropped. Packets from other flows are still only
  263 even large flows maintain connectivity.
  281 identification of large flows and fewer false positives. The default
  318 flows to the CPUs where those flows are being processed. The flow hash
  [all …]
|
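The scaling.rst lines describe the Flow Limit heuristic: once a CPU's input queue grows past a threshold, packets are hashed into per-flow counters over a recent window, and a new packet is dropped when its flow already accounts for more than half of that window, so small flows keep getting through while large flows are trimmed first. The C fragment below is only a rough sketch of that idea under assumed sizes (a 256-packet window, 1024 buckets, invented names), not the kernel implementation:

    #include <stdbool.h>
    #include <stdint.h>

    #define WINDOW  256                /* recent packets tracked per CPU (assumed) */
    #define BUCKETS 1024               /* per-flow counter buckets (assumed) */

    struct flow_limit {
        uint32_t history[WINDOW];      /* flow hash of the last WINDOW packets */
        uint16_t count[BUCKETS];       /* packets per flow bucket in the window */
        uint32_t pos;                  /* next slot in the circular history */
    };

    /*
     * Decide whether a newly arrived packet (identified by its flow hash) should
     * be dropped.  Only called once the backlog is already above its threshold.
     */
    static bool flow_limit_drop(struct flow_limit *fl, uint32_t hash)
    {
        uint32_t bucket = hash % BUCKETS;
        uint32_t old = fl->history[fl->pos];

        /* Age out the packet that falls out of the window. */
        if (fl->count[old % BUCKETS] > 0)
            fl->count[old % BUCKETS]--;

        fl->history[fl->pos] = hash;
        fl->pos = (fl->pos + 1) % WINDOW;
        fl->count[bucket]++;

        /* Large flow: it already owns more than half of the recent window. */
        return fl->count[bucket] > WINDOW / 2;
    }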
D | nf_flowtable.rst |
  33 specifies what flows are placed into the flowtable. Hence, packets follow the
  34 classic IP forwarding path unless the user explicitly instruct flows to use this
  111 You can identify offloaded flows through the [OFFLOAD] tag when listing your
  130 instead the real device is sufficient for the flowtable to track your flows.
  198 There is a workqueue that adds the flows to the hardware. Note that a few
  202 You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when
|
D | openvswitch.rst |
  16 table" that userspace populates with "flows" that map from keys based
  104 A wildcarded flow can represent a group of exact match flows. Each '1' bit
  108 by reduce the number of new flows need to be processed by the user space program.
  120 two possible approaches: reactively install flows as they miss the kernel
  130 The behavior when using overlapping wildcarded flows is undefined. It is the
  133 performs best-effort detection of overlapping wildcarded flows and may reject
  146 future operations. The kernel is not required to index flows by the original
|
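openvswitch.rst explains that a wildcarded flow stands for a whole group of exact-match flows: every '1' bit in the mask demands an exact match on the corresponding key bit, every '0' bit is a wildcard. The check itself reduces to a masked comparison, sketched below in C for a single 32-bit key field (real Open vSwitch keys span many fields; the names here are illustrative).

    #include <stdbool.h>
    #include <stdint.h>

    /* One field of a wildcarded flow: the key value plus its mask. */
    struct wc_field {
        uint32_t key;    /* value the flow was installed with */
        uint32_t mask;   /* 1 bits: must match exactly; 0 bits: don't care */
    };

    /* A packet field matches when it agrees with the flow key on all masked bits. */
    static bool wc_field_matches(const struct wc_field *f, uint32_t pkt_value)
    {
        return ((pkt_value ^ f->key) & f->mask) == 0;
    }

With a mask of 0xffffff00, for example, one installed entry covers every packet whose field differs only in the low byte, which is how wildcarding reduces the number of new flows userspace has to process.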
/linux-6.12.1/drivers/net/ethernet/marvell/mvpp2/ |
D | mvpp2_cls.c |
  26 /* TCP over IPv4 flows, Not fragmented, no vlan tag */
  45 /* TCP over IPv4 flows, Not fragmented, with vlan tag */
  61 /* TCP over IPv4 flows, fragmented, no vlan tag */
  80 /* TCP over IPv4 flows, fragmented, with vlan tag */
  99 /* UDP over IPv4 flows, Not fragmented, no vlan tag */
  118 /* UDP over IPv4 flows, Not fragmented, with vlan tag */
  134 /* UDP over IPv4 flows, fragmented, no vlan tag */
  153 /* UDP over IPv4 flows, fragmented, with vlan tag */
  172 /* TCP over IPv6 flows, not fragmented, no vlan tag */
  185 /* TCP over IPv6 flows, not fragmented, with vlan tag */
  [all …]
|
/linux-6.12.1/drivers/dma/ti/ |
D | k3-udma-glue.c |
  84 struct k3_udma_glue_rx_flow *flows; member
  690 * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); in k3_udma_glue_cfg_rx_chn()
  715 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; in k3_udma_glue_release_rx_flow()
  735 struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; in k3_udma_glue_cfg_rx_flow()
  1032 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_rx_chn_priv()
  1033 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_rx_chn_priv()
  1034 if (!rx_chn->flows) { in k3_udma_glue_request_rx_chn_priv()
  1044 rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; in k3_udma_glue_request_rx_chn_priv()
  1084 rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, in k3_udma_glue_request_remote_rx_chn_common()
  1085 sizeof(*rx_chn->flows), GFP_KERNEL); in k3_udma_glue_request_remote_rx_chn_common()
  [all …]
|
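The k3-udma-glue hits show the RX channel allocating one descriptor per flow and numbering its hardware flows consecutively from a base id (udma_rflow_id = flow_id_base + i). A simplified, hedged C sketch of that setup step, with plain calloc in place of devm_kcalloc and invented type names:

    #include <stdint.h>
    #include <stdlib.h>

    struct rx_flow {
        uint32_t udma_rflow_id;        /* hardware flow id used by this flow */
    };

    struct rx_chn {
        struct rx_flow *flows;         /* one entry per requested flow */
        uint32_t flow_num;             /* number of flows on this channel */
        uint32_t flow_id_base;         /* first hardware flow id of the range */
    };

    static int rx_chn_alloc_flows(struct rx_chn *chn)
    {
        chn->flows = calloc(chn->flow_num, sizeof(*chn->flows));
        if (!chn->flows)
            return -1;

        /* Hardware flow ids are simply consecutive from the base id. */
        for (uint32_t i = 0; i < chn->flow_num; i++)
            chn->flows[i].udma_rflow_id = chn->flow_id_base + i;

        return 0;
    }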
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/flower/ |
D | conntrack.h |
  50 * @tc_merge_tb: The table of merged tc flows
  56 * @nft_merge_tb: The table of merged tc+nft flows
  121 * @children: List of tc_merge flows this flow forms part of
  148 * struct nfp_fl_ct_tc_merge - Merge of two flows from tc
  170 * struct nfp_fl_nft_tc_merge - Merge of tc_merge flows with nft flow
  283 * nfp_fl_ct_stats() - Handle flower stats callbacks for ct flows
|
/linux-6.12.1/include/uapi/linux/ |
D | pkt_sched.h |
  62 classes (or flows) have major equal to parent qdisc major, and
  218 unsigned flows; /* Maximal number of flows */ member
  675 __u32 penalty_rate; /* inelastic flows are rate limited to 'rate' pps */
  774 __u32 new_flows_len; /* count of flows in new list */
  775 __u32 old_flows_len; /* count of flows in old list */
  856 __u32 flows; member
  950 __u32 new_flow_count; /* count of new flows created by packets */
  951 __u32 new_flows_len; /* count of flows in new list */
  952 __u32 old_flows_len; /* count of flows in old list */
|
/linux-6.12.1/drivers/media/platform/amphion/ |
D | vpu_dbg.c |
  207 for (i = 0; i < ARRAY_SIZE(inst->flows); i++) { in vpu_dbg_instance()
  208 u32 idx = (inst->flow_idx + i) % (ARRAY_SIZE(inst->flows)); in vpu_dbg_instance()
  210 if (!inst->flows[idx]) in vpu_dbg_instance()
  213 inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C", in vpu_dbg_instance()
  214 vpu_id_name(inst->flows[idx])); in vpu_dbg_instance()
  506 inst->flows[inst->flow_idx] = flow; in vpu_inst_record_flow()
  507 inst->flow_idx = (inst->flow_idx + 1) % (ARRAY_SIZE(inst->flows)); in vpu_inst_record_flow()
|
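vpu_dbg.c keeps a small circular log of recent flow events per instance: recording overwrites the slot at flow_idx and advances it modulo the array size, and the debugfs dump walks the array starting at flow_idx so entries come out oldest first, skipping empty slots. A self-contained C sketch of that ring buffer (size and names are illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define FLOW_LOG_SIZE 16
    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    struct inst_log {
        uint32_t flows[FLOW_LOG_SIZE];  /* 0 means "empty slot" */
        uint32_t flow_idx;              /* next slot to overwrite */
    };

    /* Record one event, overwriting the oldest entry once the ring is full. */
    static void record_flow(struct inst_log *log, uint32_t flow)
    {
        log->flows[log->flow_idx] = flow;
        log->flow_idx = (log->flow_idx + 1) % ARRAY_SIZE(log->flows);
    }

    /* Dump the ring oldest-first, skipping slots that were never written. */
    static void dump_flows(const struct inst_log *log)
    {
        for (uint32_t i = 0; i < ARRAY_SIZE(log->flows); i++) {
            uint32_t idx = (log->flow_idx + i) % ARRAY_SIZE(log->flows);

            if (!log->flows[idx])
                continue;
            printf("flow event %u\n", log->flows[idx]);
        }
    }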
/linux-6.12.1/tools/perf/pmu-events/arch/arm64/fujitsu/a64fx/ |
D | pipeline.json |
  141 … the number of times where 2 elements of the gather instructions became 2 flows because 2 elements…
  144 … the number of times where 2 elements of the gather instructions became 2 flows because 2 elements…
  159 "PublicDescription": "This event counts the number of flows of the scatter instructions.",
  162 "BriefDescription": "This event counts the number of flows of the scatter instructions."
|
/linux-6.12.1/drivers/infiniband/hw/hfi1/ |
D | tid_rdma.c |
  47 /* Reserved generation value to set to unused flows for kernel contexts */
  757 u32 generation = rcd->flows[flow_idx].generation; in kern_setup_hw_flow()
  775 rcd->flows[flow_idx].generation = in kern_clear_hw_flow()
  776 kern_flow_generation_next(rcd->flows[flow_idx].generation); in kern_clear_hw_flow()
  804 rcd->flows[fs->index].generation = fs->generation; in hfi1_kern_setup_hw_flow()
  853 rcd->flows[i].generation = mask_generation(get_random_u32()); in hfi1_kern_init_ctxt_generations()
  1436 * (6) Reserves and programs HW flows.
  1443 * invocation of function call. With flow = &req->flows[req->flow_idx],
  1465 struct tid_rdma_flow *flow = &req->flows[req->setup_head]; in hfi1_kern_exp_rcv_setup()
  1556 struct tid_rdma_flow *flow = &req->flows[req->clear_tail]; in hfi1_kern_exp_rcv_clear()
  [all …]
|
/linux-6.12.1/Documentation/netlink/specs/ |
D | tc.yaml |
  549 name: flows
  551 doc: Maximal number of flows
  801 name: new-flows-len
  803 doc: Count of flows in new list
  805 name: old-flows-len
  807 doc: Count of flows in old list
  846 doc: Count of new flows created by packets
  848 name: new-flows-len
  850 doc: Count of flows in new list
  852 name: old-flows-len
  [all …]
|