/linux-6.12.1/include/net/ |
D | fq_impl.h |
      36  idx = flow - fq->flows;  in __fq_adjust_removal()
     152  flow = &fq->flows[idx];  in fq_flow_classify()
     160  tin->flows++;  in fq_flow_classify()
     173  struct fq_flow *cur = &fq->flows[i];  in fq_find_fattest_flow()
     361  fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);  in fq_init()
     362  if (!fq->flows)  in fq_init()
     367  kvfree(fq->flows);  in fq_init()
     368  fq->flows = NULL;  in fq_init()
     373  fq_flow_init(&fq->flows[i]);  in fq_init()
     384  fq_flow_reset(fq, &fq->flows[i], free_func);  in fq_reset()
     [all …]
|
D | fq.h |
      57  u32 flows;  member
      69  struct fq_flow *flows;  member
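Taken together, the fq.h/fq_impl.h hits trace the whole lifecycle of the flows table: it is sized by flows_cnt, allocated with kvcalloc() (which may fall back to vmalloc for large tables), initialized slot by slot, and released with kvfree(). A minimal sketch of that pattern follows; the demo_* types are trimmed stand-ins, not the real mac80211 fq definitions:

```c
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/types.h>

struct demo_fq_flow {
	u32 backlog;	/* trimmed: the real struct fq_flow carries queue state */
};

struct demo_fq {
	struct demo_fq_flow *flows;
	u32 flows_cnt;
};

static void demo_fq_flow_init(struct demo_fq_flow *flow)
{
	flow->backlog = 0;	/* stands in for the real fq_flow_init() */
}

/* Condensed from the fq_init() hits above. */
static int demo_fq_table_init(struct demo_fq *fq)
{
	u32 i;

	fq->flows = kvcalloc(fq->flows_cnt, sizeof(fq->flows[0]), GFP_KERNEL);
	if (!fq->flows)
		return -ENOMEM;

	for (i = 0; i < fq->flows_cnt; i++)
		demo_fq_flow_init(&fq->flows[i]);

	return 0;
}

static void demo_fq_table_free(struct demo_fq *fq)
{
	kvfree(fq->flows);	/* kvfree() copes with kmalloc- or vmalloc-backed memory */
	fq->flows = NULL;
}
```

The kvcalloc()/kvfree() pairing matters here: a flows table can be large enough that a contiguous kmalloc may fail, and only kvfree() can release whichever backing store was used.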
|
/linux-6.12.1/drivers/crypto/allwinner/sun8i-ss/ |
D | sun8i-ss-core.c |
      76  ss->flows[flow].stat_req++;  in sun8i_ss_run_task()
     132  reinit_completion(&ss->flows[flow].complete);  in sun8i_ss_run_task()
     133  ss->flows[flow].status = 0;  in sun8i_ss_run_task()
     138  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,  in sun8i_ss_run_task()
     140  if (ss->flows[flow].status == 0) {  in sun8i_ss_run_task()
     159  ss->flows[flow].status = 1;  in ss_irq_handler()
     160  complete(&ss->flows[flow].complete);  in ss_irq_handler()
     478  ss->flows[i].stat_req);  in sun8i_ss_debugfs_show()
     536  crypto_engine_exit(ss->flows[i].engine);  in sun8i_ss_free_flows()
     548  ss->flows = devm_kcalloc(ss->dev, MAXFLOW, sizeof(struct sun8i_ss_flow),  in allocate_flows()
     [all …]
|
D | sun8i-ss-prng.c |
     134  reinit_completion(&ss->flows[flow].complete);  in sun8i_ss_prng_generate()
     135  ss->flows[flow].status = 0;  in sun8i_ss_prng_generate()
     141  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,  in sun8i_ss_prng_generate()
     143  if (ss->flows[flow].status == 0) {  in sun8i_ss_prng_generate()
|
D | sun8i-ss-hash.c |
     290  ss->flows[flow].stat_req++;  in sun8i_ss_run_hash_task()
     323  reinit_completion(&ss->flows[flow].complete);  in sun8i_ss_run_hash_task()
     324  ss->flows[flow].status = 0;  in sun8i_ss_run_hash_task()
     329  wait_for_completion_interruptible_timeout(&ss->flows[flow].complete,  in sun8i_ss_run_hash_task()
     331  if (ss->flows[flow].status == 0) {  in sun8i_ss_run_hash_task()
     407  engine = ss->flows[e].engine;  in sun8i_ss_hash_digest()
     505  result = ss->flows[rctx->flow].result;  in sun8i_ss_hash_run()
     506  pad = ss->flows[rctx->flow].pad;  in sun8i_ss_hash_run()
|
D | sun8i-ss-cipher.c |
     130  struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];  in sun8i_ss_setup_ivs()
     191  struct sun8i_ss_flow *sf = &ss->flows[rctx->flow];  in sun8i_ss_cipher()
     360  engine = op->ss->flows[e].engine;  in sun8i_ss_skdecrypt()
     381  engine = op->ss->flows[e].engine;  in sun8i_ss_skencrypt()
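All four sun8i-ss files repeat one submission pattern per flow: arm a completion and clear a status flag before kicking the hardware, wait with a timeout, and treat status still being 0 as a timeout; the IRQ handler sets status to 1 and completes. A stripped-down sketch of that handshake, assuming a reduced demo_flow struct and an illustrative 2000 ms timeout (not the driver's actual value):

```c
#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>

/* Trimmed per-flow state, mirroring the sun8i_ss_flow fields the hits touch. */
struct demo_flow {
	struct completion complete;
	int status;
};

/* IRQ side, as in ss_irq_handler(): mark the flow done, then wake the waiter. */
static irqreturn_t demo_irq_handler(int irq, void *data)
{
	struct demo_flow *f = data;

	f->status = 1;
	complete(&f->complete);
	return IRQ_HANDLED;
}

/*
 * Submission side, as in sun8i_ss_run_task() and sun8i_ss_prng_generate():
 * arm the completion *before* starting the hardware, then treat
 * status == 0 after the wait as a timeout.
 */
static int demo_run_task(struct demo_flow *f)
{
	reinit_completion(&f->complete);
	f->status = 0;

	/* ... program the task descriptor and start the engine here ... */

	wait_for_completion_interruptible_timeout(&f->complete,
						  msecs_to_jiffies(2000));
	if (f->status == 0)
		return -ETIMEDOUT;

	return 0;
}
```

Checking the driver's own status flag rather than the wait's return value means a spurious wakeup or signal cannot be mistaken for hardware completion.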
|
/linux-6.12.1/samples/bpf/ |
D | do_hbm_test.sh |
      78  flows=1
     150  -f=*|--flows=*)
     151  flows="${i#*=}"
     278  while [ $flow_cnt -le $flows ] ; do
     320  while [ $flow_cnt -le $flows ] ; do
     346  iperf3 -c $host -p $port -i 0 -P $flows -f m -t $dur > iperf.$id
     366  while [ $flow_cnt -le $flows ] ; do
     386  while [ $flow_cnt -le $flows ] ; do
|
/linux-6.12.1/net/sched/ |
D | sch_fq_codel.c |
      53  struct fq_codel_flow *flows; /* Flows table [flows_cnt] */  member
     164  flow = &q->flows[idx];  in fq_codel_drop()
     204  flow = &q->flows[idx];  in fq_codel_enqueue()
     265  q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);  in dequeue_func()
     343  struct fq_codel_flow *flow = q->flows + i;  in fq_codel_reset()
     380  if (q->flows)  in fq_codel_change()
     465  kvfree(q->flows);  in fq_codel_destroy()
     497  if (!q->flows) {  in fq_codel_init()
     498  q->flows = kvcalloc(q->flows_cnt,  in fq_codel_init()
     501  if (!q->flows) {  in fq_codel_init()
     [all …]
|
D | sch_fq_pie.c |
      58  struct fq_pie_flow *flows;  member
     151  sel_flow = &q->flows[idx];  in fq_pie_qdisc_enqueue()
     306  if (q->flows) {  in fq_pie_change()
     399  &q->flows[q->flows_cursor].vars,  in fq_pie_timer()
     400  q->flows[q->flows_cursor].backlog);  in fq_pie_timer()
     447  q->flows = kvcalloc(q->flows_cnt, sizeof(struct fq_pie_flow),  in fq_pie_init()
     449  if (!q->flows) {  in fq_pie_init()
     454  struct fq_pie_flow *flow = q->flows + idx;  in fq_pie_init()
     539  struct fq_pie_flow *flow = q->flows + idx;  in fq_pie_reset()
     557  kvfree(q->flows);  in fq_pie_destroy()
|
D | sch_cake.c |
     151  struct cake_flow flows[CAKE_QUEUES];  member
     727  q->flows[reduced_hash].set)) {  in cake_hash()
     745  if (!q->flows[outer_hash + k].set) {  in cake_hash()
     760  if (!q->flows[outer_hash + k].set) {  in cake_hash()
     775  if (q->flows[outer_hash + k].set == CAKE_SET_BULK) {  in cake_hash()
     777  q->hosts[q->flows[reduced_hash].srchost].srchost_bulk_flow_count--;  in cake_hash()
     779  q->hosts[q->flows[reduced_hash].dsthost].dsthost_bulk_flow_count--;  in cake_hash()
     804  if (q->flows[reduced_hash].set == CAKE_SET_BULK)  in cake_hash()
     806  q->flows[reduced_hash].srchost = srchost_idx;  in cake_hash()
     827  if (q->flows[reduced_hash].set == CAKE_SET_BULK)  in cake_hash()
     [all …]
|
D | sch_fq.c |
     142  u32 flows;  member
     294  q->flows -= fcnt;  in fq_gc()
     326  if (q->flows != q->inactive_flows + q->throttled_flows)  in fq_fastpath_check()
     450  q->flows++;  in fq_classify()
     814  q->flows = 0;  in fq_reset()
     859  q->flows -= fcnt;  in fq_rehash()
    1267  st.flows = q->flows;  in fq_dump_stats()
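Across these sch_* qdiscs the flows table is indexed the same way, and the sch_fq_codel.c hits show both directions of the arithmetic: a classified hash selects &q->flows[idx], while flow - q->flows recovers the index from a flow pointer to update the parallel backlogs[] array. A minimal sketch of that pairing, with demo_* types standing in for the qdisc structs and a plain modulo where fq_codel actually uses reciprocal_scale():

```c
#include <linux/types.h>

struct demo_flow {
	int deficit;	/* trimmed: the real fq_codel_flow carries much more */
};

struct demo_sched_data {
	struct demo_flow *flows;	/* flows table [flows_cnt] */
	u32 *backlogs;			/* parallel per-flow byte counts */
	u32 flows_cnt;
};

/* Forward direction: a packet hash picks a slot, as in fq_codel_enqueue(). */
static struct demo_flow *demo_classify(struct demo_sched_data *q, u32 hash)
{
	u32 idx = hash % q->flows_cnt;

	return &q->flows[idx];
}

/* Reverse direction: pointer arithmetic recovers the slot, as in dequeue_func(). */
static void demo_account_dequeue(struct demo_sched_data *q,
				 struct demo_flow *flow, u32 pkt_len)
{
	q->backlogs[flow - q->flows] -= pkt_len;
}
```

Keeping backlogs[] parallel to flows[] rather than inside the flow struct lets the hot dequeue path touch a dense array of counters.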
|
/linux-6.12.1/drivers/dma/ti/ |
D | k3-udma-glue.c |
      84  struct k3_udma_glue_rx_flow *flows;  member
     715  struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num];  in k3_udma_glue_release_rx_flow()
     735  struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx];  in k3_udma_glue_cfg_rx_flow()
    1032  rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,  in k3_udma_glue_request_rx_chn_priv()
    1033  sizeof(*rx_chn->flows), GFP_KERNEL);  in k3_udma_glue_request_rx_chn_priv()
    1034  if (!rx_chn->flows) {  in k3_udma_glue_request_rx_chn_priv()
    1044  rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;  in k3_udma_glue_request_rx_chn_priv()
    1084  rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,  in k3_udma_glue_request_remote_rx_chn_common()
    1085  sizeof(*rx_chn->flows), GFP_KERNEL);  in k3_udma_glue_request_remote_rx_chn_common()
    1086  if (!rx_chn->flows)  in k3_udma_glue_request_remote_rx_chn_common()
     [all …]
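Both RX-channel setup paths here allocate the per-channel flow array with devm_kcalloc() and then give each entry a hardware flow id offset from the channel's base. A condensed sketch of that numbering scheme, with simplified demo_* types in place of the real glue-layer structs:

```c
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_rx_flow {
	u32 udma_rflow_id;	/* hardware flow id used to program this flow */
};

struct demo_rx_chn {
	struct demo_rx_flow *flows;
	u32 flow_num;
	u32 flow_id_base;
};

/* As in k3_udma_glue_request_rx_chn_priv(): contiguous ids from the base. */
static int demo_alloc_rx_flows(struct device *dev, struct demo_rx_chn *rx_chn)
{
	u32 i;

	rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
				     sizeof(*rx_chn->flows), GFP_KERNEL);
	if (!rx_chn->flows)
		return -ENOMEM;

	for (i = 0; i < rx_chn->flow_num; i++)
		rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;

	return 0;
}
```

Using devm_kcalloc() ties the array's lifetime to the device, so no explicit free is needed on teardown.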
|
/linux-6.12.1/drivers/media/platform/amphion/ |
D | vpu_dbg.c |
     207  for (i = 0; i < ARRAY_SIZE(inst->flows); i++) {  in vpu_dbg_instance()
     208  u32 idx = (inst->flow_idx + i) % (ARRAY_SIZE(inst->flows));  in vpu_dbg_instance()
     210  if (!inst->flows[idx])  in vpu_dbg_instance()
     213  inst->flows[idx] >= VPU_MSG_ID_NOOP ? "M" : "C",  in vpu_dbg_instance()
     214  vpu_id_name(inst->flows[idx]));  in vpu_dbg_instance()
     506  inst->flows[inst->flow_idx] = flow;  in vpu_inst_record_flow()
     507  inst->flow_idx = (inst->flow_idx + 1) % (ARRAY_SIZE(inst->flows));  in vpu_inst_record_flow()
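Here inst->flows is a fixed-size ring log of recent events: the recorder overwrites the oldest slot and advances flow_idx modulo the array size, and the debugfs dump replays the ring oldest-first, skipping never-written zero entries. A self-contained sketch of that ring, assuming an illustrative depth of 16 and plain u32 event ids:

```c
#include <linux/kernel.h>	/* ARRAY_SIZE(), pr_info() */
#include <linux/types.h>

/* Fixed-depth ring of recent flow events; 16 is an illustrative depth. */
struct demo_inst {
	u32 flows[16];
	u32 flow_idx;
};

/* Record: overwrite the oldest slot, as vpu_inst_record_flow() does. */
static void demo_record_flow(struct demo_inst *inst, u32 flow)
{
	inst->flows[inst->flow_idx] = flow;
	inst->flow_idx = (inst->flow_idx + 1) % ARRAY_SIZE(inst->flows);
}

/*
 * Replay oldest-first: start at flow_idx (the next slot to be overwritten)
 * and skip never-written zero entries, as vpu_dbg_instance() does.
 */
static void demo_dump_flows(struct demo_inst *inst)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(inst->flows); i++) {
		u32 idx = (inst->flow_idx + i) % ARRAY_SIZE(inst->flows);

		if (!inst->flows[idx])
			continue;
		pr_info("flow: %u\n", inst->flows[idx]);
	}
}
```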
|
/linux-6.12.1/drivers/infiniband/hw/hfi1/ |
D | tid_rdma.c |
     757  u32 generation = rcd->flows[flow_idx].generation;  in kern_setup_hw_flow()
     775  rcd->flows[flow_idx].generation =  in kern_clear_hw_flow()
     776  kern_flow_generation_next(rcd->flows[flow_idx].generation);  in kern_clear_hw_flow()
     804  rcd->flows[fs->index].generation = fs->generation;  in hfi1_kern_setup_hw_flow()
     853  rcd->flows[i].generation = mask_generation(get_random_u32());  in hfi1_kern_init_ctxt_generations()
    1465  struct tid_rdma_flow *flow = &req->flows[req->setup_head];  in hfi1_kern_exp_rcv_setup()
    1556  struct tid_rdma_flow *flow = &req->flows[req->clear_tail];  in hfi1_kern_exp_rcv_clear()
    1612  kfree(req->flows);  in hfi1_kern_exp_rcv_free_flows()
    1613  req->flows = NULL;  in hfi1_kern_exp_rcv_free_flows()
    1634  struct tid_rdma_flow *flows;  in hfi1_kern_exp_rcv_alloc_flows() local
     [all …]
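These hits revolve around per-flow generation counters: each hardware flow starts from a random masked generation and is advanced whenever the flow is cleared, so packets from a stale incarnation of the flow can be recognized. A toy sketch of that scheme; the 8-bit width and the simple increment-and-mask advance are illustrative only, not hfi1's actual KDETH generation layout (kern_flow_generation_next() also skips a reserved value):

```c
#include <linux/random.h>
#include <linux/types.h>

#define DEMO_GEN_MASK 0xffU	/* illustrative generation field width */

struct demo_hw_flow {
	u32 generation;
};

static u32 demo_mask_generation(u32 x)
{
	return x & DEMO_GEN_MASK;
}

/* Seed every flow randomly, as hfi1_kern_init_ctxt_generations() does. */
static void demo_init_generations(struct demo_hw_flow *flows, int nflows)
{
	int i;

	for (i = 0; i < nflows; i++)
		flows[i].generation = demo_mask_generation(get_random_u32());
}

/* Advance on clear, in the spirit of kern_clear_hw_flow(). */
static void demo_clear_hw_flow(struct demo_hw_flow *flow)
{
	flow->generation = demo_mask_generation(flow->generation + 1);
}
```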
|
/linux-6.12.1/Documentation/networking/ |
D | nf_flowtable.rst |
      33  specifies what flows are placed into the flowtable. Hence, packets follow the
      34  classic IP forwarding path unless the user explicitly instructs flows to use this
     111  You can identify offloaded flows through the [OFFLOAD] tag when listing your
     130  instead the real device is sufficient for the flowtable to track your flows.
     198  There is a workqueue that adds the flows to the hardware. Note that a few
     202  You can identify hardware offloaded flows through the [HW_OFFLOAD] tag when
|
D | scaling.rst |
      31  of logical flows. Packets for each flow are steered to a separate receive
      50  applications that monitor TCP/IP flows (IDS, firewalls, etc.) and need
     245  to the same CPU is CPU load imbalance if flows vary in packet rate.
     251  Flow Limit is an optional RPS feature that prioritizes small flows
     252  during CPU contention by dropping packets from large flows slightly
     253  ahead of those from small flows. It is active only when an RPS or RFS
     259  new packet is dropped. Packets from other flows are still only
     263  even large flows maintain connectivity.
     281  identification of large flows and fewer false positives. The default
     318  flows to the CPUs where those flows are being processed. The flow hash
     [all …]
|
D | openvswitch.rst |
      16  table" that userspace populates with "flows" that map from keys based
     104  A wildcarded flow can represent a group of exact match flows. Each '1' bit
     108  by reducing the number of new flows that need to be processed by the user-space program.
     120  two possible approaches: reactively install flows as they miss the kernel
     130  The behavior when using overlapping wildcarded flows is undefined. It is the
     133  performs best-effort detection of overlapping wildcarded flows and may reject
     146  future operations. The kernel is not required to index flows by the original
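The wildcarding these excerpts describe boils down to masked comparison: a flow matches a packet when the two agree on every bit the mask sets to '1'. An illustrative sketch with a single 32-bit key field; real Open vSwitch keys are large structs masked field by field, so the demo_* names here are assumptions:

```c
#include <linux/types.h>

struct demo_wc_flow {
	u32 key;	/* expected values on the '1' bits of mask */
	u32 mask;	/* '1' = must match exactly, '0' = wildcarded */
};

/* A packet matches when it agrees with the flow on every non-wildcarded bit. */
static bool demo_flow_matches(const struct demo_wc_flow *flow, u32 pkt_key)
{
	return (pkt_key & flow->mask) == (flow->key & flow->mask);
}
```

This is why one wildcarded flow can stand in for a whole group of exact-match flows: every packet key that agrees on the masked bits selects the same entry.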
|
/linux-6.12.1/net/mctp/test/ |
D | route-test.c |
     924  struct mctp_flow *flows[2];  in mctp_test_fragment_flow() local
     948  flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);  in mctp_test_fragment_flow()
     949  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);  in mctp_test_fragment_flow()
     950  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);  in mctp_test_fragment_flow()
     951  KUNIT_ASSERT_PTR_EQ(test, flows[0]->key->sk, sock->sk);  in mctp_test_fragment_flow()
     953  flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);  in mctp_test_fragment_flow()
     954  KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);  in mctp_test_fragment_flow()
     955  KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);  in mctp_test_fragment_flow()
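This test checks that when an MCTP message is fragmented, each transmitted skb carries an SKB_EXT_MCTP extension and all fragments point at the same flow key. A compressed sketch of the assertion shape, assuming the two fragment skbs have already been captured into a tx_skbs array as the test does:

```c
#include <kunit/test.h>
#include <linux/skbuff.h>
#include <net/mctp.h>

/* As in mctp_test_fragment_flow(): both fragments must share one flow key. */
static void demo_assert_shared_flow(struct kunit *test,
				    struct sk_buff *tx_skbs[2])
{
	struct mctp_flow *flows[2];

	flows[0] = skb_ext_find(tx_skbs[0], SKB_EXT_MCTP);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[0]->key);

	flows[1] = skb_ext_find(tx_skbs[1], SKB_EXT_MCTP);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, flows[1]);

	/* Same key object, not merely equal contents. */
	KUNIT_ASSERT_PTR_EQ(test, flows[1]->key, flows[0]->key);
}
```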
|
/linux-6.12.1/Documentation/admin-guide/pm/ |
D | system-wide.rst |
      11  suspend-flows
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/ |
D | en_rep.h |
     183  struct list_head flows;  member
     208  struct list_head flows;  member
|
D | eswitch_offloads.c |
    1189  struct mlx5_flow_handle **flows;  in esw_add_fdb_peer_miss_rules() local
    1208  flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);  in esw_add_fdb_peer_miss_rules()
    1209  if (!flows) {  in esw_add_fdb_peer_miss_rules()
    1229  flows[vport->index] = flow;  in esw_add_fdb_peer_miss_rules()
    1241  flows[vport->index] = flow;  in esw_add_fdb_peer_miss_rules()
    1255  flows[vport->index] = flow;  in esw_add_fdb_peer_miss_rules()
    1270  flows[vport->index] = flow;  in esw_add_fdb_peer_miss_rules()
    1281  esw->fdb_table.offloads.peer_miss_rules[pfindex] = flows;  in esw_add_fdb_peer_miss_rules()
    1288  if (!flows[vport->index])  in esw_add_fdb_peer_miss_rules()
    1290  mlx5_del_flow_rules(flows[vport->index]);  in esw_add_fdb_peer_miss_rules()
     [all …]
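esw_add_fdb_peer_miss_rules() follows a common kernel shape: allocate an array of rule handles, record each successfully installed rule by vport index, and on failure walk the array deleting only the slots that were filled. A stripped-down sketch of that unwind pattern, with demo_* stand-ins for the mlx5 add/delete primitives:

```c
#include <linux/err.h>
#include <linux/slab.h>

struct demo_rule;

/* Stand-ins for the real add/delete primitives used by the hits above. */
static struct demo_rule *demo_add_rule(int vport_index);
static void demo_del_rule(struct demo_rule *rule);

static int demo_add_peer_miss_rules(int nvports)
{
	struct demo_rule **flows;
	int i, err;

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	for (i = 0; i < nvports; i++) {
		flows[i] = demo_add_rule(i);
		if (IS_ERR(flows[i])) {
			err = PTR_ERR(flows[i]);
			flows[i] = NULL;	/* keep the unwind loop simple */
			goto err_unwind;
		}
	}

	/* Success: the real code stashes 'flows' for later teardown. */
	return 0;

err_unwind:
	/* Delete only the rules that were actually installed. */
	for (i = 0; i < nvports; i++) {
		if (!flows[i])
			continue;
		demo_del_rule(flows[i]);
	}
	kvfree(flows);
	return err;
}
```

Indexing by vport means slots can legitimately stay NULL, which is why the cleanup loop tests each entry instead of stopping at the failure point.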
|
/linux-6.12.1/Documentation/userspace-api/media/mediactl/ |
D | media-controller-model.rst |
      26  by an entity flows from the entity's output to one or more entity
      31  pads, either on the same entity or on different entities. Data flows
|
/linux-6.12.1/net/core/ |
D | pktgen.c |
     416  struct flow_state *flows;  member
    2331  return !!(pkt_dev->flows[flow].flags & F_INIT);  in f_seen()
    2339  if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {  in f_pick()
    2341  pkt_dev->flows[flow].count = 0;  in f_pick()
    2342  pkt_dev->flows[flow].flags = 0;  in f_pick()
    2351  if (pkt_dev->flows[flow].count > pkt_dev->lflow) {  in f_pick()
    2352  pkt_dev->flows[flow].count = 0;  in f_pick()
    2353  pkt_dev->flows[flow].flags = 0;  in f_pick()
    2368  struct xfrm_state *x = pkt_dev->flows[flow].x;  in get_ipsec_sa()
    2387  pkt_dev->flows[flow].x = x;  in get_ipsec_sa()
     [all …]
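The f_pick()/f_seen() hits show pktgen's per-flow aging: each flow carries a packet count, and once it reaches lflow (packets per flow) the state is cleared so the flow is re-initialized on next use. A small sketch of that bookkeeping with a trimmed flow_state; the demo_* names and the single F_INIT bit are simplifications of pktgen's real flag set:

```c
#include <linux/types.h>

#define DEMO_F_INIT BIT(0)	/* flow state has been initialized */

struct demo_flow_state {
	u32 flags;
	u32 count;	/* packets emitted on this flow so far */
};

struct demo_pkt_dev {
	struct demo_flow_state *flows;
	u32 lflow;	/* packets per flow before it is recycled */
};

/* As in f_seen(): has this flow been used since its last reset? */
static bool demo_f_seen(const struct demo_pkt_dev *pkt_dev, u32 flow)
{
	return !!(pkt_dev->flows[flow].flags & DEMO_F_INIT);
}

/* As in f_pick(): once a flow has carried lflow packets, clear it for reuse. */
static void demo_age_flow(struct demo_pkt_dev *pkt_dev, u32 flow)
{
	if (pkt_dev->flows[flow].count >= pkt_dev->lflow) {
		pkt_dev->flows[flow].count = 0;
		pkt_dev->flows[flow].flags = 0;	/* drops DEMO_F_INIT */
	}
}
```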
|
/linux-6.12.1/drivers/net/ethernet/ti/ |
D | am65-cpsw-nuss.c |
     410  flow = &rx_chn->flows[id];  in am65_cpsw_destroy_xdp_rxqs()
     447  flow = &rx_chn->flows[id];  in am65_cpsw_create_xdp_rxqs()
     547  am65_cpsw_put_page(&rx_chn->flows[flow_id], page, false);  in am65_cpsw_nuss_rx_cleanup()
     693  flow = &rx_chn->flows[flow_idx];  in am65_cpsw_nuss_common_open()
     721  napi_enable(&rx_chn->flows[i].napi_rx);  in am65_cpsw_nuss_common_open()
     722  if (rx_chn->flows[i].irq_disabled) {  in am65_cpsw_nuss_common_open()
     723  rx_chn->flows[i].irq_disabled = false;  in am65_cpsw_nuss_common_open()
     724  enable_irq(rx_chn->flows[i].irq);  in am65_cpsw_nuss_common_open()
     750  flow = &rx_chn->flows[flow_idx];  in am65_cpsw_nuss_common_open()
     816  napi_disable(&rx_chn->flows[i].napi_rx);  in am65_cpsw_nuss_common_stop()
     [all …]
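The open/stop hits pair each RX flow with its own NAPI context and IRQ, and re-enable a flow's interrupt only if it was left disabled. A condensed sketch of the open-side loop, with a reduced demo_* flow struct; the real driver also unwinds these on failure:

```c
#include <linux/interrupt.h>
#include <linux/netdevice.h>

struct demo_rx_flow {
	struct napi_struct napi_rx;
	bool irq_disabled;
	int irq;
};

struct demo_rx_chn {
	struct demo_rx_flow *flows;
	int flow_num;
};

/* As in am65_cpsw_nuss_common_open(): per-flow NAPI first, then the IRQ. */
static void demo_rx_flows_enable(struct demo_rx_chn *rx_chn)
{
	int i;

	for (i = 0; i < rx_chn->flow_num; i++) {
		napi_enable(&rx_chn->flows[i].napi_rx);
		if (rx_chn->flows[i].irq_disabled) {
			rx_chn->flows[i].irq_disabled = false;
			enable_irq(rx_chn->flows[i].irq);
		}
	}
}
```

Enabling NAPI before the IRQ means the first interrupt after enable_irq() always finds a poll context ready to schedule.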
|
/linux-6.12.1/Documentation/admin-guide/blockdev/drbd/ |
D | figures.rst |
       5  Data flows that relate some functions, and write packets
|