
Searched refs:queues (Results 1 – 25 of 436) sorted by relevance


/linux-6.12.1/tools/testing/selftests/drivers/net/
queues.py
17 queues = nl.queue_get({'ifindex': cfg.ifindex}, dump=True)
18 if queues:
19 return len([q for q in queues if q['type'] == 'rx'])
24 queues = nl_get_queues(cfg, nl)
25 if not queues:
29 ksft_eq(queues, expected)
33 queues = nl_get_queues(cfg, nl)
34 if not queues:
50 queues = nl_get_queues(cfg, nl)
51 ksft_eq(queues, expected)
[all …]
/linux-6.12.1/drivers/gpu/drm/imagination/
pvr_context.c
184 pvr_queue_destroy(ctx->queues.fragment); in pvr_context_destroy_queues()
185 pvr_queue_destroy(ctx->queues.geometry); in pvr_context_destroy_queues()
188 pvr_queue_destroy(ctx->queues.compute); in pvr_context_destroy_queues()
191 pvr_queue_destroy(ctx->queues.transfer); in pvr_context_destroy_queues()
214 ctx->queues.geometry = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_GEOMETRY, in pvr_context_create_queues()
216 if (IS_ERR(ctx->queues.geometry)) { in pvr_context_create_queues()
217 err = PTR_ERR(ctx->queues.geometry); in pvr_context_create_queues()
218 ctx->queues.geometry = NULL; in pvr_context_create_queues()
222 ctx->queues.fragment = pvr_queue_create(ctx, DRM_PVR_JOB_TYPE_FRAGMENT, in pvr_context_create_queues()
224 if (IS_ERR(ctx->queues.fragment)) { in pvr_context_create_queues()
[all …]
pvr_queue.c
523 job->ctx->queues.fragment); in pvr_queue_prepare_job()
564 lockdep_assert_held(&pvr_dev->queues.lock); in pvr_queue_update_active_state_locked()
574 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
576 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
596 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
598 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_update_active_state()
730 struct pvr_queue *geom_queue = job->ctx->queues.geometry; in pvr_queue_run_job()
731 struct pvr_queue *frag_queue = job->ctx->queues.fragment; in pvr_queue_run_job()
819 mutex_lock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
821 mutex_unlock(&pvr_dev->queues.lock); in pvr_queue_timedout_job()
[all …]
pvr_context.h
87 } queues; member
98 return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.geometry : NULL; in pvr_context_get_queue_for_job()
100 return ctx->type == DRM_PVR_CTX_TYPE_RENDER ? ctx->queues.fragment : NULL; in pvr_context_get_queue_for_job()
102 return ctx->type == DRM_PVR_CTX_TYPE_COMPUTE ? ctx->queues.compute : NULL; in pvr_context_get_queue_for_job()
104 return ctx->type == DRM_PVR_CTX_TYPE_TRANSFER_FRAG ? ctx->queues.transfer : NULL; in pvr_context_get_queue_for_job()
/linux-6.12.1/tools/testing/selftests/net/
ncdevmem.c
161 struct netdev_queue_id *queues, in bind_rx_queue() argument
177 __netdev_bind_rx_req_set_queues(req, queues, n_queue_index); in bind_rx_queue()
243 struct netdev_queue_id *queues; in do_server() local
279 queues = malloc(sizeof(*queues) * num_queues); in do_server()
282 queues[i]._present.type = 1; in do_server()
283 queues[i]._present.id = 1; in do_server()
284 queues[i].type = NETDEV_QUEUE_TYPE_RX; in do_server()
285 queues[i].id = start_queue + i; in do_server()
288 if (bind_rx_queue(ifindex, buf, queues, num_queues, &ys)) in do_server()
467 struct netdev_queue_id *queues; in run_devmem_tests() local
[all …]
/linux-6.12.1/net/sched/
sch_multiq.c
25 struct Qdisc **queues; member
54 return q->queues[0]; in multiq_classify()
56 return q->queues[band]; in multiq_classify()
105 qdisc = q->queues[q->curband]; in multiq_dequeue()
137 qdisc = q->queues[curband]; in multiq_peek()
154 qdisc_reset(q->queues[band]); in multiq_reset()
166 qdisc_put(q->queues[band]); in multiq_destroy()
168 kfree(q->queues); in multiq_destroy()
196 if (q->queues[i] != &noop_qdisc) { in multiq_tune()
197 struct Qdisc *child = q->queues[i]; in multiq_tune()
[all …]
sch_prio.c
26 struct Qdisc *queues[TCQ_PRIO_BANDS]; member
57 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
63 return q->queues[q->prio2band[0]]; in prio_classify()
65 return q->queues[band]; in prio_classify()
103 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
117 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue()
137 qdisc_reset(q->queues[prio]); in prio_reset()
173 qdisc_put(q->queues[prio]); in prio_destroy()
180 struct Qdisc *queues[TCQ_PRIO_BANDS]; in prio_tune() local
198 queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in prio_tune()
[all …]
/linux-6.12.1/Documentation/networking/
multi-pf-netdev.rst
63 Each combined channel works against one specific PF, creating all its datapath queues against it. We
126 that is capable of pointing to the receive queues of a different PF.
142 - /sys/class/net/eth2/queues/tx-0/xps_cpus:000001
143 - /sys/class/net/eth2/queues/tx-1/xps_cpus:001000
144 - /sys/class/net/eth2/queues/tx-2/xps_cpus:000002
145 - /sys/class/net/eth2/queues/tx-3/xps_cpus:002000
146 - /sys/class/net/eth2/queues/tx-4/xps_cpus:000004
147 - /sys/class/net/eth2/queues/tx-5/xps_cpus:004000
148 - /sys/class/net/eth2/queues/tx-6/xps_cpus:000008
149 - /sys/class/net/eth2/queues/tx-7/xps_cpus:008000
[all …]
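
The xps_cpus mapping listed above is configured per TX queue through sysfs. A minimal user-space sketch of setting one such mask (interface name, queue number, and mask value are illustrative; writing requires root):

    #include <stdio.h>

    int main(void)
    {
            /* Pin eth2's tx-0 to CPU 0: bit 0 of the hex CPU mask. */
            const char *path = "/sys/class/net/eth2/queues/tx-0/xps_cpus";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return 1;
            }
            fprintf(f, "000001\n");
            return fclose(f) == 0 ? 0 : 1;
    }
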
tc-queue-filters.rst
7 TC can be used for directing traffic to either a set of queues or
12 1) TC filter directing traffic to a set of queues is achieved
14 the priority maps to a traffic class (set of queues) when
23 queues and/or a single queue are supported as below:
25 1) TC flower filter directs incoming traffic to a set of queues using
/linux-6.12.1/drivers/net/wireless/silabs/wfx/
queue.c
233 struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)]; in wfx_tx_queues_get_skb() local
243 WARN_ON(num_queues >= ARRAY_SIZE(queues)); in wfx_tx_queues_get_skb()
244 queues[num_queues] = &wvif->tx_queue[i]; in wfx_tx_queues_get_skb()
246 if (wfx_tx_queue_get_weight(queues[j]) < in wfx_tx_queues_get_skb()
247 wfx_tx_queue_get_weight(queues[j - 1])) in wfx_tx_queues_get_skb()
248 swap(queues[j - 1], queues[j]); in wfx_tx_queues_get_skb()
256 skb = skb_dequeue(&queues[i]->offchan); in wfx_tx_queues_get_skb()
264 atomic_inc(&queues[i]->pending_frames); in wfx_tx_queues_get_skb()
265 trace_queues_stats(wdev, queues[i]); in wfx_tx_queues_get_skb()
278 skb = skb_dequeue(&queues[i]->cab); in wfx_tx_queues_get_skb()
[all …]
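
The loop at lines 246-248 above keeps the queue array ordered by weight with one insertion pass after each append. A standalone sketch of that step (struct and names are simplified stand-ins for the driver's types):

    #include <stddef.h>

    struct toy_queue { int weight; };

    static void swap_ptrs(struct toy_queue **a, struct toy_queue **b)
    {
            struct toy_queue *t = *a; *a = *b; *b = t;
    }

    /* After appending queues[n - 1], bubble it left while it is lighter
     * than its neighbour, so the array stays sorted by ascending weight. */
    static void insert_by_weight(struct toy_queue **queues, size_t n)
    {
            if (n < 2)
                    return;
            for (size_t j = n - 1; j > 0; j--)
                    if (queues[j]->weight < queues[j - 1]->weight)
                            swap_ptrs(&queues[j - 1], &queues[j]);
    }
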
/linux-6.12.1/drivers/scsi/aacraid/
comminit.c
373 struct aac_entry * queues; in aac_comm_init() local
375 struct aac_queue_block * comm = dev->queues; in aac_comm_init()
394 queues = (struct aac_entry *)(((ulong)headers) + hdrsize); in aac_comm_init()
397 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init()
399 queues += HOST_NORM_CMD_ENTRIES; in aac_comm_init()
403 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init()
406 queues += HOST_HIGH_CMD_ENTRIES; in aac_comm_init()
410 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init()
413 queues += ADAP_NORM_CMD_ENTRIES; in aac_comm_init()
417 comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init()
[all …]
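
aac_comm_init() above carves the entry arrays of several queues out of one contiguous allocation, advancing a cursor by each queue's entry count. The pattern in isolation (entry counts are invented for illustration):

    #include <stdlib.h>

    struct entry { unsigned long data; };

    #define NORM_CMD_ENTRIES  8
    #define HIGH_CMD_ENTRIES  4

    struct queue_block {
            struct entry *norm_base;
            struct entry *high_base;
    };

    static int carve_queues(struct queue_block *qb)
    {
            struct entry *cursor = calloc(NORM_CMD_ENTRIES + HIGH_CMD_ENTRIES,
                                          sizeof(*cursor));
            if (!cursor)
                    return -1;

            qb->norm_base = cursor;   /* first queue's entries */
            cursor += NORM_CMD_ENTRIES;

            qb->high_base = cursor;   /* next queue starts where the last ended */
            return 0;
    }
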
/linux-6.12.1/Documentation/ABI/testing/
sysfs-class-net-queues
1 What: /sys/class/net/<iface>/queues/rx-<queue>/rps_cpus
11 What: /sys/class/net/<iface>/queues/rx-<queue>/rps_flow_cnt
19 What: /sys/class/net/<iface>/queues/tx-<queue>/tx_timeout
27 What: /sys/class/net/<iface>/queues/tx-<queue>/tx_maxrate
35 What: /sys/class/net/<iface>/queues/tx-<queue>/xps_cpus
45 What: /sys/class/net/<iface>/queues/tx-<queue>/xps_rxqs
56 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
65 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
73 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit
82 What: /sys/class/net/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
[all …]
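
All of the attributes listed above live under /sys/class/net/<iface>/queues/. A small sketch that walks one interface's TX queues and prints one attribute (interface name and attribute choice are illustrative):

    #include <stdio.h>

    int main(void)
    {
            char path[128], buf[64];

            for (int q = 0; q < 8; q++) {
                    snprintf(path, sizeof(path),
                             "/sys/class/net/eth0/queues/tx-%d/byte_queue_limits/inflight",
                             q);
                    FILE *f = fopen(path, "r");
                    if (!f)
                            break; /* no more queues */
                    if (fgets(buf, sizeof(buf), f))
                            printf("tx-%d inflight: %s", q, buf);
                    fclose(f);
            }
            return 0;
    }
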
/linux-6.12.1/drivers/media/platform/nxp/imx8-isi/
imx8-isi-m2m.c
58 } queues; member
85 return &ctx->queues.out; in mxc_isi_m2m_ctx_qdata()
87 return &ctx->queues.cap; in mxc_isi_m2m_ctx_qdata()
112 src_vbuf->sequence = ctx->queues.out.sequence++; in mxc_isi_m2m_frame_write_done()
113 dst_vbuf->sequence = ctx->queues.cap.sequence++; in mxc_isi_m2m_frame_write_done()
135 .width = ctx->queues.out.format.width, in mxc_isi_m2m_device_run()
136 .height = ctx->queues.out.format.height, in mxc_isi_m2m_device_run()
139 .width = ctx->queues.cap.format.width, in mxc_isi_m2m_device_run()
140 .height = ctx->queues.cap.format.height, in mxc_isi_m2m_device_run()
143 .width = ctx->queues.cap.format.width, in mxc_isi_m2m_device_run()
[all …]
/linux-6.12.1/drivers/nvme/target/
loop.c
30 struct nvme_loop_queue *queues; member
71 return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
176 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
198 iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
222 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
242 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
266 if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) in nvme_loop_destroy_admin_queue()
275 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
292 kfree(ctrl->queues); in nvme_loop_free_ctrl()
303 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); in nvme_loop_destroy_io_queues()
[all …]
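
nvme_loop_queue_idx() above recovers a queue's index by subtracting the array base from the element pointer instead of storing an index in every queue. The idiom reduced to essentials:

    #include <stdio.h>

    struct toy_queue { unsigned long flags; };
    struct toy_ctrl  { struct toy_queue queues[4]; };

    /* Valid because q points into c->queues: the difference between
     * pointers to elements of the same array is their index distance. */
    static long queue_idx(struct toy_ctrl *c, struct toy_queue *q)
    {
            return q - c->queues;
    }

    int main(void)
    {
            struct toy_ctrl c;

            printf("%ld\n", queue_idx(&c, &c.queues[2])); /* prints 2 */
            return 0;
    }
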
/linux-6.12.1/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt
9 management of the packet queues. Packets are queued/de-queued by writing or
32 -- managed-queues : the actual queues managed by each queue manager
33 instance, specified as <"base queue #" "# of queues">.
51 - qpend : pool of qpend(interruptible) queues
52 - general-purpose : pool of general queues, primarily used
53 as free descriptor queues or the
54 transmit DMA queues.
55 - accumulator : pool of queues on PDSP accumulator channel
57 -- qrange : number of queues to use per queue range, specified as
58 <"base queue #" "# of queues">.
[all …]
/linux-6.12.1/sound/virtio/
virtio_card.h
64 struct virtio_snd_queue queues[VIRTIO_SND_VQ_MAX]; member
86 return &snd->queues[VIRTIO_SND_VQ_CONTROL]; in virtsnd_control_queue()
92 return &snd->queues[VIRTIO_SND_VQ_EVENT]; in virtsnd_event_queue()
98 return &snd->queues[VIRTIO_SND_VQ_TX]; in virtsnd_tx_queue()
104 return &snd->queues[VIRTIO_SND_VQ_RX]; in virtsnd_rx_queue()
/linux-6.12.1/Documentation/networking/device_drivers/ethernet/ti/
cpsw.rst
26 - TX queues must be rated starting from txq0 that has highest priority
28 - CBS shapers should be used with rated queues
30 potential incoming rate, thus, rate of all incoming tx queues has
150 // Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
156 // Check if num of queues is set correctly:
172 // TX queues must be rated starting from 0, so set bws for tx0 and tx1
175 // Leave last 2 tx queues not rated.
176 $ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
177 $ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
181 // Check maximum rate of tx (cpdma) queues:
[all …]
/linux-6.12.1/tools/perf/util/
intel-bts.c
46 struct auxtrace_queues queues; member
211 for (i = 0; i < bts->queues.nr_queues; i++) { in intel_bts_setup_queues()
212 ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i], in intel_bts_setup_queues()
222 if (bts->queues.new_data) { in intel_bts_update_queues()
223 bts->queues.new_data = false; in intel_bts_update_queues()
465 queue = &btsq->bts->queues.queue_array[btsq->queue_nr]; in intel_bts_process_queue()
539 struct auxtrace_queues *queues = &bts->queues; in intel_bts_process_tid_exit() local
542 for (i = 0; i < queues->nr_queues; i++) { in intel_bts_process_tid_exit()
543 struct auxtrace_queue *queue = &bts->queues.queue_array[i]; in intel_bts_process_tid_exit()
568 queue = &bts->queues.queue_array[queue_nr]; in intel_bts_process_queues()
[all …]
auxtrace.c
221 int auxtrace_queues__init_nr(struct auxtrace_queues *queues, int nr_queues) in auxtrace_queues__init_nr() argument
223 queues->nr_queues = nr_queues; in auxtrace_queues__init_nr()
224 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues); in auxtrace_queues__init_nr()
225 if (!queues->queue_array) in auxtrace_queues__init_nr()
230 int auxtrace_queues__init(struct auxtrace_queues *queues) in auxtrace_queues__init() argument
232 return auxtrace_queues__init_nr(queues, AUXTRACE_INIT_NR_QUEUES); in auxtrace_queues__init()
235 static int auxtrace_queues__grow(struct auxtrace_queues *queues, in auxtrace_queues__grow() argument
238 unsigned int nr_queues = queues->nr_queues; in auxtrace_queues__grow()
248 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues) in auxtrace_queues__grow()
255 for (i = 0; i < queues->nr_queues; i++) { in auxtrace_queues__grow()
[all …]
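
auxtrace_queues__grow() above doubles the queue array until it fits the requested count and treats wrap-around of the unsigned count as an allocation failure. A standalone sketch of the same guard (struct simplified):

    #include <stdlib.h>
    #include <string.h>

    struct toy_queues { unsigned int nr; void **array; };

    static int queues_grow(struct toy_queues *q, unsigned int new_nr)
    {
            unsigned int nr = q->nr ? q->nr : 1;
            void **arr;

            while (nr && nr < new_nr)
                    nr <<= 1;

            /* As in the original: a count that wrapped to a smaller
             * value is an error, not a valid size. */
            if (nr < q->nr || nr < new_nr)
                    return -1;

            arr = calloc(nr, sizeof(*arr));
            if (!arr)
                    return -1;
            if (q->nr)
                    memcpy(arr, q->array, q->nr * sizeof(*arr));
            free(q->array);
            q->array = arr;
            q->nr = nr;
            return 0;
    }
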
s390-cpumsf.c
170 struct auxtrace_queues queues; member
203 if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu) in s390_cpumcf_dumpctr()
206 q = &sf->queues.queue_array[sample->cpu]; in s390_cpumcf_dumpctr()
701 queue = &sfq->sf->queues.queue_array[sfq->queue_nr]; in s390_cpumsf_run_decoder()
825 for (i = 0; i < sf->queues.nr_queues; i++) { in s390_cpumsf_setup_queues()
826 ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i], in s390_cpumsf_setup_queues()
836 if (!sf->queues.new_data) in s390_cpumsf_update_queues()
839 sf->queues.new_data = false; in s390_cpumsf_update_queues()
860 queue = &sf->queues.queue_array[queue_nr]; in s390_cpumsf_process_queues()
980 err = auxtrace_queues__add_event(&sf->queues, session, event, in s390_cpumsf_process_auxtrace_event()
[all …]
/linux-6.12.1/drivers/target/
target_core_tmr.c
118 flush_work(&dev->queues[i].sq.work); in core_tmr_abort_task()
120 spin_lock_irqsave(&dev->queues[i].lock, flags); in core_tmr_abort_task()
121 list_for_each_entry_safe(se_cmd, next, &dev->queues[i].state_list, in core_tmr_abort_task()
148 spin_unlock_irqrestore(&dev->queues[i].lock, flags); in core_tmr_abort_task()
163 spin_unlock_irqrestore(&dev->queues[i].lock, flags); in core_tmr_abort_task()
301 flush_work(&dev->queues[i].sq.work); in core_tmr_drain_state_list()
303 spin_lock_irqsave(&dev->queues[i].lock, flags); in core_tmr_drain_state_list()
304 list_for_each_entry_safe(cmd, next, &dev->queues[i].state_list, in core_tmr_drain_state_list()
333 spin_unlock_irqrestore(&dev->queues[i].lock, flags); in core_tmr_drain_state_list()
/linux-6.12.1/Documentation/arch/arm/keystone/
knav-qmss.rst
15 management of the packet queues. Packets are queued/de-queued by writing or
24 knav qmss driver provides a set of APIs to drivers to open/close qmss queues,
25 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
31 Accumulator QMSS queues using PDSP firmware
34 queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the
37 1 or 32 queues per channel. More description on the firmware is available in
56 Use of accumulated queues requires the firmware image to be present in the
57 file system. The driver doesn't add accumulator queues to the supported queue range if
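
The open/push/pop API the text refers to is declared in include/linux/soc/ti/knav_qmss.h. A hedged kernel-side sketch of a round trip (queue name and number are illustrative, error handling trimmed; not user-space runnable):

    #include <linux/err.h>
    #include <linux/soc/ti/knav_qmss.h>

    static int example_queue_roundtrip(dma_addr_t desc_dma, unsigned int desc_size)
    {
            unsigned int size;
            void *q;

            q = knav_queue_open("example", 1000, 0); /* open queue #1000 */
            if (IS_ERR(q))
                    return PTR_ERR(q);

            knav_queue_push(q, desc_dma, desc_size, 0); /* queue a descriptor */
            desc_dma = knav_queue_pop(q, &size);        /* and take it back */

            knav_queue_close(q);
            return 0;
    }
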
/linux-6.12.1/include/linux/
ptr_ring.h
627 void ***queues; in ptr_ring_resize_multiple_noprof() local
630 queues = kmalloc_array_noprof(nrings, sizeof(*queues), gfp); in ptr_ring_resize_multiple_noprof()
631 if (!queues) in ptr_ring_resize_multiple_noprof()
635 queues[i] = __ptr_ring_init_queue_alloc_noprof(size, gfp); in ptr_ring_resize_multiple_noprof()
636 if (!queues[i]) in ptr_ring_resize_multiple_noprof()
643 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple_noprof()
650 kvfree(queues[i]); in ptr_ring_resize_multiple_noprof()
652 kfree(queues); in ptr_ring_resize_multiple_noprof()
658 kvfree(queues[i]); in ptr_ring_resize_multiple_noprof()
660 kfree(queues); in ptr_ring_resize_multiple_noprof()
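
ptr_ring_resize_multiple_noprof() above allocates one new queue per ring and, on any failure, frees every queue allocated so far before failing the whole operation. The all-or-nothing shape in isolation:

    #include <stdlib.h>

    static void **alloc_all_queues(unsigned int nrings, unsigned int size)
    {
            void **queues = malloc(nrings * sizeof(*queues));
            unsigned int i;

            if (!queues)
                    return NULL;

            for (i = 0; i < nrings; i++) {
                    queues[i] = calloc(size, sizeof(void *));
                    if (!queues[i])
                            goto nomem;
            }
            return queues;

    nomem:
            while (i--)                 /* unwind only what was allocated */
                    free(queues[i]);
            free(queues);
            return NULL;
    }
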
/linux-6.12.1/Documentation/block/
blk-mq.rst
37 spawns multiple queues with individual entry points local to the CPU, removing
49 blk-mq has two group of queues: software staging queues and hardware dispatch
50 queues. When the request arrives at the block layer, it will try the shortest
56 Then, after the requests are processed by software queues, they will be placed
62 Software staging queues
65 The block IO subsystem adds requests in the software staging queues
71 the number of queues is defined by a per-CPU or per-node basis.
93 requests from different queues, otherwise there would be cache thrashing and a in blk-mq.rst
99 queue (a.k.a. run the hardware queue), the software queues mapped to that
102 Hardware dispatch queues
[all …]
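
On the driver side, the number of hardware dispatch queues described above is declared through a blk_mq_tag_set. A hedged kernel-side sketch (values are placeholders and the ops table is elided, so this is a shape, not a working driver):

    #include <linux/blk-mq.h>

    static const struct blk_mq_ops toy_mq_ops = {
            /* a real set must provide .queue_rq and friends */
    };

    static struct blk_mq_tag_set toy_tag_set;

    static int toy_init_tag_set(void)
    {
            toy_tag_set.ops = &toy_mq_ops;
            toy_tag_set.nr_hw_queues = 4;        /* hardware dispatch queues */
            toy_tag_set.queue_depth = 128;       /* requests per hw queue */
            toy_tag_set.numa_node = NUMA_NO_NODE;

            return blk_mq_alloc_tag_set(&toy_tag_set);
    }
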
/linux-6.12.1/drivers/vdpa/alibaba/
eni_vdpa.c
45 int queues; member
118 for (i = 0; i < eni_vdpa->queues; i++) { in eni_vdpa_free_irq()
164 int queues = eni_vdpa->queues; in eni_vdpa_request_irq() local
165 int vectors = queues + 1; in eni_vdpa_request_irq()
177 for (i = 0; i < queues; i++) { in eni_vdpa_request_irq()
195 irq = pci_irq_vector(pdev, queues); in eni_vdpa_request_irq()
202 vp_legacy_config_vector(ldev, queues); in eni_vdpa_request_irq()
500 eni_vdpa->queues = eni_vdpa_get_num_queues(eni_vdpa); in eni_vdpa_probe()
502 eni_vdpa->vring = devm_kcalloc(&pdev->dev, eni_vdpa->queues, in eni_vdpa_probe()
511 for (i = 0; i < eni_vdpa->queues; i++) { in eni_vdpa_probe()
[all …]
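
eni_vdpa_request_irq() above asks for one interrupt vector per virtqueue plus one for configuration changes (the "vectors = queues + 1" at line 165). The mapping reduced to plain arithmetic:

    #include <stdio.h>

    int main(void)
    {
            int queues = 4;             /* virtqueues found at probe time */
            int vectors = queues + 1;   /* one extra for config changes */

            for (int i = 0; i < queues; i++)
                    printf("vq %d   -> vector %d\n", i, i);
            printf("config -> vector %d (of %d)\n", queues, vectors);
            return 0;
    }
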
