Searched refs:rings (Results 1 – 25 of 104) sorted by relevance

/linux-6.12.1/drivers/net/ethernet/intel/ice/
ice_fwlog.c
9 bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings) in ice_fwlog_ring_full() argument
13 head = rings->head; in ice_fwlog_ring_full()
14 tail = rings->tail; in ice_fwlog_ring_full()
16 if (head < tail && (tail - head == (rings->size - 1))) in ice_fwlog_ring_full()
24 bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings) in ice_fwlog_ring_empty() argument
26 return rings->head == rings->tail; in ice_fwlog_ring_empty()
34 static int ice_fwlog_alloc_ring_buffs(struct ice_fwlog_ring *rings) in ice_fwlog_alloc_ring_buffs() argument
39 nr_bytes = rings->size * ICE_AQ_MAX_BUF_LEN; in ice_fwlog_alloc_ring_buffs()
44 for (i = 0; i < rings->size; i++) { in ice_fwlog_alloc_ring_buffs()
45 struct ice_fwlog_data *ring = &rings->rings[i]; in ice_fwlog_alloc_ring_buffs()
[all …]
ice_fwlog.h
56 struct ice_fwlog_data *rings; member
67 bool ice_fwlog_ring_full(struct ice_fwlog_ring *rings);
68 bool ice_fwlog_ring_empty(struct ice_fwlog_ring *rings);
/linux-6.12.1/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
flowring.c
142 if (flow->rings[i] == NULL) in brcmf_flowring_create()
160 flow->rings[i] = ring; in brcmf_flowring_create()
172 ring = flow->rings[flowid]; in brcmf_flowring_tid()
192 ring = flow->rings[flowid]; in brcmf_flowring_block()
201 if ((flow->rings[i]) && (i != flowid)) { in brcmf_flowring_block()
202 ring = flow->rings[i]; in brcmf_flowring_block()
212 flow->rings[flowid]->blocked = blocked; in brcmf_flowring_block()
236 ring = flow->rings[flowid]; in brcmf_flowring_delete()
247 flow->rings[flowid] = NULL; in brcmf_flowring_delete()
264 ring = flow->rings[flowid]; in brcmf_flowring_enqueue()
[all …]
/linux-6.12.1/net/9p/
trans_xen.c
57 struct xen_9pfs_dataring *rings; member
134 ring = &priv->rings[num]; in p9_xen_request()
282 struct xen_9pfs_dataring *ring = &priv->rings[i]; in xen_9pfs_front_free()
286 if (!priv->rings[i].intf) in xen_9pfs_front_free()
288 if (priv->rings[i].irq > 0) in xen_9pfs_front_free()
289 unbind_from_irqhandler(priv->rings[i].irq, priv->dev); in xen_9pfs_front_free()
290 if (priv->rings[i].data.in) { in xen_9pfs_front_free()
292 j < (1 << priv->rings[i].intf->ring_order); in xen_9pfs_front_free()
296 ref = priv->rings[i].intf->ref[j]; in xen_9pfs_front_free()
299 free_pages_exact(priv->rings[i].data.in, in xen_9pfs_front_free()
[all …]
/linux-6.12.1/tools/net/ynl/samples/
ethtool.c
16 struct ethtool_rings_get_list *rings; in main() local
42 rings = ethtool_rings_get_dump(ys, &rreq); in main()
43 if (!rings) in main()
47 ynl_dump_foreach(rings, dev) { in main()
55 ethtool_rings_get_list_free(rings); in main()
/linux-6.12.1/drivers/i3c/master/mipi-i3c-hci/
dma.c
152 struct hci_rings_data *rings = hci->io_data; in hci_dma_cleanup() local
156 if (!rings) in hci_dma_cleanup()
159 for (i = 0; i < rings->total; i++) { in hci_dma_cleanup()
160 rh = &rings->headers[i]; in hci_dma_cleanup()
189 kfree(rings); in hci_dma_cleanup()
195 struct hci_rings_data *rings; in hci_dma_init() local
211 rings = kzalloc(struct_size(rings, headers, nr_rings), GFP_KERNEL); in hci_dma_init()
212 if (!rings) in hci_dma_init()
214 hci->io_data = rings; in hci_dma_init()
215 rings->total = nr_rings; in hci_dma_init()
[all …]
/linux-6.12.1/tools/testing/selftests/net/
psock_fanout.c
235 static int sock_fanout_read(int fds[], char *rings[], const int expect[]) in sock_fanout_read() argument
239 ret[0] = sock_fanout_read_ring(fds[0], rings[0]); in sock_fanout_read()
240 ret[1] = sock_fanout_read_ring(fds[1], rings[1]); in sock_fanout_read()
412 char *rings[2]; in test_datapath() local
431 rings[0] = sock_fanout_open_ring(fds[0]); in test_datapath()
432 rings[1] = sock_fanout_open_ring(fds[1]); in test_datapath()
435 sock_fanout_read(fds, rings, expect0); in test_datapath()
440 ret = sock_fanout_read(fds, rings, expect1); in test_datapath()
445 ret |= sock_fanout_read(fds, rings, expect2); in test_datapath()
447 if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) || in test_datapath()
[all …]
toeplitz.c
104 static struct ring_state rings[RSS_MAX_CPUS]; variable
250 do {} while (recv_block(&rings[i])); in process_rings()
404 rings[i].cpu = i; in setup_rings()
405 rings[i].fd = create_ring(&rings[i].mmap); in setup_rings()
410 set_filter(rings[i].fd); in setup_rings()
418 if (munmap(rings[i].mmap, ring_block_nr * ring_block_sz)) in cleanup_rings()
420 if (close(rings[i].fd)) in cleanup_rings()
/linux-6.12.1/Documentation/mhi/
mhi.rst
58 Transfer rings: Used by the host to schedule work items for a channel. The
59 transfer rings are organized as a circular queue of Transfer Descriptors (TD).
64 Event rings: Used by the device to send completion and state transition messages
70 Command rings: Used by the host to send MHI commands to the device. The command
71 rings are organized as a circular queue of Command Descriptors (CD).
81 Two unidirectional channels with their associated transfer rings form a
87 Transfer rings
91 Transfer Descriptors (TD). TDs are managed through transfer rings, which are
101 Below is the basic usage of transfer rings:
110 buffer information, increments the WP to the next element and rings the
[all …]
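
The mhi.rst lines above describe how the host drives a transfer ring: it fills the next Transfer Descriptor (TD), advances the write pointer (WP) around the circular queue, and then rings the channel doorbell. A minimal sketch of that wrap-around WP advance, using an illustrative struct rather than the kernel's actual MHI types:

/* Illustrative only: a simplified transfer ring as described in mhi.rst. */
struct td_ring {
	unsigned int wp;         /* write pointer: next element the host fills */
	unsigned int rp;         /* read pointer: next element the device consumes */
	unsigned int num_elems;  /* number of Transfer Descriptors in the ring */
};

/* Queue one work item: fill the TD at index ring->wp with the buffer
 * address and length, advance WP with wrap-around, then notify the
 * device by ringing the channel doorbell (doorbell write not shown). */
static void td_ring_queue(struct td_ring *ring)
{
	ring->wp = (ring->wp + 1) % ring->num_elems;
}
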
/linux-6.12.1/tools/lib/bpf/
ringbuf.c
37 struct ring **rings; member
102 tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings)); in ring_buffer__add()
105 rb->rings = tmp; in ring_buffer__add()
115 rb->rings[rb->ring_cnt] = r; in ring_buffer__add()
180 ringbuf_free_ring(rb, rb->rings[i]); in ring_buffer__free()
185 free(rb->rings); in ring_buffer__free()
293 struct ring *ring = rb->rings[i]; in ring_buffer__consume_n()
318 struct ring *ring = rb->rings[i]; in ring_buffer__consume()
347 struct ring *ring = rb->rings[ring_id]; in ring_buffer__poll()
370 return rb->rings[idx]; in ring_buffer__ring()
/linux-6.12.1/drivers/crypto/intel/qat/qat_common/
adf_transport.c
267 ring = &bank->rings[ring_num]; in adf_create_ring()
338 adf_handle_response(&bank->rings[i]); in adf_ring_response_handler()
406 bank->rings = kzalloc_node(size, GFP_KERNEL, in adf_init_bank()
408 if (!bank->rings) in adf_init_bank()
425 ring = &bank->rings[i]; in adf_init_bank()
439 tx_ring = &bank->rings[i - hw_data->tx_rx_gap]; in adf_init_bank()
456 ring = &bank->rings[i]; in adf_init_bank()
460 kfree(bank->rings); in adf_init_bank()
530 struct adf_etr_ring_data *ring = &bank->rings[i]; in cleanup_bank()
538 kfree(bank->rings); in cleanup_bank()
[all …]
adf_gen4_hw_data.c
515 state->rings[i].head = ops->read_csr_ring_head(base, bank, i); in bank_state_save()
516 state->rings[i].tail = ops->read_csr_ring_tail(base, bank, i); in bank_state_save()
517 state->rings[i].config = ops->read_csr_ring_config(base, bank, i); in bank_state_save()
518 state->rings[i].base = ops->read_csr_ring_base(base, bank, i); in bank_state_save()
539 ops->write_csr_ring_base(base, bank, i, state->rings[i].base); in bank_state_restore()
542 ops->write_csr_ring_config(base, bank, i, state->rings[i].config); in bank_state_restore()
548 ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); in bank_state_restore()
549 ops->write_csr_ring_tail(base, bank, tx, state->rings[tx].tail); in bank_state_restore()
560 ops->write_csr_ring_head(base, bank, tx, state->rings[tx].head); in bank_state_restore()
563 ops->write_csr_ring_tail(base, bank, rx, state->rings[rx].tail); in bank_state_restore()
[all …]
/linux-6.12.1/drivers/soc/ti/
k3-ringacc.c
219 struct k3_ring *rings; member
358 !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED)) in k3_ringacc_request_ring()
360 else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED) in k3_ringacc_request_ring()
372 ringacc->rings[id].proxy_id = proxy_id; in k3_ringacc_request_ring()
381 ringacc->rings[id].use_count++; in k3_ringacc_request_ring()
383 return &ringacc->rings[id]; in k3_ringacc_request_ring()
419 *fwd_ring = &ringacc->rings[fwd_id]; in k3_dmaring_request_dual_ring()
420 *compl_ring = &ringacc->rings[fwd_id + ringacc->num_rings]; in k3_dmaring_request_dual_ring()
422 ringacc->rings[fwd_id].use_count++; in k3_dmaring_request_dual_ring()
755 reverse_ring = &ringacc->rings[ring->ring_id + ringacc->num_rings]; in k3_dmaring_cfg()
[all …]
/linux-6.12.1/io_uring/
io_uring.c
181 return ctx->cached_cq_tail - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events()
186 return READ_ONCE(ctx->rings->cq.tail) - READ_ONCE(ctx->rings->cq.head); in __io_cqring_events_user()
372 struct io_rings *r = ctx->rings; in io_account_cq_overflow()
660 atomic_andnot(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in __io_cqring_overflow_flush()
667 if (ctx->rings) in io_cqring_overflow_kill()
751 atomic_or(IORING_SQ_CQ_OVERFLOW, &ctx->rings->sq_flags); in io_cqring_event_overflow()
780 struct io_rings *rings = ctx->rings; in io_cqe_cache_refill() local
805 ctx->cqe_cached = &rings->cqes[off]; in io_cqe_cache_refill()
1033 atomic_andnot(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in ctx_flush_and_put()
1214 atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags); in io_req_local_work_add()
[all …]
io_uring.h
58 int dist = READ_ONCE(ctx->rings->cq.tail) - (int) iowq->cq_tail; in io_should_wake()
256 smp_store_release(&ctx->rings->cq.tail, ctx->cached_cq_tail); in io_commit_cqring()
285 struct io_rings *r = ctx->rings; in io_sqring_full()
299 struct io_rings *rings = ctx->rings; in io_sqring_entries() local
303 entries = smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head; in io_sqring_entries()
/linux-6.12.1/drivers/block/xen-blkback/
xenbus.c
84 if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev_file) in xen_update_blkif_status()
110 ring = &blkif->rings[i]; in xen_update_blkif_status()
124 ring = &blkif->rings[i]; in xen_update_blkif_status()
134 blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring), in xen_blkif_alloc_rings()
136 if (!blkif->rings) in xen_blkif_alloc_rings()
140 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_alloc_rings()
274 struct xen_blkif_ring *ring = &blkif->rings[r]; in xen_blkif_disconnect()
338 kfree(blkif->rings); in xen_blkif_disconnect()
339 blkif->rings = NULL; in xen_blkif_disconnect()
389 if (!blkif->rings) \
[all …]
/linux-6.12.1/Documentation/networking/
af_xdp.rst
24 syscall. Associated with each XSK are two rings: the RX ring and the
26 packets on the TX ring. These rings are registered and sized with the
28 to have at least one of these rings for each socket. An RX or TX
37 one of the rings references a frame by referencing its addr. The addr
42 UMEM also has two rings: the FILL ring and the COMPLETION ring. The
50 TX ring. In summary, the RX and FILL rings are used for the RX path
51 and the TX and COMPLETION rings are used for the TX path.
59 corresponding two rings, sets the XDP_SHARED_UMEM flag in the bind
65 process has to create its own socket with associated RX and TX rings,
67 reason that there is only one set of FILL and COMPLETION rings per
[all …]
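
The af_xdp.rst excerpt above names the four rings involved: the per-socket RX and TX rings, sized via the XDP_RX_RING and XDP_TX_RING setsockopts, and the per-UMEM FILL and COMPLETION rings. A minimal sketch of sizing all four on a new XSK; the helper name is made up, and UMEM registration, mmap of the ring regions, bind, and error handling are omitted:

#include <sys/socket.h>
#include <linux/if_xdp.h>

/* Sketch: create an AF_XDP socket and size its four rings.
 * Ring sizes must be powers of two. */
static int xsk_setup_rings(void)
{
	int fd = socket(AF_XDP, SOCK_RAW, 0);
	unsigned int rx = 2048, tx = 2048, fill = 2048, comp = 2048;

	setsockopt(fd, SOL_XDP, XDP_RX_RING, &rx, sizeof(rx));
	setsockopt(fd, SOL_XDP, XDP_TX_RING, &tx, sizeof(tx));
	setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &fill, sizeof(fill));
	setsockopt(fd, SOL_XDP, XDP_UMEM_COMPLETION_RING, &comp, sizeof(comp));
	return fd;
}
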
/linux-6.12.1/drivers/crypto/inside-secure/
safexcel.c
51 for (i = 0; i < priv->config.rings; i++) { in eip197_trc_cache_setupvirt()
510 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_cdesc_rings()
558 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_setup_rdesc_rings()
600 priv->config.pes, priv->config.rings); in safexcel_hw_init()
662 GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
720 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init()
746 for (i = 0; i < priv->config.rings; i++) { in safexcel_hw_init()
770 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
774 writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0), in safexcel_hw_init()
1329 priv->config.rings = min_t(u32, priv->hwconfig.hwnumrings, max_rings); in safexcel_configure()
[all …]
/linux-6.12.1/include/linux/
ptr_ring.h
621 static inline int ptr_ring_resize_multiple_noprof(struct ptr_ring **rings, in ptr_ring_resize_multiple_noprof() argument
641 spin_lock_irqsave(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple_noprof()
642 spin_lock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple_noprof()
643 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple_noprof()
645 spin_unlock(&(rings[i])->producer_lock); in ptr_ring_resize_multiple_noprof()
646 spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags); in ptr_ring_resize_multiple_noprof()
skb_array.h
202 static inline int skb_array_resize_multiple_noprof(struct skb_array **rings, in skb_array_resize_multiple_noprof() argument
207 return ptr_ring_resize_multiple_noprof((struct ptr_ring **)rings, in skb_array_resize_multiple_noprof()
/linux-6.12.1/drivers/net/ethernet/netronome/nfp/
Makefile
24 nfd3/rings.o \
27 nfdk/rings.o \
/linux-6.12.1/Documentation/networking/device_drivers/ethernet/google/
gve.rst
125 The descriptor rings are power-of-two-sized ring buffers consisting of
136 gve maps the buffers for transmit rings into a FIFO and copies the packets
141 The buffers for receive rings are put into a data ring that is the same
143 the rings together.
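
gve.rst notes above that the descriptor rings are power-of-two-sized ring buffers. A power-of-two size lets a free-running producer or consumer counter be reduced to a slot index with a cheap mask instead of a modulo; a generic sketch of that indexing (not gve's actual code):

/* With a power-of-two ring size, (counter & (size - 1)) yields the slot
 * index, so the counters themselves never need an explicit wrap check. */
#define RING_SIZE 1024u   /* must be a power of two */

static inline unsigned int ring_slot(unsigned int counter)
{
	return counter & (RING_SIZE - 1);
}
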
/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/
amdgpu_fence.c
601 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_fini()
631 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_isr_toggle()
648 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_sw_fini()
687 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_fence_driver_hw_init()
901 struct amdgpu_ring *ring = adev->rings[i]; in amdgpu_debugfs_fence_info_show()
/linux-6.12.1/net/ethtool/
Makefile
8 linkstate.o debug.o wol.o features.o privflags.o rings.o \
/linux-6.12.1/Documentation/devicetree/bindings/net/
opencores-ethoc.txt
6 first region is for the device registers and descriptor rings,
