Searched refs:queue (Results 1 – 25 of 1786) sorted by relevance

/linux-6.12.1/drivers/media/usb/uvc/
uvc_queue.c
45 static void uvc_queue_return_buffers(struct uvc_video_queue *queue, in uvc_queue_return_buffers() argument
52 while (!list_empty(&queue->irqqueue)) { in uvc_queue_return_buffers()
53 struct uvc_buffer *buf = list_first_entry(&queue->irqqueue, in uvc_queue_return_buffers()
55 queue); in uvc_queue_return_buffers()
56 list_del(&buf->queue); in uvc_queue_return_buffers()
70 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local
80 stream = uvc_queue_to_stream(queue); in uvc_queue_setup()
101 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local
106 uvc_dbg(uvc_queue_to_stream(queue)->dev, CAPTURE, in uvc_buffer_prepare()
111 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) in uvc_buffer_prepare()
[all …]
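The uvc_queue_return_buffers() lines above show a common kernel drain pattern: while the irqqueue list is not empty, take the first entry, unlink it, and hand the buffer back. A minimal stand-alone C sketch of the same loop, using a hand-rolled singly linked list in place of the kernel's list_head helpers (all names here are illustrative, not the driver's):

    #include <stdio.h>
    #include <stdlib.h>

    struct buf {
        int         id;
        struct buf *next;        /* intrusive link, like buf->queue in the driver */
    };

    /* Return every buffer still sitting on the queue, oldest first:
     * take the head ("list_first_entry"), unlink it ("list_del"), handle it. */
    static void return_all_buffers(struct buf **head)
    {
        while (*head) {
            struct buf *b = *head;
            *head = b->next;
            printf("returning buffer %d\n", b->id);
            free(b);
        }
    }

    int main(void)
    {
        struct buf *head = NULL, **tail = &head;

        for (int i = 0; i < 3; i++) {        /* queue three buffers (no error checks) */
            struct buf *b = malloc(sizeof(*b));
            b->id = i;
            b->next = NULL;
            *tail = b;
            tail = &b->next;
        }
        return_all_buffers(&head);
        return 0;
    }
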
/linux-6.12.1/drivers/usb/gadget/function/
uvc_queue.c
45 struct uvc_video_queue *queue = vb2_get_drv_priv(vq); in uvc_queue_setup() local
46 struct uvc_video *video = container_of(queue, struct uvc_video, queue); in uvc_queue_setup()
73 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_prepare() local
83 if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) in uvc_buffer_prepare()
87 if (queue->use_sg) { in uvc_buffer_prepare()
104 struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); in uvc_buffer_queue() local
109 spin_lock_irqsave(&queue->irqlock, flags); in uvc_buffer_queue()
111 if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { in uvc_buffer_queue()
112 list_add_tail(&buf->queue, &queue->irqqueue); in uvc_buffer_queue()
122 spin_unlock_irqrestore(&queue->irqlock, flags); in uvc_buffer_queue()
[all …]
/linux-6.12.1/drivers/net/wireless/st/cw1200/
queue.c
27 static inline void __cw1200_queue_lock(struct cw1200_queue *queue) in __cw1200_queue_lock() argument
29 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_lock()
30 if (queue->tx_locked_cnt++ == 0) { in __cw1200_queue_lock()
32 queue->queue_id); in __cw1200_queue_lock()
33 ieee80211_stop_queue(stats->priv->hw, queue->queue_id); in __cw1200_queue_lock()
37 static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) in __cw1200_queue_unlock() argument
39 struct cw1200_queue_stats *stats = queue->stats; in __cw1200_queue_unlock()
40 BUG_ON(!queue->tx_locked_cnt); in __cw1200_queue_unlock()
41 if (--queue->tx_locked_cnt == 0) { in __cw1200_queue_unlock()
43 queue->queue_id); in __cw1200_queue_unlock()
[all …]
/linux-6.12.1/drivers/md/dm-vdo/
funnel-workqueue.c
73 static inline struct simple_work_queue *as_simple_work_queue(struct vdo_work_queue *queue) in as_simple_work_queue() argument
75 return ((queue == NULL) ? in as_simple_work_queue()
76 NULL : container_of(queue, struct simple_work_queue, common)); in as_simple_work_queue()
79 static inline struct round_robin_work_queue *as_round_robin_work_queue(struct vdo_work_queue *queue) in as_round_robin_work_queue() argument
81 return ((queue == NULL) ? in as_round_robin_work_queue()
83 container_of(queue, struct round_robin_work_queue, common)); in as_round_robin_work_queue()
96 static struct vdo_completion *poll_for_completion(struct simple_work_queue *queue) in poll_for_completion() argument
100 for (i = queue->common.type->max_priority; i >= 0; i--) { in poll_for_completion()
101 struct funnel_queue_entry *link = vdo_funnel_queue_poll(queue->priority_lists[i]); in poll_for_completion()
110 static void enqueue_work_queue_completion(struct simple_work_queue *queue, in enqueue_work_queue_completion() argument
[all …]
funnel-queue.c
15 struct funnel_queue *queue; in vdo_make_funnel_queue() local
17 result = vdo_allocate(1, struct funnel_queue, "funnel queue", &queue); in vdo_make_funnel_queue()
25 queue->stub.next = NULL; in vdo_make_funnel_queue()
26 queue->newest = &queue->stub; in vdo_make_funnel_queue()
27 queue->oldest = &queue->stub; in vdo_make_funnel_queue()
29 *queue_ptr = queue; in vdo_make_funnel_queue()
33 void vdo_free_funnel_queue(struct funnel_queue *queue) in vdo_free_funnel_queue() argument
35 vdo_free(queue); in vdo_free_funnel_queue()
38 static struct funnel_queue_entry *get_oldest(struct funnel_queue *queue) in get_oldest() argument
45 struct funnel_queue_entry *oldest = queue->oldest; in get_oldest()
[all …]
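The funnel-queue.c lines above initialise both the newest and oldest cursors to an embedded stub entry, the usual starting state of a multi-producer, single-consumer queue whose producers only perform one atomic exchange. A rough user-space sketch of that producer step with C11 atomics (a simplification of the idea, not the VDO code itself):

    #include <stdatomic.h>

    struct fq_entry {
        _Atomic(struct fq_entry *) next;
    };

    struct funnel_queue {
        _Atomic(struct fq_entry *) newest;   /* producers swap themselves in here */
        struct fq_entry           *oldest;   /* consumer-only cursor              */
        struct fq_entry            stub;     /* both cursors start at the stub    */
    };

    static void fq_init(struct funnel_queue *q)
    {
        atomic_store(&q->stub.next, NULL);
        atomic_store(&q->newest, &q->stub);
        q->oldest = &q->stub;
    }

    /* Producer side: publish the entry, then link it behind whatever was newest.
     * Many producers may race here; only one consumer may ever poll. */
    static void fq_put(struct funnel_queue *q, struct fq_entry *e)
    {
        struct fq_entry *prev;

        atomic_store(&e->next, NULL);
        prev = atomic_exchange(&q->newest, e);
        atomic_store(&prev->next, e);
    }

A consumer that observes a NULL next pointer during the brief window between the exchange and the final store has to treat the queue as momentarily empty, which is generally why a poll on this kind of queue can come back empty while a put is still in flight.
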
/linux-6.12.1/drivers/net/wireless/broadcom/b43legacy/
pio.c
22 static void tx_start(struct b43legacy_pioqueue *queue) in tx_start() argument
24 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_start()
28 static void tx_octet(struct b43legacy_pioqueue *queue, in tx_octet() argument
31 if (queue->need_workarounds) { in tx_octet()
32 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
33 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
36 b43legacy_pio_write(queue, B43legacy_PIO_TXCTL, in tx_octet()
38 b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet); in tx_octet()
63 static void tx_data(struct b43legacy_pioqueue *queue, in tx_data() argument
71 if (queue->need_workarounds) { in tx_data()
[all …]
/linux-6.12.1/drivers/nvme/target/
tcp.c
110 struct nvmet_tcp_queue *queue; member
221 static inline u16 nvmet_tcp_cmd_tag(struct nvmet_tcp_queue *queue, in nvmet_tcp_cmd_tag() argument
224 if (unlikely(!queue->nr_cmds)) { in nvmet_tcp_cmd_tag()
229 return cmd - queue->cmds; in nvmet_tcp_cmd_tag()
257 nvmet_tcp_get_cmd(struct nvmet_tcp_queue *queue) in nvmet_tcp_get_cmd() argument
261 cmd = list_first_entry_or_null(&queue->free_list, in nvmet_tcp_get_cmd()
277 if (unlikely(cmd == &cmd->queue->connect)) in nvmet_tcp_put_cmd()
280 list_add_tail(&cmd->entry, &cmd->queue->free_list); in nvmet_tcp_put_cmd()
283 static inline int queue_cpu(struct nvmet_tcp_queue *queue) in queue_cpu() argument
285 return queue->sock->sk->sk_incoming_cpu; in queue_cpu()
[all …]
rdma.c
52 struct nvmet_rdma_queue *queue; member
66 struct nvmet_rdma_queue *queue; member
172 static void nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue);
213 nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue) in nvmet_rdma_get_rsp() argument
218 tag = sbitmap_get(&queue->rsp_tags); in nvmet_rdma_get_rsp()
220 rsp = &queue->rsps[tag]; in nvmet_rdma_get_rsp()
228 ret = nvmet_rdma_alloc_rsp(queue->dev, rsp, in nvmet_rdma_get_rsp()
243 nvmet_rdma_free_rsp(rsp->queue->dev, rsp); in nvmet_rdma_put_rsp()
248 sbitmap_clear_bit(&rsp->queue->rsp_tags, rsp->tag); in nvmet_rdma_put_rsp()
448 nvmet_rdma_alloc_rsps(struct nvmet_rdma_queue *queue) in nvmet_rdma_alloc_rsps() argument
[all …]
/linux-6.12.1/drivers/iio/buffer/
industrialio-buffer-dma.c
101 struct iio_dma_buffer_queue *queue = block->queue; in iio_buffer_block_release() local
106 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
109 atomic_dec(&queue->num_dmabufs); in iio_buffer_block_release()
112 iio_buffer_put(&queue->buffer); in iio_buffer_block_release()
175 struct iio_dma_buffer_queue *queue, size_t size, bool fileio) in iio_dma_buffer_alloc_block() argument
184 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
195 block->queue = queue; in iio_dma_buffer_alloc_block()
199 iio_buffer_get(&queue->buffer); in iio_dma_buffer_alloc_block()
202 atomic_inc(&queue->num_dmabufs); in iio_dma_buffer_alloc_block()
213 static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_queue_wake() argument
[all …]
/linux-6.12.1/drivers/net/xen-netback/
rx.c
42 static void xenvif_update_needed_slots(struct xenvif_queue *queue, in xenvif_update_needed_slots() argument
55 WRITE_ONCE(queue->rx_slots_needed, needed); in xenvif_update_needed_slots()
58 static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) in xenvif_rx_ring_slots_available() argument
63 needed = READ_ONCE(queue->rx_slots_needed); in xenvif_rx_ring_slots_available()
68 prod = queue->rx.sring->req_prod; in xenvif_rx_ring_slots_available()
69 cons = queue->rx.req_cons; in xenvif_rx_ring_slots_available()
74 queue->rx.sring->req_event = prod + 1; in xenvif_rx_ring_slots_available()
80 } while (queue->rx.sring->req_prod != prod); in xenvif_rx_ring_slots_available()
85 bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb) in xenvif_rx_queue_tail() argument
90 spin_lock_irqsave(&queue->rx_queue.lock, flags); in xenvif_rx_queue_tail()
[all …]
netback.c
107 static void xenvif_idx_release(struct xenvif_queue *queue, u16 pending_idx,
110 static void make_tx_response(struct xenvif_queue *queue,
115 static void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);
117 static inline int tx_work_todo(struct xenvif_queue *queue);
119 static inline unsigned long idx_to_pfn(struct xenvif_queue *queue, in idx_to_pfn() argument
122 return page_to_pfn(queue->mmap_pages[idx]); in idx_to_pfn()
125 static inline unsigned long idx_to_kaddr(struct xenvif_queue *queue, in idx_to_kaddr() argument
128 return (unsigned long)pfn_to_kaddr(idx_to_pfn(queue, idx)); in idx_to_kaddr()
161 void xenvif_kick_thread(struct xenvif_queue *queue) in xenvif_kick_thread() argument
163 wake_up(&queue->wq); in xenvif_kick_thread()
[all …]
interface.c
52 void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue, in xenvif_skb_zerocopy_prepare() argument
56 atomic_inc(&queue->inflight_packets); in xenvif_skb_zerocopy_prepare()
59 void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) in xenvif_skb_zerocopy_complete() argument
61 atomic_dec(&queue->inflight_packets); in xenvif_skb_zerocopy_complete()
67 wake_up(&queue->dealloc_wq); in xenvif_skb_zerocopy_complete()
77 static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue) in xenvif_handle_tx_interrupt() argument
81 rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx); in xenvif_handle_tx_interrupt()
83 napi_schedule(&queue->napi); in xenvif_handle_tx_interrupt()
89 struct xenvif_queue *queue = dev_id; in xenvif_tx_interrupt() local
92 old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending); in xenvif_tx_interrupt()
[all …]
/linux-6.12.1/drivers/md/dm-vdo/indexer/
funnel-requestqueue.c
68 static inline struct uds_request *poll_queues(struct uds_request_queue *queue) in poll_queues() argument
72 entry = vdo_funnel_queue_poll(queue->retry_queue); in poll_queues()
76 entry = vdo_funnel_queue_poll(queue->main_queue); in poll_queues()
83 static inline bool are_queues_idle(struct uds_request_queue *queue) in are_queues_idle() argument
85 return vdo_is_funnel_queue_idle(queue->retry_queue) && in are_queues_idle()
86 vdo_is_funnel_queue_idle(queue->main_queue); in are_queues_idle()
94 static inline bool dequeue_request(struct uds_request_queue *queue, in dequeue_request() argument
97 struct uds_request *request = poll_queues(queue); in dequeue_request()
104 if (!READ_ONCE(queue->running)) { in dequeue_request()
115 static void wait_for_request(struct uds_request_queue *queue, bool dormant, in wait_for_request() argument
[all …]
/linux-6.12.1/drivers/misc/genwqe/
card_ddcb.c
82 static int queue_empty(struct ddcb_queue *queue) in queue_empty() argument
84 return queue->ddcb_next == queue->ddcb_act; in queue_empty()
87 static int queue_enqueued_ddcbs(struct ddcb_queue *queue) in queue_enqueued_ddcbs() argument
89 if (queue->ddcb_next >= queue->ddcb_act) in queue_enqueued_ddcbs()
90 return queue->ddcb_next - queue->ddcb_act; in queue_enqueued_ddcbs()
92 return queue->ddcb_max - (queue->ddcb_act - queue->ddcb_next); in queue_enqueued_ddcbs()
95 static int queue_free_ddcbs(struct ddcb_queue *queue) in queue_free_ddcbs() argument
97 int free_ddcbs = queue->ddcb_max - queue_enqueued_ddcbs(queue) - 1; in queue_free_ddcbs()
163 static void print_ddcb_info(struct genwqe_dev *cd, struct ddcb_queue *queue) in print_ddcb_info() argument
174 cd->card_idx, queue->ddcb_act, queue->ddcb_next); in print_ddcb_info()
[all …]
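queue_enqueued_ddcbs() above is the standard fill-level computation for a fixed-size ring tracked by two indices: when the producer index has not wrapped past the consumer index the plain difference is the occupancy, otherwise the wrapped form applies, and queue_free_ddcbs() keeps one slot in reserve so a full ring never looks identical to an empty one. A small stand-alone version of that arithmetic (illustrative names):

    /* Occupancy of a ring of `max` slots, with producer index `next`
     * and consumer index `act`, matching the two cases in the snippet. */
    static int ring_used(int next, int act, int max)
    {
        if (next >= act)
            return next - act;
        return max - (act - next);
    }

    static int ring_free(int next, int act, int max)
    {
        /* one slot is sacrificed so that used == max can never happen */
        return max - ring_used(next, act, max) - 1;
    }

For example, with max = 8, next = 2 and act = 6 (the producer has wrapped), ring_used() yields 8 - (6 - 2) = 4 occupied slots and ring_free() yields 3.
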
/linux-6.12.1/drivers/nvme/host/
tcp.c
105 struct nvme_tcp_queue *queue; member
205 static int nvme_tcp_try_send(struct nvme_tcp_queue *queue);
212 static inline int nvme_tcp_queue_id(struct nvme_tcp_queue *queue) in nvme_tcp_queue_id() argument
214 return queue - queue->ctrl->queues; in nvme_tcp_queue_id()
220 static inline bool nvme_tcp_queue_tls(struct nvme_tcp_queue *queue) in nvme_tcp_queue_tls() argument
225 return queue->tls_enabled; in nvme_tcp_queue_tls()
239 static inline struct blk_mq_tags *nvme_tcp_tagset(struct nvme_tcp_queue *queue) in nvme_tcp_tagset() argument
241 u32 queue_idx = nvme_tcp_queue_id(queue); in nvme_tcp_tagset()
244 return queue->ctrl->admin_tag_set.tags[queue_idx]; in nvme_tcp_tagset()
245 return queue->ctrl->tag_set.tags[queue_idx - 1]; in nvme_tcp_tagset()
[all …]
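nvme_tcp_queue_id() above recovers a queue's index from pointer arithmetic alone: the queues sit in one contiguous array, so subtracting the array base from the element pointer gives the index, and the result then selects the admin or I/O tag set. A minimal illustration of the array-index-by-subtraction idiom (hypothetical structures, not the NVMe ones):

    #include <stdio.h>

    struct ctrl;

    struct queue {
        struct ctrl *ctrl;          /* back-pointer to the owning controller */
    };

    struct ctrl {
        struct queue queues[4];     /* queues live in one contiguous array   */
    };

    /* Index of `q` inside its controller's array, in the spirit of
     * nvme_tcp_queue_id(). */
    static int queue_id(struct queue *q)
    {
        return (int)(q - q->ctrl->queues);
    }

    int main(void)
    {
        struct ctrl c;

        for (int i = 0; i < 4; i++)
            c.queues[i].ctrl = &c;
        printf("%d\n", queue_id(&c.queues[2]));   /* prints 2 */
        return 0;
    }
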
/linux-6.12.1/drivers/crypto/cavium/zip/
zip_device.c
57 static inline u32 zip_cmd_queue_consumed(struct zip_device *zip_dev, int queue) in zip_cmd_queue_consumed() argument
59 return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) * in zip_cmd_queue_consumed()
81 u32 queue = 0; in zip_load_instr() local
91 queue = 0; in zip_load_instr()
93 queue = 1; in zip_load_instr()
95 zip_dbg("CPU Core: %d Queue number:%d", raw_smp_processor_id(), queue); in zip_load_instr()
98 spin_lock(&zip_dev->iq[queue].lock); in zip_load_instr()
109 zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head); in zip_load_instr()
110 zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail); in zip_load_instr()
112 consumed = zip_cmd_queue_consumed(zip_dev, queue); in zip_load_instr()
[all …]
/linux-6.12.1/drivers/net/
xen-netfront.c
218 static struct sk_buff *xennet_get_rx_skb(struct netfront_queue *queue, in xennet_get_rx_skb() argument
222 struct sk_buff *skb = queue->rx_skbs[i]; in xennet_get_rx_skb()
223 queue->rx_skbs[i] = NULL; in xennet_get_rx_skb()
227 static grant_ref_t xennet_get_rx_ref(struct netfront_queue *queue, in xennet_get_rx_ref() argument
231 grant_ref_t ref = queue->grant_rx_ref[i]; in xennet_get_rx_ref()
232 queue->grant_rx_ref[i] = INVALID_GRANT_REF; in xennet_get_rx_ref()
248 struct netfront_queue *queue = from_timer(queue, t, rx_refill_timer); in rx_refill_timeout() local
249 napi_schedule(&queue->napi); in rx_refill_timeout()
252 static int netfront_tx_slot_available(struct netfront_queue *queue) in netfront_tx_slot_available() argument
254 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < in netfront_tx_slot_available()
[all …]
/linux-6.12.1/drivers/net/wireless/ralink/rt2x00/
rt2x00queue.c
25 struct data_queue *queue = entry->queue; in rt2x00queue_alloc_rxskb() local
26 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; in rt2x00queue_alloc_rxskb()
37 frame_size = queue->data_size + queue->desc_size + queue->winfo_size; in rt2x00queue_alloc_rxskb()
95 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_map_txskb()
112 struct device *dev = entry->queue->rt2x00dev->dev; in rt2x00queue_unmap_skb()
488 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; in rt2x00queue_write_tx_data()
500 entry->queue->qid, DRV_PROJECT); in rt2x00queue_write_tx_data()
529 struct data_queue *queue = entry->queue; in rt2x00queue_write_tx_descriptor() local
531 queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); in rt2x00queue_write_tx_descriptor()
537 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry); in rt2x00queue_write_tx_descriptor()
[all …]
/linux-6.12.1/drivers/net/wireguard/
queueing.c
25 int wg_packet_queue_init(struct crypt_queue *queue, work_func_t function, in wg_packet_queue_init() argument
30 memset(queue, 0, sizeof(*queue)); in wg_packet_queue_init()
31 queue->last_cpu = -1; in wg_packet_queue_init()
32 ret = ptr_ring_init(&queue->ring, len, GFP_KERNEL); in wg_packet_queue_init()
35 queue->worker = wg_packet_percpu_multicore_worker_alloc(function, queue); in wg_packet_queue_init()
36 if (!queue->worker) { in wg_packet_queue_init()
37 ptr_ring_cleanup(&queue->ring, NULL); in wg_packet_queue_init()
43 void wg_packet_queue_free(struct crypt_queue *queue, bool purge) in wg_packet_queue_free() argument
45 free_percpu(queue->worker); in wg_packet_queue_free()
46 WARN_ON(!purge && !__ptr_ring_empty(&queue->ring)); in wg_packet_queue_free()
[all …]
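wg_packet_queue_init() above follows the usual construct-with-rollback shape: zero the object, initialise the ring, allocate the worker, and on failure undo the step that already succeeded before returning the error; wg_packet_queue_free() then releases things in the reverse order. A generic sketch of that unwind ordering (the helpers here are placeholders, not the WireGuard API):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    struct ring   { void **slots; };
    struct worker { int cpu; };

    struct crypt_queue {
        struct ring    ring;
        struct worker *worker;
        int            last_cpu;
    };

    static int ring_init(struct ring *r, size_t len)
    {
        r->slots = calloc(len, sizeof(*r->slots));
        return r->slots ? 0 : -ENOMEM;
    }

    static void ring_cleanup(struct ring *r)
    {
        free(r->slots);
    }

    static int queue_init(struct crypt_queue *q, size_t len)
    {
        int ret;

        memset(q, 0, sizeof(*q));
        q->last_cpu = -1;

        ret = ring_init(&q->ring, len);
        if (ret)
            return ret;

        q->worker = calloc(1, sizeof(*q->worker));
        if (!q->worker) {
            ring_cleanup(&q->ring);   /* undo the step that already succeeded */
            return -ENOMEM;
        }
        return 0;
    }
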
/linux-6.12.1/drivers/gpu/drm/imagination/
pvr_queue.c
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release()
125 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
148 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
263 struct pvr_queue *queue, in pvr_queue_fence_init() argument
269 pvr_context_get(queue->ctx); in pvr_queue_fence_init()
270 fence->queue = queue; in pvr_queue_fence_init()
287 pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_cccb_fence_init() argument
289 pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops, in pvr_queue_cccb_fence_init()
290 &queue->cccb_fence_ctx.base); in pvr_queue_cccb_fence_init()
305 pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_job_fence_init() argument
[all …]
/linux-6.12.1/drivers/scsi/arm/
queue.c
59 int queue_initialise (Queue_t *queue) in queue_initialise() argument
64 spin_lock_init(&queue->queue_lock); in queue_initialise()
65 INIT_LIST_HEAD(&queue->head); in queue_initialise()
66 INIT_LIST_HEAD(&queue->free); in queue_initialise()
74 queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL); in queue_initialise()
79 list_add(&q->list, &queue->free); in queue_initialise()
83 return queue->alloc != NULL; in queue_initialise()
91 void queue_free (Queue_t *queue) in queue_free() argument
93 if (!list_empty(&queue->head)) in queue_free()
94 printk(KERN_WARNING "freeing non-empty queue %p\n", queue); in queue_free()
[all …]
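queue_initialise() above allocates every queue entry up front in a single array and threads each one onto a free list, so later enqueues can grab an entry without allocating; queue_free() just warns if anything is still queued and releases the one backing allocation. A compact sketch of that preallocation pattern (names are illustrative):

    #include <stdbool.h>
    #include <stdlib.h>

    struct entry {
        struct entry *next;
        void         *payload;
    };

    struct queue {
        struct entry *free;    /* entries ready for use     */
        struct entry *alloc;   /* single backing allocation */
    };

    static bool queue_initialise(struct queue *q, size_t nentries)
    {
        q->free  = NULL;
        q->alloc = calloc(nentries, sizeof(*q->alloc));
        if (!q->alloc)
            return false;

        /* push every preallocated entry onto the free list */
        for (size_t i = 0; i < nentries; i++) {
            q->alloc[i].next = q->free;
            q->free = &q->alloc[i];
        }
        return true;
    }

    static void queue_free(struct queue *q)
    {
        free(q->alloc);        /* one call releases every entry */
    }
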
/linux-6.12.1/drivers/net/ethernet/ibm/ehea/
ehea_qmr.h
196 static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset) in hw_qeit_calc() argument
200 if (q_offset >= queue->queue_length) in hw_qeit_calc()
201 q_offset -= queue->queue_length; in hw_qeit_calc()
202 current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT]; in hw_qeit_calc()
206 static inline void *hw_qeit_get(struct hw_queue *queue) in hw_qeit_get() argument
208 return hw_qeit_calc(queue, queue->current_q_offset); in hw_qeit_get()
211 static inline void hw_qeit_inc(struct hw_queue *queue) in hw_qeit_inc() argument
213 queue->current_q_offset += queue->qe_size; in hw_qeit_inc()
214 if (queue->current_q_offset >= queue->queue_length) { in hw_qeit_inc()
215 queue->current_q_offset = 0; in hw_qeit_inc()
[all …]
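hw_qeit_calc() above maps a possibly wrapped byte offset onto an address inside a queue backed by an array of pages: fold the offset back into range, select the page with the high bits, and index within the page with the low bits. A small stand-alone version of the same address calculation (illustrative constants and names):

    #include <stdint.h>

    #define QPAGE_SHIFT 12
    #define QPAGE_SIZE  (1u << QPAGE_SHIFT)
    #define QPAGE_MASK  (QPAGE_SIZE - 1)

    struct hw_queue {
        void   **pages;          /* one mapped page per array slot */
        uint64_t length;         /* total queue size in bytes      */
    };

    /* Address of the queue entry at byte offset `off`, wrapping once if the
     * caller has stepped past the end, as hw_qeit_calc() does. */
    static void *qeit_calc(const struct hw_queue *q, uint64_t off)
    {
        if (off >= q->length)
            off -= q->length;
        return (char *)q->pages[off >> QPAGE_SHIFT] + (off & QPAGE_MASK);
    }
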
/linux-6.12.1/drivers/crypto/hisilicon/sec/
sec_drv.c
227 static int sec_queue_map_io(struct sec_queue *queue) in sec_queue_map_io() argument
229 struct device *dev = queue->dev_info->dev; in sec_queue_map_io()
234 2 + queue->queue_id); in sec_queue_map_io()
237 queue->queue_id); in sec_queue_map_io()
240 queue->regs = ioremap(res->start, resource_size(res)); in sec_queue_map_io()
241 if (!queue->regs) in sec_queue_map_io()
247 static void sec_queue_unmap_io(struct sec_queue *queue) in sec_queue_unmap_io() argument
249 iounmap(queue->regs); in sec_queue_unmap_io()
252 static int sec_queue_ar_pkgattr(struct sec_queue *queue, u32 ar_pkg) in sec_queue_ar_pkgattr() argument
254 void __iomem *addr = queue->regs + SEC_Q_ARUSER_CFG_REG; in sec_queue_ar_pkgattr()
[all …]
/linux-6.12.1/drivers/soc/ixp4xx/
ixp4xx-qmgr.c
29 void qmgr_put_entry(unsigned int queue, u32 val) in qmgr_put_entry() argument
32 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ in qmgr_put_entry()
35 qmgr_queue_descs[queue], queue, val); in qmgr_put_entry()
37 __raw_writel(val, &qmgr_regs->acc[queue][0]); in qmgr_put_entry()
40 u32 qmgr_get_entry(unsigned int queue) in qmgr_get_entry() argument
43 val = __raw_readl(&qmgr_regs->acc[queue][0]); in qmgr_get_entry()
45 BUG_ON(!qmgr_queue_descs[queue]); /* not yet requested */ in qmgr_get_entry()
48 qmgr_queue_descs[queue], queue, val); in qmgr_get_entry()
53 static int __qmgr_get_stat1(unsigned int queue) in __qmgr_get_stat1() argument
55 return (__raw_readl(&qmgr_regs->stat1[queue >> 3]) in __qmgr_get_stat1()
[all …]
/linux-6.12.1/net/sunrpc/
sched.c
95 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
101 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer()
102 cancel_delayed_work(&queue->timer_list.dwork); in __rpc_disable_timer()
106 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument
109 queue->timer_list.expires = expires; in rpc_set_queue_timer()
114 mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires); in rpc_set_queue_timer()
121 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument
125 if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires)) in __rpc_add_timer()
126 rpc_set_queue_timer(queue, timeout); in __rpc_add_timer()
127 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); in __rpc_add_timer()
[all …]
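__rpc_add_timer() above re-programs the queue's delayed work only when the new timeout is earlier than the expiry already scheduled, and __rpc_disable_timer() cancels the work once the timer list goes empty, so a single timer serves every waiting task. A tiny sketch of the "only move the deadline earlier" decision (generic, not the SUNRPC code):

    #include <stdbool.h>

    struct queue_timer {
        bool          armed;
        unsigned long expires;    /* absolute deadline, jiffies-style */
    };

    /* Re-arm only if nothing is pending yet or the new deadline is sooner,
     * mirroring the list_empty() / time_before() test in __rpc_add_timer(). */
    static bool timer_needs_update(const struct queue_timer *t,
                                   unsigned long timeout)
    {
        return !t->armed || (long)(timeout - t->expires) < 0;
    }

    static void queue_set_timer(struct queue_timer *t, unsigned long timeout)
    {
        if (timer_needs_update(t, timeout)) {
            t->expires = timeout;
            t->armed = true;
            /* the kernel would mod_delayed_work() to the new expiry here */
        }
    }
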
