/linux-6.12.1/include/trace/events/ |
D | page_pool.h |
    17 s32 inflight, u32 hold, u32 release),
    19 TP_ARGS(pool, inflight, hold, release),
    23 __field(s32, inflight)
    31 __entry->inflight = inflight;
    38 __entry->pool, __entry->inflight, __entry->hold,
|
D | wbt.h |
    132 int step, unsigned int inflight),
    134 TP_ARGS(bdi, status, step, inflight),
    140 __field(unsigned int, inflight)
    148 __entry->inflight = inflight;
    152 __entry->status, __entry->step, __entry->inflight)
|
/linux-6.12.1/block/ |
D | genhd.c |
    123 unsigned int inflight = 0; in part_in_flight() local
    127 inflight += part_stat_local_read_cpu(part, in_flight[0], cpu) + in part_in_flight()
    130 if ((int)inflight < 0) in part_in_flight()
    131 inflight = 0; in part_in_flight()
    133 return inflight; in part_in_flight()
    137 unsigned int inflight[2]) in part_in_flight_rw()
    141 inflight[0] = 0; in part_in_flight_rw()
    142 inflight[1] = 0; in part_in_flight_rw()
    144 inflight[0] += part_stat_local_read_cpu(part, in_flight[0], cpu); in part_in_flight_rw()
    145 inflight[1] += part_stat_local_read_cpu(part, in_flight[1], cpu); in part_in_flight_rw()
    [all …]
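part_in_flight() sums per-CPU issue/completion counters, so individual per-CPU entries can be negative and only the aggregate matters; a transiently negative sum is clamped to zero. A minimal userspace sketch of that summation, assuming a hypothetical pcpu_inflight[] array standing in for the kernel's per-CPU part_stat counters:

    /* Sketch only: pcpu_inflight[] and the sample values are illustrative. */
    #include <stdio.h>

    #define NR_CPUS 4

    /* Per-CPU deltas: +1 on issue on one CPU, -1 on completion on possibly another. */
    static long pcpu_inflight[NR_CPUS] = { 5, -2, 3, -1 };

    static unsigned int part_in_flight_sketch(void)
    {
            long inflight = 0;
            int cpu;

            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    inflight += pcpu_inflight[cpu];

            /* Races can make the sum dip below zero briefly; clamp like genhd.c. */
            return inflight < 0 ? 0 : (unsigned int)inflight;
    }

    int main(void)
    {
            printf("inflight=%u\n", part_in_flight_sketch());   /* prints 5 */
            return 0;
    }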
|
D | blk-wbt.c |
    198 int inflight, limit; in wbt_rqw_done() local
    200 inflight = atomic_dec_return(&rqw->inflight); in wbt_rqw_done()
    218 if (inflight && inflight >= limit) in wbt_rqw_done()
    222 int diff = limit - inflight; in wbt_rqw_done()
    224 if (!inflight || diff >= rwb->wb_background / 2) in wbt_rqw_done()
    291 ret += atomic_read(&rwb->rq_wait[i].inflight); in wbt_inflight()
    425 unsigned int inflight = wbt_inflight(rwb); in wb_timer_fn() local
    433 trace_wbt_timer(rwb->rqos.disk->bdi, status, rqd->scale_step, inflight); in wb_timer_fn()
    475 if (rqd->scale_step || inflight) in wb_timer_fn()
    823 atomic_read(&rwb->rq_wait[i].inflight)); in wbt_inflight_show()
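wbt_rqw_done() decrements the shared inflight counter and decides from the new value whether throttled waiters can make progress. A minimal C11 sketch of that completion-side check; rqw_done_sketch() and its parameters are illustrative stand-ins, not the kernel API (the real function also derives the limit from the current scale step and does the actual wakeup):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct rqw_sketch {
            atomic_int inflight;    /* requests issued but not yet completed */
    };

    /* Returns true when enough requests have completed that waiters may proceed. */
    static bool rqw_done_sketch(struct rqw_sketch *rqw, int limit, int wb_background)
    {
            /* Kernel's atomic_dec_return(): decrement and observe the new value. */
            int inflight = atomic_fetch_sub(&rqw->inflight, 1) - 1;

            /* Still at or above the limit: keep throttling. */
            if (inflight && inflight >= limit)
                    return false;

            /* Wake when fully drained, or when a meaningful share of the
             * window (half the background batch) has been freed up. */
            return !inflight || (limit - inflight) >= wb_background / 2;
    }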
|
D | blk-rq-qos.h |
    24 atomic_t inflight; member
    84 atomic_set(&rq_wait->inflight, 0); in rq_wait_init()
|
D | blk-iolatency.c |
    276 atomic_dec(&rqw->inflight); in iolat_cleanup_cb()
    305 atomic_inc(&rqw->inflight); in __blkcg_iolatency_throttle()
    599 int inflight = 0; in blkcg_iolatency_done_bio() local
    621 inflight = atomic_dec_return(&rqw->inflight); in blkcg_iolatency_done_bio()
    622 WARN_ON_ONCE(inflight < 0); in blkcg_iolatency_done_bio()
|
/linux-6.12.1/drivers/firmware/arm_scmi/transports/ |
D | smc.c |
    66 atomic_t inflight; member
    98 atomic_set(&scmi_info->inflight, INFLIGHT_NONE); in smc_channel_lock_init()
    103 static bool smc_xfer_inflight(struct scmi_xfer *xfer, atomic_t *inflight) in smc_xfer_inflight() argument
    107 ret = atomic_cmpxchg(inflight, INFLIGHT_NONE, xfer->hdr.seq); in smc_xfer_inflight()
    117 spin_until_cond(smc_xfer_inflight(xfer, &scmi_info->inflight)); in smc_channel_lock_acquire()
    125 atomic_set(&scmi_info->inflight, INFLIGHT_NONE); in smc_channel_lock_release()
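The SMC transport serialises transfers by treating a single atomic as a one-slot lock: a compare-and-swap installs the transfer's sequence number only while the slot still holds INFLIGHT_NONE, and releasing writes the sentinel back. A minimal C11 sketch of that claim/release protocol; the sentinel value and the *_sketch helpers are placeholders, not the driver's definitions:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define INFLIGHT_NONE_SKETCH (-1)   /* placeholder sentinel meaning "channel free" */

    static bool smc_xfer_inflight_sketch(atomic_int *inflight, int seq)
    {
            int expected = INFLIGHT_NONE_SKETCH;

            /* Succeeds only if nobody else owns the channel right now. */
            return atomic_compare_exchange_strong(inflight, &expected, seq);
    }

    static void smc_channel_lock_acquire_sketch(atomic_int *inflight, int seq)
    {
            /* spin_until_cond() in the driver; a plain busy-wait here. */
            while (!smc_xfer_inflight_sketch(inflight, seq))
                    ;
    }

    static void smc_channel_lock_release_sketch(atomic_int *inflight)
    {
            atomic_store(inflight, INFLIGHT_NONE_SKETCH);
    }

A side effect of the cmpxchg scheme is that the slot also records which transfer currently owns the channel, which a plain lock would not.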
|
/linux-6.12.1/net/core/ |
D | page_pool.c |
    611 s32 inflight; in page_pool_inflight() local
    613 inflight = _distance(hold_cnt, release_cnt); in page_pool_inflight()
    616 trace_page_pool_release(pool, inflight, hold_cnt, release_cnt); in page_pool_inflight()
    617 WARN(inflight < 0, "Negative(%d) inflight packet-pages", in page_pool_inflight()
    618 inflight); in page_pool_inflight()
    620 inflight = max(0, inflight); in page_pool_inflight()
    623 return inflight; in page_pool_inflight()
    1051 int inflight; in page_pool_release() local
    1054 inflight = page_pool_inflight(pool, true); in page_pool_release()
    1055 if (!inflight) in page_pool_release()
    [all …]
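page_pool_inflight() derives the number of outstanding pages as the distance between a free-running hold counter and release counter. A small sketch of that arithmetic, assuming u32 counters that may wrap; the clamp mirrors the max(0, inflight) above, and the real function additionally traces and warns on a negative value:

    #include <stdint.h>

    static int32_t pp_inflight_sketch(uint32_t hold_cnt, uint32_t release_cnt)
    {
            /* Wrap-safe distance between the two counters (the kernel's _distance()). */
            int32_t inflight = (int32_t)(hold_cnt - release_cnt);

            /* Negative would mean more releases than holds were observed; clamp. */
            return inflight < 0 ? 0 : inflight;
    }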
|
D | page_pool_user.c |
    218 size_t inflight, refsz; in page_pool_nl_fill() local
    236 inflight = page_pool_inflight(pool, false); in page_pool_nl_fill()
    238 if (nla_put_uint(rsp, NETDEV_A_PAGE_POOL_INFLIGHT, inflight) || in page_pool_nl_fill()
    240 inflight * refsz)) in page_pool_nl_fill()
|
/linux-6.12.1/drivers/crypto/chelsio/ |
D | chcr_core.c |
    56 if (atomic_read(&dev->inflight)) { in detach_work_fn()
    60 atomic_read(&dev->inflight)); in detach_work_fn()
    65 atomic_read(&dev->inflight)); in detach_work_fn()
    103 atomic_set(&dev->inflight, 0); in chcr_dev_add()
    122 atomic_set(&dev->inflight, 0); in chcr_dev_init()
    232 if (atomic_read(&dev->inflight) != 0) { in chcr_detach_device()
|
D | chcr_core.h | 99 atomic_t inflight; member
|
/linux-6.12.1/tools/testing/selftests/net/af_unix/ |
D | scm_rights.c |
    200 int inflight, int receiver) in __send_fd() argument
    214 self->fd[inflight * 2], in __send_fd()
    215 self->fd[inflight * 2], in __send_fd()
    240 #define send_fd(inflight, receiver) \ argument
    241 __send_fd(_metadata, self, variant, inflight, receiver)
|
/linux-6.12.1/net/unix/ |
D | garbage.c |
    200 struct unix_sock *inflight = unix_get_socket(fpl->fp[j++]); in unix_add_edges() local
    203 if (!inflight) in unix_add_edges()
    207 edge->predecessor = inflight; in unix_add_edges()
    220 fpl->inflight = true; in unix_add_edges()
    251 fpl->inflight = false; in unix_del_edges()
    300 if (fpl->inflight) in unix_destroy_fpl()
|
/linux-6.12.1/drivers/gpu/drm/i915/gt/ |
D | intel_context_types.h |
    91 struct intel_engine_cs *inflight; member
    95 __intel_context_inflight(READ_ONCE((ce)->inflight))
    97 __intel_context_inflight_count(READ_ONCE((ce)->inflight))
|
D | intel_execlists_submission.c |
    538 old = ce->inflight; in execlists_schedule_in()
    541 WRITE_ONCE(ce->inflight, ptr_inc(old)); in execlists_schedule_in()
    602 GEM_BUG_ON(ce->inflight != engine); in __execlists_schedule_out()
    647 WRITE_ONCE(ce->inflight, NULL); in __execlists_schedule_out()
    657 GEM_BUG_ON(!ce->inflight); in execlists_schedule_out()
    658 ce->inflight = ptr_dec(ce->inflight); in execlists_schedule_out()
    659 if (!__intel_context_inflight_count(ce->inflight)) in execlists_schedule_out()
    1000 const struct intel_engine_cs *inflight; in virtual_matches() local
    1017 inflight = intel_context_inflight(&ve->context); in virtual_matches()
    1018 if (inflight && inflight != engine) in virtual_matches()
    [all …]
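The ptr_inc()/ptr_dec() calls suggest ce->inflight packs a small submission count into the low bits of the (aligned) engine pointer, with the macros in intel_context_types.h splitting the two apart again. A sketch of that tagged-pointer idea; the mask width and helper names here are illustrative, not i915's:

    #include <stdint.h>

    /* Assumes the pointed-to structure is at least 8-byte aligned,
     * leaving the low 3 bits free for a small in-flight count. */
    #define COUNT_MASK ((uintptr_t)0x7)

    static void *inflight_inc(void *tagged)     { return (void *)((uintptr_t)tagged + 1); }
    static void *inflight_dec(void *tagged)     { return (void *)((uintptr_t)tagged - 1); }
    static void *inflight_engine(void *tagged)  { return (void *)((uintptr_t)tagged & ~COUNT_MASK); }
    static unsigned int inflight_count(void *tagged)
    {
            return (unsigned int)((uintptr_t)tagged & COUNT_MASK);
    }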
|
D | intel_timeline.c |
    423 unsigned long count, ready, inflight; in intel_gt_show_timelines() local
    440 inflight = 0; in intel_gt_show_timelines()
    449 inflight++; in intel_gt_show_timelines()
    454 count, ready, inflight); in intel_gt_show_timelines()
|
/linux-6.12.1/net/atm/ |
D | pppoatm.c |
    64 atomic_t inflight; member
    139 atomic_dec(&pvcc->inflight); in pppoatm_pop()
    244 atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
    274 atomic_inc_not_zero(&pvcc->inflight)) in pppoatm_may_send()
    406 atomic_set(&pvcc->inflight, NONE_INFLIGHT); in pppoatm_assign_vcc()
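pppoatm_may_send() only proceeds when it can raise the inflight counter from a non-zero value, and pppoatm_pop() lowers it again when a buffer comes back; how the NONE_INFLIGHT seed maps counter values to queue occupancy is left to the driver. A C11 sketch of just the increment-unless-zero primitive (the kernel's atomic_inc_not_zero()):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Atomically increment *v unless it is zero; returns true on success. */
    static bool inc_not_zero_sketch(atomic_int *v)
    {
            int cur = atomic_load(v);

            while (cur != 0) {
                    /* Try cur -> cur + 1; on failure, cur is reloaded and we retry. */
                    if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                            return true;
            }
            return false;   /* counter hit zero: caller may not send */
    }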
|
/linux-6.12.1/drivers/infiniband/ulp/rtrs/ |
D | rtrs-clt-stats.c |
    102 atomic_read(&stats->inflight), sum.failover_cnt); in rtrs_clt_stats_rdma_to_str()
    158 atomic_set(&s->inflight, 0); in rtrs_clt_reset_all_stats()
    182 atomic_inc(&stats->inflight); in rtrs_clt_update_all_stats()
|
D | README |
    100 corresponding path is disconnected, all the inflight IO are failed over to a
    131 inflight IO and for the error code.
    149 inflight IO and for the error code. The new rkey is sent back using
    171 outstanding inflight IO and the error code.
    192 outstanding inflight IO and the error code. The new rkey is sent back using
|
/linux-6.12.1/net/ipv4/ |
D | tcp_bbr.c |
    415 u32 inflight; in bbr_inflight() local
    417 inflight = bbr_bdp(sk, bw, gain); in bbr_inflight()
    418 inflight = bbr_quantization_budget(sk, inflight); in bbr_inflight()
    420 return inflight; in bbr_inflight()
    562 u32 inflight, bw; in bbr_is_next_cycle_phase() local
    570 inflight = bbr_packets_in_net_at_edt(sk, rs->prior_in_flight); in bbr_is_next_cycle_phase()
    581 inflight >= bbr_inflight(sk, bw, bbr->pacing_gain)); in bbr_is_next_cycle_phase()
    588 inflight <= bbr_inflight(sk, bw, BBR_UNIT); in bbr_is_next_cycle_phase()
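bbr_inflight() turns an estimated bandwidth and a gain into a target amount of data in flight via the bandwidth-delay product. A back-of-the-envelope sketch of that calculation, assuming bandwidth in packets per second, a minimum RTT in microseconds, and a 256-based fixed-point gain (illustrative unit, not BBR_UNIT itself); the real code goes through bbr_bdp() and bbr_quantization_budget(), which also pad the result for pacing/TSO quantization:

    #include <stdint.h>

    #define GAIN_UNIT 256u   /* illustrative fixed-point unit */

    static uint32_t bbr_inflight_sketch(uint64_t bw_pkts_per_sec,
                                        uint32_t min_rtt_us, uint32_t gain)
    {
            /* Bandwidth-delay product in packets. */
            uint64_t bdp = bw_pkts_per_sec * min_rtt_us / 1000000u;

            /* Scale by the requested gain (probe above BDP, drain below it). */
            return (uint32_t)(bdp * gain / GAIN_UNIT);
    }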
|
/linux-6.12.1/drivers/vhost/ |
D | scsi.c |
    104 struct vhost_scsi_inflight *inflight; member
    219 struct vhost_scsi_inflight *inflight; member
    247 struct vhost_scsi_inflight *inflight; in vhost_scsi_done_inflight() local
    249 inflight = container_of(kref, struct vhost_scsi_inflight, kref); in vhost_scsi_done_inflight()
    250 complete(&inflight->comp); in vhost_scsi_done_inflight()
    283 struct vhost_scsi_inflight *inflight; in vhost_scsi_get_inflight() local
    287 inflight = &svq->inflights[svq->inflight_idx]; in vhost_scsi_get_inflight()
    288 kref_get(&inflight->kref); in vhost_scsi_get_inflight()
    290 return inflight; in vhost_scsi_get_inflight()
    293 static void vhost_scsi_put_inflight(struct vhost_scsi_inflight *inflight) in vhost_scsi_put_inflight() argument
    [all …]
|
/linux-6.12.1/io_uring/ |
D | tctx.c |
    55 percpu_counter_destroy(&tctx->inflight); in __io_uring_free()
    70 ret = percpu_counter_init(&tctx->inflight, 0, GFP_KERNEL); in io_uring_alloc_task_context()
    79 percpu_counter_destroy(&tctx->inflight); in io_uring_alloc_task_context()
|
/linux-6.12.1/tools/net/ynl/samples/ |
D | page-pool.c |
    51 if (pp->_present.inflight) in count()
    52 s->live[l].refs += pp->inflight; in count()
|
/linux-6.12.1/drivers/crypto/cavium/cpt/ |
D | cpt_hw_types.h |
    443 u64 inflight:8; member
    445 u64 inflight:8;
|
/linux-6.12.1/net/sctp/ |
D | output.c |
    675 size_t datasize, rwnd, inflight, flight_size; in sctp_packet_can_append_data() local
    694 inflight = q->outstanding_bytes; in sctp_packet_can_append_data()
    699 if (datasize > rwnd && inflight > 0) in sctp_packet_can_append_data()
    727 if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) && in sctp_packet_can_append_data()
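sctp_packet_can_append_data() applies two of the checks visible above: a chunk larger than the peer's receive window is deferred while data is still outstanding, and small chunks are held back Nagle-style unless nodelay is set or nothing is in flight. A simplified boolean sketch under those assumptions; the real function also weighs flight_size, the fragmentation point, and other association state:

    #include <stdbool.h>
    #include <stddef.h>

    static bool sctp_can_append_sketch(size_t datasize, size_t rwnd,
                                       size_t inflight, bool nodelay)
    {
            /* Oversized for the current window: wait for in-flight data to drain. */
            if (datasize > rwnd && inflight > 0)
                    return false;

            /* Nagle-style rule: send now only if nodelay or the pipe is empty. */
            return nodelay || inflight == 0;
    }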
|