Searched refs:atomic_sub_return (Results 1 – 25 of 50) sorted by relevance
/linux-6.12.1/drivers/scsi/elx/efct/
    efct_scsi.c
        436   atomic_sub_return(1, &xport->io_pending_count);      in efct_scsi_dispatch_pending()
        469   atomic_sub_return(1, &xport->io_pending_recursing);  in efct_scsi_check_pending()
        477   atomic_sub_return(1, &xport->io_pending_recursing);  in efct_scsi_check_pending()
        497   atomic_sub_return(1, &xport->io_pending_count);      in efct_scsi_check_pending()
        513   atomic_sub_return(1, &xport->io_pending_recursing);  in efct_scsi_check_pending()
    efct_io.c
        166   atomic_sub_return(1, &efct->xport->io_active_count);  in efct_io_pool_io_free()
    efct_lio.c
        336   atomic_sub_return(1, &efct->tgt_efct.ios_in_use);  in efct_lio_release_cmd()
       1310   ini_count = atomic_sub_return(1, &efct->tgt_efct.initiator_count);  in efct_scsi_del_initiator()
/linux-6.12.1/drivers/crypto/virtio/
    virtio_crypto_mgr.c
        152   if (atomic_sub_return(1, &vcrypto_dev->ref_count) == 0)  in virtcrypto_dev_put()
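Most of the hits on this page are variations of the reference-count "put" above: subtract one and let the caller that reaches zero tear the object down. A minimal sketch of that pattern follows; struct my_dev and my_dev_put() are hypothetical names, not taken from any file listed here, and new kernel code would normally prefer refcount_t/kref, which add overflow and underflow checking.

#include <linux/atomic.h>
#include <linux/slab.h>

struct my_dev {
	atomic_t ref_count;	/* set to 1 with atomic_set() at allocation */
	/* ... device state ... */
};

static void my_dev_put(struct my_dev *dev)
{
	/* atomic_sub_return() returns the new value, so exactly one
	 * caller observes zero and owns the free.
	 */
	if (atomic_sub_return(1, &dev->ref_count) == 0)
		kfree(dev);
}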
/linux-6.12.1/arch/arm64/include/asm/
    atomic.h
         53   ATOMIC_FETCH_OPS(atomic_sub_return)
/linux-6.12.1/fs/bcachefs/
    nocow_locking.c
         33   int v = atomic_sub_return(lock_val, &l->l[i]);  in bch2_bucket_nocow_unlock()
    six.c
        186   old = atomic_sub_return(SIX_LOCK_HELD_write, &lock->state);  in __do_six_trylock()
/linux-6.12.1/include/linux/
    page_ref.h
        144   int ret = atomic_sub_return(nr, &folio->_refcount);  in folio_ref_sub_return()
/linux-6.12.1/tools/memory-model/
    linux-kernel.def
         89   atomic_sub_return(V,X) __atomic_op_return{mb}(X,-,V)
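The linux-kernel.def entry records that atomic_sub_return() is a fully ordered ("mb") value-returning RMW. That full ordering is what makes the "last one out" idiom safe: the thread whose decrement reaches zero is guaranteed to see the stores other threads made before their own decrements. A sketch under assumed names (pending, data[], worker_done()):

#include <linux/atomic.h>
#include <linux/printk.h>

static atomic_t pending = ATOMIC_INIT(2);
static int data[2];

/* Each of two workers publishes its result with a plain store, then
 * drops the counter.  Whichever decrement reaches zero may read both
 * slots: the full barriers implied by atomic_sub_return() order the
 * other worker's store before its decrement, and this worker's reads
 * after its own decrement.
 */
static void worker_done(int id, int result)
{
	data[id] = result;
	if (atomic_sub_return(1, &pending) == 0)
		pr_info("sum = %d\n", data[0] + data[1]);
}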
/linux-6.12.1/drivers/crypto/intel/qat/qat_common/
    adf_dev_mgr.c
        421   if (atomic_sub_return(1, &accel_dev->ref_count) == 0)  in adf_dev_put()
/linux-6.12.1/io_uring/
    waitid.c
        212   if (!atomic_sub_return(1, &iw->refs))  in io_waitid_drop_issue_ref()
    poll.c
        348   } while (atomic_sub_return(v, &req->poll_refs) & IO_POLL_REF_MASK);  in io_poll_check_events()
/linux-6.12.1/drivers/tty/
    tty_buffer.c
        203   WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);  in tty_buffer_free()
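The tty hit shows a second common use of the returned value: sanity-checking that an accounting counter never goes negative. A minimal sketch with hypothetical names (mem_used, pool_charge(), pool_uncharge()):

#include <linux/atomic.h>
#include <linux/bug.h>

static atomic_t mem_used = ATOMIC_INIT(0);

static void pool_charge(int bytes)
{
	atomic_add(bytes, &mem_used);
}

static void pool_uncharge(int bytes)
{
	/* A negative result would mean more was released than was ever
	 * charged, so the post-subtraction value is checked directly.
	 */
	WARN_ON(atomic_sub_return(bytes, &mem_used) < 0);
}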
/linux-6.12.1/drivers/gpu/drm/i915/gt/
    gen6_ppgtt.c
         93   if (!atomic_sub_return(count, &pt->used))  in gen6_ppgtt_clear_range()
/linux-6.12.1/drivers/s390/cio/
    qdio_main.c
        467   if (atomic_sub_return(count, &q->nr_buf_used) == 0)  in get_inbound_buffer_frontier()
        479   if (atomic_sub_return(count, &q->nr_buf_used) == 0)  in get_inbound_buffer_frontier()
/linux-6.12.1/net/9p/
    trans_rdma.c
        437   if ((atomic_sub_return(1, &rdma->excess_rc) >= 0)) {  in rdma_request()
/linux-6.12.1/drivers/net/wwan/t7xx/
    t7xx_hif_dpmaif_tx.c
        311   if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))  in t7xx_dpmaif_add_skb_to_ring()
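The t7xx hit above and the hinic hit further down gate a transmit path on the value returned by the same RMW that consumes the budget: reservation and low-water check happen in one atomic step. A sketch with assumed names (struct txq, txq_reserve(), TXQ_LOW_WATER):

#include <linux/atomic.h>
#include <linux/types.h>

#define TXQ_LOW_WATER	16	/* assumed threshold, not from the source */

struct txq {
	atomic_t tx_budget;	/* free descriptor slots */
};

/* Reserve @slots descriptors.  Returns true while the remaining budget
 * stays above the low-water mark; a false return tells the caller to
 * stop the queue until completions give slots back.
 */
static bool txq_reserve(struct txq *q, int slots)
{
	return atomic_sub_return(slots, &q->tx_budget) > TXQ_LOW_WATER;
}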
/linux-6.12.1/drivers/w1/
    w1_netlink.c
         64   if (atomic_sub_return(1, &block->refcnt) == 0) {  in w1_unref_block()
/linux-6.12.1/net/vmw_vsock/
    virtio_transport.c
        295   new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);  in virtio_transport_cancel_pkt()
/linux-6.12.1/net/sunrpc/xprtrdma/
    svc_rdma_sendto.c
        357   if (atomic_sub_return(sqecount, &rdma->sc_sq_avail) < 0) {  in svc_rdma_post_send()
    svc_rdma_rw.c
        388   if (atomic_sub_return(cc->cc_sqecount,  in svc_rdma_post_chunk_ctxt()
/linux-6.12.1/drivers/vhost/
    vsock.c
        318   new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);  in vhost_transport_cancel_pkt()
/linux-6.12.1/drivers/net/ethernet/huawei/hinic/
    hinic_hw_wq.c
        750   if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) {  in hinic_get_wqe()
/linux-6.12.1/net/rds/
    ib_send.c
        231   if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&  in rds_ib_sub_signaled()
/linux-6.12.1/fs/gfs2/
    log.c
        543   if (atomic_sub_return(blks, &sdp->sd_log_blks_needed))  in __gfs2_log_reserve()