/linux-6.12.1/drivers/net/ethernet/huawei/hinic/ |
D | hinic_hw_eqs.c |
    33  HINIC_CSR_AEQ_CONS_IDX_ADDR((eq)->q_id) : \
    34  HINIC_CSR_CEQ_CONS_IDX_ADDR((eq)->q_id))
    37  HINIC_CSR_AEQ_PROD_IDX_ADDR((eq)->q_id) : \
    38  HINIC_CSR_CEQ_PROD_IDX_ADDR((eq)->q_id))
    41  HINIC_CSR_AEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
    42  HINIC_CSR_CEQ_HI_PHYS_ADDR_REG((eq)->q_id, pg_num))
    45  HINIC_CSR_AEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num) : \
    46  HINIC_CSR_CEQ_LO_PHYS_ADDR_REG((eq)->q_id, pg_num))
    80  container_of((eq) - (eq)->q_id, struct hinic_aeqs, aeq[0])
    83  container_of((eq) - (eq)->q_id, struct hinic_ceqs, ceq[0])
    [all …]
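The container_of() lines at 80 and 83 rely on each event queue recording its own index in q_id: stepping the element pointer back by q_id entries lands on element 0 of the embedded array, from which the parent structure can be recovered. A minimal user-space sketch of that arithmetic, with an illustrative struct layout rather than the real hinic definitions:

    #include <stddef.h>
    #include <stdio.h>

    /* same arithmetic the kernel's container_of() performs */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct hinic_eq { int q_id; };          /* illustrative only */

    struct hinic_aeqs {                     /* illustrative only */
        int num_aeqs;
        struct hinic_eq aeq[4];
    };

    int main(void)
    {
        struct hinic_aeqs aeqs = { .num_aeqs = 4 };
        struct hinic_eq *eq;
        int i;

        for (i = 0; i < 4; i++)
            aeqs.aeq[i].q_id = i;

        /* pick any element; (eq) - (eq)->q_id walks back to aeq[0] */
        eq = &aeqs.aeq[2];
        printf("parent %p, recovered %p\n", (void *)&aeqs,
               (void *)container_of(eq - eq->q_id, struct hinic_aeqs, aeq[0]));
        return 0;
    }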
|
D | hinic_hw_csr.h |
    87  #define HINIC_CSR_AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \    argument
    88  (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
    91  #define HINIC_CSR_CEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \    argument
    92  (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
    95  #define HINIC_CSR_AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \    argument
    96  (HINIC_CSR_AEQ_MTT_OFF(q_id) + \
    99  #define HINIC_CSR_CEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \    argument
    100 (HINIC_CSR_CEQ_MTT_OFF(q_id) + \
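The HI/LO register macros above are truncated in this listing after the per-queue MTT offset term; what they compute is a per-queue, per-page CSR address at which the upper and lower 32 bits of a queue page's DMA address are written. A hedged sketch of that address arithmetic with made-up base, stride, and low-word constants (the real values live in hinic_hw_csr.h and may differ):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative constants only; not the real hinic CSR layout. */
    #define AEQ_MTT_BASE      0x200U  /* assumed start of the AEQ MTT region   */
    #define AEQ_MTT_Q_STRIDE  0x40U   /* assumed bytes of MTT space per queue  */
    #define MTT_PAGE_STRIDE   8U      /* assumed: one 64-bit address per page  */

    #define AEQ_MTT_OFF(q_id)  (AEQ_MTT_BASE + (q_id) * AEQ_MTT_Q_STRIDE)
    #define AEQ_HI_PHYS_ADDR_REG(q_id, pg_num) \
        (AEQ_MTT_OFF(q_id) + (pg_num) * MTT_PAGE_STRIDE)
    #define AEQ_LO_PHYS_ADDR_REG(q_id, pg_num) \
        (AEQ_MTT_OFF(q_id) + (pg_num) * MTT_PAGE_STRIDE + 4)

    int main(void)
    {
        uint64_t dma = 0x123456789abcd000ULL;  /* a queue page's DMA address */
        unsigned int q_id = 1, pg_num = 2;

        printf("write 0x%08x to CSR 0x%x (high word)\n",
               (uint32_t)(dma >> 32), AEQ_HI_PHYS_ADDR_REG(q_id, pg_num));
        printf("write 0x%08x to CSR 0x%x (low word)\n",
               (uint32_t)dma, AEQ_LO_PHYS_ADDR_REG(q_id, pg_num));
        return 0;
    }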
|
D | hinic_hw_io.c |
    30  #define CI_ADDR(base_addr, q_id) ((base_addr) + \    argument
    31  (q_id) * CI_Q_ADDR_SIZE)
    132 base_qpn + qp->q_id);  in write_sq_ctxts()
    176 base_qpn + qp->q_id);  in write_rq_ctxts()
    272 struct hinic_qp *qp, int q_id,  in init_qp() argument
    281 qp->q_id = q_id;  in init_qp()
    283 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->sq_wq[q_id],  in init_qp()
    291 err = hinic_wq_allocate(&func_to_io->wqs, &func_to_io->rq_wq[q_id],  in init_qp()
    306 func_to_io->sq_db[q_id] = db_base;  in init_qp()
    308 qp->sq.qid = q_id;  in init_qp()
    [all …]
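CI_ADDR() at lines 30-31 is a complete definition despite the truncation elsewhere in this entry: each queue's completion-index slot sits at a fixed stride from a shared base address. A small runnable sketch of the same indexing, with CI_Q_ADDR_SIZE chosen arbitrarily here:

    #include <stdint.h>
    #include <stdio.h>

    #define CI_Q_ADDR_SIZE  64U  /* illustrative; the driver defines its own */

    /* per-queue completion-index address, as in hinic_hw_io.c line 30 */
    #define CI_ADDR(base_addr, q_id)  ((base_addr) + (q_id) * CI_Q_ADDR_SIZE)

    int main(void)
    {
        uint64_t ci_base = 0x10000;  /* assumed DMA base of the CI area */
        int q_id;

        for (q_id = 0; q_id < 4; q_id++)
            printf("q_id %d -> CI slot at 0x%llx\n", q_id,
                   (unsigned long long)CI_ADDR(ci_base, q_id));
        return 0;
    }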
|
D | hinic_tx.c |
    494 u16 prod_idx, q_id = skb->queue_mapping;  in hinic_lb_xmit_frame() local
    502 txq = &nic_dev->txqs[q_id];  in hinic_lb_xmit_frame()
    514 netif_stop_subqueue(netdev, qp->q_id);  in hinic_lb_xmit_frame()
    518 netif_wake_subqueue(nic_dev->netdev, qp->q_id);  in hinic_lb_xmit_frame()
    537 netdev_txq = netdev_get_tx_queue(netdev, q_id);  in hinic_lb_xmit_frame()
    555 u16 prod_idx, q_id = skb->queue_mapping;  in hinic_xmit_frame() local
    563 txq = &nic_dev->txqs[q_id];  in hinic_xmit_frame()
    595 netif_stop_subqueue(netdev, qp->q_id);  in hinic_xmit_frame()
    602 netif_wake_subqueue(nic_dev->netdev, qp->q_id);  in hinic_xmit_frame()
    626 netdev_txq = netdev_get_tx_queue(netdev, q_id);  in hinic_xmit_frame()
    [all …]
|
D | hinic_rx.c |
    421 skb_record_rx_queue(skb, qp->q_id);  in rxq_recv()
    532 intr_coal = &nic_dev->rx_intr_coalesce[qp->q_id];  in rx_request_irq()
    549 cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);  in rx_request_irq()
    594 "%s_rxq%d", netdev->name, qp->q_id);  in hinic_init_rxq()
|
D | hinic_main.c |
    818 u16 num_sqs, q_id;  in hinic_tx_timeout() local
    824 for (q_id = 0; q_id < num_sqs; q_id++) {  in hinic_tx_timeout()
    825 if (!netif_xmit_stopped(netdev_get_tx_queue(netdev, q_id)))  in hinic_tx_timeout()
    828 sq = hinic_hwdev_get_sq(nic_dev->hwdev, q_id);  in hinic_tx_timeout()
    833 q_id, sw_pi, hw_ci, sw_ci,  in hinic_tx_timeout()
    834 nic_dev->txqs[q_id].napi.state);  in hinic_tx_timeout()
|
D | hinic_hw_qp.c |
    42  #define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \    argument
    43  (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)
    45  #define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \    argument
    47  (max_sqs + (q_id)) * Q_CTXT_SIZE)
    625 HINIC_SQ_DB_INFO_SET(qp->q_id, QID));  in sq_prepare_db()
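SQ_CTXT_OFFSET()/RQ_CTXT_OFFSET() describe the layout of the queue-context table: a reserved region sized by the total queue count, then one Q_CTXT_SIZE slot per SQ, then one per RQ (line 46 with the reserved term of the RQ macro is elided in the listing above and is assumed here). A runnable sketch of that layout with illustrative sizes standing in for CTXT_RSVD and Q_CTXT_SIZE:

    #include <stdio.h>

    /* Illustrative sizes; the driver defines CTXT_RSVD and Q_CTXT_SIZE itself. */
    #define CTXT_RSVD    240U
    #define Q_CTXT_SIZE  48U

    #define SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
        (((max_rqs) + (max_sqs)) * CTXT_RSVD + (q_id) * Q_CTXT_SIZE)

    /* assumed reconstruction: same reserved term, RQ slots follow all SQ slots */
    #define RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id) \
        (((max_rqs) + (max_sqs)) * CTXT_RSVD + \
         ((max_sqs) + (q_id)) * Q_CTXT_SIZE)

    int main(void)
    {
        unsigned int max_sqs = 16, max_rqs = 16, q_id;

        for (q_id = 0; q_id < 2; q_id++)
            printf("q_id %u: sq ctxt at %u, rq ctxt at %u\n", q_id,
                   SQ_CTXT_OFFSET(max_sqs, max_rqs, q_id),
                   RQ_CTXT_OFFSET(max_sqs, max_rqs, q_id));
        return 0;
    }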
|
/linux-6.12.1/fs/xfs/scrub/ |
D | quotacheck.c |
    44  xfs_dqid_t q_id;  member
    188 xfs_dqid_t q_id)  in xqcheck_get_dqtrx() argument
    195 dqa->dqtrx[i].q_id == q_id))  in xqcheck_get_dqtrx()
    271 dqtrx = xqcheck_get_dqtrx(dqa, p->q_type, p->q_id);  in xqcheck_mod_live_ino_dqtrx()
    276 dqtrx->q_id = p->q_id;  in xqcheck_mod_live_ino_dqtrx()
    354 dqtrx = xqcheck_get_dqtrx(dqa, p->q_type, p->q_id);  in xqcheck_apply_live_dqtrx()
    360 error = xqcheck_update_incore_counts(xqc, counts, p->q_id,  in xqcheck_apply_live_dqtrx()
    566 error = xfarray_load_sparse(counts, dq->q_id, &xcdq);  in xqcheck_compare_dquot()
    571 xchk_qcheck_set_corrupt(xqc->sc, dqtype, dq->q_id);  in xqcheck_compare_dquot()
    574 xchk_qcheck_set_corrupt(xqc->sc, dqtype, dq->q_id);  in xqcheck_compare_dquot()
    [all …]
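xqcheck_get_dqtrx() at lines 188-195 is a small linear search: the live-update hooks keep a handful of per-transaction delta records and find the one matching a (quota type, q_id) pair. A standalone sketch of the same lookup pattern; the structure names, array size, and the empty-slot fallback are assumptions here, not the scrub code's exact behaviour:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define NR_DQTRX  4  /* assumed small fixed-size array */

    struct dqtrx {
        unsigned int  q_type;  /* 0 treated as "slot unused" here */
        uint32_t      q_id;
        int64_t       delta;
    };

    struct dqtrx_array {
        struct dqtrx  dqtrx[NR_DQTRX];
    };

    /* Return the record matching (q_type, q_id), or the first free slot. */
    static struct dqtrx *get_dqtrx(struct dqtrx_array *dqa,
                                   unsigned int q_type, uint32_t q_id)
    {
        int i;

        for (i = 0; i < NR_DQTRX; i++) {
            struct dqtrx *d = &dqa->dqtrx[i];

            if (d->q_type == 0 ||
                (d->q_type == q_type && d->q_id == q_id))
                return d;
        }
        return NULL;
    }

    int main(void)
    {
        struct dqtrx_array dqa;
        struct dqtrx *d;

        memset(&dqa, 0, sizeof(dqa));
        d = get_dqtrx(&dqa, 1 /* user quota */, 1000);
        d->q_type = 1;
        d->q_id = 1000;
        d->delta += 8;
        printf("tracked id %u, delta %lld\n", d->q_id, (long long)d->delta);
        return 0;
    }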
|
D | quotacheck_repair.c |
    70  error = xfarray_load_sparse(counts, dq->q_id, &xcdq);  in xqcheck_commit_dquot()
    97  error = xfarray_store(counts, dq->q_id, &xcdq);  in xqcheck_commit_dquot()
    111 trace_xrep_quotacheck_dquot(xqc->sc->mp, dq->q_type, dq->q_id);  in xqcheck_commit_dquot()
    115 if (dq->q_id)  in xqcheck_commit_dquot()
|
D | quota.c |
    169 offset = dq->q_id / qi->qi_dqperchunk;  in xchk_quota_item()
    170 if (dq->q_id && dq->q_id <= sqi->last_id)  in xchk_quota_item()
    173 sqi->last_id = dq->q_id;  in xchk_quota_item()
    228 if (dq->q_id == 0)  in xchk_quota_item()
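The lines above show how xchk_quota_item() locates and orders dquots: a dquot's file offset is its ID divided by the number of dquots per chunk, and IDs must strictly increase across the iteration (ID 0, the default-limits record, is special-cased). A tiny sketch of the offset arithmetic with an assumed dquots-per-chunk value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* assumed geometry: this value really comes from the mount's quota info */
        uint32_t dqperchunk = 30;
        uint32_t ids[] = { 0, 29, 30, 1000 };
        uint32_t last_id = 0;
        unsigned int i;

        for (i = 0; i < sizeof(ids) / sizeof(ids[0]); i++) {
            uint32_t id = ids[i];

            /* same arithmetic as quota.c line 169 */
            printf("dquot id %u lives in quota-file chunk %u\n",
                   id, id / dqperchunk);

            /* IDs must be strictly increasing (quota.c lines 170-173) */
            if (id && id <= last_id)
                printf("  out-of-order id %u after %u\n", id, last_id);
            last_id = id;
        }
        return 0;
    }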
|
D | quota_repair.c |
    82  trace_xrep_dquot_item_fill_bmap_hole(sc->mp, dq->q_type, dq->q_id);  in xrep_quota_item_fill_bmap_hole()
    91  xfs_qm_init_dquot_blk(sc->tp, dq->q_id, dq->q_type, bp);  in xrep_quota_item_fill_bmap_hole()
    114 xfs_fileoff_t offset = dq->q_id / qi->qi_dqperchunk;  in xrep_quota_item_bmap()
    251 trace_xrep_dquot_item(sc->mp, dq->q_type, dq->q_id);  in xrep_quota_item()
    255 if (dq->q_id) {  in xrep_quota_item()
|
D | dqiterate.c |
    158 *next_incore_id = dq->q_id;  in xchk_dquot_iter_advance_incore()
    208 cursor->id = dq->q_id + 1;  in xchk_dquot_iter()
|
/linux-6.12.1/tools/cgroup/ |
D | iocost_monitor.py |
    64  def __init__(self, root_blkcg, q_id, include_dying=False):  argument
    67  self.walk(root_blkcg, q_id, '')
    72  def walk(self, blkcg, q_id, parent_path):  argument
    80  address=radix_tree_lookup(blkcg.blkg_tree.address_of_(), q_id))
    88  self.walk(c, q_id, path)
    224 q_id = None  variable
    232 q_id = blkg.q.id.value_()  variable
    258 for path, blkg in BlkgIterator(blkcg_root, q_id):
|
/linux-6.12.1/drivers/block/ |
D | ublk_drv.c |
    132  int q_id;  member
    662  static inline char *ublk_queue_cmd_buf(struct ublk_device *ub, int q_id)  in ublk_queue_cmd_buf() argument
    664  return ublk_get_queue(ub, q_id)->io_cmd_buf;  in ublk_queue_cmd_buf()
    667  static inline int ublk_queue_cmd_buf_size(struct ublk_device *ub, int q_id)  in ublk_queue_cmd_buf_size() argument
    669  struct ublk_queue *ubq = ublk_get_queue(ub, q_id);  in ublk_queue_cmd_buf_size()
    1109 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,  in __ublk_rq_task_work()
    1135 __func__, io->cmd->cmd_op, ubq->q_id,  in __ublk_rq_task_work()
    1149 __func__, io->cmd->cmd_op, ubq->q_id, req->tag, io->flags,  in __ublk_rq_task_work()
    1327 int q_id, ret = 0;  in ublk_ch_mmap() local
    1346 q_id = (phys_off - UBLKSRV_CMD_BUF_OFFSET) / max_sz;  in ublk_ch_mmap()
    [all …]
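Line 1346 is the interesting one: ublk_ch_mmap() recovers which queue a userspace mapping refers to from the mmap offset alone, by subtracting the command-buffer window base and dividing by the per-queue maximum buffer size. A user-space sketch of that decode, with illustrative constants standing in for UBLKSRV_CMD_BUF_OFFSET and the per-queue size:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values; the driver derives these from its own limits. */
    #define CMD_BUF_OFFSET  (1UL << 20)   /* assumed start of the cmd-buf window */
    #define MAX_CMD_BUF_SZ  (64UL << 10)  /* assumed max per-queue buffer bytes  */

    static int phys_off_to_q_id(unsigned long phys_off)
    {
        if (phys_off < CMD_BUF_OFFSET)
            return -1;
        return (int)((phys_off - CMD_BUF_OFFSET) / MAX_CMD_BUF_SZ);
    }

    int main(void)
    {
        unsigned long offs[] = {
            CMD_BUF_OFFSET,
            CMD_BUF_OFFSET + MAX_CMD_BUF_SZ,
            CMD_BUF_OFFSET + 3 * MAX_CMD_BUF_SZ + 128,
        };
        unsigned int i;

        for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
            printf("mmap offset 0x%lx -> q_id %d\n", offs[i],
                   phys_off_to_q_id(offs[i]));
        return 0;
    }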
|
/linux-6.12.1/fs/xfs/ |
D | xfs_dquot.c |
    101  ASSERT(dq->q_id);  in xfs_qm_adjust_dqlimits()
    188  ASSERT(dq->q_id);  in xfs_qm_adjust_dqtimers()
    376  xfs_qm_init_dquot_blk(tp, dqp->q_id, qtype, bp);  in xfs_dquot_disk_alloc()
    500  dqp->q_id = id;  in xfs_dquot_alloc()
    557  if (be32_to_cpu(ddqp->d_id) != dqp->q_id)  in xfs_dquot_check_type()
    566  dqp_type == XFS_DQTYPE_USER || dqp->q_id != 0)  in xfs_dquot_check_type()
    597  __this_address, dqp->q_id);  in xfs_dquot_from_disk()
    642  ddqp->d_id = cpu_to_be32(dqp->q_id);  in xfs_dquot_to_disk()
    1209 if (dqp->q_id == 0)  in xfs_qm_dqflush_check()
    1228 if (dqp->q_id == 0)  in xfs_qm_dqflush_check()
    [all …]
|
D | xfs_trans_dquot.c |
    61  if (dqp->q_id != 0 &&  in xfs_trans_log_dquot()
    166 .q_id = dqp->q_id,  in xfs_trans_mod_ino_dquot()
    451 .q_id = dqp->q_id,  in xfs_trans_apply_dquot_deltas_hook()
    565 if (dqp->q_id) {  in xfs_trans_apply_dquot_deltas()
    620 .q_id = dqp->q_id,  in xfs_trans_unreserve_and_mod_dquots_hook()
    723 quota_send_warning(make_kqid(&init_user_ns, qtype, dqp->q_id),  in xfs_quota_warn()
    810 if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_id &&  in xfs_trans_dqresv()
|
D | xfs_dquot_item.c |
    57  qlf->qlf_id = qlip->qli_dquot->q_id;  in xfs_qm_dquot_logitem_format()
    209 fa = xfs_dquot_verify(mp, &ddq, dqp->q_id);  in xfs_qm_dquot_logitem_precommit()
    215 fa, dqp->q_id);  in xfs_qm_dquot_logitem_precommit()
|
/linux-6.12.1/drivers/net/ethernet/intel/ice/ |
D | ice_base.c |
    826  u16 q_id, q_base;  in ice_vsi_map_rings_to_vectors() local
    836  for (q_id = q_base; q_id < (q_base + tx_rings_per_v); q_id++) {  in ice_vsi_map_rings_to_vectors()
    837  struct ice_tx_ring *tx_ring = vsi->tx_rings[q_id];  in ice_vsi_map_rings_to_vectors()
    853  for (q_id = q_base; q_id < (q_base + rx_rings_per_v); q_id++) {  in ice_vsi_map_rings_to_vectors()
    854  struct ice_rx_ring *rx_ring = vsi->rx_rings[q_id];  in ice_vsi_map_rings_to_vectors()
    1154 &txq_meta->q_id, &txq_meta->q_teid, rst_src,  in ice_vsi_stop_tx_ring()
    1196 txq_meta->q_id = ring->reg_idx;  in ice_fill_txq_meta()
|
D | ice_idc.c |
    119 u16 q_id;  in ice_del_rdma_qset() local
    130 q_id = qset->qs_handle;  in ice_del_rdma_qset()
    135 return ice_dis_vsi_rdma_qset(vsi->port_info, 1, &teid, &q_id);  in ice_del_rdma_qset()
|
/linux-6.12.1/drivers/net/wireless/intel/iwlwifi/pcie/ |
D | tx.c |
    94   txq_id != trans_pcie->txqs.cmd.q_id &&  in iwl_pcie_txq_inc_wr_ptr()
    397  if (txq_id != trans_pcie->txqs.cmd.q_id) {  in iwl_pcie_txq_unmap()
    411  txq_id == trans_pcie->txqs.cmd.q_id)  in iwl_pcie_txq_unmap()
    449  if (txq_id == trans_pcie->txqs.cmd.q_id)  in iwl_pcie_txq_free()
    513  iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,  in iwl_pcie_tx_start()
    851  bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);  in iwl_pcie_tx_alloc()
    968  bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);  in iwl_pcie_tx_init()
    1167 if (txq_id == trans_pcie->txqs.cmd.q_id &&  in iwl_trans_pcie_txq_enable()
    1175 if (txq_id != trans_pcie->txqs.cmd.q_id)  in iwl_trans_pcie_txq_enable()
    1245 if (txq_id == trans_pcie->txqs.cmd.q_id &&  in iwl_trans_pcie_txq_enable()
    [all …]
|
/linux-6.12.1/net/core/ |
D | netdev-genl.c |
    341 static int netdev_nl_queue_validate(struct net_device *netdev, u32 q_id,  in netdev_nl_queue_validate() argument
    346 if (q_id >= netdev->real_num_rx_queues)  in netdev_nl_queue_validate()
    350 if (q_id >= netdev->real_num_tx_queues)  in netdev_nl_queue_validate()
    374 u32 q_id, q_type, ifindex;  in netdev_nl_queue_get_doit() local
    384 q_id = nla_get_u32(info->attrs[NETDEV_A_QUEUE_ID]);  in netdev_nl_queue_get_doit()
    396 err = netdev_nl_queue_fill(rsp, netdev, q_id, q_type, info);  in netdev_nl_queue_get_doit()
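netdev_nl_queue_validate() simply bounds-checks the requested queue index against the device's active queue count for the requested type before the netlink handler fills the response. A sketch of that check as standalone logic; the enum values and the stub struct are stand-ins for the real NETDEV_QUEUE_TYPE_* constants and struct net_device:

    #include <errno.h>
    #include <stdio.h>

    enum queue_type { QUEUE_TYPE_RX, QUEUE_TYPE_TX };  /* stand-ins */

    struct net_device_stub {
        unsigned int real_num_rx_queues;
        unsigned int real_num_tx_queues;
    };

    /* Reject q_id values beyond the currently active queue count. */
    static int queue_validate(const struct net_device_stub *dev,
                              unsigned int q_id, enum queue_type type)
    {
        switch (type) {
        case QUEUE_TYPE_RX:
            return q_id < dev->real_num_rx_queues ? 0 : -ERANGE;
        case QUEUE_TYPE_TX:
            return q_id < dev->real_num_tx_queues ? 0 : -ERANGE;
        }
        return -EINVAL;
    }

    int main(void)
    {
        struct net_device_stub dev = { .real_num_rx_queues = 4,
                                       .real_num_tx_queues = 8 };

        printf("rx q_id 3: %d, rx q_id 4: %d, tx q_id 7: %d\n",
               queue_validate(&dev, 3, QUEUE_TYPE_RX),
               queue_validate(&dev, 4, QUEUE_TYPE_RX),
               queue_validate(&dev, 7, QUEUE_TYPE_TX));
        return 0;
    }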
|
/linux-6.12.1/drivers/net/ethernet/intel/idpf/ |
D | idpf_virtchnl.c |
    1456 cpu_to_le32(tx_qgrp->txqs[j]->q_id);  in idpf_send_config_tx_queues_msg()
    1469 cpu_to_le16(tx_qgrp->complq->q_id);  in idpf_send_config_tx_queues_msg()
    1487 qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id);  in idpf_send_config_tx_queues_msg()
    1581 qi[k].queue_id = cpu_to_le32(bufq->q_id);  in idpf_send_config_rx_queues_msg()
    1620 qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id);  in idpf_send_config_rx_queues_msg()
    1624 cpu_to_le16(sets[1].bufq.q_id);  in idpf_send_config_rx_queues_msg()
    1641 qi[k].queue_id = cpu_to_le32(rxq->q_id);  in idpf_send_config_rx_queues_msg()
    1731 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id);  in idpf_send_ena_dis_queues_msg()
    1745 qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);  in idpf_send_ena_dis_queues_msg()
    1763 cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id);  in idpf_send_ena_dis_queues_msg()
    [all …]
|
D | idpf_txrx.h |
    564 u32 q_id;  member
    688 u32 q_id;  member
    745 u32 q_id;  member
    798 u32 q_id;  member
|
/linux-6.12.1/drivers/crypto/hisilicon/sec/ |
D | sec_drv.c |
    688 int q_id;  in sec_isr_handle() local
    693 q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;  in sec_isr_handle()
    694 msg = msg_ring->vaddr + q_id;  in sec_isr_handle()
    701 set_bit(q_id, queue->unprocessed);  in sec_isr_handle()
    702 if (q_id == queue->expected)  in sec_isr_handle()
    719 q_id = outorder_msg->data & SEC_OUT_BD_INFO_Q_ID_M;  in sec_isr_handle()
    720 msg = msg_ring->vaddr + q_id;  in sec_isr_handle()
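The sec_isr_handle() lines sketch an out-of-order completion scheme: the hardware's out-of-order message carries the ring slot (q_id) in a masked field, the driver marks that slot in an "unprocessed" bitmap, and work is only consumed in ring order once the expected slot has arrived. A simplified user-space model of that bookkeeping; the ring size, mask, and draining loop are assumptions, not the driver's exact logic:

    #include <stdbool.h>
    #include <stdio.h>

    #define RING_SIZE  16                   /* assumed ring depth       */
    #define Q_ID_MASK  (RING_SIZE - 1)      /* stand-in for the HW mask */

    static bool unprocessed[RING_SIZE];
    static unsigned int expected;

    /* One completion arrives, possibly out of order. */
    static void isr_handle(unsigned int outorder_data)
    {
        unsigned int q_id = outorder_data & Q_ID_MASK;

        unprocessed[q_id] = true;

        /* Drain in ring order once the next expected slot has completed. */
        while (unprocessed[expected]) {
            printf("consume slot %u\n", expected);
            unprocessed[expected] = false;
            expected = (expected + 1) % RING_SIZE;
        }
    }

    int main(void)
    {
        /* slots 1 and 2 finish before slot 0 */
        isr_handle(1);
        isr_handle(2);
        isr_handle(0);  /* now slots 0, 1, 2 are consumed in order */
        return 0;
    }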
|
/linux-6.12.1/drivers/net/ethernet/hisilicon/hns3/hns3pf/ |
D | hclge_tm.h |
    266 int hclge_tm_get_q_to_qs_map(struct hclge_dev *hdev, u16 q_id, u16 *qset_id);
    267 int hclge_tm_get_q_to_tc(struct hclge_dev *hdev, u16 q_id, u8 *tc_id);
|