/linux-6.12.1/tools/testing/selftests/kvm/lib/x86_64/ |
D | hyperv.c |
    32  int i, nent = 0;  in vcpu_set_hv_cpuid() local
    38  cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent);  in vcpu_set_hv_cpuid()
    45  for (i = 0; i < cpuid_sys->nent; i++) {  in vcpu_set_hv_cpuid()
    49  cpuid_full->entries[nent] = cpuid_sys->entries[i];  in vcpu_set_hv_cpuid()
    50  nent++;  in vcpu_set_hv_cpuid()
    53  memcpy(&cpuid_full->entries[nent], cpuid_hv->entries,  in vcpu_set_hv_cpuid()
    54  cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2));  in vcpu_set_hv_cpuid()
    55  cpuid_full->nent = nent + cpuid_hv->nent;  in vcpu_set_hv_cpuid()
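
vcpu_set_hv_cpuid() merges the system CPUID table with the Hyper-V one; both are kvm_cpuid2-style structs whose flexible entries[] array is sized as header plus nent entries. A minimal user-space sketch of that merge shape, with stand-in types (the real struct kvm_cpuid2 comes from the KVM UAPI headers, and the real loop also filters which system entries to keep):

    #include <stdlib.h>
    #include <string.h>

    struct entry { unsigned function, index, eax; };    /* stand-in for kvm_cpuid_entry2 */
    struct table { int nent; struct entry entries[]; }; /* stand-in for kvm_cpuid2 */

    static struct table *alloc_table(int nent)
    {
            /* Flexible array member: header plus nent entries. */
            struct table *t = calloc(1, sizeof(*t) + nent * sizeof(t->entries[0]));

            if (t)
                    t->nent = nent;
            return t;
    }

    /* Append all of 'b' after the entries of 'a', as vcpu_set_hv_cpuid() does. */
    static struct table *merge_tables(const struct table *a, const struct table *b)
    {
            struct table *full = alloc_table(a->nent + b->nent);
            int nent = 0, i;

            if (!full)
                    return NULL;
            for (i = 0; i < a->nent; i++)
                    full->entries[nent++] = a->entries[i];
            memcpy(&full->entries[nent], b->entries,
                   b->nent * sizeof(b->entries[0]));
            full->nent = nent + b->nent;
            return full;
    }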
|
D | processor.c |
    734  for (i = 0; i < cpuid->nent; i++) {  in __kvm_cpu_has()
    825  if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) {  in vcpu_init_cpuid()
    831  vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent);  in vcpu_init_cpuid()
    833  memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent));  in vcpu_init_cpuid()
   1151  for (i = 0; i < cpuid->nent; i++) {  in get_cpuid_entry()
|
/linux-6.12.1/tools/testing/selftests/kvm/x86_64/ |
D | cpuid_test.c |
     33  for (i = 0; i < guest_cpuid->nent; i++) {  in test_guest_cpuids()
     78  TEST_ASSERT(cpuid1->nent == cpuid2->nent,  in compare_cpuids()
     79  "CPUID nent mismatch: %d vs. %d", cpuid1->nent, cpuid2->nent);  in compare_cpuids()
     81  for (i = 0; i < cpuid1->nent; i++) {  in compare_cpuids()
    128  int size = sizeof(*cpuid) + cpuid->nent * sizeof(cpuid->entries[0]);  in vcpu_alloc_cpuid()
    168  struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent + 1);  in test_get_cpuid2()
    172  TEST_ASSERT(cpuid->nent == vcpu->cpuid->nent,  in test_get_cpuid2()
    174  vcpu->cpuid->nent, cpuid->nent);  in test_get_cpuid2()
    176  for (i = 0; i < vcpu->cpuid->nent; i++) {  in test_get_cpuid2()
    177  cpuid->nent = i;  in test_get_cpuid2()
  [all …]
|
D | hyperv_cpuid.c |
     51  TEST_ASSERT(hv_cpuid_entries->nent == nent_expected,  in test_hv_cpuid()
     54  nent_expected, hv_cpuid_entries->nent);  in test_hv_cpuid()
     56  for (i = 0; i < hv_cpuid_entries->nent; i++) {  in test_hv_cpuid()
    116  static struct kvm_cpuid2 cpuid = {.nent = 0};  in test_hv_cpuid_e2big()
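
test_hv_cpuid_e2big() passes nent = 0 and expects E2BIG, the usual KVM signal that the caller's buffer is too small. A hedged sketch of the grow-and-retry pattern such callers use (the fd and the ioctl request number are assumptions; the struct comes from <linux/kvm.h>):

    #include <errno.h>
    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Double the entry count until the ioctl stops failing with E2BIG. */
    static struct kvm_cpuid2 *get_cpuid(int fd, unsigned long req)
    {
            int nent = 16;
            struct kvm_cpuid2 *cpuid;

            for (;;) {
                    cpuid = calloc(1, sizeof(*cpuid) +
                                   nent * sizeof(cpuid->entries[0]));
                    if (!cpuid)
                            return NULL;
                    cpuid->nent = nent;
                    if (ioctl(fd, req, cpuid) == 0)
                            return cpuid;   /* kernel wrote back the real nent */
                    free(cpuid);
                    if (errno != E2BIG)
                            return NULL;    /* real error, not a sizing problem */
                    nent *= 2;              /* buffer too small: retry bigger */
            }
    }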
|
D | hyperv_features.c |
    154  prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);  in guest_test_msrs_access()
    494  memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));  in guest_test_msrs_access()
    546  prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);  in guest_test_hcalls_access()
    663  memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));  in guest_test_hcalls_access()
|
/linux-6.12.1/arch/x86/kvm/ |
D | cpuid.c |
     83  struct kvm_cpuid_entry2 *entries, int nent, u32 function, u64 index)  in cpuid_entry2_find() argument
    100  for (i = 0; i < nent; i++) {  in cpuid_entry2_find()
    135  int nent)  in kvm_check_cpuid() argument
    144  best = cpuid_entry2_find(entries, nent, 0x80000008,  in kvm_check_cpuid()
    157  best = cpuid_entry2_find(entries, nent, 0xd, 0);  in kvm_check_cpuid()
    171  int nent)  in kvm_cpuid_check_equal() argument
    176  if (nent != vcpu->arch.cpuid_nent)  in kvm_cpuid_check_equal()
    179  for (i = 0; i < nent; i++) {  in kvm_cpuid_check_equal()
    193  int nent, const char *sig)  in __kvm_get_hypervisor_cpuid() argument
    200  entry = cpuid_entry2_find(entries, nent, base, KVM_CPUID_INDEX_NOT_SIGNIFICANT);  in __kvm_get_hypervisor_cpuid()
  [all …]
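
cpuid_entry2_find() is a linear scan over nent entries keyed on function and, when it is significant, index. A simplified standalone sketch; the struct layout and the INDEX_NOT_SIGNIFICANT sentinel are stand-ins for the KVM versions:

    #include <stddef.h>

    struct cpuid_entry { unsigned function; unsigned index; int has_index; };

    #define INDEX_NOT_SIGNIFICANT ((unsigned)-1)  /* stand-in for the KVM constant */

    static struct cpuid_entry *
    find_entry(struct cpuid_entry *entries, int nent,
               unsigned function, unsigned index)
    {
            int i;

            for (i = 0; i < nent; i++) {
                    struct cpuid_entry *e = &entries[i];

                    if (e->function != function)
                            continue;
                    /* Match the index only when the caller says it matters. */
                    if (index == INDEX_NOT_SIGNIFICANT ||
                        !e->has_index || e->index == index)
                            return e;
            }
            return NULL;
    }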
|
/linux-6.12.1/drivers/gpu/drm/nouveau/nvkm/engine/gr/ |
D | gk20a.c |
     41  int nent;  in gk20a_gr_av_to_init_() local
     44  nent = (blob->size / sizeof(struct gk20a_fw_av));  in gk20a_gr_av_to_init_()
     45  pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));  in gk20a_gr_av_to_init_()
     52  for (i = 0; i < nent; i++) {  in gk20a_gr_av_to_init_()
     84  int nent;  in gk20a_gr_aiv_to_init() local
     87  nent = (blob->size / sizeof(struct gk20a_fw_aiv));  in gk20a_gr_aiv_to_init()
     88  pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));  in gk20a_gr_aiv_to_init()
     95  for (i = 0; i < nent; i++) {  in gk20a_gr_aiv_to_init()
    117  int nent;  in gk20a_gr_av_to_method() local
    120  nent = (blob->size / sizeof(struct gk20a_fw_av));  in gk20a_gr_av_to_method()
  [all …]
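
Each converter derives nent by dividing the firmware blob size by the fixed record size and allocates nent + 1 slots so the output list stays zero-terminated. A compact user-space sketch with hypothetical record types:

    #include <stdlib.h>
    #include <stdint.h>

    struct fw_av {                  /* stand-in for struct gk20a_fw_av */
            uint32_t addr;
            uint32_t data;
    };

    struct init { uint32_t addr, data; };

    /* Slice a raw blob into records; one extra zeroed slot terminates the list. */
    static struct init *blob_to_init(const void *blob, size_t size, int *out_nent)
    {
            int nent = size / sizeof(struct fw_av);
            const struct fw_av *av = blob;
            struct init *init = calloc(nent + 1, sizeof(*init));
            int i;

            if (!init)
                    return NULL;
            for (i = 0; i < nent; i++) {
                    init[i].addr = av[i].addr;
                    init[i].data = av[i].data;
            }
            *out_nent = nent;
            return init;
    }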
|
D | ga102.c |
    245  int nent;  in ga102_gr_av64_to_init() local
    248  nent = (blob->size / sizeof(struct netlist_av64));  in ga102_gr_av64_to_init()
    249  pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));  in ga102_gr_av64_to_init()
    257  for (i = 0; i < nent; i++) {  in ga102_gr_av64_to_init()
|
/linux-6.12.1/drivers/tty/vt/ |
D | conmakehash.c |
     82  int i, nuni, nent;  in main() local
    269  nent = 0;  in main()
    272  while ( nent >= unicount[fp0] )  in main()
    275  nent = 0;  in main()
    277  printf("0x%04x", unitable[fp0][nent++]);  in main()
|
/linux-6.12.1/drivers/infiniband/hw/qib/ |
D | qib_pcie.c |
    196  int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)  in qib_pcie_params() argument
    214  maxvec = (nent && *nent) ? *nent : 1;  in qib_pcie_params()
    224  if (nent)  in qib_pcie_params()
    225  *nent = !dd->pcidev->msix_enabled ? 0 : nvec;  in qib_pcie_params()
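
qib_pcie_params() treats nent as an in/out parameter: the requested MSI-X vector count goes in, and the granted count (or 0 when the driver fell back from MSI-X) comes out. A rough kernel-style sketch of that shape using pci_alloc_irq_vectors(); the function name and the surrounding driver context are assumptions:

    #include <linux/pci.h>

    /* *nent in: requested vector count; *nent out: granted count, 0 if no MSI-X. */
    static int request_msix(struct pci_dev *pdev, u32 *nent)
    {
            u32 maxvec = (nent && *nent) ? *nent : 1;
            int nvec;

            nvec = pci_alloc_irq_vectors(pdev, 1, maxvec,
                                         PCI_IRQ_MSIX | PCI_IRQ_MSI);
            if (nvec < 0)
                    return nvec;
            if (nent)
                    *nent = pdev->msix_enabled ? nvec : 0;
            return 0;
    }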
|
/linux-6.12.1/drivers/infiniband/hw/mthca/ |
D | mthca_allocator.c |
    155  int mthca_array_init(struct mthca_array *array, int nent)  in mthca_array_init() argument
    157  int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_array_init()
    173  void mthca_array_cleanup(struct mthca_array *array, int nent)  in mthca_array_cleanup() argument
    177  for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)  in mthca_array_cleanup()
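
Both helpers open-code round-up division to turn an entry count into a page count. The same arithmetic as a runnable check (PAGE_SIZE hard-coded to 4096 here):

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            int nent = 100000;
            /* Pages needed to hold nent pointers, rounding the tail page up. */
            unsigned long npage = DIV_ROUND_UP(nent * sizeof(void *), PAGE_SIZE);

            printf("%d entries -> %lu pages\n", nent, npage); /* 100000 -> 196 */
            return 0;
    }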
|
D | mthca_eq.c |
    184  mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),  in tavor_set_eq_ci()
    230  unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;  in get_eqe()
    466  int nent,  in mthca_create_eq() argument
    479  eq->nent = roundup_pow_of_two(max(nent, 2));  in mthca_create_eq()
    480  npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;  in mthca_create_eq()
    511  for (i = 0; i < eq->nent; ++i)  in mthca_create_eq()
    535  eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);  in mthca_create_eq()
    560  eq->eqn, eq->nent);  in mthca_create_eq()
    593  int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /  in mthca_free_eq()
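
mthca_create_eq() rounds nent up to a power of two (minimum 2), so the consumer index wraps with a simple mask, ci & (nent - 1), and the log size fits the (ffs(nent) - 1) field of the EQ context. A small standalone sketch of the sizing and masking:

    #include <stdio.h>

    /* Round up to the next power of two (minimum 2), like roundup_pow_of_two(). */
    static unsigned int roundup_pow2(unsigned int n)
    {
            unsigned int r = 2;

            while (r < n)
                    r <<= 1;
            return r;
    }

    int main(void)
    {
            unsigned int nent = roundup_pow2(100);  /* -> 128 */
            unsigned int ci;

            /* With a power-of-two ring, "index mod nent" is a cheap AND. */
            for (ci = 125; ci < 131; ci++)
                    printf("ci=%u slot=%u\n", ci, ci & (nent - 1));
            return 0;
    }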
|
D | mthca_cq.c |
    348  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)  in mthca_alloc_cq_buf() argument
    353  ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,  in mthca_alloc_cq_buf()
    360  for (i = 0; i < nent; ++i)  in mthca_alloc_cq_buf()
    768  int mthca_init_cq(struct mthca_dev *dev, int nent,  in mthca_init_cq() argument
    776  cq->ibcq.cqe = nent - 1;  in mthca_init_cq()
    814  err = mthca_alloc_cq_buf(dev, &cq->buf, nent);  in mthca_init_cq()
    828  cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);  in mthca_init_cq()
|
D | mthca_dev.h |
    421  int mthca_array_init(struct mthca_array *array, int nent);
    422  void mthca_array_cleanup(struct mthca_array *array, int nent);
    487  int mthca_init_cq(struct mthca_dev *dev, int nent,
    498  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
|
D | mthca_cmd.c |
    664  int nent = 0;  in mthca_map_cmd() local
    694  pages[nent * 2] = cpu_to_be64(virt);  in mthca_map_cmd()
    698  pages[nent * 2 + 1] =  in mthca_map_cmd()
    704  if (++nent == MTHCA_MAILBOX_SIZE / 16) {  in mthca_map_cmd()
    705  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()
    709  nent = 0;  in mthca_map_cmd()
    714  if (nent)  in mthca_map_cmd()
    715  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()
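
mthca_map_cmd() accumulates (virtual, physical) address pairs in a mailbox and flushes to firmware every MTHCA_MAILBOX_SIZE / 16 entries, resetting nent each time, with one final flush for the partial tail. A sketch of that batching skeleton; the flush callback and the addresses are hypothetical:

    #include <stdint.h>

    #define BATCH_MAX 32    /* stand-in for MTHCA_MAILBOX_SIZE / 16 */

    /* Fill fixed-size batches, flush each full one, then flush the tail. */
    static int map_pages(int npages,
                         int (*flush)(const uint64_t *pairs, int nent))
    {
            uint64_t batch[BATCH_MAX * 2];
            int nent = 0, i, err;

            for (i = 0; i < npages; i++) {
                    batch[nent * 2]     = i * 4096ULL;  /* virtual address */
                    batch[nent * 2 + 1] = i * 4096ULL;  /* physical address */
                    if (++nent == BATCH_MAX) {
                            err = flush(batch, nent);
                            if (err)
                                    return err;
                            nent = 0;
                    }
            }
            return nent ? flush(batch, nent) : 0;       /* partial tail batch */
    }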
|
/linux-6.12.1/fs/nfs/ |
D | nfs42xattr.c |
     73  atomic_long_t nent;  member
    297  atomic_long_set(&cache->nent, 0);  in nfs4_xattr_alloc_cache()
    406  atomic_long_set(&cache->nent, 0);  in nfs4_xattr_discard_cache()
    548  atomic_long_inc(&cache->nent);  in nfs4_xattr_hash_add()
    577  atomic_long_dec(&cache->nent);  in nfs4_xattr_hash_remove()
    812  if (atomic_long_read(&cache->nent) > 1)  in cache_lru_isolate()
    898  atomic_long_dec(&cache->nent);  in entry_lru_isolate()
    968  atomic_long_set(&cache->nent, 0);  in nfs4_xattr_cache_init_once()
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx5/core/ |
D | eq.c |
    268  u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE);  in create_map_eq()
    670  .nent = MLX5_NUM_CMD_EQE,  in create_async_eqs()
    683  .nent = async_eq_depth_devlink_param_get(dev),  in create_async_eqs()
    699  .nent = /* TODO: sriov max_vf + */ 1,  in create_async_eqs()
    796  u32 nent = eq_get_size(eq);  in mlx5_eq_get_eqe() local
    799  eqe = get_eqe(eq, ci & (nent - 1));  in mlx5_eq_get_eqe()
    800  eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe;  in mlx5_eq_get_eqe()
    998  int nent;  in create_comp_eq() local
   1012  nent = comp_eq_depth_devlink_param_get(dev);  in create_comp_eq()
   1029  .nent = nent,  in create_comp_eq()
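
mlx5_eq_get_eqe() validates the next EQE by comparing its owner bit with the lap parity of the consumer index, ci & nent: each trip around the power-of-two ring flips the expected value, so entries left over from the previous lap read as stale without needing a separate valid flag. A self-contained model of that check:

    #include <stdio.h>

    #define NENT 8          /* ring size; must be a power of two */

    struct eqe { unsigned char owner; };

    /*
     * Valid only if the entry's owner bit matches the lap parity of the
     * consumer index (ci & NENT isolates the bit that toggles per lap).
     * The producer writes owner = lap & 1 when posting an entry.
     */
    static struct eqe *next_eqe(struct eqe *ring, unsigned int ci)
    {
            struct eqe *eqe = &ring[ci & (NENT - 1)];

            return ((eqe->owner & 1) ^ !!(ci & NENT)) ? NULL : eqe;
    }

    int main(void)
    {
            struct eqe ring[NENT] = {{0}};  /* all owner bits 0, as on lap 0 */

            /* Lap 0 expects owner == 0; lap 1 expects owner == 1. */
            printf("lap0: %s\n", next_eqe(ring, 3) ? "valid" : "stale");
            printf("lap1: %s\n", next_eqe(ring, NENT + 3) ? "valid" : "stale");
            return 0;
    }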
|
/linux-6.12.1/drivers/net/ethernet/mellanox/mlx4/ |
D | eq.c |
    110  unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;  in get_eqe()
    124  return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;  in next_eqe_sw()
    783  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
    786  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
    808  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
    810  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
    820  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
    823  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
    970  static int mlx4_create_eq(struct mlx4_dev *dev, int nent,  in mlx4_create_eq() argument
    984  eq->nent = roundup_pow_of_two(max(nent, 2));  in mlx4_create_eq()
  [all …]
|
D | cq.c |
    342  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,  in mlx4_cq_alloc() argument
    382  cpu_to_be32((ilog2(nent) << 24) |  in mlx4_cq_alloc()
    394  err = mlx4_init_user_cqes(buf_addr, nent,  in mlx4_cq_alloc()
    399  mlx4_init_kernel_cqes(buf_addr, nent,  in mlx4_cq_alloc()
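
The hardware CQ context stores only log2 of the entry count, packed into the top byte of a big-endian word, which is why nent must be a power of two here. A quick standalone check of the encoding (the byte-swapping of cpu_to_be32 is omitted):

    #include <stdio.h>

    /* ilog2 for a power-of-two n: position of the single set bit. */
    static unsigned int ilog2_pow2(unsigned int n)
    {
            unsigned int log = 0;

            while (n >>= 1)
                    log++;
            return log;
    }

    int main(void)
    {
            unsigned int nent = 256;
            unsigned int ctx = ilog2_pow2(nent) << 24;  /* log size in bits 31:24 */

            /* nent=256 -> log=8 -> ctx=0x08000000 */
            printf("nent=%u -> log=%u -> ctx=0x%08x\n", nent, ilog2_pow2(nent), ctx);
            return 0;
    }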
|
/linux-6.12.1/drivers/tty/serial/ |
D | pch_uart.c |
    231  int nent;  member
    743  for (i = 0; i < priv->nent; i++, sg++)  in pch_dma_tx_complete()
    749  priv->nent = 0;  in pch_dma_tx_complete()
    855  int nent;  in dma_handle_tx() local
    931  nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);  in dma_handle_tx()
    932  if (!nent) {  in dma_handle_tx()
    937  priv->nent = nent;  in dma_handle_tx()
    939  for (i = 0; i < nent; i++, sg++) {  in dma_handle_tx()
    943  if (i == (nent - 1))  in dma_handle_tx()
    950  priv->sg_tx_p, nent, DMA_MEM_TO_DEV,  in dma_handle_tx()
  [all …]
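
dma_handle_tx() keeps the value returned by dma_map_sg(), not the scatterlist length it passed in: the mapped count can be smaller when an IOMMU coalesces segments, and zero means failure. A hedged kernel-style sketch; the function and the device context are hypothetical:

    #include <linux/dma-mapping.h>
    #include <linux/scatterlist.h>
    #include <linux/errno.h>

    static int map_for_tx(struct device *dev, struct scatterlist *sg, int num)
    {
            /* Returns the number of DMA segments actually mapped, 0 on error. */
            int nent = dma_map_sg(dev, sg, num, DMA_TO_DEVICE);

            if (!nent)
                    return -EIO;
            /* ... program the DMA engine with 'nent' segments, not 'num' ... */

            /* Unmapping must use the original count passed to dma_map_sg(). */
            dma_unmap_sg(dev, sg, num, DMA_TO_DEVICE);
            return nent;
    }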
|
/linux-6.12.1/Documentation/translations/zh_CN/mm/ |
D | free_page_reporting.rst |
     29  ... nent-1. While the pages are being processed by the report function, the allocator will be unable to access them. Once the report function completes, these ...
|
/linux-6.12.1/drivers/iommu/ |
D | omap-iommu.c |
    704  int nent = 1;  in iopgtable_clear_entry_core() local
    718  nent *= 16;  in iopgtable_clear_entry_core()
    722  bytes *= nent;  in iopgtable_clear_entry_core()
    723  memset(iopte, 0, nent * sizeof(*iopte));  in iopgtable_clear_entry_core()
    725  flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);  in iopgtable_clear_entry_core()
    736  nent = 1; /* for the next L1 entry */  in iopgtable_clear_entry_core()
    740  nent *= 16;  in iopgtable_clear_entry_core()
    744  bytes *= nent;  in iopgtable_clear_entry_core()
    746  memset(iopgd, 0, nent * sizeof(*iopgd));  in iopgtable_clear_entry_core()
    747  flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);  in iopgtable_clear_entry_core()
|
/linux-6.12.1/drivers/infiniband/hw/mlx5/ |
D | cq.c |
     77  static u8 sw_ownership_bit(int n, int nent)  in sw_ownership_bit() argument
     79  return (n & nent) ? 1 : 0;  in sw_ownership_bit()
    673  int nent,  in alloc_cq_frag_buf() argument
    682  nent * cqe_size,  in alloc_cq_frag_buf()
    691  buf->nent = nent;  in alloc_cq_frag_buf()
    880  for (i = 0; i < buf->nent; i++) {  in init_cq_frag_buf()
   1247  (i + 1) & cq->resize_buf->nent);  in copy_resize_cqes()
   1249  sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);  in copy_resize_cqes()
|
/linux-6.12.1/include/linux/mlx5/ |
D | eq.h |
     16  int nent;  member
|
/linux-6.12.1/lib/ |
D | scatterlist.c |
    618  unsigned int nent, nalloc;  in sgl_alloc_order() local
    621  nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);  in sgl_alloc_order()
    623  if (length > (nent << (PAGE_SHIFT + order)))  in sgl_alloc_order()
    625  nalloc = nent;  in sgl_alloc_order()
    653  *nent_p = nent;  in sgl_alloc_order()
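
sgl_alloc_order() converts a byte length into a count of order-sized chunks: round length up to a multiple of PAGE_SIZE << order, then shift right by PAGE_SHIFT + order; the follow-up comparison catches the case where the rounding overflowed. The arithmetic with worked numbers:

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            unsigned long long length = 1000000;    /* bytes requested */
            unsigned int order = 2;                 /* chunk = 4 pages = 16 KiB */
            unsigned long long chunk = PAGE_SIZE << order;
            /* round_up to a chunk multiple, then count chunks. */
            unsigned long long nent =
                    ((length + chunk - 1) / chunk * chunk) >> (PAGE_SHIFT + order);

            printf("nent = %llu\n", nent);          /* 61.03 chunks -> 62 */
            /* Mirrors the overflow check: length > nent << (PAGE_SHIFT + order). */
            printf("fits: %s\n",
                   length <= (nent << (PAGE_SHIFT + order)) ? "yes" : "no");
            return 0;
    }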
|