/linux-6.12.1/sound/soc/codecs/ |
D | lpass-va-macro.c |
     21  /* VA macro registers */
    267  /* VA macro */
    291  /* VA core */
    453  static int va_clk_rsc_fs_gen_request(struct va_macro *va, bool enable)  in va_clk_rsc_fs_gen_request() argument
    455  struct regmap *regmap = va->regmap;  in va_clk_rsc_fs_gen_request()
    486  static int va_macro_mclk_enable(struct va_macro *va, bool mclk_enable)  in va_macro_mclk_enable() argument
    488  struct regmap *regmap = va->regmap;  in va_macro_mclk_enable()
    491  va_clk_rsc_fs_gen_request(va, true);  in va_macro_mclk_enable()
    495  va_clk_rsc_fs_gen_request(va, false);  in va_macro_mclk_enable()
    505  struct va_macro *va = snd_soc_component_get_drvdata(comp);  in va_macro_mclk_event() local
    [all …]
|
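The lpass-va-macro.c hits above show the VA macro toggling its FS-gen and MCLK state through a regmap handle. A minimal sketch of that regmap bit-toggle pattern; the register offset and bit name below are invented for illustration, not the real LPASS register map:

#include <linux/bits.h>
#include <linux/regmap.h>

/* Hypothetical register layout used only for this sketch. */
#define EX_CLK_CTL_REG		0x0040
#define EX_CLK_CTL_EN_MASK	BIT(0)

/* Enable or disable a clock by flipping one bit through regmap. */
static int ex_clk_enable(struct regmap *regmap, bool enable)
{
	return regmap_update_bits(regmap, EX_CLK_CTL_REG,
				  EX_CLK_CTL_EN_MASK,
				  enable ? EX_CLK_CTL_EN_MASK : 0);
}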
/linux-6.12.1/drivers/gpu/drm/nouveau/ |
D | nouveau_uvmm.c |
      6  * The uvmm mutex protects any operations on the GPU VA space provided by the
      7  * DRM GPU VA manager.
     70  } va;  member
    174  u64 addr = reg->va.addr;  in nouveau_uvma_region_sparse_unref()
    175  u64 range = reg->va.range;  in nouveau_uvma_region_sparse_unref()
    183  u64 addr = uvma->va.va.addr;  in nouveau_uvma_vmm_put()
    184  u64 range = uvma->va.va.range;  in nouveau_uvma_vmm_put()
    193  u64 addr = uvma->va.va.addr;  in nouveau_uvma_map()
    194  u64 offset = uvma->va.gem.offset;  in nouveau_uvma_map()
    195  u64 range = uvma->va.va.range;  in nouveau_uvma_map()
    [all …]
|
/linux-6.12.1/mm/ |
D | vmalloc.c |
     830  * All vmap_area objects in this tree are sorted by va->va_start
     971  va_size(struct vmap_area *va)  in va_size() argument
     973  return (va->va_end - va->va_start);  in va_size()
     979  struct vmap_area *va;  in get_subtree_max_size() local
     981  va = rb_entry_safe(node, struct vmap_area, rb_node);  in get_subtree_max_size()
     982  return va ? va->subtree_max_size : 0;  in get_subtree_max_size()
    1007  struct vmap_area *va;  in __find_vmap_area() local
    1009  va = rb_entry(n, struct vmap_area, rb_node);  in __find_vmap_area()
    1010  if (addr < va->va_start)  in __find_vmap_area()
    1012  else if (addr >= va->va_end)  in __find_vmap_area()
    [all …]
|
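The vmalloc.c excerpt above walks the vmap_area rb-tree, which is sorted by va_start, to find the area covering a given address. A condensed sketch of that lookup pattern, with a hypothetical range_node standing in for struct vmap_area:

#include <linux/rbtree.h>

/* Minimal stand-in for struct vmap_area: a [start, end) range keyed by start. */
struct range_node {
	unsigned long start;	/* inclusive */
	unsigned long end;	/* exclusive */
	struct rb_node rb;
};

/* Walk an rb-tree sorted by ->start and return the node covering @addr, if any. */
static struct range_node *range_lookup(struct rb_root *root, unsigned long addr)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct range_node *rn = rb_entry(n, struct range_node, rb);

		if (addr < rn->start)
			n = n->rb_left;
		else if (addr >= rn->end)
			n = n->rb_right;
		else
			return rn;	/* start <= addr < end */
	}
	return NULL;
}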
/linux-6.12.1/drivers/gpu/drm/ |
D | drm_gpuvm.c |
     36  * The DRM GPU VA Manager, represented by struct drm_gpuvm keeps track of a
     37  * GPU's virtual address (VA) space and manages the corresponding virtual
     42  * all existent GPU VA mappings using this &drm_gem_object as backing buffer.
     47  * The GPU VA manager internally uses a rb-tree to manage the
     51  * portion of VA space reserved by the kernel. This node is initialized together
     52  * with the GPU VA manager instance and removed when the GPU VA manager is
    105  * Besides its capability to manage and represent a GPU VA space, the
    106  * GPU VA manager also provides functions to let the &drm_gpuvm calculate a
    109  * Therefore the DRM GPU VA manager provides an algorithm implementing splitting
    110  * and merging of existent GPU VA mappings with the ones that are requested to
    [all …]
|
/linux-6.12.1/drivers/scsi/qedi/ |
D | qedi_dbg.c |
     14  va_list va;  in qedi_dbg_err() local
     17  va_start(va, fmt);  in qedi_dbg_err()
     20  vaf.va = &va;  in qedi_dbg_err()
     28  va_end(va);  in qedi_dbg_err()
     35  va_list va;  in qedi_dbg_warn() local
     38  va_start(va, fmt);  in qedi_dbg_warn()
     41  vaf.va = &va;  in qedi_dbg_warn()
     53  va_end(va);  in qedi_dbg_warn()
     60  va_list va;  in qedi_dbg_notice() local
     63  va_start(va, fmt);  in qedi_dbg_notice()
    [all …]
|
/linux-6.12.1/drivers/scsi/qedf/ |
D | qedf_dbg.c |
     13  va_list va;  in qedf_dbg_err() local
     16  va_start(va, fmt);  in qedf_dbg_err()
     19  vaf.va = &va;  in qedf_dbg_err()
     27  va_end(va);  in qedf_dbg_err()
     34  va_list va;  in qedf_dbg_warn() local
     37  va_start(va, fmt);  in qedf_dbg_warn()
     40  vaf.va = &va;  in qedf_dbg_warn()
     52  va_end(va);  in qedf_dbg_warn()
     59  va_list va;  in qedf_dbg_notice() local
     62  va_start(va, fmt);  in qedf_dbg_notice()
    [all …]
|
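qedi_dbg.c and qedf_dbg.c above both use the kernel's struct va_format / %pV idiom: the caller's format string and va_list are wrapped in a va_format and handed to printk, which expands them in a single pass. A minimal sketch of that idiom (example_dbg_err is a made-up name, not a driver function):

#include <linux/printk.h>
#include <linux/stdarg.h>

/*
 * Variadic error helper following the pattern above: wrap the caller's
 * format string and va_list in a struct va_format and let printk expand
 * it via the %pV specifier.
 */
static void example_dbg_err(const char *prefix, const char *fmt, ...)
{
	struct va_format vaf;
	va_list va;

	va_start(va, fmt);
	vaf.fmt = fmt;
	vaf.va = &va;
	pr_err("%s: %pV", prefix, &vaf);	/* fmt/va are consumed inside printk */
	va_end(va);
}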
/linux-6.12.1/Documentation/devicetree/bindings/sound/ |
D | qcom,lpass-va-macro.yaml |
      4  $id: http://devicetree.org/schemas/sound/qcom,lpass-va-macro.yaml#
      7  title: LPASS(Low Power Audio Subsystem) VA Macro audio codec
     16  - qcom,sc7280-lpass-va-macro
     17  - qcom,sm8250-lpass-va-macro
     18  - qcom,sm8450-lpass-va-macro
     19  - qcom,sm8550-lpass-va-macro
     20  - qcom,sc8280xp-lpass-va-macro
     23  - qcom,sm8650-lpass-va-macro
     24  - qcom,x1e80100-lpass-va-macro
     25  - const: qcom,sm8550-lpass-va-macro
    [all …]
|
D | cs42l56.txt |
      9  - VA-supply, VCP-supply, VLDO-supply : power supplies for the device,
     31  0 = 0.5 x VA
     32  1 = 0.6 x VA
     33  2 = 0.7 x VA
     34  3 = 0.8 x VA
     35  4 = 0.83 x VA
     36  5 = 0.91 x VA
     62  VA-supply = <&reg_audio>;
|
/linux-6.12.1/include/drm/ |
D | drm_gpuvm.h |
     66  * struct drm_gpuva - structure to track a GPU VA mapping
     68  * This structure represents a GPU VA mapping and is associated with a
     91  * @va: structure containing the address and range of the &drm_gpuva
     95  * @va.addr: the start address
    103  } va;  member
    150  int drm_gpuva_insert(struct drm_gpuvm *gpuvm, struct drm_gpuva *va);
    151  void drm_gpuva_remove(struct drm_gpuva *va);
    153  void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo);
    154  void drm_gpuva_unlink(struct drm_gpuva *va);
    163  static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,  in drm_gpuva_init() argument
    [all …]
|
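drm_gpuvm.h above declares the basic VA-tree operations used by drivers like nouveau_uvmm.c and panthor_mmu.c (drm_gpuva_insert(), drm_gpuva_remove(), drm_gpuva_link(), drm_gpuva_unlink()). A rough sketch of tracking one mapping with them, assuming an already-initialized struct drm_gpuvm and leaving out the GEM backing, locking, and split/merge handling a real driver needs:

#include <drm/drm_gpuvm.h>

/* Insert a [addr, addr + range) mapping into an existing GPU VA space. */
static int ex_track_mapping(struct drm_gpuvm *gpuvm, struct drm_gpuva *va,
			    u64 addr, u64 range)
{
	va->va.addr = addr;
	va->va.range = range;

	/* drm_gpuva_insert() returns an error if the range cannot be inserted,
	 * e.g. because it clashes with an existing entry. */
	return drm_gpuva_insert(gpuvm, va);
}

/* Drop the mapping from the VA tree again. */
static void ex_untrack_mapping(struct drm_gpuva *va)
{
	drm_gpuva_remove(va);
}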
/linux-6.12.1/drivers/gpu/drm/i915/gt/ |
D | selftest_tlb.c |
     33  struct i915_vma *va,  in pte_tlbinv() argument
     64  /* Pin va at random but aligned offset after vma */  in pte_tlbinv()
     68  va->size, align);  in pte_tlbinv()
     69  err = i915_vma_pin(va, 0, 0, addr | PIN_OFFSET_FIXED | PIN_USER);  in pte_tlbinv()
     71  pr_err("Cannot pin at %llx+%llx\n", addr, va->size);  in pte_tlbinv()
     74  GEM_BUG_ON(i915_vma_offset(va) != addr);  in pte_tlbinv()
     75  if (vb != va) {  in pte_tlbinv()
     77  vb->node = va->node; /* overwrites the _same_ PTE */  in pte_tlbinv()
     93  if (va != vb)  in pte_tlbinv()
     95  ce->engine->name, va->obj->mm.region->name ?: "smem",  in pte_tlbinv()
    [all …]
|
/linux-6.12.1/tools/testing/selftests/kvm/lib/ |
D | ucall_common.c |
     86  va_list va;  in ucall_assert() local
     95  va_start(va, fmt);  in ucall_assert()
     96  guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);  in ucall_assert()
     97  va_end(va);  in ucall_assert()
    107  va_list va;  in ucall_fmt() local
    112  va_start(va, fmt);  in ucall_fmt()
    113  guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);  in ucall_fmt()
    114  va_end(va);  in ucall_fmt()
    124  va_list va;  in ucall() local
    133  va_start(va, nargs);  in ucall()
    [all …]
|
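ucall_common.c above forwards its va_list to a vsnprintf-style helper that formats into a fixed-size guest buffer. The same forwarding pattern in plain C (the helper name and buffer length are arbitrary here):

#include <stdarg.h>
#include <stdio.h>

#define EX_BUFFER_LEN 1024

/* Format a variadic message into a caller-provided buffer via vsnprintf(). */
static void ex_format(char *buf, size_t len, const char *fmt, ...)
{
	va_list va;

	va_start(va, fmt);
	vsnprintf(buf, len, fmt, va);	/* output is truncated if it does not fit */
	va_end(va);
}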
/linux-6.12.1/lib/ |
D | test_debug_virtual.c |
     25  void *va;  in test_debug_virtual_init() local
     27  va = (void *)VMALLOC_START;  in test_debug_virtual_init()
     28  pa = virt_to_phys(va);  in test_debug_virtual_init()
     30  pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);  in test_debug_virtual_init()
     37  va = foo;  in test_debug_virtual_init()
     38  pr_info("PA: %pa for VA: 0x%lx\n", &pa, (unsigned long)va);  in test_debug_virtual_init()
|
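test_debug_virtual.c above deliberately feeds a vmalloc-range address to virt_to_phys() to exercise CONFIG_DEBUG_VIRTUAL; virt_to_phys() is only valid for linearly-mapped addresses (e.g. kmalloc allocations), while vmalloc addresses have to go through their backing page. A small sketch of the valid cases (ex_show_phys is a made-up function):

#include <linux/io.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void ex_show_phys(void)
{
	void *lin = kmalloc(64, GFP_KERNEL);	/* linear map: virt_to_phys() is valid */
	void *vm  = vmalloc(PAGE_SIZE);		/* vmalloc area: translate via its page */
	phys_addr_t pa;

	if (lin) {
		pa = virt_to_phys(lin);
		pr_info("kmalloc VA %p -> PA %pa\n", lin, &pa);
	}
	if (vm) {
		pa = page_to_phys(vmalloc_to_page(vm));
		pr_info("vmalloc VA %p -> PA %pa (first page)\n", vm, &pa);
	}
	kfree(lin);
	vfree(vm);
}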
/linux-6.12.1/arch/powerpc/mm/nohash/ |
D | 8xx.c |
     23  * Return PA for this VA if it is in an area mapped with LTLBs or fixmap.
     26  phys_addr_t v_block_mapped(unsigned long va)  in v_block_mapped() argument
     30  if (va >= VIRT_IMMR_BASE && va < VIRT_IMMR_BASE + IMMR_SIZE)  in v_block_mapped()
     31  return p + va - VIRT_IMMR_BASE;  in v_block_mapped()
     32  if (va >= PAGE_OFFSET && va < PAGE_OFFSET + block_mapped_ram)  in v_block_mapped()
     33  return __pa(va);  in v_block_mapped()
     38  * Return VA for a given PA mapped with LTLBs or fixmap
     52  static int __ref __early_map_kernel_hugepage(unsigned long va, phys_addr_t pa,  in __early_map_kernel_hugepage() argument
     55  pmd_t *pmdp = pmd_off_k(va);  in __early_map_kernel_hugepage()
     66  ptep = early_pte_alloc_kernel(pmdp, va);  in __early_map_kernel_hugepage()
    [all …]
|
/linux-6.12.1/drivers/dio/ |
D | dio.c |
    127  void *va;  in dio_find() local
    139  va = (void *)(pa + DIO_VIRADDRBASE);  in dio_find()
    141  va = ioremap(pa, PAGE_SIZE);  in dio_find()
    144  (unsigned char *)va + DIO_IDOFF, 1)) {  in dio_find()
    146  iounmap(va);  in dio_find()
    150  prid = DIO_ID(va);  in dio_find()
    153  secid = DIO_SECID(va);  in dio_find()
    160  iounmap(va);  in dio_find()
    200  u_char *va;  in dio_init() local
    212  va = (void *)(pa + DIO_VIRADDRBASE);  in dio_init()
    [all …]
|
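dio.c above either derives a direct VA from the bus physical address or falls back to ioremap() before probing the board ID. A stripped-down sketch of that probe-then-unmap pattern; the ID-register offset is hypothetical, not the real DIO_IDOFF:

#include <linux/errno.h>
#include <linux/io.h>

#define EX_ID_OFFSET	0x01	/* made-up ID register offset */

/* Map one page of a device, read its ID byte, and always unmap again. */
static int ex_probe_id(phys_addr_t pa)
{
	void __iomem *va = ioremap(pa, PAGE_SIZE);
	u8 id;

	if (!va)
		return -ENOMEM;

	id = readb(va + EX_ID_OFFSET);
	iounmap(va);

	return id;
}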
/linux-6.12.1/arch/powerpc/mm/book3s64/ |
D | hash_native.c |
     72  unsigned long va;  in ___tlbie() local
     77  * We need 14 to 65 bits of va for a tlibe of 4K page  in ___tlbie()
     83  va = vpn << VPN_SHIFT;  in ___tlbie()
     85  * clear top 16 bits of 64bit va, non SLS segment  in ___tlbie()
     90  va &= ~(0xffffULL << 48);  in ___tlbie()
     95  va &= ~((1ul << (64 - 52)) - 1);  in ___tlbie()
     96  va |= ssize << 8;  in ___tlbie()
     98  va |= sllp << 5;  in ___tlbie()
    100  : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)  in ___tlbie()
    104  /* We need 14 to 14 + i bits of va */  in ___tlbie()
    [all …]
|
/linux-6.12.1/arch/riscv/mm/ |
D | init.c |
    378  static inline phys_addr_t __init alloc_pte_early(uintptr_t va)  in alloc_pte_early() argument
    387  static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)  in alloc_pte_fixmap() argument
    392  static phys_addr_t __meminit alloc_pte_late(uintptr_t va)  in alloc_pte_late() argument
    400  static void __meminit create_pte_mapping(pte_t *ptep, uintptr_t va, phys_addr_t pa, phys_addr_t sz,  in create_pte_mapping() argument
    403  uintptr_t pte_idx = pte_index(va);  in create_pte_mapping()
    460  static phys_addr_t __init alloc_pmd_early(uintptr_t va)  in alloc_pmd_early() argument
    462  BUG_ON((va - kernel_map.virt_addr) >> PUD_SHIFT);  in alloc_pmd_early()
    467  static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)  in alloc_pmd_fixmap() argument
    472  static phys_addr_t __meminit alloc_pmd_late(uintptr_t va)  in alloc_pmd_late() argument
    481  uintptr_t va, phys_addr_t pa,  in create_pmd_mapping() argument
    [all …]
|
/linux-6.12.1/fs/ceph/ |
D | ceph_frag.c |
     10  unsigned va = ceph_frag_value(a);  in ceph_frag_compare() local
     12  if (va < vb)  in ceph_frag_compare()
     14  if (va > vb)  in ceph_frag_compare()
     16  va = ceph_frag_bits(a);  in ceph_frag_compare()
     18  if (va < vb)  in ceph_frag_compare()
     20  if (va > vb)  in ceph_frag_compare()
|
/linux-6.12.1/drivers/infiniband/hw/usnic/ |
D | usnic_uiom.c |
     59  usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",  in usnic_uiom_dma_fault()
    166  usnic_dbg("va: 0x%lx pa: %pa\n",  in usnic_uiom_get_pages()
    194  long unsigned va, size;  in usnic_uiom_unmap_sorted_intervals() local
    197  va = interval->start << PAGE_SHIFT;  in usnic_uiom_unmap_sorted_intervals()
    201  usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);  in usnic_uiom_unmap_sorted_intervals()
    202  iommu_unmap(pd->domain, va, PAGE_SIZE);  in usnic_uiom_unmap_sorted_intervals()
    203  va += PAGE_SIZE;  in usnic_uiom_unmap_sorted_intervals()
    220  vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;  in __usnic_uiom_reg_release()
    251  long int va = uiomr->va & PAGE_MASK;  in usnic_uiom_map_sorted_intervals()    local
    259  for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {  in usnic_uiom_map_sorted_intervals()
    [all …]
|
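usnic_uiom.c above tears down an interval of IOVA space one page at a time with iommu_unmap(). The bare loop pattern, without the interval-tree and pinned-page bookkeeping of the real driver:

#include <linux/iommu.h>

/* Unmap [iova, iova + size) from an IOMMU domain in PAGE_SIZE steps. */
static void ex_unmap_range(struct iommu_domain *domain,
			   unsigned long iova, size_t size)
{
	unsigned long end = iova + size;

	for (; iova < end; iova += PAGE_SIZE)
		iommu_unmap(domain, iova, PAGE_SIZE);
}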
/linux-6.12.1/arch/riscv/include/asm/ |
D | sections.h |
     18  static inline bool is_va_kernel_text(uintptr_t va)  in is_va_kernel_text() argument
     23  return va >= start && va < end;  in is_va_kernel_text()
     26  static inline bool is_va_kernel_lm_alias_text(uintptr_t va)  in is_va_kernel_lm_alias_text() argument
     31  return va >= start && va < end;  in is_va_kernel_lm_alias_text()
|
/linux-6.12.1/drivers/gpu/drm/panthor/ |
D | panthor_mmu.c |
    173  /** @va: Virtual range targeted by the VM operation. */
    180  } va;  member
    186  * specified VA range.
    189  * the specified VA range.
    213  * @new_vma: The new VMA object that will be inserted to the VA tree.
    240  * We delegate all the VA management to the common drm_gpuvm framework
    289  * @mm: Memory management object representing the auto-VA/kernel-VA.
    291  * Used to auto-allocate VA space for kernel-managed objects (tiler
    294  * For the MCU VM, this is managing the VA range that's used to map
    298  * exceed half of the VA space addressable.
    [all …]
|
/linux-6.12.1/drivers/media/platform/mediatek/vcodec/common/ |
D | mtk_vcodec_util.c |
     66  mem->va = dma_alloc_attrs(&plat_dev->dev, mem->size, &mem->dma_addr,  in mtk_vcodec_mem_alloc()
     68  if (!mem->va) {  in mtk_vcodec_mem_alloc()
     74  mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%zx", id, mem->va,  in mtk_vcodec_mem_alloc()
     99  if (!mem->va) {  in mtk_vcodec_mem_free()
    100  mtk_v4l2_err(plat_dev, "%s: Tried to free a NULL VA", __func__);  in mtk_vcodec_mem_free()
    106  mtk_v4l2_debug(plat_dev, 3, "[%d] - va = %p dma = 0x%lx size = 0x%zx", id, mem->va,  in mtk_vcodec_mem_free()
    109  dma_free_coherent(&plat_dev->dev, mem->size, mem->va, mem->dma_addr);  in mtk_vcodec_mem_free()
    110  mem->va = NULL;  in mtk_vcodec_mem_free()
|
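mtk_vcodec_util.c above keeps both the CPU VA and the DMA address of a coherent buffer in one descriptor, pairing dma_alloc_attrs() with dma_free_coherent(). A minimal version of that pattern using the plain coherent API; struct ex_dma_buf and the helper names are invented for the sketch:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

struct ex_dma_buf {
	void *va;		/* CPU virtual address */
	dma_addr_t dma_addr;	/* device-visible address */
	size_t size;
};

static int ex_buf_alloc(struct device *dev, struct ex_dma_buf *buf, size_t size)
{
	buf->size = size;
	buf->va = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
	return buf->va ? 0 : -ENOMEM;
}

static void ex_buf_free(struct device *dev, struct ex_dma_buf *buf)
{
	if (!buf->va)
		return;			/* freeing a NULL VA is a caller bug */
	dma_free_coherent(dev, buf->size, buf->va, buf->dma_addr);
	buf->va = NULL;
}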
/linux-6.12.1/drivers/misc/vmw_vmci/ |
D | vmci_queue_pair.h |
     35  u64 ppn_va; /* Start VA of queue pair PPNs. */
     44  u64 va; /* Start VA of queue pair PPNs. */  member
     54  * pass down the VA of the mapped file. Before host support was added
     59  * provide the VA of the mapped files.
     80  u64 produce_page_file; /* User VA. */
     81  u64 consume_page_file; /* User VA. */
     86  u64 produce_va; /* User VA of the mapped file. */
     87  u64 consume_va; /* User VA of the mapped file. */
    101  * this is a list of PPNs, and on hosted, it is a user VA where the
|
/linux-6.12.1/drivers/infiniband/sw/rxe/ |
D | rxe_mr.c |
    252  void *va;  in rxe_mr_copy_xarray() local
    261  va = kmap_local_page(page);  in rxe_mr_copy_xarray()
    263  memcpy(addr, va + page_offset, bytes);  in rxe_mr_copy_xarray()
    265  memcpy(va + page_offset, addr, bytes);  in rxe_mr_copy_xarray()
    266  kunmap_local(va);  in rxe_mr_copy_xarray()
    283  u8 *va;  in rxe_mr_copy_dma() local
    289  va = kmap_local_page(page);  in rxe_mr_copy_dma()
    292  memcpy(va + page_offset, addr, bytes);  in rxe_mr_copy_dma()
    294  memcpy(addr, va + page_offset, bytes);  in rxe_mr_copy_dma()
    296  kunmap_local(va);  in rxe_mr_copy_dma()
    [all …]
|
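rxe_mr.c above copies data to and from MR pages through short-lived kmap_local_page() mappings. The core of that pattern, reduced to copying one chunk into a page at a given offset (ex_copy_to_page is a made-up helper):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy @len bytes from @src into @page at @offset via a temporary mapping. */
static void ex_copy_to_page(struct page *page, unsigned int offset,
			    const void *src, unsigned int len)
{
	void *va = kmap_local_page(page);	/* short-lived, CPU-local mapping */

	memcpy(va + offset, src, len);
	kunmap_local(va);			/* unmap before leaving the context */
}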
/linux-6.12.1/drivers/media/platform/mediatek/vcodec/decoder/vdec/ |
D | vdec_vp9_if.c |
     31  * @va : cpu address
     37  unsigned long va;  member
    227  if (fb->base_y.va == addr) {  in vp9_rm_from_fb_use_list()
    278  vsi->frm_bufs[ref_idx].buf.fb->base_y.va);  in vp9_ref_cnt_fb()
    296  if (vsi->sf_ref_fb[i].fb.base_y.va) {  in vp9_free_all_sf_ref_fb()
    321  if (vsi->sf_ref_fb[idx].fb.base_y.va &&  in vp9_get_sf_ref_fb()
    330  if (vsi->sf_ref_fb[idx].fb.base_y.va == NULL)  in vp9_get_sf_ref_fb()
    391  if (mem->va)  in vp9_alloc_work_buf()
    402  /* Set the va again */  in vp9_alloc_work_buf()
    403  vsi->mv_buf.va = (unsigned long)mem->va;  in vp9_alloc_work_buf()
    [all …]
|
/linux-6.12.1/arch/parisc/kernel/ |
D | entry.S |
    176  va = r8 /* virtual address for which the trap occurred */  define
    189  mfctl %pcoq, va
    206  mfctl %pcoq, va
    220  mfctl %ior,va
    238  mfctl %ior,va
    252  mfctl %ior, va
    270  mfctl %ior, va
    282  mfctl %ior,va
    298  mfctl %ior,va
    312  mfctl %ior,va
    [all …]
|