Lines matching "0x00000000 - 0x0fffffff" in drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
62 mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
63 mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
64 mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
65 mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
69 mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
74 switch (adev->asic_type) { in gmc_v7_0_init_golden_registers()
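The values above follow the driver's golden-register convention: each table entry is an (offset, and_mask, or_mask) triplet applied as a read-modify-write, with an and_mask of 0xffffffff (as on mmMC_MEM_POWER_LS) meaning a full overwrite. A minimal sketch of that pattern, assuming the amdgpu RREG32/WREG32 accessors; the driver's actual helper for this is amdgpu_device_program_register_sequence():

	/* sketch: apply (offset, and_mask, or_mask) triplets */
	static void program_golden_triplets(struct amdgpu_device *adev,
					    const u32 *regs, u32 count)
	{
		u32 i, reg, and_mask, or_mask, tmp;

		for (i = 0; i < count; i += 3) {
			reg = regs[i + 0];
			and_mask = regs[i + 1];	/* bits to replace */
			or_mask = regs[i + 2];	/* new bit values */

			if (and_mask == 0xffffffff) {
				tmp = or_mask;	/* full-word overwrite */
			} else {
				tmp = RREG32(reg);
				tmp &= ~and_mask;
				tmp |= or_mask;
			}
			WREG32(reg, tmp);
		}
	}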
97 WREG32(mmBIF_FB_EN, 0); in gmc_v7_0_mc_stop()
100 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); in gmc_v7_0_mc_stop()
113 tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0); in gmc_v7_0_mc_resume()
116 tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1); in gmc_v7_0_mc_resume()
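Both helpers lean on the REG_SET_FIELD read-modify-write idiom. A hedged sketch of the stop/resume handshake implied by the fragments above (ordering is illustrative; field values as shown):

	/* stop: block CPU framebuffer access, then black out the MC */
	WREG32(mmBIF_FB_EN, 0);
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp | 1);	/* enter blackout (mode 1) */

	/* resume: leave blackout, then re-enable FB reads and writes */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);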
122 * gmc_v7_0_init_microcode - load ucode images from disk
128 * Returns 0 on success, error on failure.
137 switch (adev->asic_type) { in gmc_v7_0_init_microcode()
150 return 0; in gmc_v7_0_init_microcode()
152 return -EINVAL; in gmc_v7_0_init_microcode()
155 err = amdgpu_ucode_request(adev, &adev->gmc.fw, "amdgpu/%s_mc.bin", chip_name); in gmc_v7_0_init_microcode()
158 amdgpu_ucode_release(&adev->gmc.fw); in gmc_v7_0_init_microcode()
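The request above builds the firmware path from the ASIC type. A sketch of the lookup it depends on (names match the upstream amdgpu firmware files, e.g. amdgpu/bonaire_mc.bin; CIK APUs get their MC ucode from the SBIOS and skip the request entirely):

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;	/* APU: no discrete MC firmware needed */
	default:
		return -EINVAL;
	}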
164 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
169 * Returns 0 on success, error on failure.
179 if (!adev->gmc.fw) in gmc_v7_0_mc_load_microcode()
180 return -EINVAL; in gmc_v7_0_mc_load_microcode()
182 hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data; in gmc_v7_0_mc_load_microcode()
183 amdgpu_ucode_print_mc_hdr(&hdr->header); in gmc_v7_0_mc_load_microcode()
185 adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version); in gmc_v7_0_mc_load_microcode()
186 regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2); in gmc_v7_0_mc_load_microcode()
188 (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes)); in gmc_v7_0_mc_load_microcode()
189 ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; in gmc_v7_0_mc_load_microcode()
191 (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); in gmc_v7_0_mc_load_microcode()
195 if (running == 0) { in gmc_v7_0_mc_load_microcode()
197 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); in gmc_v7_0_mc_load_microcode()
198 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010); in gmc_v7_0_mc_load_microcode()
201 for (i = 0; i < regs_size; i++) { in gmc_v7_0_mc_load_microcode()
206 for (i = 0; i < ucode_size; i++) in gmc_v7_0_mc_load_microcode()
210 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008); in gmc_v7_0_mc_load_microcode()
211 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004); in gmc_v7_0_mc_load_microcode()
212 WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001); in gmc_v7_0_mc_load_microcode()
215 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v7_0_mc_load_microcode()
221 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v7_0_mc_load_microcode()
229 return 0; in gmc_v7_0_mc_load_microcode()
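Taken together, the writes above are the CIK MC ucode handshake: halt the sequencer (0x8 then 0x10 to MC_SEQ_SUP_CNTL), program the io_debug (address, data) register pairs and the ucode words, release it (0x8, 0x4, 0x1), then poll until memory training completes. A sketch of the polling tail, assuming the MC_SEQ_TRAIN_WAKEUP_CNTL done flags used on CIK:

	/* wait for training of both memory channels, 1 us per iteration */
	for (i = 0; i < adev->usec_timeout; i++) {
		if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
				  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
			break;
		udelay(1);
	}
	for (i = 0; i < adev->usec_timeout; i++) {
		if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
				  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
			break;
		udelay(1);
	}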
235 u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF; in gmc_v7_0_vram_gtt_location()
245 * gmc_v7_0_mc_program - program the GPU memory controller
258 for (i = 0, j = 0; i < 32; i++, j += 0x6) { in gmc_v7_0_mc_program()
259 WREG32((0xb05 + j), 0x00000000); in gmc_v7_0_mc_program()
260 WREG32((0xb06 + j), 0x00000000); in gmc_v7_0_mc_program()
261 WREG32((0xb07 + j), 0x00000000); in gmc_v7_0_mc_program()
262 WREG32((0xb08 + j), 0x00000000); in gmc_v7_0_mc_program()
263 WREG32((0xb09 + j), 0x00000000); in gmc_v7_0_mc_program()
265 WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0); in gmc_v7_0_mc_program()
268 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v7_0_mc_program()
270 if (adev->mode_info.num_crtc) { in gmc_v7_0_mc_program()
278 tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0); in gmc_v7_0_mc_program()
283 adev->gmc.vram_start >> 12); in gmc_v7_0_mc_program()
285 adev->gmc.vram_end >> 12); in gmc_v7_0_mc_program()
287 adev->mem_scratch.gpu_addr >> 12); in gmc_v7_0_mc_program()
288 WREG32(mmMC_VM_AGP_BASE, 0); in gmc_v7_0_mc_program()
289 WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22); in gmc_v7_0_mc_program()
290 WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22); in gmc_v7_0_mc_program()
292 dev_warn(adev->dev, "Wait for MC idle timed out!\n"); in gmc_v7_0_mc_program()
297 tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0); in gmc_v7_0_mc_program()
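The aperture writes above work in pages rather than bytes: the system aperture registers take 4 KiB page numbers (hence the >> 12), while the AGP base/top registers use 4 MiB granularity (>> 22). Worked through with a hypothetical vram_start:

	/* hypothetical vram_start = 0x1000000000 (the 64 GiB mark) */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       0x1000000000ULL >> 12);			/* 0x1000000: 4 KiB pages */
	WREG32(mmMC_VM_AGP_BOT, 0x1000000000ULL >> 22);	/* 0x4000: 4 MiB units */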
305 * gmc_v7_0_mc_init - initialize the memory controller driver params
311 * Returns 0 for success.
317 adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev); in gmc_v7_0_mc_init()
318 if (!adev->gmc.vram_width) { in gmc_v7_0_mc_init()
331 case 0: in gmc_v7_0_mc_init()
360 adev->gmc.vram_width = numchan * chansize; in gmc_v7_0_mc_init()
363 adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v7_0_mc_init()
364 adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL; in gmc_v7_0_mc_init()
366 if (!(adev->flags & AMD_IS_APU)) { in gmc_v7_0_mc_init()
371 adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); in gmc_v7_0_mc_init()
372 adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); in gmc_v7_0_mc_init()
375 if ((adev->flags & AMD_IS_APU) && in gmc_v7_0_mc_init()
376 adev->gmc.real_vram_size > adev->gmc.aper_size && in gmc_v7_0_mc_init()
378 adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22; in gmc_v7_0_mc_init()
379 adev->gmc.aper_size = adev->gmc.real_vram_size; in gmc_v7_0_mc_init()
383 adev->gmc.visible_vram_size = adev->gmc.aper_size; in gmc_v7_0_mc_init()
386 if (amdgpu_gart_size == -1) { in gmc_v7_0_mc_init()
387 switch (adev->asic_type) { in gmc_v7_0_mc_init()
390 adev->gmc.gart_size = 256ULL << 20; in gmc_v7_0_mc_init()
398 adev->gmc.gart_size = 1024ULL << 20; in gmc_v7_0_mc_init()
403 adev->gmc.gart_size = (u64)amdgpu_gart_size << 20; in gmc_v7_0_mc_init()
406 adev->gmc.gart_size += adev->pm.smu_prv_buffer_size; in gmc_v7_0_mc_init()
407 gmc_v7_0_vram_gtt_location(adev, &adev->gmc); in gmc_v7_0_mc_init()
409 return 0; in gmc_v7_0_mc_init()
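When the VBIOS width query fails, mc_init falls back to channels × channel-size (a hypothetical 8 channels of 64 bits gives vram_width = 512), reads the VRAM size in MiB from CONFIG_MEMSIZE, and only applies the per-ASIC GART defaults when amdgpu_gart_size is -1 (auto). A sketch of the sizing arithmetic, with hypothetical values:

	/* hypothetical: CONFIG_MEMSIZE reads 4096, i.e. 4 GiB of VRAM */
	u64 vram_size = 4096ULL * 1024ULL * 1024ULL;	/* 0x100000000 */
	/* module parameter wins; otherwise the 256M/1024M ASIC default */
	u64 gart_size = (amdgpu_gart_size == -1) ?
			(1024ULL << 20) : ((u64)amdgpu_gart_size << 20);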
413 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
427 u32 mask = 0x0; in gmc_v7_0_flush_gpu_tlb_pasid()
444 * VMID 0 is the physical GPU addresses as used by the kernel.
445 * VMIDs 1-15 are used for userspace clients and are handled
450 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
462 /* bits 0-15 are the VM contexts0-15 */ in gmc_v7_0_flush_gpu_tlb()
474 reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8; in gmc_v7_0_emit_flush_gpu_tlb()
477 /* bits 0-15 are the VM contexts0-15 */ in gmc_v7_0_emit_flush_gpu_tlb()
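Each of the 16 VM contexts owns one bit of VM_INVALIDATE_REQUEST, so flushing a single VMID is a single register write; the ring variant just emits the same write into the command stream. Minimal form:

	/* flush the TLB for one vmid; bits 0-15 select contexts 0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);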
492 BUG_ON(*addr & 0xFFFFFF0000000FFFULL); in gmc_v7_0_get_vm_pde()
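The BUG_ON mask is worth decoding: it trips on PDE addresses that are either not 4 KiB aligned (low 12 bits set) or outside the 40-bit MC address space (bits 40-63 set):

	/* 0xFFFFFF0000000FFF = bits 63..40 (0xFFFFFF << 40) | bits 11..0 */
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);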
504 * gmc_v7_0_set_fault_enable_default - update VM fault handling
531 * gmc_v7_0_set_prt - set PRT VM fault
540 if (enable && !adev->gmc.prt_warning) { in gmc_v7_0_set_prt()
541 dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n"); in gmc_v7_0_set_prt()
542 adev->gmc.prt_warning = true; in gmc_v7_0_set_prt()
565 uint32_t high = adev->vm_manager.max_pfn - in gmc_v7_0_set_prt()
577 WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff); in gmc_v7_0_set_prt()
578 WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff); in gmc_v7_0_set_prt()
579 WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff); in gmc_v7_0_set_prt()
580 WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff); in gmc_v7_0_set_prt()
581 WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0); in gmc_v7_0_set_prt()
582 WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0); in gmc_v7_0_set_prt()
583 WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0); in gmc_v7_0_set_prt()
584 WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0); in gmc_v7_0_set_prt()
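Disabling PRT makes the apertures empty by inverting them: with LOW (0xfffffff) above HIGH (0x0), no address matches. The enable path, per the max_pfn fragment above, spans everything outside the reserved VA range; a sketch assuming the AMDGPU_VA_RESERVED_SIZE bound used by CIK-era code:

	uint32_t low = AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT;
	uint32_t high = adev->vm_manager.max_pfn -
		(AMDGPU_VA_RESERVED_SIZE >> AMDGPU_GPU_PAGE_SHIFT);

	WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
	WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
	/* ...and likewise for apertures 1-3 */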
589 * gmc_v7_0_gart_enable - gart enable
594 * sets up the hw for VMIDs 1-15 which are allocated on
597 * Returns 0 for success, errors for failure.
605 if (adev->gart.bo == NULL) { in gmc_v7_0_gart_enable()
606 dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); in gmc_v7_0_gart_enable()
607 return -EINVAL; in gmc_v7_0_gart_enable()
609 amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); in gmc_v7_0_gart_enable()
610 table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); in gmc_v7_0_gart_enable()
618 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0); in gmc_v7_0_gart_enable()
630 tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1); in gmc_v7_0_gart_enable()
634 field = adev->vm_manager.fragment_size; in gmc_v7_0_gart_enable()
641 WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12); in gmc_v7_0_gart_enable()
642 WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12); in gmc_v7_0_gart_enable()
645 (u32)(adev->dummy_page_addr >> 12)); in gmc_v7_0_gart_enable()
646 WREG32(mmVM_CONTEXT0_CNTL2, 0); in gmc_v7_0_gart_enable()
649 tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0); in gmc_v7_0_gart_enable()
653 WREG32(0x575, 0); in gmc_v7_0_gart_enable()
654 WREG32(0x576, 0); in gmc_v7_0_gart_enable()
655 WREG32(0x577, 0); in gmc_v7_0_gart_enable()
657 /* empty context1-15 */ in gmc_v7_0_gart_enable()
662 WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); in gmc_v7_0_gart_enable()
663 WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1); in gmc_v7_0_gart_enable()
669 WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8, in gmc_v7_0_gart_enable()
673 /* enable context1-15 */ in gmc_v7_0_gart_enable()
675 (u32)(adev->dummy_page_addr >> 12)); in gmc_v7_0_gart_enable()
681 adev->vm_manager.block_size - 9); in gmc_v7_0_gart_enable()
688 if (adev->asic_type == CHIP_KAVERI) { in gmc_v7_0_gart_enable()
694 gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0); in gmc_v7_0_gart_enable()
695 DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", in gmc_v7_0_gart_enable()
696 (unsigned int)(adev->gmc.gart_size >> 20), in gmc_v7_0_gart_enable()
698 return 0; in gmc_v7_0_gart_enable()
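Two encodings in the context setup above are easy to miss: PAGE_TABLE_BLOCK_SIZE is written as block_size - 9 because the field counts in 512-entry (2^9) page-table units, and contexts 8-15 live in a second register bank, which is where the "+ vmid - 8" addressing comes from. Sketch of the per-context page-table base selection, mirroring the emit_flush fragment above:

	/* VM contexts 0-7 and 8-15 sit in two separate register banks */
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	WREG32(reg, table_addr >> 12);	/* base is a 4 KiB page number */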
705 if (adev->gart.bo) { in gmc_v7_0_gart_init()
707 return 0; in gmc_v7_0_gart_init()
713 adev->gart.table_size = adev->gart.num_gpu_pages * 8; in gmc_v7_0_gart_init()
714 adev->gart.gart_pte_flags = 0; in gmc_v7_0_gart_init()
719 * gmc_v7_0_gart_disable - gart disable
730 WREG32(mmVM_CONTEXT0_CNTL, 0); in gmc_v7_0_gart_disable()
731 WREG32(mmVM_CONTEXT1_CNTL, 0); in gmc_v7_0_gart_disable()
734 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0); in gmc_v7_0_gart_disable()
735 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0); in gmc_v7_0_gart_disable()
736 tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0); in gmc_v7_0_gart_disable()
740 tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0); in gmc_v7_0_gart_disable()
742 WREG32(mmVM_L2_CNTL2, 0); in gmc_v7_0_gart_disable()
746 * gmc_v7_0_vm_decode_fault - print human readable fault info
752 * @pasid: debug logging only - no functional use
762 char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff, in gmc_v7_0_vm_decode_fault()
763 (mc_client >> 8) & 0xff, mc_client & 0xff, 0 }; in gmc_v7_0_vm_decode_fault()
769 dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n", in gmc_v7_0_vm_decode_fault()
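The block[] initializer unpacks the 32-bit memory-client id into its four ASCII bytes, most significant first, giving a printable tag for the fault message. For example, a hypothetical mc_client of 0x54455830 decodes to "TEX0":

	/* 0x54='T', 0x45='E', 0x58='X', 0x30='0' -> "TEX0" */
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
			  (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };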
819 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { in gmc_v7_0_enable_mc_ls()
821 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) in gmc_v7_0_enable_mc_ls()
836 for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) { in gmc_v7_0_enable_mc_mgcg()
838 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) in gmc_v7_0_enable_mc_mgcg()
854 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) { in gmc_v7_0_enable_bif_mgls()
860 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0); in gmc_v7_0_enable_bif_mgls()
861 data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0); in gmc_v7_0_enable_bif_mgls()
862 data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0); in gmc_v7_0_enable_bif_mgls()
863 data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0); in gmc_v7_0_enable_bif_mgls()
877 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG)) in gmc_v7_0_enable_hdp_mgcg()
878 data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0); in gmc_v7_0_enable_hdp_mgcg()
893 if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS)) in gmc_v7_0_enable_hdp_ls()
896 data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0); in gmc_v7_0_enable_hdp_ls()
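All of the gating helpers above share one shape: read the register, set a single field based on enable plus the matching adev->cg_flags capability bit, and write back only when the value actually changed, avoiding redundant register traffic. Condensed (REGNAME/FIELD/FEATURE_FLAG stand in for each helper's specifics):

	orig = data = RREG32(reg);
	if (enable && (adev->cg_flags & FEATURE_FLAG))
		data = REG_SET_FIELD(data, REGNAME, FIELD, gated_value);
	else
		data = REG_SET_FIELD(data, REGNAME, FIELD, ungated_value);
	if (orig != data)
		WREG32(reg, data);	/* write back only on change */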
931 adev->gmc.shared_aperture_start = 0x2000000000000000ULL; in gmc_v7_0_early_init()
932 adev->gmc.shared_aperture_end = in gmc_v7_0_early_init()
933 adev->gmc.shared_aperture_start + (4ULL << 30) - 1; in gmc_v7_0_early_init()
934 adev->gmc.private_aperture_start = in gmc_v7_0_early_init()
935 adev->gmc.shared_aperture_end + 1; in gmc_v7_0_early_init()
936 adev->gmc.private_aperture_end = in gmc_v7_0_early_init()
937 adev->gmc.private_aperture_start + (4ULL << 30) - 1; in gmc_v7_0_early_init()
938 adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF; in gmc_v7_0_early_init()
940 return 0; in gmc_v7_0_early_init()
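The aperture arithmetic above carves two adjacent 4 GiB windows out of the GPUVM address space; worked out:

	u64 shared_start  = 0x2000000000000000ULL;
	u64 shared_end    = shared_start + (4ULL << 30) - 1;	/* 0x20000000FFFFFFFF */
	u64 private_start = shared_end + 1;			/* 0x2000000100000000 */
	u64 private_end   = private_start + (4ULL << 30) - 1;	/* 0x20000001FFFFFFFF */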
948 return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0); in gmc_v7_0_late_init()
950 return 0; in gmc_v7_0_late_init()
976 set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask); in gmc_v7_0_sw_init()
978 if (adev->flags & AMD_IS_APU) { in gmc_v7_0_sw_init()
979 adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN; in gmc_v7_0_sw_init()
984 adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp); in gmc_v7_0_sw_init()
987 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault); in gmc_v7_0_sw_init()
991 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault); in gmc_v7_0_sw_init()
1005 adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */ in gmc_v7_0_sw_init()
1007 r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40)); in gmc_v7_0_sw_init()
1012 adev->need_swiotlb = drm_need_swiotlb(40); in gmc_v7_0_sw_init()
1037 * VMID 0 is reserved for System in gmc_v7_0_sw_init()
1038 * amdgpu graphics/compute will use VMIDs 1-7 in gmc_v7_0_sw_init()
1039 * amdkfd will use VMIDs 8-15 in gmc_v7_0_sw_init()
1041 adev->vm_manager.first_kfd_vmid = 8; in gmc_v7_0_sw_init()
1045 if (adev->flags & AMD_IS_APU) { in gmc_v7_0_sw_init()
1049 adev->vm_manager.vram_base_offset = tmp; in gmc_v7_0_sw_init()
1051 adev->vm_manager.vram_base_offset = 0; in gmc_v7_0_sw_init()
1054 adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info), in gmc_v7_0_sw_init()
1056 if (!adev->gmc.vm_fault_info) in gmc_v7_0_sw_init()
1057 return -ENOMEM; in gmc_v7_0_sw_init()
1058 atomic_set(&adev->gmc.vm_fault_info_updated, 0); in gmc_v7_0_sw_init()
1060 return 0; in gmc_v7_0_sw_init()
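sw_init pins both sides of the MC to 40-bit addressing: mc_mask is (1ULL << 40) - 1 = 0xffffffffff, and the DMA mask is set to the same width so that system pages mapped through the GART stay reachable. The equivalence, spelled out:

	/* DMA_BIT_MASK(40) == (1ULL << 40) - 1 == 0xffffffffffULL == mc_mask */
	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r)
		dev_warn(adev->dev, "No suitable DMA available\n");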
1069 kfree(adev->gmc.vm_fault_info); in gmc_v7_0_sw_fini()
1072 amdgpu_ucode_release(&adev->gmc.fw); in gmc_v7_0_sw_fini()
1074 return 0; in gmc_v7_0_sw_fini()
1086 if (!(adev->flags & AMD_IS_APU)) { in gmc_v7_0_hw_init()
1101 return 0; in gmc_v7_0_hw_init()
1108 amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0); in gmc_v7_0_hw_fini()
1111 return 0; in gmc_v7_0_hw_fini()
1120 return 0; in gmc_v7_0_suspend()
1134 return 0; in gmc_v7_0_resume()
1155 for (i = 0; i < adev->usec_timeout; i++) { in gmc_v7_0_wait_for_idle()
1163 return 0; in gmc_v7_0_wait_for_idle()
1166 return -ETIMEDOUT; in gmc_v7_0_wait_for_idle()
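The idle wait polls SRBM_STATUS once per microsecond, up to adev->usec_timeout iterations, before giving up with -ETIMEDOUT. A sketch of the loop, assuming the CIK MC/VMC busy masks:

	for (i = 0; i < adev->usec_timeout; i++) {
		/* any of the MC blocks still busy? */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;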
1173 u32 srbm_soft_reset = 0; in gmc_v7_0_soft_reset()
1182 if (!(adev->flags & AMD_IS_APU)) in gmc_v7_0_soft_reset()
1190 dev_warn(adev->dev, "Wait for GMC idle timed out!\n"); in gmc_v7_0_soft_reset()
1194 dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp); in gmc_v7_0_soft_reset()
1211 return 0; in gmc_v7_0_soft_reset()
1252 return 0; in gmc_v7_0_vm_fault_interrupt_state()
1268 return 0; in gmc_v7_0_process_interrupt()
1270 amdgpu_vm_update_fault_cache(adev, entry->pasid, in gmc_v7_0_process_interrupt()
1271 ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0)); in gmc_v7_0_process_interrupt()
1277 dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n", in gmc_v7_0_process_interrupt()
1278 entry->src_id, entry->src_data[0]); in gmc_v7_0_process_interrupt()
1279 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", in gmc_v7_0_process_interrupt()
1281 dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", in gmc_v7_0_process_interrupt()
1284 entry->pasid); in gmc_v7_0_process_interrupt()
1290 && !atomic_read(&adev->gmc.vm_fault_info_updated)) { in gmc_v7_0_process_interrupt()
1291 struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info; in gmc_v7_0_process_interrupt()
1296 info->vmid = vmid; in gmc_v7_0_process_interrupt()
1297 info->mc_id = REG_GET_FIELD(status, in gmc_v7_0_process_interrupt()
1300 info->status = status; in gmc_v7_0_process_interrupt()
1301 info->page_addr = addr; in gmc_v7_0_process_interrupt()
1302 info->prot_valid = protections & 0x7 ? true : false; in gmc_v7_0_process_interrupt()
1303 info->prot_read = protections & 0x8 ? true : false; in gmc_v7_0_process_interrupt()
1304 info->prot_write = protections & 0x10 ? true : false; in gmc_v7_0_process_interrupt()
1305 info->prot_exec = protections & 0x20 ? true : false; in gmc_v7_0_process_interrupt()
1307 atomic_set(&adev->gmc.vm_fault_info_updated, 1); in gmc_v7_0_process_interrupt()
1310 return 0; in gmc_v7_0_process_interrupt()
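The vmid and protections values used above come out of VM_CONTEXT1_PROTECTION_FAULT_STATUS via REG_GET_FIELD; the protections word then maps as bits 0-2 = failed validity checks (prot_valid), bit 3 = read, bit 4 = write, bit 5 = execute, matching the 0x7/0x8/0x10/0x20 masks. A sketch of the extraction, assuming the CIK field names:

	vmid = REG_GET_FIELD(status,
			     VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	protections = REG_GET_FIELD(status,
			     VM_CONTEXT1_PROTECTION_FAULT_STATUS, PROTECTIONS);
	mc_id = REG_GET_FIELD(status,
			     VM_CONTEXT1_PROTECTION_FAULT_STATUS, MEMORY_CLIENT_ID);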
1322 if (!(adev->flags & AMD_IS_APU)) { in gmc_v7_0_set_clockgating_state()
1330 return 0; in gmc_v7_0_set_clockgating_state()
1336 return 0; in gmc_v7_0_set_powergating_state()
1376 adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs; in gmc_v7_0_set_gmc_funcs()
1381 adev->gmc.vm_fault.num_types = 1; in gmc_v7_0_set_irq_funcs()
1382 adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs; in gmc_v7_0_set_irq_funcs()
1388 .minor = 0,
1389 .rev = 0,
1397 .rev = 0,