/linux-6.12.1/drivers/gpu/drm/amd/amdkfd/
D | kfd_flat_memory.c
    317  static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)  in kfd_init_apertures_vi() argument
    323  pdd->lds_base = MAKE_LDS_APP_BASE_VI();  in kfd_init_apertures_vi()
    324  pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);  in kfd_init_apertures_vi()
    330  pdd->gpuvm_base = max(SVM_USER_BASE, AMDGPU_VA_RESERVED_BOTTOM);  in kfd_init_apertures_vi()
    331  pdd->gpuvm_limit =  in kfd_init_apertures_vi()
    332  pdd->dev->kfd->shared_resources.gpuvm_size - 1;  in kfd_init_apertures_vi()
    337  pdd->qpd.cwsr_base = SVM_CWSR_BASE;  in kfd_init_apertures_vi()
    338  pdd->qpd.ib_base = SVM_IB_BASE;  in kfd_init_apertures_vi()
    340  pdd->scratch_base = MAKE_SCRATCH_APP_BASE_VI();  in kfd_init_apertures_vi()
    341  pdd->scratch_limit = MAKE_SCRATCH_APP_LIMIT(pdd->scratch_base);  in kfd_init_apertures_vi()
    [all …]
D | kfd_debug.c
    72   struct kfd_process_device *pdd = process->pdds[i];  in kfd_dbg_ev_query_debug_event() local
    74   & pdd->exception_status;  in kfd_dbg_ev_query_debug_event()
    79   *event_status = pdd->exception_status;  in kfd_dbg_ev_query_debug_event()
    80   *gpu_id = pdd->dev->id;  in kfd_dbg_ev_query_debug_event()
    81   pdd->exception_status &= ~exception_clear_mask;  in kfd_dbg_ev_query_debug_event()
    133  struct kfd_process_device *pdd = process->pdds[i];  in kfd_dbg_ev_raise() local
    135  if (pdd->dev != dev)  in kfd_dbg_ev_raise()
    138  pdd->exception_status |= event_mask & KFD_EC_MASK_DEVICE;  in kfd_dbg_ev_raise()
    141  if (!pdd->vm_fault_exc_data) {  in kfd_dbg_ev_raise()
    142  pdd->vm_fault_exc_data = kmemdup(  in kfd_dbg_ev_raise()
    [all …]
D | kfd_process.c
    75   static void kfd_process_device_destroy_cwsr_dgpu(struct kfd_process_device *pdd);
    88   struct kfd_process_device *pdd;  member
    102  struct kfd_process_device *pdd;  in kfd_sdma_activity_worker() local
    115  pdd = workarea->pdd;  in kfd_sdma_activity_worker()
    116  if (!pdd)  in kfd_sdma_activity_worker()
    118  dqm = pdd->dev->dqm;  in kfd_sdma_activity_worker()
    119  qpd = &pdd->qpd;  in kfd_sdma_activity_worker()
    174  workarea->sdma_activity_counter = pdd->sdma_past_activity_counter;  in kfd_sdma_activity_worker()
    184  mm = get_task_mm(pdd->process->lead_thread);  in kfd_sdma_activity_worker()
    211  workarea->sdma_activity_counter += pdd->sdma_past_activity_counter;  in kfd_sdma_activity_worker()
    [all …]
D | kfd_chardev.c
    72   struct kfd_process_device *pdd;  in kfd_lock_pdd_by_id() local
    75   pdd = kfd_process_device_data_by_id(p, gpu_id);  in kfd_lock_pdd_by_id()
    77   if (pdd)  in kfd_lock_pdd_by_id()
    78   return pdd;  in kfd_lock_pdd_by_id()
    84   static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)  in kfd_unlock_pdd() argument
    86   mutex_unlock(&pdd->process->mutex);  in kfd_unlock_pdd()
    308  struct kfd_process_device *pdd;  in kfd_ioctl_create_queue() local
    324  pdd = kfd_process_device_data_by_id(p, args->gpu_id);  in kfd_ioctl_create_queue()
    325  if (!pdd) {  in kfd_ioctl_create_queue()
    330  dev = pdd->dev;  in kfd_ioctl_create_queue()
    [all …]
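The kfd_lock_pdd_by_id()/kfd_unlock_pdd() hits above outline a lock-then-lookup helper pair around the per-process mutex. A hedged reconstruction follows; only the lookup call (line 75) and the final mutex_unlock() (line 86) appear in the listing, so the mutex_lock() and the unlock-on-miss path are assumptions, and the kfd_priv.h types are taken as given.

/*
 * Hedged reconstruction of the lock/lookup/unlock pairing suggested by
 * the kfd_lock_pdd_by_id() and kfd_unlock_pdd() fragments above.  The
 * mutex_lock() and the unlock-on-miss path are assumptions; only the
 * lookup and the unlock in kfd_unlock_pdd() are visible in the listing.
 */
static struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p,
						     __u32 gpu_id)
{
	struct kfd_process_device *pdd;

	mutex_lock(&p->mutex);				/* assumed */
	pdd = kfd_process_device_data_by_id(p, gpu_id);
	if (pdd)
		return pdd;	/* caller drops the lock via kfd_unlock_pdd() */

	mutex_unlock(&p->mutex);			/* assumed */
	return NULL;
}

static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
{
	mutex_unlock(&pdd->process->mutex);
}

kfd_ioctl_create_queue() performs the same kfd_process_device_data_by_id() lookup directly, as the line-324 hit shows.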
D | kfd_process_queue_manager.c
    83   void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)  in kfd_process_dequeue_from_device() argument
    85   struct kfd_node *dev = pdd->dev;  in kfd_process_dequeue_from_device()
    87   if (pdd->already_dequeued)  in kfd_process_dequeue_from_device()
    90   dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);  in kfd_process_dequeue_from_device()
    94   pdd->proc_ctx_gpu_addr);  in kfd_process_dequeue_from_device()
    97   pdd->already_dequeued = true;  in kfd_process_dequeue_from_device()
    106  struct kfd_process_device *pdd;  in pqm_set_gws() local
    121  pdd = kfd_get_process_device_data(dev, pqm->process);  in pqm_set_gws()
    122  if (!pdd) {  in pqm_set_gws()
    128  if (gws && pdd->qpd.num_gws)  in pqm_set_gws()
    [all …]
D | kfd_doorbell.c
    110  struct kfd_process_device *pdd;  in kfd_doorbell_mmap() local
    119  pdd = kfd_get_process_device_data(dev, process);  in kfd_doorbell_mmap()
    120  if (!pdd)  in kfd_doorbell_mmap()
    124  address = kfd_get_process_doorbells(pdd);  in kfd_doorbell_mmap()
    235  phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd)  in kfd_get_process_doorbells() argument
    237  struct amdgpu_device *adev = pdd->dev->adev;  in kfd_get_process_doorbells()
    240  if (!pdd->qpd.proc_doorbells) {  in kfd_get_process_doorbells()
    241  if (kfd_alloc_process_doorbells(pdd->dev->kfd, pdd))  in kfd_get_process_doorbells()
    247  pdd->qpd.proc_doorbells,  in kfd_get_process_doorbells()
    249  pdd->dev->kfd->device_info.doorbell_size);  in kfd_get_process_doorbells()
    [all …]
D | kfd_queue.c
    90   static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)  in kfd_queue_buffer_svm_get() argument
    92   struct kfd_process *p = pdd->process;  in kfd_queue_buffer_svm_get()
    117  r = kfd_process_gpuid_from_node(p, pdd->dev, &gpuid, &gpuidx);  in kfd_queue_buffer_svm_get()
    151  static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)  in kfd_queue_buffer_svm_put() argument
    153  struct kfd_process *p = pdd->process;  in kfd_queue_buffer_svm_put()
    185  static int kfd_queue_buffer_svm_get(struct kfd_process_device *pdd, u64 addr, u64 size)  in kfd_queue_buffer_svm_get() argument
    190  static void kfd_queue_buffer_svm_put(struct kfd_process_device *pdd, u64 addr, u64 size)  in kfd_queue_buffer_svm_put() argument
    233  int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties)  in kfd_queue_acquire_buffers() argument
    240  topo_dev = kfd_topology_device_by_id(pdd->dev->id);  in kfd_queue_acquire_buffers()
    244  vm = drm_priv_to_vm(pdd->drm_priv);  in kfd_queue_acquire_buffers()
    [all …]
D | kfd_device_queue_manager_v9.c
    42   static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)  in compute_sh_mem_bases_64bit() argument
    44   uint32_t shared_base = pdd->lds_base >> 48;  in compute_sh_mem_bases_64bit()
    45   uint32_t private_base = pdd->scratch_base >> 48;  in compute_sh_mem_bases_64bit()
    54   struct kfd_process_device *pdd;  in update_qpd_v9() local
    56   pdd = qpd_to_pdd(qpd);  in update_qpd_v9()
    76   if (!pdd->process->xnack_enabled)  in update_qpd_v9()
    82   qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);  in update_qpd_v9()
D | kfd_device_queue_manager_v11.c
    42   static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)  in compute_sh_mem_bases_64bit() argument
    44   uint32_t shared_base = pdd->lds_base >> 48;  in compute_sh_mem_bases_64bit()
    45   uint32_t private_base = pdd->scratch_base >> 48;  in compute_sh_mem_bases_64bit()
    54   struct kfd_process_device *pdd;  in update_qpd_v11() local
    56   pdd = qpd_to_pdd(qpd);  in update_qpd_v11()
    69   qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);  in update_qpd_v11()
D | kfd_device_queue_manager_v10.c
    43   static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)  in compute_sh_mem_bases_64bit() argument
    45   uint32_t shared_base = pdd->lds_base >> 48;  in compute_sh_mem_bases_64bit()
    46   uint32_t private_base = pdd->scratch_base >> 48;  in compute_sh_mem_bases_64bit()
    55   struct kfd_process_device *pdd;  in update_qpd_v10() local
    57   pdd = qpd_to_pdd(qpd);  in update_qpd_v10()
    69   qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);  in update_qpd_v10()
D | kfd_device_queue_manager_v12.c
    42   static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)  in compute_sh_mem_bases_64bit() argument
    44   uint32_t shared_base = pdd->lds_base >> 48;  in compute_sh_mem_bases_64bit()
    45   uint32_t private_base = pdd->scratch_base >> 48;  in compute_sh_mem_bases_64bit()
    54   struct kfd_process_device *pdd;  in update_qpd_v12() local
    56   pdd = qpd_to_pdd(qpd);  in update_qpd_v12()
    69   qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);  in update_qpd_v12()
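The four GFX v9/v10/v11/v12 files above all carry the same compute_sh_mem_bases_64bit() helper, which folds the top 16 bits of the LDS and scratch aperture bases into one packed SH_MEM_BASES value. A minimal sketch under that reading follows; the field layout (private base in the low half, shared base shifted into the high half) and the SHARED_BASE_SHIFT constant are assumptions, since the register macros are not shown in the listing.

/*
 * Sketch of compute_sh_mem_bases_64bit() as suggested by the fragments
 * above.  Only the two ">> 48" shifts are visible; the packing layout
 * and SHARED_BASE_SHIFT below are assumptions, not taken from the
 * register headers.
 */
#define SHARED_BASE_SHIFT	16	/* assumed field position */

static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
{
	uint32_t shared_base  = pdd->lds_base >> 48;
	uint32_t private_base = pdd->scratch_base >> 48;

	return (shared_base << SHARED_BASE_SHIFT) | private_base;
}

update_qpd_v9() through update_qpd_v12() then store the result in qpd->sh_mem_bases, as the final hit in each entry shows.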
D | kfd_device_queue_manager.c
    164  struct kfd_process_device *pdd = qpd_to_pdd(qpd);  in kfd_hws_hang() local
    166  pdd->has_reset_queue = true;  in kfd_hws_hang()
    200  struct kfd_process_device *pdd = qpd_to_pdd(qpd);  in add_queue_mes() local
    215  queue_input.process_context_addr = pdd->proc_ctx_gpu_addr;  in add_queue_mes()
    755  struct kfd_process_device *pdd;  in dbgdev_wave_reset_wavefronts() local
    793  pdd = kfd_get_process_device_data(dev, p);  in dbgdev_wave_reset_wavefronts()
    794  if (!pdd)  in dbgdev_wave_reset_wavefronts()
    881  struct kfd_process_device *pdd = qpd_to_pdd(qpd);  in destroy_queue_nocpsch() local
    898  pdd->sdma_past_activity_counter += sdma_val;  in destroy_queue_nocpsch()
    912  struct kfd_process_device *pdd;  in update_queue() local
    [all …]
D | kfd_svm.c
    217  struct kfd_process_device *pdd;  in svm_range_dma_map() local
    220  pdd = kfd_process_device_from_gpuidx(p, gpuidx);  in svm_range_dma_map()
    221  if (!pdd) {  in svm_range_dma_map()
    226  r = svm_range_dma_map_dev(pdd->dev->adev, prange, offset, npages,  in svm_range_dma_map()
    255  struct kfd_process_device *pdd;  in svm_range_dma_unmap() local
    268  pdd = kfd_process_device_from_gpuidx(p, gpuidx);  in svm_range_dma_unmap()
    269  if (!pdd) {  in svm_range_dma_unmap()
    273  dev = &pdd->dev->adev->pdev->dev;  in svm_range_dma_unmap()
    410  struct kfd_process_device *pdd;  in svm_range_bo_release() local
    421  pdd = kfd_get_process_device_data(svm_bo->node, p);  in svm_range_bo_release()
    [all …]
D | kfd_priv.h
    1063  int kfd_process_device_init_vm(struct kfd_process_device *pdd,
    1078  int kfd_process_device_create_obj_handle(struct kfd_process_device *pdd,
    1082  void kfd_process_device_remove_obj_handle(struct kfd_process_device *pdd,
    1107  struct kfd_process_device *pdd,
    1109  phys_addr_t kfd_get_process_doorbells(struct kfd_process_device *pdd);
    1111  struct kfd_process_device *pdd);
    1113  struct kfd_process_device *pdd);
    1177  int kfd_process_drain_interrupts(struct kfd_process_device *pdd);
    1310  int kfd_queue_acquire_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
    1311  int kfd_queue_release_buffers(struct kfd_process_device *pdd, struct queue_properties *properties);
    [all …]
D | kfd_packet_manager_v9.c
    38   struct kfd_process_device *pdd =  in pm_map_process_v9() local
    58   if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&  in pm_map_process_v9()
    59   pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {  in pm_map_process_v9()
    96   struct kfd_process_device *pdd =  in pm_map_process_aldebaran() local
    116  packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |  in pm_map_process_aldebaran()
    117  pdd->spi_dbg_launch_mode;  in pm_map_process_aldebaran()
    119  if (pdd->process->debug_trap_enabled) {  in pm_map_process_aldebaran()
    121  packet->tcp_watch_cntl[i] = pdd->watch_points[i];  in pm_map_process_aldebaran()
    124  !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);  in pm_map_process_aldebaran()
D | kfd_device_queue_manager_vi.c
    109  struct kfd_process_device *pdd;  in update_qpd_vi() local
    112  pdd = qpd_to_pdd(qpd);  in update_qpd_vi()
    131  temp = get_sh_mem_bases_nybble_64(pdd);  in update_qpd_vi()
D | kfd_device_queue_manager_cik.c
    107  struct kfd_process_device *pdd;  in update_qpd_cik() local
    110  pdd = qpd_to_pdd(qpd);  in update_qpd_cik()
    125  temp = get_sh_mem_bases_nybble_64(pdd);  in update_qpd_cik()
D | kfd_debug.h
    58   int kfd_dbg_trap_clear_dev_address_watch(struct kfd_process_device *pdd,
    60   int kfd_dbg_trap_set_dev_address_watch(struct kfd_process_device *pdd,
    130  int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd, bool sq_trap_en);
D | kfd_device_queue_manager.h
    331  static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)  in get_sh_mem_bases_32() argument
    333  return (pdd->lds_base >> 16) & 0xFF;  in get_sh_mem_bases_32()
    337  get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)  in get_sh_mem_bases_nybble_64() argument
    339  return (pdd->lds_base >> 60) & 0x0E;  in get_sh_mem_bases_nybble_64()
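The two inline helpers from kfd_device_queue_manager.h are visible essentially in full above; the restatement below only adds comments describing which aperture bits each one extracts (the comments are interpretation, not taken from the header).

/* Restatement of the helpers shown above, with explanatory comments
 * added; the comments are interpretation, not from the header.
 */
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	/* Bits 23:16 of the LDS aperture base. */
	return (pdd->lds_base >> 16) & 0xFF;
}

static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	/* Top nybble of the 64-bit LDS aperture base, with bit 0 cleared,
	 * as consumed by update_qpd_vi()/update_qpd_cik() in the entries
	 * above.
	 */
	return (pdd->lds_base >> 60) & 0x0E;
}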
D | kfd_migrate.c
    388  struct kfd_process_device *pdd;  in svm_migrate_vma_to_vram() local
    458  pdd = svm_range_get_pdd_by_node(prange, node);  in svm_migrate_vma_to_vram()
    459  if (pdd)  in svm_migrate_vma_to_vram()
    460  WRITE_ONCE(pdd->page_in, pdd->page_in + mpages);  in svm_migrate_vma_to_vram()
    690  struct kfd_process_device *pdd;  in svm_migrate_vma_to_ram() local
    765  pdd = svm_range_get_pdd_by_node(prange, node);  in svm_migrate_vma_to_ram()
    766  if (pdd)  in svm_migrate_vma_to_ram()
    767  WRITE_ONCE(pdd->page_out, pdd->page_out + mpages);  in svm_migrate_vma_to_ram()
D | kfd_events.c
    353   struct kfd_process_device *pdd;  in kfd_kmap_event_page() local
    363   pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(event_page_offset));  in kfd_kmap_event_page()
    364   if (!pdd) {  in kfd_kmap_event_page()
    368   kfd = pdd->dev;  in kfd_kmap_event_page()
    370   pdd = kfd_bind_process_to_device(kfd, p);  in kfd_kmap_event_page()
    371   if (IS_ERR(pdd))  in kfd_kmap_event_page()
    372   return PTR_ERR(pdd);  in kfd_kmap_event_page()
    374   mem = kfd_process_device_translate_handle(pdd,  in kfd_kmap_event_page()
    1248  struct kfd_process_device *pdd = kfd_get_process_device_data(dev, p);  in kfd_signal_reset_event() local
    1255  if (unlikely(!pdd)) {  in kfd_signal_reset_event()
    [all …]
/linux-6.12.1/drivers/pmdomain/
D | governor.c
    129  struct pm_domain_data *pdd;  in update_domain_next_wakeup() local
    143  list_for_each_entry(pdd, &genpd->dev_list, list_node) {  in update_domain_next_wakeup()
    144  next_wakeup = to_gpd_data(pdd)->td->next_wakeup;  in update_domain_next_wakeup()
    181  struct pm_domain_data *pdd;  in __default_power_down_ok() local
    217  list_for_each_entry(pdd, &genpd->dev_list, list_node) {  in __default_power_down_ok()
    226  td = to_gpd_data(pdd)->td;  in __default_power_down_ok()
D | core.c
    317  struct pm_domain_data *pdd;  in _genpd_reeval_performance_state() local
    329  list_for_each_entry(pdd, &genpd->dev_list, list_node) {  in _genpd_reeval_performance_state()
    330  pd_data = to_gpd_data(pdd);  in _genpd_reeval_performance_state()
    828  struct pm_domain_data *pdd;  in genpd_power_off() local
    863  list_for_each_entry(pdd, &genpd->dev_list, list_node) {  in genpd_power_off()
    868  if (!pm_runtime_suspended(pdd->dev) ||  in genpd_power_off()
    869  irq_safe_dev_in_sleep_domain(pdd->dev, genpd))  in genpd_power_off()
    985  struct pm_domain_data *pdd;  in genpd_dev_pm_qos_notifier() local
    990  pdd = dev->power.subsys_data ?  in genpd_dev_pm_qos_notifier()
    992  if (pdd) {  in genpd_dev_pm_qos_notifier()
    [all …]
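In the drivers/pmdomain hits, pdd is a struct pm_domain_data * rather than a KFD structure: governor.c and core.c both walk a domain's dev_list and upcast each entry with to_gpd_data(). A hedged sketch of that iteration idiom is below; walk_genpd_devices() and its loop body are illustrative only, and the caller is assumed to hold the genpd lock as the real users do.

#include <linux/device.h>
#include <linux/pm_domain.h>

/*
 * Iteration idiom shared by governor.c and core.c above: walk the
 * devices attached to a generic PM domain and reach their per-device
 * genpd data via to_gpd_data().  walk_genpd_devices() and the loop
 * body are illustrative only.
 */
static void walk_genpd_devices(struct generic_pm_domain *genpd)
{
	struct pm_domain_data *pdd;

	list_for_each_entry(pdd, &genpd->dev_list, list_node) {
		struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);

		/* pdd->dev is the attached struct device; gpd_data carries
		 * genpd-specific state such as the timing data (td) used by
		 * the governor above.
		 */
		dev_dbg(pdd->dev, "attached to %s\n", genpd->name);
		(void)gpd_data;
	}
}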
/linux-6.12.1/drivers/input/touchscreen/
D | wm9705.c
    77   static int pdd = 8;  variable
    78   module_param(pdd, int, 0);
    79   MODULE_PARM_DESC(pdd, "Set pen detect comparator threshold");
    168  dig2 |= (pdd & 0x000f);  in wm9705_phy_init()
    169  dev_dbg(wm->dev, "setting pdd to Vmid/%d", 1 - (pdd & 0x000f));  in wm9705_phy_init()
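In wm9705.c, pdd plays a different role again: a module parameter that sets the pen-detect comparator threshold and is masked into a digitiser register value during PHY init. A small sketch of that parameter pattern follows; build_dig2() and its argument are placeholders, not the driver's real names.

#include <linux/module.h>
#include <linux/types.h>

/*
 * Module-parameter pattern used by wm9705.c above: a load-time tunable
 * masked to 4 bits and merged into a register value.  build_dig2() and
 * its argument are placeholders; only the parameter definition and the
 * "dig2 |= (pdd & 0x000f)" merge come from the listing.
 */
static int pdd = 8;
module_param(pdd, int, 0);
MODULE_PARM_DESC(pdd, "Set pen detect comparator threshold");

static u16 build_dig2(u16 dig2_defaults)
{
	u16 dig2 = dig2_defaults;

	dig2 |= (pdd & 0x000f);
	return dig2;
}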
/linux-6.12.1/drivers/pmdomain/xilinx/
D | zynqmp-pm-domains.c
    108  struct pm_domain_data *pdd, *tmp;  in zynqmp_gpd_power_off() local
    119  list_for_each_entry_safe(pdd, tmp, &domain->dev_list, list_node) {  in zynqmp_gpd_power_off()
    121  may_wakeup = zynqmp_gpd_is_active_wakeup_path(pdd->dev, NULL);  in zynqmp_gpd_power_off()
    123  dev_dbg(pdd->dev, "device is in wakeup path in %s\n",  in zynqmp_gpd_power_off()