
Searched refs: tmp_adev (Results 1 – 7 of 7), sorted by relevance

/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/
aldebaran.c:149 struct amdgpu_device *tmp_adev = NULL; in aldebaran_mode2_perform_reset() local
163 list_for_each_entry(tmp_adev, reset_device_list, reset_list) { in aldebaran_mode2_perform_reset()
164 mutex_lock(&tmp_adev->reset_cntl->reset_lock); in aldebaran_mode2_perform_reset()
165 tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2; in aldebaran_mode2_perform_reset()
171 list_for_each_entry(tmp_adev, reset_device_list, reset_list) { in aldebaran_mode2_perform_reset()
173 if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) { in aldebaran_mode2_perform_reset()
175 &tmp_adev->reset_cntl->reset_work)) in aldebaran_mode2_perform_reset()
178 r = aldebaran_mode2_reset(tmp_adev); in aldebaran_mode2_perform_reset()
180 dev_err(tmp_adev->dev, in aldebaran_mode2_perform_reset()
182 r, adev_to_drm(tmp_adev)->unique); in aldebaran_mode2_perform_reset()
[all …]
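
A minimal sketch of the two-pass pattern the aldebaran hits show, assuming the amdgpu driver headers are in scope; the workqueue used for the XGMI case and the exact error handling are assumptions, since the listing is truncated. Pass one takes every device's reset_lock and records the active reset method; pass two either queues the per-device reset_work (multi-node XGMI) or calls aldebaran_mode2_reset() directly.

/* Sketch only: condensed from aldebaran_mode2_perform_reset();
 * compiles only inside the amdgpu driver, where amdgpu.h and
 * amdgpu_reset.h provide the types used here. */
static int mode2_perform_reset_sketch(struct list_head *reset_device_list)
{
	struct amdgpu_device *tmp_adev = NULL;
	int r = 0;

	/* Pass 1: serialize against concurrent resets on every device.
	 * The matching mutex_unlock happens later in the reset flow,
	 * outside this excerpt. */
	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		mutex_lock(&tmp_adev->reset_cntl->reset_lock);
		tmp_adev->reset_cntl->active_reset = AMD_RESET_METHOD_MODE2;
	}

	/* Pass 2: multi-node XGMI devices reset via their work item
	 * (system_unbound_wq is an assumption); others reset inline. */
	list_for_each_entry(tmp_adev, reset_device_list, reset_list) {
		if (tmp_adev->gmc.xgmi.num_physical_nodes > 1) {
			if (!queue_work(system_unbound_wq,
					&tmp_adev->reset_cntl->reset_work))
				r = -EALREADY;
		} else {
			r = aldebaran_mode2_reset(tmp_adev);
		}
		if (r)
			dev_err(tmp_adev->dev,
				"ASIC reset failed with error, %d for drm dev, %s\n",
				r, adev_to_drm(tmp_adev)->unique);
	}
	return r;
}
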
amdgpu_device.c:5249 struct amdgpu_device *tmp_adev = reset_context->reset_req_dev; in amdgpu_device_pre_asic_reset() local
5307 dev_info(tmp_adev->dev, "Dumping IP State\n"); in amdgpu_device_pre_asic_reset()
5309 for (i = 0; i < tmp_adev->num_ip_blocks; i++) in amdgpu_device_pre_asic_reset()
5310 if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state) in amdgpu_device_pre_asic_reset()
5311 tmp_adev->ip_blocks[i].version->funcs in amdgpu_device_pre_asic_reset()
5312 ->dump_ip_state((void *)tmp_adev); in amdgpu_device_pre_asic_reset()
5313 dev_info(tmp_adev->dev, "Dumping IP State Completed\n"); in amdgpu_device_pre_asic_reset()
5331 struct amdgpu_device *tmp_adev = NULL; in amdgpu_do_asic_reset() local
5336 tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device, in amdgpu_do_asic_reset()
5340 r = amdgpu_reset_perform_reset(tmp_adev, reset_context); in amdgpu_do_asic_reset()
[all …]
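
The amdgpu_device.c hits around line 5307 reassemble almost verbatim into the dump loop below, a sketch assuming amdgpu.h is in scope: each IP block that implements dump_ip_state is invoked with the device as its opaque handle before the ASIC reset proceeds.

/* Sketch: IP-state dump loop from amdgpu_device_pre_asic_reset(),
 * reassembled from the hits above. */
static void dump_ip_state_sketch(struct amdgpu_device *tmp_adev)
{
	int i;

	dev_info(tmp_adev->dev, "Dumping IP State\n");
	for (i = 0; i < tmp_adev->num_ip_blocks; i++)
		if (tmp_adev->ip_blocks[i].version->funcs->dump_ip_state)
			tmp_adev->ip_blocks[i].version->funcs
				->dump_ip_state((void *)tmp_adev);
	dev_info(tmp_adev->dev, "Dumping IP State Completed\n");
}
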
smu_v13_0_10.c:234 struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle; in smu_v13_0_10_mode2_restore_hwcontext() local
236 dev_info(tmp_adev->dev, in smu_v13_0_10_mode2_restore_hwcontext()
238 r = smu_v13_0_10_mode2_restore_ip(tmp_adev); in smu_v13_0_10_mode2_restore_hwcontext()
242 amdgpu_register_gpu_instance(tmp_adev); in smu_v13_0_10_mode2_restore_hwcontext()
245 amdgpu_ras_resume(tmp_adev); in smu_v13_0_10_mode2_restore_hwcontext()
247 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); in smu_v13_0_10_mode2_restore_hwcontext()
249 r = amdgpu_ib_ring_tests(tmp_adev); in smu_v13_0_10_mode2_restore_hwcontext()
251 dev_err(tmp_adev->dev, in smu_v13_0_10_mode2_restore_hwcontext()
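
The restore path reads as an ordered recovery sequence. Below is a hedged sketch of that ordering, condensed from smu_v13_0_10_mode2_restore_hwcontext(); the early-return error handling between steps is an assumption, since the listing elides it.

/* Sketch: restore ordering from smu_v13_0_10_mode2_restore_hwcontext();
 * intermediate error paths are assumed, not copied. */
static int mode2_restore_hwcontext_sketch(struct amdgpu_reset_control *reset_ctl)
{
	struct amdgpu_device *tmp_adev =
		(struct amdgpu_device *)reset_ctl->handle;
	int r;

	r = smu_v13_0_10_mode2_restore_ip(tmp_adev);  /* bring IP blocks back */
	if (r)
		return r;

	amdgpu_register_gpu_instance(tmp_adev);       /* rejoin device lists */
	amdgpu_ras_resume(tmp_adev);                  /* re-enable RAS handling */
	amdgpu_irq_gpu_reset_resume_helper(tmp_adev); /* rearm interrupts */

	r = amdgpu_ib_ring_tests(tmp_adev);           /* smoke-test the rings */
	if (r)
		dev_err(tmp_adev->dev, "ib ring test failed (%d).\n", r);
	return r;
}
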
sienna_cichlid.c:239 struct amdgpu_device *tmp_adev = (struct amdgpu_device *)reset_ctl->handle; in sienna_cichlid_mode2_restore_hwcontext() local
241 dev_info(tmp_adev->dev, in sienna_cichlid_mode2_restore_hwcontext()
243 r = sienna_cichlid_mode2_restore_ip(tmp_adev); in sienna_cichlid_mode2_restore_hwcontext()
251 amdgpu_register_gpu_instance(tmp_adev); in sienna_cichlid_mode2_restore_hwcontext()
254 amdgpu_ras_resume(tmp_adev); in sienna_cichlid_mode2_restore_hwcontext()
256 amdgpu_irq_gpu_reset_resume_helper(tmp_adev); in sienna_cichlid_mode2_restore_hwcontext()
258 r = amdgpu_ib_ring_tests(tmp_adev); in sienna_cichlid_mode2_restore_hwcontext()
260 dev_err(tmp_adev->dev, in sienna_cichlid_mode2_restore_hwcontext()
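
sienna_cichlid_mode2_restore_hwcontext() follows the same skeleton as the smu_v13_0_10 handler sketched above; only the *_mode2_restore_ip() callee differs. Each ASIC supplies its own amdgpu_reset_control handler, but the post-reset recovery order (restore IP, register instance, resume RAS, rearm IRQs, IB ring tests) is shared.
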
amdgpu_xgmi.c:812 struct amdgpu_device *tmp_adev; in amdgpu_xgmi_initialize_hive_get_data_partition() local
815 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_initialize_hive_get_data_partition()
816 ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data, false); in amdgpu_xgmi_initialize_hive_get_data_partition()
818 dev_err(tmp_adev->dev, in amdgpu_xgmi_initialize_hive_get_data_partition()
856 struct amdgpu_device *tmp_adev = NULL; in amdgpu_xgmi_add_device() local
912 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
914 if (tmp_adev != adev) { in amdgpu_xgmi_add_device()
915 top_info = &tmp_adev->psp.xgmi_context.top_info; in amdgpu_xgmi_add_device()
920 ret = amdgpu_xgmi_update_topology(hive, tmp_adev); in amdgpu_xgmi_add_device()
940 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) { in amdgpu_xgmi_add_device()
[all …]
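
Hive members are linked through their gmc.xgmi.head node on hive->device_list, which is the iteration every XGMI hit above uses. A minimal sketch of the first result, amdgpu_xgmi_initialize_hive_get_data_partition(), assuming the amdgpu driver headers; the error-message text is an assumption.

/* Sketch: hive walk from amdgpu_xgmi_initialize_hive_get_data_partition(). */
static int hive_psp_init_sketch(struct amdgpu_hive_info *hive,
				bool set_extended_data)
{
	struct amdgpu_device *tmp_adev;
	int ret;

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
		/* Initialize PSP XGMI on every member of the hive. */
		ret = psp_xgmi_initialize(&tmp_adev->psp, set_extended_data,
					  false);
		if (ret) {
			dev_err(tmp_adev->dev,
				"XGMI: PSP init failed (%d)\n", ret);
			return ret;
		}
	}
	return 0;
}
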
amdgpu_job.c:55 struct amdgpu_device *tmp_adev = NULL; in amdgpu_job_core_dump() local
68 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) in amdgpu_job_core_dump()
69 list_add_tail(&tmp_adev->reset_list, &device_list); in amdgpu_job_core_dump()
79 list_for_each_entry(tmp_adev, device_list_handle, reset_list) in amdgpu_job_core_dump()
80 amdgpu_job_do_core_dump(tmp_adev, job); in amdgpu_job_core_dump()
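
amdgpu_job_core_dump() reuses each device's embedded reset_list node to chain a throwaway list of hive members, then dumps them one by one. A sketch under the assumption that a hive exists; the single-device fallback is elided in the results above.

/* Sketch: condensed from amdgpu_job_core_dump() in amdgpu_job.c. */
static void job_core_dump_sketch(struct amdgpu_hive_info *hive,
				 struct amdgpu_job *job)
{
	struct amdgpu_device *tmp_adev = NULL;
	struct list_head device_list, *device_list_handle;

	INIT_LIST_HEAD(&device_list);

	/* Chain every hive member through its embedded reset_list node. */
	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
		list_add_tail(&tmp_adev->reset_list, &device_list);
	device_list_handle = &device_list;

	/* Dump core state for each chained device. */
	list_for_each_entry(tmp_adev, device_list_handle, reset_list)
		amdgpu_job_do_core_dump(tmp_adev, job);
}
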
amdgpu_ras.c:2521 struct amdgpu_device *tmp_adev; in amdgpu_ras_set_fed_all() local
2524 list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) in amdgpu_ras_set_fed_all()
2525 amdgpu_ras_set_fed(tmp_adev, status); in amdgpu_ras_set_fed_all()
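
The RAS hit is the simplest instance of the hive walk: amdgpu_ras_set_fed_all() fans one fatal-error-detected (FED) status out to every device in the hive. Reassembled below, essentially verbatim from the hits above.

/* Sketch: amdgpu_ras_set_fed_all(), reassembled from the results. */
static void ras_set_fed_all_sketch(struct amdgpu_hive_info *hive,
				   bool status)
{
	struct amdgpu_device *tmp_adev;

	list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
		amdgpu_ras_set_fed(tmp_adev, status);
}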