Lines Matching "adreno-gmu-wrapper"

1 // SPDX-License-Identifier: GPL-2.0-only
17 #include <linux/nvmem-consumer.h>
33 struct device *dev = &gpu->pdev->dev; in zap_shader_load_mdt()
45 return -EINVAL; in zap_shader_load_mdt()
48 np = of_get_child_by_name(dev->of_node, "zap-shader"); in zap_shader_load_mdt()
51 return -ENODEV; in zap_shader_load_mdt()
54 mem_np = of_parse_phandle(np, "memory-region", 0); in zap_shader_load_mdt()
58 return -EINVAL; in zap_shader_load_mdt()
69 * Check for a firmware-name property. This is the new scheme in zap_shader_load_mdt()
74 * If the firmware-name property is found, we bypass the in zap_shader_load_mdt()
78 * If the firmware-name property is not found, for backwards in zap_shader_load_mdt()
82 of_property_read_string_index(np, "firmware-name", 0, &signed_fwname); in zap_shader_load_mdt()
85 ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); in zap_shader_load_mdt()
93 * For new targets, we require the firmware-name property, in zap_shader_load_mdt()
94 * if a zap-shader is required, rather than falling back in zap_shader_load_mdt()
102 return -ENOENT; in zap_shader_load_mdt()
120 ret = -E2BIG; in zap_shader_load_mdt()
127 ret = -ENOMEM; in zap_shader_load_mdt()
135 * with upstream linux-firmware it would be in a qcom/ subdir.. in zap_shader_load_mdt()
140 if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { in zap_shader_load_mdt()
159 * If the scm call returns -EOPNOTSUPP we assume that this target in zap_shader_load_mdt()
162 if (ret == -EOPNOTSUPP) in zap_shader_load_mdt()
179 struct platform_device *pdev = gpu->pdev; in adreno_zap_shader_load()
183 return -ENODEV; in adreno_zap_shader_load()
187 DRM_DEV_ERROR(&pdev->dev, "SCM is not available\n"); in adreno_zap_shader_load()
188 return -EPROBE_DEFER; in adreno_zap_shader_load()
191 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); in adreno_zap_shader_load()
211 mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks); in adreno_iommu_create_address_space()
224 start = max_t(u64, SZ_16M, geometry->aperture_start); in adreno_iommu_create_address_space()
225 size = geometry->aperture_end - start + 1; in adreno_iommu_create_address_space()
231 mmu->funcs->destroy(mmu); in adreno_iommu_create_address_space()
243 if (adreno_gpu->info->address_space_size) in adreno_private_address_space_size()
244 return adreno_gpu->info->address_space_size; in adreno_private_address_space_size()
258 bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); in adreno_fault_handler()
265 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in adreno_fault_handler()
270 * adreno-smmu-priv in adreno_fault_handler()
280 if (info->fsr & ARM_SMMU_FSR_TF) in adreno_fault_handler()
282 else if (info->fsr & ARM_SMMU_FSR_PF) in adreno_fault_handler()
284 else if (info->fsr & ARM_SMMU_FSR_EF) in adreno_fault_handler()
288 info->ttbr0, iova, in adreno_fault_handler()
295 del_timer(&gpu->hangcheck_timer); in adreno_fault_handler()
297 gpu->fault_info.ttbr0 = info->ttbr0; in adreno_fault_handler()
298 gpu->fault_info.iova = iova; in adreno_fault_handler()
299 gpu->fault_info.flags = flags; in adreno_fault_handler()
300 gpu->fault_info.type = type; in adreno_fault_handler()
301 gpu->fault_info.block = block; in adreno_fault_handler()
303 kthread_queue_work(gpu->worker, &gpu->fault_work); in adreno_fault_handler()
316 return -EINVAL; in adreno_get_param()
320 *value = adreno_gpu->info->revn; in adreno_get_param()
323 *value = adreno_gpu->info->gmem; in adreno_get_param()
333 *value = adreno_gpu->chip_id; in adreno_get_param()
334 if (!adreno_gpu->info->revn) in adreno_get_param()
335 *value |= ((uint64_t) adreno_gpu->speedbin) << 32; in adreno_get_param()
338 *value = adreno_gpu->base.fast_rate; in adreno_get_param()
341 if (adreno_gpu->funcs->get_timestamp) { in adreno_get_param()
344 pm_runtime_get_sync(&gpu->pdev->dev); in adreno_get_param()
345 ret = adreno_gpu->funcs->get_timestamp(gpu, value); in adreno_get_param()
346 pm_runtime_put_autosuspend(&gpu->pdev->dev); in adreno_get_param()
350 return -EINVAL; in adreno_get_param()
352 *value = gpu->nr_rings * NR_SCHED_PRIORITIES; in adreno_get_param()
358 if (ctx->aspace) in adreno_get_param()
359 *value = gpu->global_faults + ctx->aspace->faults; in adreno_get_param()
361 *value = gpu->global_faults; in adreno_get_param()
364 *value = gpu->suspend_count; in adreno_get_param()
367 if (ctx->aspace == gpu->aspace) in adreno_get_param()
368 return -EINVAL; in adreno_get_param()
369 *value = ctx->aspace->va_start; in adreno_get_param()
372 if (ctx->aspace == gpu->aspace) in adreno_get_param()
373 return -EINVAL; in adreno_get_param()
374 *value = ctx->aspace->va_size; in adreno_get_param()
377 *value = adreno_gpu->ubwc_config.highest_bank_bit; in adreno_get_param()
380 *value = adreno_gpu->has_ray_tracing; in adreno_get_param()
383 *value = adreno_gpu->ubwc_config.ubwc_swizzle; in adreno_get_param()
386 *value = adreno_gpu->ubwc_config.macrotile_mode; in adreno_get_param()
389 DBG("%s: invalid param: %u", gpu->name, param); in adreno_get_param()
390 return -EINVAL; in adreno_get_param()
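
The adreno_get_param() hits above are the kernel side of the MSM GET_PARAM query interface. As context, here is a minimal userspace sketch of how one of these values could be read back through the libdrm command wrapper; the render-node path, the choice of MSM_PARAM_GMEM_SIZE, and the include path are illustrative assumptions, not part of the matched source.

    /*
     * Sketch only: query GMEM size via DRM_MSM_GET_PARAM.
     * Header location depends on whether kernel UAPI or libdrm headers
     * are installed; the device node path is just an example.
     */
    #include <fcntl.h>
    #include <stdio.h>
    #include <xf86drm.h>
    #include <drm/msm_drm.h>

    int main(void)
    {
            int fd = open("/dev/dri/renderD128", O_RDWR);   /* illustrative node */
            struct drm_msm_param req = {
                    .pipe  = MSM_PIPE_3D0,
                    .param = MSM_PARAM_GMEM_SIZE,
            };

            if (fd < 0)
                    return 1;

            /* on success the kernel fills req.value (see adreno_get_param()) */
            if (drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req)) == 0)
                    printf("GMEM size: %llu bytes\n", (unsigned long long)req.value);

            return 0;
    }
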
404 return -EINVAL; in adreno_set_param()
408 return -EINVAL; in adreno_set_param()
420 mutex_lock(&gpu->lock); in adreno_set_param()
423 paramp = &ctx->comm; in adreno_set_param()
425 paramp = &ctx->cmdline; in adreno_set_param()
431 mutex_unlock(&gpu->lock); in adreno_set_param()
437 return -EPERM; in adreno_set_param()
440 DBG("%s: invalid param: %u", gpu->name, param); in adreno_set_param()
441 return -EINVAL; in adreno_set_param()
448 struct drm_device *drm = adreno_gpu->base.dev; in adreno_request_fw()
455 return ERR_PTR(-ENOMEM); in adreno_request_fw()
461 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
462 (adreno_gpu->fwloc == FW_LOCATION_NEW)) { in adreno_request_fw()
464 ret = request_firmware_direct(&fw, newname, drm->dev); in adreno_request_fw()
466 DRM_DEV_INFO(drm->dev, "loaded %s from new location\n", in adreno_request_fw()
468 adreno_gpu->fwloc = FW_LOCATION_NEW; in adreno_request_fw()
470 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
471 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
481 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
482 (adreno_gpu->fwloc == FW_LOCATION_LEGACY)) { in adreno_request_fw()
484 ret = request_firmware_direct(&fw, fwname, drm->dev); in adreno_request_fw()
486 DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n", in adreno_request_fw()
488 adreno_gpu->fwloc = FW_LOCATION_LEGACY; in adreno_request_fw()
490 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
491 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
502 if ((adreno_gpu->fwloc == FW_LOCATION_UNKNOWN) || in adreno_request_fw()
503 (adreno_gpu->fwloc == FW_LOCATION_HELPER)) { in adreno_request_fw()
505 ret = request_firmware(&fw, newname, drm->dev); in adreno_request_fw()
507 DRM_DEV_INFO(drm->dev, "loaded %s with helper\n", in adreno_request_fw()
509 adreno_gpu->fwloc = FW_LOCATION_HELPER; in adreno_request_fw()
511 } else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) { in adreno_request_fw()
512 DRM_DEV_ERROR(drm->dev, "failed to load %s: %d\n", in adreno_request_fw()
519 DRM_DEV_ERROR(drm->dev, "failed to load %s\n", fwname); in adreno_request_fw()
520 fw = ERR_PTR(-ENOENT); in adreno_request_fw()
530 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) { in adreno_load_fw()
533 if (!adreno_gpu->info->fw[i]) in adreno_load_fw()
536 /* Skip loading GMU firmware with GMU Wrapper */ in adreno_load_fw()
541 if (adreno_gpu->fw[i]) in adreno_load_fw()
544 fw = adreno_request_fw(adreno_gpu, adreno_gpu->info->fw[i]); in adreno_load_fw()
548 adreno_gpu->fw[i] = fw; in adreno_load_fw()
560 ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, in adreno_fw_create_bo()
561 MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); in adreno_fw_create_bo()
566 memcpy(ptr, &fw->data[4], fw->size - 4); in adreno_fw_create_bo()
575 VERB("%s", gpu->name); in adreno_hw_init()
577 for (int i = 0; i < gpu->nr_rings; i++) { in adreno_hw_init()
578 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_hw_init()
583 ring->cur = ring->start; in adreno_hw_init()
584 ring->next = ring->start; in adreno_hw_init()
585 ring->memptrs->rptr = 0; in adreno_hw_init()
586 ring->memptrs->bv_fence = ring->fctx->completed_fence; in adreno_hw_init()
592 if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) { in adreno_hw_init()
593 ring->memptrs->fence = ring->fctx->last_fence; in adreno_hw_init()
604 struct msm_gpu *gpu = &adreno_gpu->base; in get_rptr()
606 return gpu->funcs->get_rptr(gpu, ring); in get_rptr()
611 return gpu->rb[0]; in adreno_active_ring()
616 struct drm_device *dev = gpu->dev; in adreno_recover()
619 // XXX pm-runtime?? we *need* the device to be off after this in adreno_recover()
620 // so maybe continuing to call ->pm_suspend/resume() is better? in adreno_recover()
622 gpu->funcs->pm_suspend(gpu); in adreno_recover()
623 gpu->funcs->pm_resume(gpu); in adreno_recover()
627 DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); in adreno_recover()
637 ring->cur = ring->next; in adreno_flush()
642 * the ringbuffer and rb->next hasn't wrapped to zero yet in adreno_flush()
663 gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); in adreno_idle()
673 WARN_ON(!mutex_is_locked(&gpu->lock)); in adreno_gpu_state_get()
675 kref_init(&state->ref); in adreno_gpu_state_get()
677 ktime_get_real_ts64(&state->time); in adreno_gpu_state_get()
679 for (i = 0; i < gpu->nr_rings; i++) { in adreno_gpu_state_get()
682 state->ring[i].fence = gpu->rb[i]->memptrs->fence; in adreno_gpu_state_get()
683 state->ring[i].iova = gpu->rb[i]->iova; in adreno_gpu_state_get()
684 state->ring[i].seqno = gpu->rb[i]->fctx->last_fence; in adreno_gpu_state_get()
685 state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); in adreno_gpu_state_get()
686 state->ring[i].wptr = get_wptr(gpu->rb[i]); in adreno_gpu_state_get()
689 size = state->ring[i].wptr; in adreno_gpu_state_get()
692 for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++) in adreno_gpu_state_get()
693 if (gpu->rb[i]->start[j]) in adreno_gpu_state_get()
697 state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL); in adreno_gpu_state_get()
698 if (state->ring[i].data) in adreno_gpu_state_get()
699 state->ring[i].data_size = size << 2; in adreno_gpu_state_get()
704 if (!adreno_gpu->registers) in adreno_gpu_state_get()
708 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) in adreno_gpu_state_get()
709 count += adreno_gpu->registers[i + 1] - in adreno_gpu_state_get()
710 adreno_gpu->registers[i] + 1; in adreno_gpu_state_get()
712 state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL); in adreno_gpu_state_get()
713 if (state->registers) { in adreno_gpu_state_get()
716 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { in adreno_gpu_state_get()
717 u32 start = adreno_gpu->registers[i]; in adreno_gpu_state_get()
718 u32 end = adreno_gpu->registers[i + 1]; in adreno_gpu_state_get()
722 state->registers[pos++] = addr; in adreno_gpu_state_get()
723 state->registers[pos++] = gpu_read(gpu, addr); in adreno_gpu_state_get()
727 state->nr_registers = count; in adreno_gpu_state_get()
737 for (i = 0; i < ARRAY_SIZE(state->ring); i++) in adreno_gpu_state_destroy()
738 kvfree(state->ring[i].data); in adreno_gpu_state_destroy()
740 for (i = 0; state->bos && i < state->nr_bos; i++) in adreno_gpu_state_destroy()
741 kvfree(state->bos[i].data); in adreno_gpu_state_destroy()
743 kfree(state->bos); in adreno_gpu_state_destroy()
744 kfree(state->comm); in adreno_gpu_state_destroy()
745 kfree(state->cmd); in adreno_gpu_state_destroy()
746 kfree(state->registers); in adreno_gpu_state_destroy()
763 return kref_put(&state->ref, adreno_gpu_state_kref_destroy); in adreno_gpu_state_put()
792 buf_itr += scnprintf(buf + buf_itr, buffer_size - buf_itr, "%s", in adreno_gpu_ascii85_encode()
815 * Only dump the non-zero part of the buffer - rarely will in adreno_show_object()
855 adreno_gpu->info->revn, in adreno_show()
856 ADRENO_CHIPID_ARGS(adreno_gpu->chip_id)); in adreno_show()
862 if (state->fault_info.ttbr0) { in adreno_show()
863 const struct msm_gpu_fault_info *info = &state->fault_info; in adreno_show()
865 drm_puts(p, "fault-info:\n"); in adreno_show()
866 drm_printf(p, " - ttbr0=%.16llx\n", info->ttbr0); in adreno_show()
867 drm_printf(p, " - iova=%.16lx\n", info->iova); in adreno_show()
868 drm_printf(p, " - dir=%s\n", info->flags & IOMMU_FAULT_WRITE ? "WRITE" : "READ"); in adreno_show()
869 drm_printf(p, " - type=%s\n", info->type); in adreno_show()
870 drm_printf(p, " - source=%s\n", info->block); in adreno_show()
873 drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status); in adreno_show()
877 for (i = 0; i < gpu->nr_rings; i++) { in adreno_show()
878 drm_printf(p, " - id: %d\n", i); in adreno_show()
879 drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova); in adreno_show()
880 drm_printf(p, " last-fence: %u\n", state->ring[i].seqno); in adreno_show()
881 drm_printf(p, " retired-fence: %u\n", state->ring[i].fence); in adreno_show()
882 drm_printf(p, " rptr: %u\n", state->ring[i].rptr); in adreno_show()
883 drm_printf(p, " wptr: %u\n", state->ring[i].wptr); in adreno_show()
886 adreno_show_object(p, &state->ring[i].data, in adreno_show()
887 state->ring[i].data_size, &state->ring[i].encoded); in adreno_show()
890 if (state->bos) { in adreno_show()
893 for (i = 0; i < state->nr_bos; i++) { in adreno_show()
894 drm_printf(p, " - iova: 0x%016llx\n", in adreno_show()
895 state->bos[i].iova); in adreno_show()
896 drm_printf(p, " size: %zd\n", state->bos[i].size); in adreno_show()
897 drm_printf(p, " flags: 0x%x\n", state->bos[i].flags); in adreno_show()
898 drm_printf(p, " name: %-32s\n", state->bos[i].name); in adreno_show()
900 adreno_show_object(p, &state->bos[i].data, in adreno_show()
901 state->bos[i].size, &state->bos[i].encoded); in adreno_show()
905 if (state->nr_registers) { in adreno_show()
908 for (i = 0; i < state->nr_registers; i++) { in adreno_show()
909 drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n", in adreno_show()
910 state->registers[i * 2] << 2, in adreno_show()
911 state->registers[(i * 2) + 1]); in adreno_show()
929 adreno_gpu->info->revn, in adreno_dump_info()
930 ADRENO_CHIPID_ARGS(adreno_gpu->chip_id)); in adreno_dump_info()
932 for (i = 0; i < gpu->nr_rings; i++) { in adreno_dump_info()
933 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_dump_info()
936 ring->memptrs->fence, in adreno_dump_info()
937 ring->fctx->last_fence); in adreno_dump_info()
950 if (!adreno_gpu->registers) in adreno_dump()
954 printk("IO:region %s 00000000 00020000\n", gpu->name); in adreno_dump()
955 for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) { in adreno_dump()
956 uint32_t start = adreno_gpu->registers[i]; in adreno_dump()
957 uint32_t end = adreno_gpu->registers[i+1]; in adreno_dump()
969 struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); in ring_freewords()
971 /* Use ring->next to calculate free size */ in ring_freewords()
972 uint32_t wptr = ring->next - ring->start; in ring_freewords()
974 return (rptr + (size - 1) - wptr) % size; in ring_freewords()
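
The expression above is the usual "keep one slot empty" ring-buffer free-space formula, computed in 32-bit words. A small self-contained sketch (hypothetical helper name, size chosen arbitrarily) shows the two boundary cases: an empty ring reports size - 1 free words, and a writer sitting one word behind the reader reports zero.

    /* Sketch: the free-words arithmetic used by ring_freewords() above. */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t freewords(uint32_t rptr, uint32_t wptr, uint32_t size)
    {
            return (rptr + (size - 1) - wptr) % size;
    }

    int main(void)
    {
            /* empty ring (rptr == wptr): size - 1 words usable */
            assert(freewords(0, 0, 1024) == 1023);
            /* writer one word behind the reader: ring is full */
            assert(freewords(5, 4, 1024) == 0);
            return 0;
    }
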
980 DRM_DEV_ERROR(ring->gpu->dev->dev, in adreno_wait_ring()
982 ring->id); in adreno_wait_ring()
993 gpu->fast_rate = 0; in adreno_get_pwrlevels()
997 if (ret == -ENODEV) { in adreno_get_pwrlevels()
1007 return -ENODEV; in adreno_get_pwrlevels()
1019 gpu->fast_rate = freq; in adreno_get_pwrlevels()
1022 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); in adreno_get_pwrlevels()
1035 if (PTR_ERR(ocmem) == -ENODEV) { in adreno_gpu_ocmem_init()
1047 ocmem_hdl = ocmem_allocate(ocmem, OCMEM_GRAPHICS, adreno_gpu->info->gmem); in adreno_gpu_ocmem_init()
1051 adreno_ocmem->ocmem = ocmem; in adreno_gpu_ocmem_init()
1052 adreno_ocmem->base = ocmem_hdl->addr; in adreno_gpu_ocmem_init()
1053 adreno_ocmem->hdl = ocmem_hdl; in adreno_gpu_ocmem_init()
1055 if (WARN_ON(ocmem_hdl->len != adreno_gpu->info->gmem)) in adreno_gpu_ocmem_init()
1056 return -ENOMEM; in adreno_gpu_ocmem_init()
1063 if (adreno_ocmem && adreno_ocmem->base) in adreno_gpu_ocmem_cleanup()
1064 ocmem_free(adreno_ocmem->ocmem, OCMEM_GRAPHICS, in adreno_gpu_ocmem_cleanup()
1065 adreno_ocmem->hdl); in adreno_gpu_ocmem_cleanup()
1077 struct device *dev = &pdev->dev; in adreno_gpu_init()
1078 struct adreno_platform_config *config = dev->platform_data; in adreno_gpu_init()
1080 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_init()
1085 adreno_gpu->funcs = funcs; in adreno_gpu_init()
1086 adreno_gpu->info = config->info; in adreno_gpu_init()
1087 adreno_gpu->chip_id = config->chip_id; in adreno_gpu_init()
1089 gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1; in adreno_gpu_init()
1090 gpu->pdev = pdev; in adreno_gpu_init()
1092 /* Only handle the core clock when GMU is not in use (or is absent). */ in adreno_gpu_init()
1094 adreno_gpu->info->family < ADRENO_6XX_GEN1) { in adreno_gpu_init()
1112 adreno_gpu->speedbin = (uint16_t) (0xffff & speedbin); in adreno_gpu_init()
1115 ADRENO_CHIPID_ARGS(config->chip_id)); in adreno_gpu_init()
1117 return -ENOMEM; in adreno_gpu_init()
1128 adreno_gpu->info->inactive_period); in adreno_gpu_init()
1131 return msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base, in adreno_gpu_init()
1137 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_cleanup()
1138 struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL; in adreno_gpu_cleanup()
1141 for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) in adreno_gpu_cleanup()
1142 release_firmware(adreno_gpu->fw[i]); in adreno_gpu_cleanup()
1144 if (priv && pm_runtime_enabled(&priv->gpu_pdev->dev)) in adreno_gpu_cleanup()
1145 pm_runtime_disable(&priv->gpu_pdev->dev); in adreno_gpu_cleanup()
1147 msm_gpu_cleanup(&adreno_gpu->base); in adreno_gpu_cleanup()