Lines matching full:gpu in drivers/gpu/drm/msm/adreno/adreno_gpu.c
25 MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
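The override above is exposed as a module parameter. As a hedged sketch (only the description string comes from the listing; the variable name, the ullong parameter type and the 0600 permissions are assumptions), the declaration would look roughly like this:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0 means "no override": keep the per-GPU default private VA size. */
static u64 address_space_size;
module_param(address_space_size, ullong, 0600);
MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");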
30 static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname, in zap_shader_load_mdt() argument
33 struct device *dev = &gpu->pdev->dev; in zap_shader_load_mdt()
85 ret = request_firmware_direct(&fw, fwname, gpu->dev->dev); in zap_shader_load_mdt()
90 fw = adreno_request_fw(to_adreno_gpu(gpu), fwname); in zap_shader_load_mdt()
140 if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) { in zap_shader_load_mdt()
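Lines 85-140 show the ways the zap-shader firmware can be located: a direct request, the adreno firmware helper, and a legacy-location special case. A simplified, self-contained sketch of the try-direct-then-fall-back pattern using the stock firmware loader API (the helper name try_load_zap_fw and the warning text are assumptions, and the real driver falls back to adreno_request_fw() rather than plain request_firmware()):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/firmware.h>

static const struct firmware *try_load_zap_fw(struct device *dev,
					      const char *fwname)
{
	const struct firmware *fw;
	int ret;

	/* Prefer a direct lookup that skips the usermode fallback path. */
	ret = request_firmware_direct(&fw, fwname, dev);
	if (!ret)
		return fw;

	dev_warn(dev, "direct load of %s failed (%d), retrying\n", fwname, ret);

	ret = request_firmware(&fw, fwname, dev);
	if (ret)
		return ERR_PTR(ret);

	return fw;
}

The caller would release the image with release_firmware() once the zap region has been populated.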
176 int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) in adreno_zap_shader_load() argument
178 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_zap_shader_load()
179 struct platform_device *pdev = gpu->pdev; in adreno_zap_shader_load()
191 return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); in adreno_zap_shader_load()
195 adreno_create_address_space(struct msm_gpu *gpu, in adreno_create_address_space() argument
198 return adreno_iommu_create_address_space(gpu, pdev, 0); in adreno_create_address_space()
202 adreno_iommu_create_address_space(struct msm_gpu *gpu, in adreno_iommu_create_address_space() argument
211 mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks); in adreno_iommu_create_address_space()
227 aspace = msm_gem_address_space_create(mmu, "gpu", in adreno_iommu_create_address_space()
236 u64 adreno_private_address_space_size(struct msm_gpu *gpu) in adreno_private_address_space_size() argument
238 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_private_address_space_size()
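adreno_private_address_space_size() is where the module-parameter override from line 25 meets the per-GPU default. A hedged model of that precedence (the helper name is hypothetical and the exact fallback order is an assumption; SZ_4G is the kernel's 4 GiB size constant):

#include <linux/sizes.h>
#include <linux/types.h>

/* Hypothetical helper: explicit override wins, then the per-GPU default,
 * then a plain 4 GiB private VA space. */
static u64 pick_private_va_size(u64 override, u64 per_gpu_default)
{
	if (override)
		return override;
	if (per_gpu_default)
		return per_gpu_default;
	return SZ_4G;
}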
253 int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, in adreno_fault_handler() argument
258 bool do_devcoredump = info && !READ_ONCE(gpu->crashstate); in adreno_fault_handler()
265 gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu); in adreno_fault_handler()
273 pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d (%u,%u,%u,%u)\n", in adreno_fault_handler()
287 …pr_warn_ratelimited("*** gpu fault: ttbr0=%.16llx iova=%.16lx dir=%s type=%s source=%s (%u,%u,%u,%… in adreno_fault_handler()
295 del_timer(&gpu->hangcheck_timer); in adreno_fault_handler()
297 gpu->fault_info.ttbr0 = info->ttbr0; in adreno_fault_handler()
298 gpu->fault_info.iova = iova; in adreno_fault_handler()
299 gpu->fault_info.flags = flags; in adreno_fault_handler()
300 gpu->fault_info.type = type; in adreno_fault_handler()
301 gpu->fault_info.block = block; in adreno_fault_handler()
303 kthread_queue_work(gpu->worker, &gpu->fault_work); in adreno_fault_handler()
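The fault handler follows a common deferral pattern: log a rate-limited warning, stop the hangcheck timer (the fault already tells us the GPU is in trouble), snapshot the fault details, and hand the expensive work (devcoredump capture) to a kthread worker. A compressed sketch with simplified, hypothetical types (only the field names mirror the listing):

#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/timer.h>
#include <linux/types.h>

/* Trimmed-down stand-ins for the driver's structures. */
struct fault_info {
	u64 ttbr0;
	unsigned long iova;
	int flags;
	const char *type;
	const char *block;
};

struct fake_gpu {
	struct timer_list hangcheck_timer;
	struct kthread_worker *worker;
	struct kthread_work fault_work;
	struct fault_info fault_info;
};

static void record_fault(struct fake_gpu *gpu, const struct fault_info *info)
{
	pr_warn_ratelimited("*** gpu fault: iova=%.16lx flags=%d\n",
			    info->iova, info->flags);

	/* No point in the hangcheck timer firing on top of this. */
	del_timer(&gpu->hangcheck_timer);

	/* Snapshot the fault, then capture crash state from process context. */
	gpu->fault_info = *info;
	kthread_queue_work(gpu->worker, &gpu->fault_work);
}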
309 int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx, in adreno_get_param() argument
312 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_get_param()
344 pm_runtime_get_sync(&gpu->pdev->dev); in adreno_get_param()
345 ret = adreno_gpu->funcs->get_timestamp(gpu, value); in adreno_get_param()
346 pm_runtime_put_autosuspend(&gpu->pdev->dev); in adreno_get_param()
352 *value = gpu->nr_rings * NR_SCHED_PRIORITIES; in adreno_get_param()
359 *value = gpu->global_faults + ctx->aspace->faults; in adreno_get_param()
361 *value = gpu->global_faults; in adreno_get_param()
364 *value = gpu->suspend_count; in adreno_get_param()
367 if (ctx->aspace == gpu->aspace) in adreno_get_param()
372 if (ctx->aspace == gpu->aspace) in adreno_get_param()
389 DBG("%s: invalid param: %u", gpu->name, param); in adreno_get_param()
394 int adreno_set_param(struct msm_gpu *gpu, struct msm_file_private *ctx, in adreno_set_param() argument
420 mutex_lock(&gpu->lock); in adreno_set_param()
431 mutex_unlock(&gpu->lock); in adreno_set_param()
438 return msm_file_private_set_sysprof(ctx, gpu, value); in adreno_set_param()
440 DBG("%s: invalid param: %u", gpu->name, param); in adreno_set_param()
554 struct drm_gem_object *adreno_fw_create_bo(struct msm_gpu *gpu, in adreno_fw_create_bo() argument
560 ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4, in adreno_fw_create_bo()
561 MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova); in adreno_fw_create_bo()
573 int adreno_hw_init(struct msm_gpu *gpu) in adreno_hw_init() argument
575 VERB("%s", gpu->name); in adreno_hw_init()
577 for (int i = 0; i < gpu->nr_rings; i++) { in adreno_hw_init()
578 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_hw_init()
588 /* Detect and clean up an impossible fence, ie. if GPU managed in adreno_hw_init()
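The comment at line 588 is about clamping a completed-fence value that a healthy GPU could never have written (e.g. the memptrs page got scribbled over), so that a bogus value does not make later submits look already retired. A small standalone model of the sanity check (names are hypothetical; the real driver compares fences in a wrap-safe way):

#include <stdint.h>
#include <stdio.h>

/* The completed fence can never legitimately be ahead of the last fence
 * that was actually submitted, so clamp it back. */
static uint32_t sanitize_fence(uint32_t completed, uint32_t last_submitted)
{
	return completed > last_submitted ? last_submitted : completed;
}

int main(void)
{
	printf("%u\n", sanitize_fence(105, 100));	/* impossible -> clamped to 100 */
	printf("%u\n", sanitize_fence(99, 100));	/* plausible  -> left at 99     */
	return 0;
}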
604 struct msm_gpu *gpu = &adreno_gpu->base; in get_rptr() local
606 return gpu->funcs->get_rptr(gpu, ring); in get_rptr()
609 struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu) in adreno_active_ring() argument
611 return gpu->rb[0]; in adreno_active_ring()
614 void adreno_recover(struct msm_gpu *gpu) in adreno_recover() argument
616 struct drm_device *dev = gpu->dev; in adreno_recover()
622 gpu->funcs->pm_suspend(gpu); in adreno_recover()
623 gpu->funcs->pm_resume(gpu); in adreno_recover()
625 ret = msm_gpu_hw_init(gpu); in adreno_recover()
627 DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret); in adreno_recover()
632 void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring, u32 reg) in adreno_flush() argument
649 gpu_write(gpu, reg, wptr); in adreno_flush()
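adreno_flush() converts the ring's CPU-side cursor into a dword write pointer, wrapped to the ring size, and then writes it to the hardware register (after a memory barrier so the ring contents are visible first). A standalone model of the wrap arithmetic (the 32 KiB ring size is an assumption):

#include <stdint.h>
#include <stdio.h>

#define RINGBUFFER_SZ (32 * 1024)	/* assumed ring size in bytes */

/* The write pointer is the cursor's dword offset into the ring,
 * wrapped to the ring size. */
static uint32_t ring_wptr(const uint32_t *start, const uint32_t *cur)
{
	return (uint32_t)(cur - start) % (RINGBUFFER_SZ >> 2);
}

int main(void)
{
	static uint32_t ring[RINGBUFFER_SZ >> 2];

	printf("%u\n", ring_wptr(ring, ring + 16));			/* 16 */
	printf("%u\n", ring_wptr(ring, ring + (RINGBUFFER_SZ >> 2)));	/* wraps to 0 */
	return 0;
}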
652 bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring) in adreno_idle() argument
654 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_idle()
661 /* TODO maybe we need to reset GPU here to recover from hang? */ in adreno_idle()
663 gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr); in adreno_idle()
668 int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state) in adreno_gpu_state_get() argument
670 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_gpu_state_get()
673 WARN_ON(!mutex_is_locked(&gpu->lock)); in adreno_gpu_state_get()
679 for (i = 0; i < gpu->nr_rings; i++) { in adreno_gpu_state_get()
682 state->ring[i].fence = gpu->rb[i]->memptrs->fence; in adreno_gpu_state_get()
683 state->ring[i].iova = gpu->rb[i]->iova; in adreno_gpu_state_get()
684 state->ring[i].seqno = gpu->rb[i]->fctx->last_fence; in adreno_gpu_state_get()
685 state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]); in adreno_gpu_state_get()
686 state->ring[i].wptr = get_wptr(gpu->rb[i]); in adreno_gpu_state_get()
693 if (gpu->rb[i]->start[j]) in adreno_gpu_state_get()
697 state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL); in adreno_gpu_state_get()
723 state->registers[pos++] = gpu_read(gpu, addr); in adreno_gpu_state_get()
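The crash-state capture walks a table of {start, end} register ranges (terminated by a ~0 sentinel in the adreno register tables) and stores address/value pairs. A standalone model of that walk, with a fake register read standing in for gpu_read():

#include <stdint.h>
#include <stdio.h>

/* Stand-in for gpu_read(): returns a made-up value per address. */
static uint32_t fake_gpu_read(uint32_t addr)
{
	return addr ^ 0xdeadbeef;
}

int main(void)
{
	/* {start, end} pairs, ~0-terminated. */
	static const uint32_t ranges[] = { 0x400, 0x403, 0x800, 0x801, ~0u };
	uint32_t dump[64];
	unsigned pos = 0;

	for (unsigned i = 0; ranges[i] != ~0u; i += 2)
		for (uint32_t addr = ranges[i]; addr <= ranges[i + 1]; addr++) {
			dump[pos++] = addr;			/* register offset */
			dump[pos++] = fake_gpu_read(addr);	/* sampled value   */
		}

	for (unsigned i = 0; i < pos; i += 2)
		printf("0x%04x: 0x%08x\n", dump[i], dump[i + 1]);
	return 0;
}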
845 void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, in adreno_show() argument
848 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_show()
877 for (i = 0; i < gpu->nr_rings; i++) { in adreno_show()
917 /* Dump common gpu status and scratch registers on any hang, to make
919 * safe to read when GPU has hung (unlike some other regs, depending
920 * on how the GPU hung), and they are useful to match up to cmdstream
923 void adreno_dump_info(struct msm_gpu *gpu) in adreno_dump_info() argument
925 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump_info()
932 for (i = 0; i < gpu->nr_rings; i++) { in adreno_dump_info()
933 struct msm_ringbuffer *ring = gpu->rb[i]; in adreno_dump_info()
945 void adreno_dump(struct msm_gpu *gpu) in adreno_dump() argument
947 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_dump()
954 printk("IO:region %s 00000000 00020000\n", gpu->name); in adreno_dump()
961 uint32_t val = gpu_read(gpu, addr); in adreno_dump()
969 struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu); in ring_freewords()
980 DRM_DEV_ERROR(ring->gpu->dev->dev, in adreno_wait_ring()
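ring_freewords() is the classic circular-buffer free-space calculation: one slot is kept unused so that wptr == rptr unambiguously means "empty", giving free = (rptr + size - 1 - wptr) mod size; adreno_wait_ring() then spins on that until enough dwords are available or it times out with the error above. A standalone model of the calculation (size and the sample pointers are arbitrary):

#include <stdint.h>
#include <stdio.h>

/* Free dwords in a ring where the writer chases the reader and one
 * slot is deliberately left unused. */
static uint32_t ring_freewords(uint32_t rptr, uint32_t wptr, uint32_t size)
{
	return (rptr + (size - 1) - wptr) % size;
}

int main(void)
{
	const uint32_t size = 1024;	/* dwords */

	printf("%u\n", ring_freewords(0, 0, size));	/* empty ring: 1023 free */
	printf("%u\n", ring_freewords(10, 9, size));	/* full ring:  0 free    */
	printf("%u\n", ring_freewords(100, 50, size));	/* 49 free               */
	return 0;
}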
986 struct msm_gpu *gpu) in adreno_get_pwrlevels() argument
988 struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); in adreno_get_pwrlevels()
993 gpu->fast_rate = 0; in adreno_get_pwrlevels()
1019 gpu->fast_rate = freq; in adreno_get_pwrlevels()
1022 DBG("fast_rate=%u, slow_rate=27000000", gpu->fast_rate); in adreno_get_pwrlevels()
1080 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_init() local
1089 gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1; in adreno_gpu_init()
1090 gpu->pdev = pdev; in adreno_gpu_init()
1123 ret = adreno_get_pwrlevels(dev, gpu); in adreno_gpu_init()
1137 struct msm_gpu *gpu = &adreno_gpu->base; in adreno_gpu_cleanup() local
1138 struct msm_drm_private *priv = gpu->dev ? gpu->dev->dev_private : NULL; in adreno_gpu_cleanup()