/* SPDX-License-Identifier: GPL-2.0-only */

#include <linux/adreno-smmu-priv.h>
	int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
	int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
	int (*hw_init)(struct msm_gpu *gpu);
	int (*ucode_load)(struct msm_gpu *gpu);
	int (*pm_suspend)(struct msm_gpu *gpu);
	int (*pm_resume)(struct msm_gpu *gpu);
	void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
	void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
	struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
	void (*recover)(struct msm_gpu *gpu);
	void (*destroy)(struct msm_gpu *gpu);
	/* show GPU status in debugfs: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
	void (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
	u64 (*gpu_busy)(struct msm_gpu *gpu, unsigned long *out_sample_rate);
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	unsigned long (*gpu_get_freq)(struct msm_gpu *gpu);
	void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp,
			(struct msm_gpu *gpu, struct platform_device *pdev);
			(struct msm_gpu *gpu);
	uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);

	 * progress: Has the GPU made progress?
	 * Return true if GPU position in cmdstream has advanced (or changed)
	bool (*progress)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
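/*
 * Illustration only, not part of msm_gpu.h: a GPU backend implements the ops
 * above by filling in a struct msm_gpu_funcs table.  The my_gpu_* symbols are
 * hypothetical stubs; a real backend (e.g. the adreno code) supplies its own
 * callbacks for submit, flush, recover, and the rest.
 */
static int my_gpu_hw_init(struct msm_gpu *gpu)
{
	/* (re)program rings, protections, etc. after power up */
	return 0;
}

static void my_gpu_recover(struct msm_gpu *gpu)
{
	/* reset the hardware after a hang */
}

static const struct msm_gpu_funcs my_gpu_funcs = {
	.hw_init = my_gpu_hw_init,
	.recover = my_gpu_recover,
	/* ...submit, flush, active_ring, pm_suspend/resume, destroy, ... */
};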
 * struct msm_gpu_devfreq - devfreq related state

	 * Shadow frequency used while the GPU is idle.  From the PoV of
	 * adjust frequency while the GPU is idle, but we use this shadow
	 * value as the GPU is actually clamped to minimum frequency while

	 * Used to delay clamping to idle freq on active->idle transition.
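/*
 * Hypothetical sketch of the shadow idle_freq described above, not the
 * driver's actual devfreq code: while the GPU is idle it is clamped to its
 * minimum clock, but the frequency reported back to the devfreq governor is
 * the remembered shadow value, so the governor's view stays consistent.
 */
static unsigned long sketch_report_cur_freq(struct msm_gpu *gpu)
{
	struct msm_gpu_devfreq *df = &gpu->devfreq;

	if (df->idle_freq)			/* non-zero only while idle */
		return df->idle_freq;		/* report the shadow value */

	return gpu->funcs->gpu_get_freq(gpu);	/* otherwise read the real clock */
}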
	 * The ctx->seqno value of the last context to submit rendering,
	 * that support per-context pgtables). Tracked by seqno rather

	 * General lock for serializing all the gpu things.
	 * TODO move to per-ring locking where feasible (ie. submit/retire

	/* does gpu need hw_init? */

	 * global_faults: number of GPU hangs not attributed to a particular
	/* work for handling GPU iova faults: */
	/* work for handling GPU recovery: */

	/* work for handling active-list retiring: */

	 * switch-over happened early enough in mesa a6xx bringup that we
static inline bool msm_gpu_active(struct msm_gpu *gpu)
	for (i = 0; i < gpu->nr_rings; i++) {
		struct msm_ringbuffer *ring = gpu->rb[i];
		if (fence_after(ring->fctx->last_fence, ring->memptrs->fence))
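/*
 * Usage illustration (hypothetical, not from the driver): msm_gpu_active()
 * reports whether any ring still has submits whose fences have not signalled,
 * which is useful to gate things like suspend or hang detection.
 */
static int sketch_try_suspend(struct msm_gpu *gpu)
{
	if (msm_gpu_active(gpu))
		return -EBUSY;		/* unretired work still on some ring */

	return msm_gpu_pm_suspend(gpu);
}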
/* Perf-Counters:

 * The number of priority levels provided by drm gpu scheduler. The

#define NR_SCHED_PRIORITIES (1 + DRM_SCHED_PRIORITY_LOW - DRM_SCHED_PRIORITY_HIGH)
 * struct msm_file_private - per-drm_file context

 * used to assign &msm_gpu_submitqueue.id
 * @aspace: the per-process GPU address-space

 * pps-producer (perfetto), and restricted to CAP_SYS_ADMIN.

 * The total (cumulative) elapsed time GPU was busy with rendering

 * The total (cumulative) GPU cycles elapsed attributed to this

 * Table of per-priority-level sched entities used by submitqueues

 * create at most one &drm_sched_entity per-process per-priority-
 * msm_gpu_convert_priority - Map userspace priority to ring # and sched priority
 * @gpu: the gpu instance
 * @sched_prio: [out] the gpu scheduler priority level which the userspace

 *     sched_prio = NR_SCHED_PRIORITIES -
 *                  (userspace_prio % NR_SCHED_PRIORITIES) - 1

static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,

	/* invert sched priority to map to higher-numeric-is-higher-
	sp = NR_SCHED_PRIORITIES - sp - 1;

	if (rn >= gpu->nr_rings)
		return -EINVAL;
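/*
 * Worked example of the mapping above (illustration only), assuming
 * NR_SCHED_PRIORITIES == 3 and a GPU with nr_rings == 4:
 *
 *   userspace prio  0 -> ring 0, sched_prio 3 - 0 - 1 = 2
 *   userspace prio  1 -> ring 0, sched_prio 3 - 1 - 1 = 1
 *   userspace prio  2 -> ring 0, sched_prio 3 - 2 - 1 = 0
 *   userspace prio  3 -> ring 1, sched_prio 2
 *   userspace prio 11 -> ring 3, sched_prio 0
 *   userspace prio 12 -> ring 4 does not exist, so -EINVAL is returned
 */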
 * struct msm_gpu_submitqueue - Userspace created context.
 * @id: userspace id for the submitqueue, unique within the drm_file
 * @faults: the number of GPU hangs associated with this submitqueue
 * @ctx: the per-drm_file context associated with the submitqueue (ie.
 * @fence_idr: maps fence-id to dma_fence for userspace visible fence
 * @entity: the submit job-queue

	int id;
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
	writel(data, gpu->mmio + (reg << 2));

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
	return readl(gpu->mmio + (reg << 2));

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
	msm_rmw(gpu->mmio + (reg << 2), mask, or);

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 reg)
	 * not quad word aligned and 2) the GPU hardware designers have a bit
	 * spins. The longer a GPU family goes the higher the chance that
	val = (u64) readl(gpu->mmio + (reg << 2));
	val |= ((u64) readl(gpu->mmio + ((reg + 1) << 2)) << 32);

static inline void gpu_write64(struct msm_gpu *gpu, u32 reg, u64 val)
	writel(lower_32_bits(val), gpu->mmio + (reg << 2));
	writel(upper_32_bits(val), gpu->mmio + ((reg + 1) << 2));
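/*
 * Usage illustration with a hypothetical register: the accessors above take
 * dword-indexed offsets, hence the "reg << 2" to form a byte offset into the
 * MMIO region.  A 64-bit value exposed as a LO/HI pair of adjacent registers
 * is read via gpu_read64() on the LO offset, which performs two 32-bit reads.
 */
#define REG_MY_COUNTER_LO	0x0400	/* hypothetical dword-indexed offset */

static u64 sketch_read_counter(struct msm_gpu *gpu)
{
	/* reads dword 0x0400 as the low half and dword 0x0401 as the high half */
	return gpu_read64(gpu, REG_MY_COUNTER_LO);
}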
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

void msm_gpu_show_fdinfo(struct msm_gpu *gpu, struct msm_file_private *ctx,

		u32 id);

		u32 prio, u32 flags, u32 *id);

int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);

		struct msm_gpu *gpu, int sysprof);

	kref_put(&ctx->ref, __msm_file_private_destroy);

	kref_get(&ctx->ref);

void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);

		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,

msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);

void msm_gpu_cleanup(struct msm_gpu *gpu);

	kref_put(&queue->ref, msm_submitqueue_destroy);
static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		kref_get(&gpu->crashstate->ref);
		state = gpu->crashstate;

	mutex_unlock(&gpu->lock);

static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
	mutex_lock(&gpu->lock);

	if (gpu->crashstate) {
		if (gpu->funcs->gpu_state_put(gpu->crashstate))
			gpu->crashstate = NULL;

	mutex_unlock(&gpu->lock);
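/*
 * Usage illustration (hypothetical): a dump/show path takes a reference on
 * the crash state while formatting it and drops it afterwards, so recovery
 * can safely replace gpu->crashstate under gpu->lock in the meantime.
 */
static void sketch_dump_crashstate(struct msm_gpu *gpu)
{
	struct msm_gpu_state *state = msm_gpu_crashstate_get(gpu);

	if (!state)
		return;

	/* ... emit the captured state via gpu->funcs->show() or similar ... */

	msm_gpu_crashstate_put(gpu);	/* drop the reference taken above */
}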
 * Simple macro to semi-cleanly add the MAP_PRIV flag for targets that can

#define check_apriv(gpu, flags) \
	(((gpu)->hw_apriv ? MSM_BO_MAP_PRIV : 0) | (flags))
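/*
 * Usage illustration (a sketch, not the driver's code): kernel-owned buffers
 * such as the ringbuffer pass their allocation flags through check_apriv(),
 * so targets with hw_apriv set get MSM_BO_MAP_PRIV OR'd in and the buffer is
 * mapped as privileged; older targets see the flags unchanged.
 */
static inline uint32_t sketch_ring_bo_flags(struct msm_gpu *gpu)
{
	return check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY);
}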