/linux-6.12.1/drivers/gpu/drm/etnaviv/ |
D | etnaviv_gpu.c |
    31  { .name = "etnaviv-gpu,2d" },
    39  int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value)
    41      struct etnaviv_drm_private *priv = gpu->drm->dev_private;
    45      *value = gpu->identity.model;
    49      *value = gpu->identity.revision;
    53      *value = gpu->identity.features;
    57      *value = gpu->identity.minor_features0;
    61      *value = gpu->identity.minor_features1;
    65      *value = gpu->identity.minor_features2;
    69      *value = gpu->identity.minor_features3;
    [all …]
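Note: etnaviv_gpu_get_param() is a plain switch that copies one identity field per parameter ID into *value. A minimal userspace sketch of the same dispatch shape follows; the param IDs, struct, and values are invented for illustration and are not the etnaviv uapi.

    #include <stdint.h>
    #include <errno.h>
    #include <stdio.h>

    enum demo_param { DEMO_PARAM_MODEL, DEMO_PARAM_REVISION, DEMO_PARAM_FEATURES };

    struct demo_identity { uint32_t model, revision, features; };

    static int demo_get_param(const struct demo_identity *id,
                              uint32_t param, uint64_t *value)
    {
        switch (param) {
        case DEMO_PARAM_MODEL:    *value = id->model;    return 0;
        case DEMO_PARAM_REVISION: *value = id->revision; return 0;
        case DEMO_PARAM_FEATURES: *value = id->features; return 0;
        default:
            return -EINVAL; /* unknown params must fail, not return junk */
        }
    }

    int main(void)
    {
        struct demo_identity id = { .model = 0x7000, .revision = 0x6214 };
        uint64_t v;

        if (demo_get_param(&id, DEMO_PARAM_MODEL, &v) == 0)
            printf("model: 0x%04llx\n", (unsigned long long)v);
        return 0;
    }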
|
D | etnaviv_sched.c |
    28      dev_dbg(submit->gpu->dev, "skipping bad job\n");
    37      struct etnaviv_gpu *gpu = submit->gpu;
    42       * If the GPU managed to complete this job's fence, the timeout is
    49       * If the GPU is still making forward progress on the front-end (which
    53      dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
    54      change = dma_addr - gpu->hangcheck_dma_addr;
    55      if (gpu->state == ETNA_GPU_STATE_RUNNING &&
    56          (gpu->completed_fence != gpu->hangcheck_fence ||
    58          gpu->hangcheck_dma_addr = dma_addr;
    59          gpu->hangcheck_fence = gpu->completed_fence;
    [all …]
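Note: the timeout handler above only treats a job as hung if neither the completed fence nor the front-end DMA address moved since the last check; otherwise the timeout is considered spurious and extended. A self-contained sketch of that forward-progress heuristic, with an illustrative hangcheck struct rather than the driver's own:

    #include <stdbool.h>
    #include <stdint.h>

    struct hangcheck {
        uint32_t last_fence;
        uint32_t last_dma_addr;
    };

    static bool gpu_made_progress(struct hangcheck *hc,
                                  uint32_t completed_fence, uint32_t dma_addr)
    {
        bool progress = completed_fence != hc->last_fence ||
                        dma_addr != hc->last_dma_addr;

        /* Remember the current position so the next timeout can compare. */
        hc->last_fence = completed_fence;
        hc->last_dma_addr = dma_addr;
        return progress; /* true => extend the timeout instead of resetting */
    }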
|
D | etnaviv_buffer.c |
    90  static void etnaviv_cmd_select_pipe(struct etnaviv_gpu *gpu,
    95      lockdep_assert_held(&gpu->lock);
    103     if (gpu->exec_state == ETNA_PIPE_2D)
    105     else if (gpu->exec_state == ETNA_PIPE_3D)
    116 static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu,
    122     dev_info(gpu->dev, "virt %p phys 0x%08x free 0x%08x\n",
    124         &gpu->mmu_context->cmdbuf_mapping) +
    133  * The GPU may be executing this WAIT while we're modifying it, so we have
    134  * to write it in a specific order to avoid the GPU branching to somewhere
    152 static u32 etnaviv_buffer_reserve(struct etnaviv_gpu *gpu,
    [all …]
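Note: the comment at lines 133-134 describes a live-patch hazard: the GPU may be spinning on a WAIT command while the CPU rewrites it into a branch. A minimal sketch of the ordered-rewrite idea follows; the opcodes, two-word command layout, and the release fence are assumptions for illustration, not the etnaviv command encoding.

    #include <stdint.h>

    #define CMD_WAIT 0x1  /* illustrative opcode values */
    #define CMD_LINK 0x2

    static void patch_wait_to_link(volatile uint32_t *cmd, uint32_t target)
    {
        cmd[1] = target;                          /* 1: publish the branch target */
        __atomic_thread_fence(__ATOMIC_RELEASE);  /* 2: order the two stores */
        cmd[0] = CMD_LINK;                        /* 3: only now flip WAIT -> LINK */
    }

If the GPU observed the opcode change before the target word, it could branch to a stale address, hence the fixed store order.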
|
D | etnaviv_perfmon.c |
    18      u32 (*sample)(struct etnaviv_gpu *gpu,
    40  static u32 perf_reg_read(struct etnaviv_gpu *gpu,
    44      gpu_write(gpu, domain->profile_config, signal->data);
    46      return gpu_read(gpu, domain->profile_read);
    49  static inline void pipe_select(struct etnaviv_gpu *gpu, u32 clock, unsigned pipe)
    54      gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
    57  static u32 pipe_perf_reg_read(struct etnaviv_gpu *gpu,
    61      u32 clock = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
    65      for (i = 0; i < gpu->identity.pixel_pipes; i++) {
    66          pipe_select(gpu, clock, i);
    [all …]
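Note: pipe_perf_reg_read() selects each pixel pipe via the clock-control register and accumulates the counter across pipes. A self-contained sketch of that loop; register access is modeled by callbacks and the pipe-select field layout is an assumption, not the VIVS_HI_CLOCK_CONTROL encoding.

    #include <stdint.h>

    #define PIPE_SHIFT 20                      /* illustrative field position */
    #define PIPE_MASK  (0xfu << PIPE_SHIFT)

    struct pipe_dev {
        unsigned int pixel_pipes;
        uint32_t (*reg_read)(struct pipe_dev *dev, uint32_t reg);
        void (*reg_write)(struct pipe_dev *dev, uint32_t reg, uint32_t val);
    };

    static uint32_t sample_all_pipes(struct pipe_dev *dev, uint32_t clock_reg,
                                     uint32_t counter_reg)
    {
        uint32_t clock = dev->reg_read(dev, clock_reg);
        uint32_t total = 0;

        for (unsigned int i = 0; i < dev->pixel_pipes; i++) {
            /* replace the pipe-select field, keep the other clock bits */
            uint32_t sel = (clock & ~PIPE_MASK) | (i << PIPE_SHIFT);
            dev->reg_write(dev, clock_reg, sel);
            total += dev->reg_read(dev, counter_reg);
        }
        dev->reg_write(dev, clock_reg, clock); /* restore the original selection */
        return total;
    }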
|
D | etnaviv_gpu.h |
    90      void (*sync_point)(struct etnaviv_gpu *gpu, struct etnaviv_event *event);
    167 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
    169     writel(data, gpu->mmio + reg);
    172 static inline u32 gpu_read(struct etnaviv_gpu *gpu, u32 reg)
    179     readl(gpu->mmio + reg);
    181     return readl(gpu->mmio + reg);
    184 static inline u32 gpu_fix_power_address(struct etnaviv_gpu *gpu, u32 reg)
    187     if (gpu->identity.model == chipModel_GC300 &&
    188         gpu->identity.revision < 0x2000)
    194 static inline void gpu_write_power(struct etnaviv_gpu *gpu, u32 reg, u32 data)
    [all …]
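Note: the header wraps MMIO in thin inline accessors, with a fixup hook that remaps power-register offsets on early GC300 silicon (and gpu_read issuing a dummy read before the real one). A userspace sketch of that accessor pattern; the 0x100 offset and model/revision values mirror the idea but are assumptions for the sketch.

    #include <stdint.h>

    struct mmio_gpu {
        volatile uint32_t *mmio;  /* base of the mapped register window */
        uint32_t model;
        uint32_t revision;
    };

    static inline uint32_t fix_power_address(struct mmio_gpu *gpu, uint32_t reg)
    {
        /* Old revisions put the power registers at a shifted offset. */
        if (gpu->model == 0x300 && gpu->revision < 0x2000)
            reg += 0x100;
        return reg;
    }

    static inline void gpu_write32(struct mmio_gpu *gpu, uint32_t reg, uint32_t v)
    {
        gpu->mmio[reg / 4] = v;    /* stands in for writel(v, mmio + reg) */
    }

    static inline uint32_t gpu_read32(struct mmio_gpu *gpu, uint32_t reg)
    {
        return gpu->mmio[reg / 4]; /* stands in for readl(mmio + reg) */
    }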
|
/linux-6.12.1/drivers/gpu/drm/msm/ |
D | msm_gpu.c |
    25  static int enable_pwrrail(struct msm_gpu *gpu)
    27      struct drm_device *dev = gpu->dev;
    30      if (gpu->gpu_reg) {
    31          ret = regulator_enable(gpu->gpu_reg);
    38      if (gpu->gpu_cx) {
    39          ret = regulator_enable(gpu->gpu_cx);
    49  static int disable_pwrrail(struct msm_gpu *gpu)
    51      if (gpu->gpu_cx)
    52          regulator_disable(gpu->gpu_cx);
    53      if (gpu->gpu_reg)
    [all …]
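Note: enable_pwrrail()/disable_pwrrail() form the usual pairing: enable the core rail, then the cx rail, and tear down in reverse order. A self-contained sketch of that pattern with error unwinding; rail_on()/rail_off() stand in for regulator_enable()/regulator_disable().

    #include <stddef.h>

    struct rail { int enabled; };

    static int rail_on(struct rail *r)   { if (r) r->enabled = 1; return 0; }
    static void rail_off(struct rail *r) { if (r) r->enabled = 0; }

    static int power_up(struct rail *core, struct rail *cx)
    {
        int ret = rail_on(core);
        if (ret)
            return ret;

        ret = rail_on(cx);
        if (ret) {
            rail_off(core);  /* unwind the rail we already enabled */
            return ret;
        }
        return 0;
    }

    static void power_down(struct rail *core, struct rail *cx)
    {
        rail_off(cx);        /* reverse order of power_up() */
        rail_off(core);
    }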
|
D | msm_gpu_devfreq.c |
    22      struct msm_gpu *gpu = dev_to_gpu(dev);
    23      struct msm_gpu_devfreq *df = &gpu->devfreq;
    37       * If the GPU is idle, devfreq is not aware, so just stash
    46      if (gpu->funcs->gpu_set_freq) {
    48          gpu->funcs->gpu_set_freq(gpu, opp, df->suspended);
    59  static unsigned long get_freq(struct msm_gpu *gpu)
    61      struct msm_gpu_devfreq *df = &gpu->devfreq;
    64       * If the GPU is idle, use the shadow/saved freq to avoid
    71      if (gpu->funcs->gpu_get_freq)
    72          return gpu->funcs->gpu_get_freq(gpu);
    [all …]
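Note: while the GPU is idle/suspended the driver stashes the requested rate instead of touching hardware, and reports that shadow value back to devfreq so the governor's math stays consistent. A minimal sketch of that shadow-frequency idea, with an illustrative state struct:

    #include <stdbool.h>

    struct df_state {
        bool suspended;
        unsigned long hw_freq;      /* what the clock is actually set to */
        unsigned long shadow_freq;  /* last requested rate while suspended */
    };

    static void df_target(struct df_state *df, unsigned long freq)
    {
        if (df->suspended) {
            df->shadow_freq = freq; /* apply later, on resume */
            return;
        }
        df->hw_freq = freq;
    }

    static unsigned long df_get_freq(const struct df_state *df)
    {
        return df->suspended ? df->shadow_freq : df->hw_freq;
    }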
|
D | msm_gpu.h |
    47      int (*get_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
    49      int (*set_param)(struct msm_gpu *gpu, struct msm_file_private *ctx,
    51      int (*hw_init)(struct msm_gpu *gpu);
    56      int (*ucode_load)(struct msm_gpu *gpu);
    58      int (*pm_suspend)(struct msm_gpu *gpu);
    59      int (*pm_resume)(struct msm_gpu *gpu);
    60      void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
    61      void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
    63      struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
    64      void (*recover)(struct msm_gpu *gpu);
    [all …]
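Note: this is the per-generation ops table; the msm core calls through these function pointers so each Adreno generation plugs in its own hw_init/submit/recover. A trimmed illustration of the pattern (struct names here are invented, not the msm types):

    struct demo_gpu;
    struct demo_submit;

    struct demo_gpu_funcs {
        int  (*hw_init)(struct demo_gpu *gpu);
        void (*submit)(struct demo_gpu *gpu, struct demo_submit *submit);
        void (*recover)(struct demo_gpu *gpu);
    };

    struct demo_gpu {
        const struct demo_gpu_funcs *funcs;
    };

    static inline int demo_hw_init(struct demo_gpu *gpu)
    {
        return gpu->funcs->hw_init(gpu); /* dispatch to the generation's impl */
    }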
|
/linux-6.12.1/drivers/gpu/drm/msm/adreno/ |
D | a3xx_gpu.c |
    28  static void a3xx_dump(struct msm_gpu *gpu);
    29  static bool a3xx_idle(struct msm_gpu *gpu);
    31  static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    43      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    69      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
    82      adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    85  static bool a3xx_me_init(struct msm_gpu *gpu)
    87      struct msm_ringbuffer *ring = gpu->rb[0];
    108     adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    109     return a3xx_idle(gpu);
    [all …]
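Note: the seqno check at line 43 (the same test recurs in the a2xx/a4xx/a5xx submit paths below) skips re-emitting context-restore commands when the submitting context is already live on the GPU. A minimal sketch of that bookkeeping, with an illustrative tracking struct:

    #include <stdint.h>

    struct ctx_track { uint32_t cur_ctx_seqno; };

    static int submit_needs_restore(struct ctx_track *t, uint32_t submit_seqno)
    {
        if (t->cur_ctx_seqno == submit_seqno)
            return 0;               /* same context: skip the restore */
        t->cur_ctx_seqno = submit_seqno;
        return 1;                   /* new context: emit restore commands */
    }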
|
D | a6xx_gpu.c |
    19  static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
    21      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    29      if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
    33      return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
    37  static bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    40      if (!adreno_idle(gpu, ring))
    43      if (spin_until(_a6xx_check_idle(gpu))) {
    44          DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
    45              gpu->name, __builtin_return_address(0),
    46              gpu_read(gpu, REG_A6XX_RBBM_STATUS),
    [all …]
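Note: the idle check is two-part: the busy bits in RBBM_STATUS must be clear and no interrupt may be pending in RBBM_INT_0_STATUS. A self-contained sketch of that test; the bit masks are placeholders, not the A6xx register layout.

    #include <stdbool.h>
    #include <stdint.h>

    #define STATUS_BUSY_MASK 0x7fffffffu  /* illustrative busy bits */
    #define IRQ_PENDING_MASK 0x000000ffu  /* illustrative pending-irq bits */

    static bool gpu_is_idle(uint32_t rbbm_status, uint32_t int_status)
    {
        if (rbbm_status & STATUS_BUSY_MASK)
            return false;                        /* some block is still busy */
        return !(int_status & IRQ_PENDING_MASK); /* and nothing left to service */
    }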
|
D | adreno_gpu.h |
    36   * so it helps to be able to group the GPU devices by generation and if
    72      int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
    171  * of gpu firmware to linux-firmware, the fw files were
    234  * GPU specific offsets will be exported by GPU specific
    270 static inline uint8_t adreno_patchid(const struct adreno_gpu *gpu)
    276     WARN_ON_ONCE(gpu->info->family >= ADRENO_6XX_GEN1);
    277     return gpu->chip_id & 0xff;
    280 static inline bool adreno_is_revn(const struct adreno_gpu *gpu, uint32_t revn)
    282     if (WARN_ON_ONCE(!gpu->info))
    284     return gpu->info->revn == revn;
    [all …]
|
D | a5xx_gpu.c |
    17  static void a5xx_dump(struct msm_gpu *gpu);
    21  static void update_shadow_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    23      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    33  void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
    36      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    46          update_shadow_rptr(gpu, ring);
    63      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    66  static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    68      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    80      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    [all …]
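Note: a5xx_flush() ends by computing a new write pointer from the ring's cursor and publishing it to the CP_RB_WPTR doorbell. A minimal sketch of that flush step; the ring layout (power-of-two size in dwords) is an assumption for the sketch, not the msm_ringbuffer definition.

    #include <stdint.h>

    struct ring {
        uint32_t *start;
        uint32_t *cur;       /* next free dword */
        uint32_t  ndwords;   /* ring size in dwords, power of two */
    };

    static uint32_t ring_wptr(const struct ring *ring)
    {
        /* distance from start, wrapped to the ring size */
        return (uint32_t)(ring->cur - ring->start) & (ring->ndwords - 1);
    }

    static void ring_flush(struct ring *ring, void (*write_doorbell)(uint32_t))
    {
        write_doorbell(ring_wptr(ring)); /* CPU->GPU: new commands available */
    }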
|
D | a4xx_gpu.c |
    22  static void a4xx_dump(struct msm_gpu *gpu);
    23  static bool a4xx_idle(struct msm_gpu *gpu);
    25  static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    37      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    63      /* BIT(31) of CACHE_FLUSH_TS triggers CACHE_FLUSH_TS IRQ from GPU */
    69      adreno_flush(gpu, ring, REG_A4XX_CP_RB_WPTR);
    76  static void a4xx_enable_hwcg(struct msm_gpu *gpu)
    78      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    81          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_TP(i), 0x02222202);
    83          gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2_TP(i), 0x00002222);
    [all …]
|
D | a5xx_power.c |
    103 static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
    105     struct drm_device *dev = gpu->dev;
    122 static void a530_lm_setup(struct msm_gpu *gpu)
    124     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    130         gpu_write(gpu, a5xx_sequence_regs[i].reg,
    133     /* Hard code the A530 GPU thermal sensor ID for the GPMU */
    134     gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
    135     gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
    136     gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
    139     gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
    [all …]
|
D | adreno_device.c |
    16  MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off…
    41      /* identify gpu: */
    62      struct msm_gpu *gpu = NULL;
    67          gpu = dev_to_gpu(&pdev->dev);
    69      if (!gpu) {
    70          dev_err_once(dev->dev, "no GPU device was found\n");
    74      adreno_gpu = to_adreno_gpu(gpu);
    86      if (gpu->funcs->ucode_load) {
    87          ret = gpu->funcs->ucode_load(gpu);
    94       * booting the gpu, go ahead and enable runpm:
    [all …]
|
D | a5xx_preempt.c |
    25  static inline void set_preempt_state(struct a5xx_gpu *gpu,
    34      atomic_set(&gpu->preempt_state, new);
    40  static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
    52      gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
    56  static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
    58      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    63      for (i = 0; i < gpu->nr_rings; i++) {
    65          struct msm_ringbuffer *ring = gpu->rb[i];
    68          empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
    83      struct msm_gpu *gpu = &a5xx_gpu->base.base;
    [all …]
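Note: get_next_ring() scans rings in priority order and picks the first one with unconsumed commands (wptr != rptr, per line 68). A self-contained sketch of that selection logic, with an illustrative ring struct:

    #include <stddef.h>
    #include <stdint.h>

    struct prio_ring { uint32_t wptr, rptr; };

    static struct prio_ring *pick_next_ring(struct prio_ring *rings, int nr_rings)
    {
        for (int i = 0; i < nr_rings; i++) {
            /* rings are ordered by priority, index 0 highest */
            if (rings[i].wptr != rings[i].rptr)
                return &rings[i];
        }
        return NULL; /* all rings drained, nothing to preempt to */
    }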
|
D | a2xx_gpu.c |
    10  static void a2xx_dump(struct msm_gpu *gpu);
    11  static bool a2xx_idle(struct msm_gpu *gpu);
    13  static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
    25      if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
    51      adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    54  static bool a2xx_me_init(struct msm_gpu *gpu)
    56      struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    58      struct msm_ringbuffer *ring = gpu->rb[0];
    104     adreno_flush(gpu, ring, REG_AXXX_CP_RB_WPTR);
    105     return a2xx_idle(gpu);
    [all …]
|
D | a5xx_debugfs.c |
    14  static void pfp_print(struct msm_gpu *gpu, struct drm_printer *p)
    21      gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, i);
    23          gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA));
    27  static void me_print(struct msm_gpu *gpu, struct drm_printer *p)
    34      gpu_write(gpu, REG_A5XX_CP_ME_STAT_ADDR, i);
    36          gpu_read(gpu, REG_A5XX_CP_ME_STAT_DATA));
    40  static void meq_print(struct msm_gpu *gpu, struct drm_printer *p)
    45      gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
    49          gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
    53  static void roq_print(struct msm_gpu *gpu, struct drm_printer *p)
    [all …]
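Note: all four dump helpers share one indexed-register idiom: write an index into an ADDR register, then read the paired DATA register. A self-contained sketch of that pattern; callbacks stand in for gpu_write()/gpu_read() so the example runs outside the kernel.

    #include <inttypes.h>
    #include <stdio.h>

    struct dbg_dev {
        void     (*reg_write)(struct dbg_dev *d, uint32_t reg, uint32_t val);
        uint32_t (*reg_read)(struct dbg_dev *d, uint32_t reg);
    };

    static void dump_indexed(struct dbg_dev *d, uint32_t addr_reg,
                             uint32_t data_reg, unsigned int count)
    {
        for (unsigned int i = 0; i < count; i++) {
            d->reg_write(d, addr_reg, i);  /* select entry i */
            printf("%02u: %08" PRIx32 "\n", i, d->reg_read(d, data_reg));
        }
    }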
|
D | a6xx_gpu_state.c |
    131 static int a6xx_crashdumper_init(struct msm_gpu *gpu,
    134     dumper->ptr = msm_gem_kernel_new(gpu->dev,
    135         SZ_1M, MSM_BO_WC, gpu->aspace,
    144 static int a6xx_crashdumper_run(struct msm_gpu *gpu,
    147     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    161     gpu_write64(gpu, REG_A6XX_CP_CRASH_SCRIPT_BASE, dumper->iova);
    163     gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 1);
    165     ret = gpu_poll_timeout(gpu, REG_A6XX_CP_CRASH_DUMP_STATUS, val,
    168     gpu_write(gpu, REG_A6XX_CP_CRASH_DUMP_CNTL, 0);
    174 static int debugbus_read(struct msm_gpu *gpu, u32 block, u32 offset,
    [all …]
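Note: the crashdumper handshake is: point the hardware at a script buffer, set the control bit, poll a status register for completion, then always clear control. A sketch of that sequence with a bounded poll standing in for gpu_poll_timeout(); the done-bit value and loop bound are assumptions, not the A6xx definition.

    #include <stdint.h>

    struct dump_hw {
        void     (*write64)(struct dump_hw *h, uint32_t reg, uint64_t val);
        void     (*write32)(struct dump_hw *h, uint32_t reg, uint32_t val);
        uint32_t (*read32)(struct dump_hw *h, uint32_t reg);
    };

    static int crashdump_run(struct dump_hw *hw, uint32_t base_reg,
                             uint32_t cntl_reg, uint32_t status_reg,
                             uint64_t script_iova)
    {
        int ret = -1;

        hw->write64(hw, base_reg, script_iova);   /* where the script lives */
        hw->write32(hw, cntl_reg, 1);             /* kick the dumper */

        for (int i = 0; i < 1000; i++) {          /* bounded poll ~= timeout */
            if (hw->read32(hw, status_reg) & 0x2) { /* done bit, illustrative */
                ret = 0;
                break;
            }
        }
        hw->write32(hw, cntl_reg, 0);             /* always stop the dumper */
        return ret;
    }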
|
D | adreno_gpu.c |
    25  MODULE_PARM_DESC(address_space_size, "Override for size of processes private GPU address space");
    30  static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname,
    33      struct device *dev = &gpu->pdev->dev;
    85      ret = request_firmware_direct(&fw, fwname, gpu->dev->dev);
    90          fw = adreno_request_fw(to_adreno_gpu(gpu), fwname);
    140     if (signed_fwname || (to_adreno_gpu(gpu)->fwloc == FW_LOCATION_LEGACY)) {
    176 int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid)
    178     struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
    179     struct platform_device *pdev = gpu->pdev;
    191     return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid);
    [all …]
|
/linux-6.12.1/Documentation/gpu/ |
D | i915.rst |
    19  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    22  .. kernel-doc:: drivers/gpu/drm/i915/intel_runtime_pm.c
    25  .. kernel-doc:: drivers/gpu/drm/i915/intel_uncore.c
    31  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    34  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    37  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    40  .. kernel-doc:: drivers/gpu/drm/i915/i915_irq.c
    46  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    49  .. kernel-doc:: drivers/gpu/drm/i915/i915_vgpu.c
    55  .. kernel-doc:: drivers/gpu/drm/i915/intel_gvt.c
    [all …]
|
D | drm-kms-helpers.rst |
    53  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    59  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    68  .. kernel-doc:: drivers/gpu/drm/drm_atomic_helper.c
    74  .. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
    80  .. kernel-doc:: drivers/gpu/drm/drm_atomic_state_helper.c
    86  .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
    92  .. kernel-doc:: drivers/gpu/drm/drm_gem_atomic_helper.c
    98  .. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
    104 .. kernel-doc:: drivers/gpu/drm/drm_simple_kms_helper.c
    110 .. kernel-doc:: drivers/gpu/drm/drm_fb_helper.c
    [all …]
|
/linux-6.12.1/drivers/gpu/drm/ |
D | Kconfig |
    266 source "drivers/gpu/drm/display/Kconfig"
    272     GPU memory management subsystem for devices with multiple
    273     GPU memory types. Will be enabled automatically if a device driver
    286     Enables unit tests for TTM, a GPU memory manager subsystem used
    304     GPU-VM representation providing helpers to manage a GPU's virtual
    347 source "drivers/gpu/drm/i2c/Kconfig"
    349 source "drivers/gpu/drm/arm/Kconfig"
    351 source "drivers/gpu/drm/radeon/Kconfig"
    353 source "drivers/gpu/drm/amd/amdgpu/Kconfig"
    355 source "drivers/gpu/drm/nouveau/Kconfig"
    [all …]
|
/linux-6.12.1/drivers/gpu/drm/panthor/ |
D | panthor_gpu.c |
    24   * struct panthor_gpu - GPU block management data.
    27      /** @irq: GPU irq. */
    33      /** @pending_reqs: Pending GPU requests. */
    36      /** @reqs_acked: GPU request wait queue. */
    41   * struct panthor_model - GPU model description
    55   * GPU_MODEL() - Define a GPU model. A GPU product can be uniquely identified
    58   * @_name: Name for the GPU model.
    152     drm_warn(&ptdev->base, "GPU Fault 0x%08x (%s) at 0x%016llx\n",
    157     drm_warn(&ptdev->base, "GPU Fault in protected mode\n");
    159     spin_lock(&ptdev->gpu->reqs_lock);
    [all …]
|
/linux-6.12.1/Documentation/devicetree/bindings/gpu/ |
D | img,powervr-sgx.yaml |
    6   $id: http://devicetree.org/schemas/gpu/img,powervr-sgx.yaml#
    19      - ti,omap3430-gpu # Rev 121
    20      - ti,omap3630-gpu # Rev 125
    24      - ingenic,jz4780-gpu # Rev 130
    25      - ti,omap4430-gpu # Rev 120
    29      - allwinner,sun6i-a31-gpu # MP2 Rev 115
    30      - ti,omap4470-gpu # MP1 Rev 112
    31      - ti,omap5432-gpu # MP2 Rev 105
    32      - ti,am5728-gpu # MP2 Rev 116
    33      - ti,am6548-gpu # MP1 Rev 117
    [all …]
|