// SPDX-License-Identifier: GPL-2.0+
/* Copyright (C) 2014-2018 Broadcom */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/reset.h>
#include <linux/sched/signal.h>
#include <linux/uaccess.h>

#include <drm/drm_managed.h>

#include "v3d_drv.h"
#include "v3d_regs.h"
#include "v3d_trace.h"

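/* Program the per-core registers that the driver treats as invariant.
 * This is reapplied after every reset.
 */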
static void
v3d_init_core(struct v3d_dev *v3d, int core)
{
	/* Set OVRTMUOUT, which means that the texture sampler uniform
	 * configuration's tmu output type field is used, instead of
	 * using the hardware default behavior based on the texture
	 * type. If you want the default behavior, you can still put
	 * "2" in the indirect texture state's output_type field.
	 */
	if (v3d->ver < 40)
		V3D_CORE_WRITE(core, V3D_CTL_MISCCFG, V3D_MISCCFG_OVRTMUOUT);

	/* Whenever we flush the L2T cache, we always want to flush
	 * the whole thing.
	 */
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLSTA, 0);
	V3D_CORE_WRITE(core, V3D_CTL_L2TFLEND, ~0);
}

/* Sets invariant state for the HW. */
static void
v3d_init_hw_state(struct v3d_dev *v3d)
{
	v3d_init_core(v3d, 0);
}

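/* Ask the GMP to stop issuing AXI transactions and wait for any
 * outstanding reads and writes to drain.
 */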
static void
v3d_idle_axi(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_GMP_CFG(v3d->ver), V3D_GMP_CFG_STOP_REQ);

	if (wait_for((V3D_CORE_READ(core, V3D_GMP_STATUS(v3d->ver)) &
		      (V3D_GMP_STATUS_RD_COUNT_MASK |
		       V3D_GMP_STATUS_WR_COUNT_MASK |
		       V3D_GMP_STATUS_CFG_BUSY)) == 0, 100)) {
		DRM_ERROR("Failed to wait for safe GMP shutdown\n");
	}
}

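/* Request a safe shutdown of the GCA (only present before V3D 4.1) and
 * wait for the hardware to acknowledge it.
 */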
static void
v3d_idle_gca(struct v3d_dev *v3d)
{
	if (v3d->ver >= 41)
		return;

	V3D_GCA_WRITE(V3D_GCA_SAFE_SHUTDOWN, V3D_GCA_SAFE_SHUTDOWN_EN);

	if (wait_for((V3D_GCA_READ(V3D_GCA_SAFE_SHUTDOWN_ACK) &
		      V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED) ==
		     V3D_GCA_SAFE_SHUTDOWN_ACK_ACKED, 100)) {
		DRM_ERROR("Failed to wait for safe GCA shutdown\n");
	}
}

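/* Reset V3D through the GR bridge when no reset controller is
 * available.  Which SW_INIT register to poke depends on the bridge's
 * major revision (2 vs. 7).
 */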
static void
v3d_reset_by_bridge(struct v3d_dev *v3d)
{
	int version = V3D_BRIDGE_READ(V3D_TOP_GR_BRIDGE_REVISION);

	if (V3D_GET_FIELD(version, V3D_TOP_GR_BRIDGE_MAJOR) == 2) {
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0,
				 V3D_TOP_GR_BRIDGE_SW_INIT_0_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_0, 0);

		/* GFXH-1383: The SW_INIT may cause a stray write to address 0
		 * of the unit, so reset it to its power-on value here.
		 */
		V3D_WRITE(V3D_HUB_AXICFG, V3D_HUB_AXICFG_MAX_LEN_MASK);
	} else {
		WARN_ON_ONCE(V3D_GET_FIELD(version,
					   V3D_TOP_GR_BRIDGE_MAJOR) != 7);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1,
				 V3D_TOP_GR_BRIDGE_SW_INIT_1_V3D_CLK_108_SW_INIT);
		V3D_BRIDGE_WRITE(V3D_TOP_GR_BRIDGE_SW_INIT_1, 0);
	}
}

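/* Reset the core, preferring the reset controller if one was provided,
 * then reprogram the invariant hardware state.
 */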
static void
v3d_reset_v3d(struct v3d_dev *v3d)
{
	if (v3d->reset)
		reset_control_reset(v3d->reset);
	else
		v3d_reset_by_bridge(v3d);

	v3d_init_hw_state(v3d);
}

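/* Full GPU reset used to recover from a hang: quiesce the GCA, reset
 * the core, and restore the MMU page table and interrupt state.
 */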
void
v3d_reset(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;

	DRM_DEV_ERROR(dev->dev, "Resetting GPU for hang.\n");
	DRM_DEV_ERROR(dev->dev, "V3D_ERR_STAT: 0x%08x\n",
		      V3D_CORE_READ(0, V3D_ERR_STAT));
	trace_v3d_reset_begin(dev);

	/* XXX: only needed for safe powerdown, not reset. */
	if (false)
		v3d_idle_axi(v3d, 0);

	v3d_idle_gca(v3d);
	v3d_reset_v3d(v3d);

	v3d_mmu_set_page_table(v3d);
	v3d_irq_reset(v3d);

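	/* Stop any active perfmon without capturing its counter values. */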
	v3d_perfmon_stop(v3d, v3d->active_perfmon, false);

	trace_v3d_reset_end(dev);
}

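/* Flush the L3 cache through the GCA.  V3D 4.1+ has no GCA, so this is
 * a no-op there.
 */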
static void
v3d_flush_l3(struct v3d_dev *v3d)
{
	if (v3d->ver < 41) {
		u32 gca_ctrl = V3D_GCA_READ(V3D_GCA_CACHE_CTRL);

		V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
			      gca_ctrl | V3D_GCA_CACHE_CTRL_FLUSH);

		if (v3d->ver < 33) {
			V3D_GCA_WRITE(V3D_GCA_CACHE_CTRL,
				      gca_ctrl & ~V3D_GCA_CACHE_CTRL_FLUSH);
		}
	}
}

/* Invalidates the (read-only) L2C cache. This was the L2 cache for
 * uniforms and instructions on V3D 3.2.
 */
static void
v3d_invalidate_l2c(struct v3d_dev *v3d, int core)
{
	if (v3d->ver > 32)
		return;

	V3D_CORE_WRITE(core, V3D_CTL_L2CACTL,
		       V3D_L2CACTL_L2CCLR |
		       V3D_L2CACTL_L2CENA);
}

/* Invalidates texture L2 cachelines */
static void
v3d_flush_l2t(struct v3d_dev *v3d, int core)
{
	/* While there is a busy bit (V3D_L2TCACTL_L2TFLS), we don't
	 * need to wait for completion before dispatching the job --
	 * L2T accesses will be stalled until the flush has completed.
	 * However, we do need to make sure we don't try to trigger a
	 * new flush while the L2_CLEAN queue is trying to
	 * synchronously clean after a job.
	 */
	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_FLUSH, V3D_L2TCACTL_FLM));
	mutex_unlock(&v3d->cache_clean_lock);
}

/* Cleans texture L1 and L2 cachelines (writing back dirty data).
 *
 * For cleaning, which happens from the CACHE_CLEAN queue after CSD has
 * executed, we need to make sure that the clean is done before
 * signaling job completion. So, we synchronously wait before
 * returning, and we make sure that L2 invalidates don't happen in the
 * meantime to confuse our are-we-done checks.
 */
void
v3d_clean_caches(struct v3d_dev *v3d)
{
	struct drm_device *dev = &v3d->drm;
	int core = 0;

	trace_v3d_cache_clean_begin(dev);

	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL, V3D_L2TCACTL_TMUWCF);
	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_TMUWCF), 100)) {
		DRM_ERROR("Timeout waiting for TMU write combiner flush\n");
	}

	mutex_lock(&v3d->cache_clean_lock);
	V3D_CORE_WRITE(core, V3D_CTL_L2TCACTL,
		       V3D_L2TCACTL_L2TFLS |
		       V3D_SET_FIELD(V3D_L2TCACTL_FLM_CLEAN, V3D_L2TCACTL_FLM));

	if (wait_for(!(V3D_CORE_READ(core, V3D_CTL_L2TCACTL) &
		       V3D_L2TCACTL_L2TFLS), 100)) {
		DRM_ERROR("Timeout waiting for L2T clean\n");
	}

	mutex_unlock(&v3d->cache_clean_lock);

	trace_v3d_cache_clean_end(dev);
}

/* Invalidates the slice caches. These are read-only caches. */
static void
v3d_invalidate_slices(struct v3d_dev *v3d, int core)
{
	V3D_CORE_WRITE(core, V3D_CTL_SLCACTL,
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TVCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_TDCCS) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_UCC) |
		       V3D_SET_FIELD(0xf, V3D_SLCACTL_ICC));
}

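/* Invalidate the GPU's caches before a job runs, so it observes
 * up-to-date buffer contents.
 */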
void
v3d_invalidate_caches(struct v3d_dev *v3d)
{
	/* Invalidate the caches from the outside in. That way if
	 * another CL's concurrent use of nearby memory were to pull
	 * an invalidated cacheline back in, we wouldn't leave stale
	 * data in the inner cache.
	 */
	v3d_flush_l3(v3d);
	v3d_invalidate_l2c(v3d, 0);
	v3d_flush_l2t(v3d, 0);
	v3d_invalidate_slices(v3d, 0);
}

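/* One-time setup at bind time: per-queue fence contexts and stats,
 * locking, the GPU virtual address allocator, the MMU page table, and
 * the scheduler.
 */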
int
v3d_gem_init(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);
	u32 pt_size = 4096 * 1024;
	int ret, i;

	for (i = 0; i < V3D_MAX_QUEUES; i++) {
		struct v3d_queue_state *queue = &v3d->queue[i];

		queue->fence_context = dma_fence_context_alloc(1);
		memset(&queue->stats, 0, sizeof(queue->stats));
		seqcount_init(&queue->stats.lock);
	}

	spin_lock_init(&v3d->mm_lock);
	spin_lock_init(&v3d->job_lock);
	ret = drmm_mutex_init(dev, &v3d->bo_lock);
	if (ret)
		return ret;
	ret = drmm_mutex_init(dev, &v3d->reset_lock);
	if (ret)
		return ret;
	ret = drmm_mutex_init(dev, &v3d->sched_lock);
	if (ret)
		return ret;
	ret = drmm_mutex_init(dev, &v3d->cache_clean_lock);
	if (ret)
		return ret;

	/* Note: We don't allocate address 0. Various bits of HW
	 * treat 0 as special, such as the occlusion query counters
	 * where 0 means "disabled".
	 */
	drm_mm_init(&v3d->mm, 1, pt_size / sizeof(u32) - 1);

	v3d->pt = dma_alloc_wc(v3d->drm.dev, pt_size,
			       &v3d->pt_paddr,
			       GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
	if (!v3d->pt) {
		drm_mm_takedown(&v3d->mm);
		dev_err(v3d->drm.dev,
			"Failed to allocate page tables. Please ensure you have DMA enabled.\n");
		return -ENOMEM;
	}

	v3d_init_hw_state(v3d);
	v3d_mmu_set_page_table(v3d);

	ret = v3d_sched_init(v3d);
	if (ret) {
		drm_mm_takedown(&v3d->mm);
		dma_free_coherent(v3d->drm.dev, pt_size, (void *)v3d->pt,
				  v3d->pt_paddr);
		return ret;
	}

	return 0;
}

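/* Tear down what v3d_gem_init() set up, in reverse order. */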
void
v3d_gem_destroy(struct drm_device *dev)
{
	struct v3d_dev *v3d = to_v3d_dev(dev);

	v3d_sched_fini(v3d);

	/* Waiting for jobs to finish would need to be done before
	 * unregistering V3D.
	 */
	WARN_ON(v3d->bin_job);
	WARN_ON(v3d->render_job);
	WARN_ON(v3d->tfu_job);
	WARN_ON(v3d->csd_job);

	drm_mm_takedown(&v3d->mm);

	dma_free_coherent(v3d->drm.dev, 4096 * 1024, (void *)v3d->pt,
			  v3d->pt_paddr);
}