/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors: Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_vce.h"
#include "cikd.h"
#include "vce/vce_2_0_d.h"
#include "vce/vce_2_0_sh_mask.h"
#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#define VCE_V2_0_FW_SIZE	(256 * 1024)
#define VCE_V2_0_STACK_SIZE	(64 * 1024)
#define VCE_V2_0_DATA_SIZE	(23552 * AMDGPU_MAX_VCE_HANDLES)
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK	0x02

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev);

/**
 * vce_v2_0_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t vce_v2_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(mmVCE_RB_RPTR);
	else
		return RREG32(mmVCE_RB_RPTR2);
}

/**
 * vce_v2_0_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t vce_v2_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		return RREG32(mmVCE_RB_WPTR);
	else
		return RREG32(mmVCE_RB_WPTR2);
}

/**
 * vce_v2_0_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void vce_v2_0_ring_set_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring->me == 0)
		WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	else
		WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
}

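/**
 * vce_v2_0_lmi_clean - wait for the LMI to drain
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_LMI_STATUS until the local memory interface reports clean.
 * Returns 0 on success, -ETIMEDOUT if it never does.
 */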
static int vce_v2_0_lmi_clean(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_LMI_STATUS);

			if (status & 0x337f)
				return 0;
			mdelay(10);
		}
	}

	return -ETIMEDOUT;
}

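/**
 * vce_v2_0_firmware_loaded - wait for the firmware to signal readiness
 *
 * @adev: amdgpu_device pointer
 *
 * Poll VCE_STATUS for the VCPU report of a loaded firmware, soft
 * resetting the ECPU between retries.  Returns -ETIMEDOUT on failure.
 */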
static int vce_v2_0_firmware_loaded(struct amdgpu_device *adev)
{
	int i, j;

	for (i = 0; i < 10; ++i) {
		for (j = 0; j < 100; ++j) {
			uint32_t status = RREG32(mmVCE_STATUS);

			if (status & VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK)
				return 0;
			mdelay(10);
		}

		DRM_ERROR("VCE not responding, trying to reset the ECPU!!!\n");
		WREG32_P(mmVCE_SOFT_RESET,
			VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
		WREG32_P(mmVCE_SOFT_RESET, 0,
			~VCE_SOFT_RESET__ECPU_SOFT_RESET_MASK);
		mdelay(10);
	}

	return -ETIMEDOUT;
}

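/**
 * vce_v2_0_disable_cg - disable clock gating
 *
 * @adev: amdgpu_device pointer
 *
 * Override the CGTT clocks to keep the VCE block ungated.
 */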
static void vce_v2_0_disable_cg(struct amdgpu_device *adev)
{
	WREG32(mmVCE_CGTT_CLK_OVERRIDE, 7);
}

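/**
 * vce_v2_0_init_cg - initialize clock gating registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the default VCE clock gating register values.
 */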
static void vce_v2_0_init_cg(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32(mmVCE_CLOCK_GATING_A);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	tmp |= 0x40000;
	WREG32(mmVCE_CLOCK_GATING_A, tmp);

	tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0xfff;
	tmp |= ((0 << 0) | (4 << 4));
	WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp |= 0x10;
	tmp &= ~0x100000;
	WREG32(mmVCE_CLOCK_GATING_B, tmp);
}

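/**
 * vce_v2_0_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the VCE memory controller know where the firmware, stack and
 * data segments are located in the VCPU cache.
 */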
static void vce_v2_0_mc_resume(struct amdgpu_device *adev)
{
	uint32_t size, offset;

	WREG32_P(mmVCE_CLOCK_GATING_A, 0, ~(1 << 16));
	WREG32_P(mmVCE_UENC_CLOCK_GATING, 0x1FF000, ~0xFF9FF000);
	WREG32_P(mmVCE_UENC_REG_CLOCK_GATING, 0x3F, ~0x3F);
	WREG32(mmVCE_CLOCK_GATING_B, 0xf7);

	WREG32(mmVCE_LMI_CTRL, 0x00398000);
	WREG32_P(mmVCE_LMI_CACHE_CTRL, 0x0, ~0x1);
	WREG32(mmVCE_LMI_SWAP_CNTL, 0);
	WREG32(mmVCE_LMI_SWAP_CNTL1, 0);
	WREG32(mmVCE_LMI_VM_CTRL, 0);

	WREG32(mmVCE_LMI_VCPU_CACHE_40BIT_BAR, (adev->vce.gpu_addr >> 8));

	offset = AMDGPU_VCE_FIRMWARE_OFFSET;
	size = VCE_V2_0_FW_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET0, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE0, size);

	offset += size;
	size = VCE_V2_0_STACK_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET1, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE1, size);

	offset += size;
	size = VCE_V2_0_DATA_SIZE;
	WREG32(mmVCE_VCPU_CACHE_OFFSET2, offset & 0x7fffffff);
	WREG32(mmVCE_VCPU_CACHE_SIZE2, size);

	WREG32_P(mmVCE_LMI_CTRL2, 0x0, ~0x100);
	WREG32_FIELD(VCE_SYS_INT_EN, VCE_SYS_INT_TRAP_INTERRUPT_EN, 1);
}

static bool vce_v2_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return !(RREG32(mmSRBM_STATUS2) & SRBM_STATUS2__VCE_BUSY_MASK);
}

static int vce_v2_0_wait_for_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	unsigned i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (vce_v2_0_is_idle(handle))
			return 0;
	}
	return -ETIMEDOUT;
}

/**
 * vce_v2_0_start - start VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Setup and start the VCE block
 */
static int vce_v2_0_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	int r;

	/* set BUSY flag */
	WREG32_P(mmVCE_STATUS, 1, ~1);

	vce_v2_0_init_cg(adev);
	vce_v2_0_disable_cg(adev);

	vce_v2_0_mc_resume(adev);

	ring = &adev->vce.ring[0];
	WREG32(mmVCE_RB_RPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE, ring->ring_size / 4);

	ring = &adev->vce.ring[1];
	WREG32(mmVCE_RB_RPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_WPTR2, lower_32_bits(ring->wptr));
	WREG32(mmVCE_RB_BASE_LO2, ring->gpu_addr);
	WREG32(mmVCE_RB_BASE_HI2, upper_32_bits(ring->gpu_addr));
	WREG32(mmVCE_RB_SIZE2, ring->ring_size / 4);

	WREG32_FIELD(VCE_VCPU_CNTL, CLK_EN, 1);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 1);
	mdelay(100);
	WREG32_FIELD(VCE_SOFT_RESET, ECPU_SOFT_RESET, 0);

	r = vce_v2_0_firmware_loaded(adev);

	/* clear BUSY flag */
	WREG32_P(mmVCE_STATUS, 0, ~1);

	if (r) {
		DRM_ERROR("VCE not responding, giving up!!!\n");
		return r;
	}

	return 0;
}

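/**
 * vce_v2_0_stop - stop VCE block
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the block to go idle, then stall the memory interface and
 * put the VCPU and related blocks into reset.
 */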
static int vce_v2_0_stop(struct amdgpu_device *adev)
{
	int i;
	int status;

	if (vce_v2_0_lmi_clean(adev)) {
		DRM_INFO("VCE is not idle\n");
		return 0;
	}

	if (vce_v2_0_wait_for_idle(adev)) {
		DRM_INFO("VCE is busy, can't set clock gating\n");
		return 0;
	}

	/* Stall UMC and register bus before resetting VCPU */
	WREG32_P(mmVCE_LMI_CTRL2, 1 << 8, ~(1 << 8));

	for (i = 0; i < 100; ++i) {
		status = RREG32(mmVCE_LMI_STATUS);
		if (status & 0x240)
			break;
		mdelay(1);
	}

	WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x80001);

	/* put LMI, VCPU, RBC etc... into reset */
	WREG32_P(mmVCE_SOFT_RESET, 1, ~0x1);

	WREG32(mmVCE_STATUS, 0);

	return 0;
}

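/**
 * vce_v2_0_set_sw_cg - software controlled clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: gate or ungate the clocks
 *
 * Gate or ungate the VCE clocks under software control.
 */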
static void vce_v2_0_set_sw_cg(struct amdgpu_device *adev, bool gated)
{
	u32 tmp;

	if (gated) {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp &= ~0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
	} else {
		tmp = RREG32(mmVCE_CLOCK_GATING_B);
		tmp |= 0xe7;
		tmp &= ~0xe70000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);

		tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
		tmp |= 0x1fe000;
		tmp &= ~0xff000000;
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

		tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
		tmp |= 0x3fc;
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);
	}
}

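/**
 * vce_v2_0_set_dyn_cg - dynamic clock gating
 *
 * @adev: amdgpu_device pointer
 * @gated: gate or ungate the clocks
 *
 * Configure the VCE clocks for dynamic (hardware controlled) gating,
 * with the software controlled exceptions noted below.
 */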
static void vce_v2_0_set_dyn_cg(struct amdgpu_device *adev, bool gated)
{
	u32 orig, tmp;

	/* LMI_MC/LMI_UMC always set in dynamic,
	 * set {CGC_*_GATE_MODE, CGC_*_SW_GATE} = {0, 0}
	 */
	tmp = RREG32(mmVCE_CLOCK_GATING_B);
	tmp &= ~0x00060006;

	/* Exception: the ECPU, IH, SEM, SYS blocks need to be turned on/off by SW */
	if (gated) {
		tmp |= 0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	} else {
		tmp |= 0xe1;
		tmp &= ~0xe10000;
		WREG32(mmVCE_CLOCK_GATING_B, tmp);
	}

	orig = tmp = RREG32(mmVCE_UENC_CLOCK_GATING);
	tmp &= ~0x1fe000;
	tmp &= ~0xff000000;
	if (tmp != orig)
		WREG32(mmVCE_UENC_CLOCK_GATING, tmp);

	orig = tmp = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
	tmp &= ~0x3fc;
	if (tmp != orig)
		WREG32(mmVCE_UENC_REG_CLOCK_GATING, tmp);

	/* set VCE_UENC_REG_CLOCK_GATING always in dynamic mode */
	WREG32(mmVCE_UENC_REG_CLOCK_GATING, 0x00);

	if (gated)
		WREG32(mmVCE_CGTT_CLK_OVERRIDE, 0);
}

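/**
 * vce_v2_0_enable_mgcg - enable medium grain clock gating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable clock gating
 * @sw_cg: use software instead of dynamic gating when enabling
 *
 * Enable or disable VCE medium grain clock gating, selecting between
 * the software controlled and dynamic modes.
 */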
static void vce_v2_0_enable_mgcg(struct amdgpu_device *adev, bool enable,
								bool sw_cg)
{
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, true);
		else
			vce_v2_0_set_dyn_cg(adev, true);
	} else {
		vce_v2_0_disable_cg(adev);

		if (sw_cg)
			vce_v2_0_set_sw_cg(adev, false);
		else
			vce_v2_0_set_dyn_cg(adev, false);
	}
}

static int vce_v2_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->vce.num_rings = 2;

	vce_v2_0_set_ring_funcs(adev);
	vce_v2_0_set_irq_funcs(adev);

	return 0;
}

static int vce_v2_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* VCE */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 167, &adev->vce.irq);
	if (r)
		return r;

	r = amdgpu_vce_sw_init(adev, VCE_V2_0_FW_SIZE +
		VCE_V2_0_STACK_SIZE + VCE_V2_0_DATA_SIZE);
	if (r)
		return r;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	for (i = 0; i < adev->vce.num_rings; i++) {
		enum amdgpu_ring_priority_level hw_prio = amdgpu_vce_get_ring_prio(i);

		ring = &adev->vce.ring[i];
		sprintf(ring->name, "vce%d", i);
		r = amdgpu_ring_init(adev, ring, 512, &adev->vce.irq, 0,
				     hw_prio, NULL);
		if (r)
			return r;
	}

	return r;
}

static int vce_v2_0_sw_fini(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_suspend(adev);
	if (r)
		return r;

	return amdgpu_vce_sw_fini(adev);
}

static int vce_v2_0_hw_init(void *handle)
{
	int r, i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_asic_set_vce_clocks(adev, 10000, 10000);
	vce_v2_0_enable_mgcg(adev, true, false);

	for (i = 0; i < adev->vce.num_rings; i++) {
		r = amdgpu_ring_test_helper(&adev->vce.ring[i]);
		if (r)
			return r;
	}

	DRM_INFO("VCE initialized successfully.\n");

	return 0;
}

static int vce_v2_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	cancel_delayed_work_sync(&adev->vce.idle_work);

	return 0;
}

static int vce_v2_0_suspend(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/*
	 * Proper cleanups before halting the HW engine:
	 *   - cancel the delayed idle work
	 *   - enable powergating
	 *   - enable clockgating
	 *   - disable dpm
	 *
	 * TODO: to align with the VCN implementation, move the
	 * jobs for clockgating/powergating/dpm setting to
	 * ->set_powergating_state().
	 */
	cancel_delayed_work_sync(&adev->vce.idle_work);

	if (adev->pm.dpm_enabled) {
		amdgpu_dpm_enable_vce(adev, false);
	} else {
		amdgpu_asic_set_vce_clocks(adev, 0, 0);
		amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
						       AMD_CG_STATE_GATE);
	}

	r = vce_v2_0_hw_fini(adev);
	if (r)
		return r;

	return amdgpu_vce_suspend(adev);
}

static int vce_v2_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = amdgpu_vce_resume(adev);
	if (r)
		return r;

	return vce_v2_0_hw_init(adev);
}

static int vce_v2_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	WREG32_FIELD(SRBM_SOFT_RESET, SOFT_RESET_VCE, 1);
	mdelay(5);

	return vce_v2_0_start(adev);
}

static int vce_v2_0_set_interrupt_state(struct amdgpu_device *adev,
					struct amdgpu_irq_src *source,
					unsigned type,
					enum amdgpu_interrupt_state state)
{
	uint32_t val = 0;

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		val |= VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK;

	WREG32_P(mmVCE_SYS_INT_EN, val, ~VCE_SYS_INT_EN__VCE_SYS_INT_TRAP_INTERRUPT_EN_MASK);
	return 0;
}

static int vce_v2_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("IH: VCE\n");
	switch (entry->src_data[0]) {
	case 0:
	case 1:
		amdgpu_fence_process(&adev->vce.ring[entry->src_data[0]]);
		break;
	default:
		DRM_ERROR("Unhandled interrupt: %d %d\n",
			  entry->src_id, entry->src_data[0]);
		break;
	}

	return 0;
}

static int vce_v2_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	bool sw_cg = false;

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE) {
		gate = true;
		sw_cg = true;
	}

	vce_v2_0_enable_mgcg(adev, gate, sw_cg);

	return 0;
}

static int vce_v2_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	/* This doesn't actually powergate the VCE block.
	 * That's done in the dpm code via the SMC.  This
	 * just re-inits the block as necessary.  The actual
	 * gating still happens in the dpm code.  We should
	 * revisit this when there is a cleaner line between
	 * the smc and the hw blocks
	 */
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		return vce_v2_0_stop(adev);
	else
		return vce_v2_0_start(adev);
}

static const struct amd_ip_funcs vce_v2_0_ip_funcs = {
	.name = "vce_v2_0",
	.early_init = vce_v2_0_early_init,
	.late_init = NULL,
	.sw_init = vce_v2_0_sw_init,
	.sw_fini = vce_v2_0_sw_fini,
	.hw_init = vce_v2_0_hw_init,
	.hw_fini = vce_v2_0_hw_fini,
	.suspend = vce_v2_0_suspend,
	.resume = vce_v2_0_resume,
	.is_idle = vce_v2_0_is_idle,
	.wait_for_idle = vce_v2_0_wait_for_idle,
	.soft_reset = vce_v2_0_soft_reset,
	.set_clockgating_state = vce_v2_0_set_clockgating_state,
	.set_powergating_state = vce_v2_0_set_powergating_state,
	.dump_ip_state = NULL,
	.print_ip_state = NULL,
};

static const struct amdgpu_ring_funcs vce_v2_0_ring_funcs = {
	.type = AMDGPU_RING_TYPE_VCE,
	.align_mask = 0xf,
	.nop = VCE_CMD_NO_OP,
	.support_64bit_ptrs = false,
	.no_user_fence = true,
	.get_rptr = vce_v2_0_ring_get_rptr,
	.get_wptr = vce_v2_0_ring_get_wptr,
	.set_wptr = vce_v2_0_ring_set_wptr,
	.parse_cs = amdgpu_vce_ring_parse_cs,
	.emit_frame_size = 6, /* amdgpu_vce_ring_emit_fence  x1 no user fence */
	.emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
	.emit_ib = amdgpu_vce_ring_emit_ib,
	.emit_fence = amdgpu_vce_ring_emit_fence,
	.test_ring = amdgpu_vce_ring_test_ring,
	.test_ib = amdgpu_vce_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.begin_use = amdgpu_vce_ring_begin_use,
	.end_use = amdgpu_vce_ring_end_use,
};

static void vce_v2_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->vce.num_rings; i++) {
		adev->vce.ring[i].funcs = &vce_v2_0_ring_funcs;
		adev->vce.ring[i].me = i;
	}
}

static const struct amdgpu_irq_src_funcs vce_v2_0_irq_funcs = {
	.set = vce_v2_0_set_interrupt_state,
	.process = vce_v2_0_process_interrupt,
};

static void vce_v2_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->vce.irq.num_types = 1;
	adev->vce.irq.funcs = &vce_v2_0_irq_funcs;
}

const struct amdgpu_ip_block_version vce_v2_0_ip_block =
{
		.type = AMD_IP_BLOCK_TYPE_VCE,
		.major = 2,
		.minor = 0,
		.rev = 0,
		.funcs = &vce_v2_0_ip_funcs,
};