1  /*
2   * Copyright 2008 Advanced Micro Devices, Inc.
3   * Copyright 2008 Red Hat Inc.
4   * Copyright 2009 Jerome Glisse.
5   *
6   * Permission is hereby granted, free of charge, to any person obtaining a
7   * copy of this software and associated documentation files (the "Software"),
8   * to deal in the Software without restriction, including without limitation
9   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10   * and/or sell copies of the Software, and to permit persons to whom the
11   * Software is furnished to do so, subject to the following conditions:
12   *
13   * The above copyright notice and this permission notice shall be included in
14   * all copies or substantial portions of the Software.
15   *
16   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22   * OTHER DEALINGS IN THE SOFTWARE.
23   *
24   * Authors: Dave Airlie
25   *          Alex Deucher
26   *          Jerome Glisse
27   */
28  
29  #include <linux/console.h>
30  #include <linux/efi.h>
31  #include <linux/pci.h>
32  #include <linux/pm_runtime.h>
33  #include <linux/slab.h>
34  #include <linux/vga_switcheroo.h>
35  #include <linux/vgaarb.h>
36  
37  #include <drm/drm_cache.h>
38  #include <drm/drm_crtc_helper.h>
39  #include <drm/drm_device.h>
40  #include <drm/drm_file.h>
41  #include <drm/drm_framebuffer.h>
42  #include <drm/drm_probe_helper.h>
43  #include <drm/radeon_drm.h>
44  
45  #include "radeon_device.h"
46  #include "radeon_reg.h"
47  #include "radeon.h"
48  #include "atom.h"
49  
50  static const char radeon_family_name[][16] = {
51  	"R100",
52  	"RV100",
53  	"RS100",
54  	"RV200",
55  	"RS200",
56  	"R200",
57  	"RV250",
58  	"RS300",
59  	"RV280",
60  	"R300",
61  	"R350",
62  	"RV350",
63  	"RV380",
64  	"R420",
65  	"R423",
66  	"RV410",
67  	"RS400",
68  	"RS480",
69  	"RS600",
70  	"RS690",
71  	"RS740",
72  	"RV515",
73  	"R520",
74  	"RV530",
75  	"RV560",
76  	"RV570",
77  	"R580",
78  	"R600",
79  	"RV610",
80  	"RV630",
81  	"RV670",
82  	"RV620",
83  	"RV635",
84  	"RS780",
85  	"RS880",
86  	"RV770",
87  	"RV730",
88  	"RV710",
89  	"RV740",
90  	"CEDAR",
91  	"REDWOOD",
92  	"JUNIPER",
93  	"CYPRESS",
94  	"HEMLOCK",
95  	"PALM",
96  	"SUMO",
97  	"SUMO2",
98  	"BARTS",
99  	"TURKS",
100  	"CAICOS",
101  	"CAYMAN",
102  	"ARUBA",
103  	"TAHITI",
104  	"PITCAIRN",
105  	"VERDE",
106  	"OLAND",
107  	"HAINAN",
108  	"BONAIRE",
109  	"KAVERI",
110  	"KABINI",
111  	"HAWAII",
112  	"MULLINS",
113  	"LAST",
114  };
115  
116  #if defined(CONFIG_VGA_SWITCHEROO)
117  bool radeon_has_atpx_dgpu_power_cntl(void);
118  bool radeon_is_atpx_hybrid(void);
119  #else
120  static inline bool radeon_has_atpx_dgpu_power_cntl(void) { return false; }
121  static inline bool radeon_is_atpx_hybrid(void) { return false; }
122  #endif
123  
124  #define RADEON_PX_QUIRK_DISABLE_PX  (1 << 0)
125  
126  struct radeon_px_quirk {
127  	u32 chip_vendor;
128  	u32 chip_device;
129  	u32 subsys_vendor;
130  	u32 subsys_device;
131  	u32 px_quirk_flags;
132  };
133  
134  static struct radeon_px_quirk radeon_px_quirk_list[] = {
135  	/* Acer aspire 5560g (CPU: AMD A4-3305M; GPU: AMD Radeon HD 6480g + 7470m)
136  	 * https://bugzilla.kernel.org/show_bug.cgi?id=74551
137  	 */
138  	{ PCI_VENDOR_ID_ATI, 0x6760, 0x1025, 0x0672, RADEON_PX_QUIRK_DISABLE_PX },
139  	/* Asus K73TA laptop with AMD A6-3400M APU and Radeon 6550 GPU
140  	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
141  	 */
142  	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x108c, RADEON_PX_QUIRK_DISABLE_PX },
143  	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
144  	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381
145  	 */
146  	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
147  	/* Asus K53TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
148  	 * https://bugs.freedesktop.org/show_bug.cgi?id=101491
149  	 */
150  	{ PCI_VENDOR_ID_ATI, 0x6741, 0x1043, 0x2122, RADEON_PX_QUIRK_DISABLE_PX },
151  	/* Asus K73TK laptop with AMD A6-3420M APU and Radeon 7670m GPU
152  	 * https://bugzilla.kernel.org/show_bug.cgi?id=51381#c52
153  	 */
154  	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1043, 0x2123, RADEON_PX_QUIRK_DISABLE_PX },
155  	{ 0, 0, 0, 0, 0 },
156  };
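
/*
 * A new entry (hypothetical IDs shown) would be added before the terminating
 * all-zero sentinel, matching on PCI vendor/device and subsystem
 * vendor/device:
 *
 *	{ PCI_VENDOR_ID_ATI, 0x6900, 0x1043, 0x1234, RADEON_PX_QUIRK_DISABLE_PX },
 */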
157  
158  bool radeon_is_px(struct drm_device *dev)
159  {
160  	struct radeon_device *rdev = dev->dev_private;
161  
162  	if (rdev->flags & RADEON_IS_PX)
163  		return true;
164  	return false;
165  }
166  
167  static void radeon_device_handle_px_quirks(struct radeon_device *rdev)
168  {
169  	struct radeon_px_quirk *p = radeon_px_quirk_list;
170  
171  	/* Apply PX quirks */
172  	while (p && p->chip_device != 0) {
173  		if (rdev->pdev->vendor == p->chip_vendor &&
174  		    rdev->pdev->device == p->chip_device &&
175  		    rdev->pdev->subsystem_vendor == p->subsys_vendor &&
176  		    rdev->pdev->subsystem_device == p->subsys_device) {
177  			rdev->px_quirk_flags = p->px_quirk_flags;
178  			break;
179  		}
180  		++p;
181  	}
182  
183  	if (rdev->px_quirk_flags & RADEON_PX_QUIRK_DISABLE_PX)
184  		rdev->flags &= ~RADEON_IS_PX;
185  
186  	/* disable PX if the system doesn't support dGPU power control or hybrid gfx */
187  	if (!radeon_is_atpx_hybrid() &&
188  	    !radeon_has_atpx_dgpu_power_cntl())
189  		rdev->flags &= ~RADEON_IS_PX;
190  }
191  
192  /**
193   * radeon_program_register_sequence - program an array of registers.
194   *
195   * @rdev: radeon_device pointer
196   * @registers: pointer to the register array
197   * @array_size: size of the register array
198   *
199   * Programs an array of registers with AND and OR masks.
200   * This is a helper for setting golden registers.
201   */
202  void radeon_program_register_sequence(struct radeon_device *rdev,
203  				      const u32 *registers,
204  				      const u32 array_size)
205  {
206  	u32 tmp, reg, and_mask, or_mask;
207  	int i;
208  
209  	if (array_size % 3)
210  		return;
211  
212  	for (i = 0; i < array_size; i += 3) {
213  		reg = registers[i + 0];
214  		and_mask = registers[i + 1];
215  		or_mask = registers[i + 2];
216  
217  		if (and_mask == 0xffffffff) {
218  			tmp = or_mask;
219  		} else {
220  			tmp = RREG32(reg);
221  			tmp &= ~and_mask;
222  			tmp |= or_mask;
223  		}
224  		WREG32(reg, tmp);
225  	}
226  }
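
/*
 * Illustrative sketch (hypothetical register values): a "golden register"
 * table is a flat array of { reg, and_mask, or_mask } triplets.  Bits set in
 * and_mask are cleared from the current register value before or_mask is
 * OR'ed in; an and_mask of 0xffffffff skips the read and writes or_mask
 * directly.
 *
 *	static const u32 example_golden_registers[] = {
 *		0x9a10, 0x00010000, 0x00018208,
 *		0x3f90, 0xffffffff, 0xff000fff,
 *	};
 *
 *	radeon_program_register_sequence(rdev, example_golden_registers,
 *					 (u32)ARRAY_SIZE(example_golden_registers));
 */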
227  
228  void radeon_pci_config_reset(struct radeon_device *rdev)
229  {
230  	pci_write_config_dword(rdev->pdev, 0x7c, RADEON_ASIC_RESET_DATA);
231  }
232  
233  /**
234   * radeon_surface_init - Clear GPU surface registers.
235   *
236   * @rdev: radeon_device pointer
237   *
238   * Clear GPU surface registers (r1xx-r5xx).
239   */
240  void radeon_surface_init(struct radeon_device *rdev)
241  {
242  	/* FIXME: check this out */
243  	if (rdev->family < CHIP_R600) {
244  		int i;
245  
246  		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
247  			if (rdev->surface_regs[i].bo)
248  				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
249  			else
250  				radeon_clear_surface_reg(rdev, i);
251  		}
252  		/* enable surfaces */
253  		WREG32(RADEON_SURFACE_CNTL, 0);
254  	}
255  }
256  
257  /*
258   * GPU scratch registers helpers function.
259   */
260  /**
261   * radeon_scratch_init - Init scratch register driver information.
262   *
263   * @rdev: radeon_device pointer
264   *
265   * Init CP scratch register driver information (r1xx-r5xx)
266   */
267  void radeon_scratch_init(struct radeon_device *rdev)
268  {
269  	int i;
270  
271  	/* FIXME: check this out */
272  	if (rdev->family < CHIP_R300) {
273  		rdev->scratch.num_reg = 5;
274  	} else {
275  		rdev->scratch.num_reg = 7;
276  	}
277  	rdev->scratch.reg_base = RADEON_SCRATCH_REG0;
278  	for (i = 0; i < rdev->scratch.num_reg; i++) {
279  		rdev->scratch.free[i] = true;
280  		rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
281  	}
282  }
283  
284  /**
285   * radeon_scratch_get - Allocate a scratch register
286   *
287   * @rdev: radeon_device pointer
288   * @reg: scratch register mmio offset
289   *
290   * Allocate a CP scratch register for use by the driver (all asics).
291   * Returns 0 on success or -EINVAL on failure.
292   */
293  int radeon_scratch_get(struct radeon_device *rdev, uint32_t *reg)
294  {
295  	int i;
296  
297  	for (i = 0; i < rdev->scratch.num_reg; i++) {
298  		if (rdev->scratch.free[i]) {
299  			rdev->scratch.free[i] = false;
300  			*reg = rdev->scratch.reg[i];
301  			return 0;
302  		}
303  	}
304  	return -EINVAL;
305  }
306  
307  /**
308   * radeon_scratch_free - Free a scratch register
309   *
310   * @rdev: radeon_device pointer
311   * @reg: scratch register mmio offset
312   *
313   * Free a CP scratch register allocated for use by the driver (all asics)
314   */
315  void radeon_scratch_free(struct radeon_device *rdev, uint32_t reg)
316  {
317  	int i;
318  
319  	for (i = 0; i < rdev->scratch.num_reg; i++) {
320  		if (rdev->scratch.reg[i] == reg) {
321  			rdev->scratch.free[i] = true;
322  			return;
323  		}
324  	}
325  }
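
/*
 * Typical usage of the scratch helpers (hypothetical caller): the register is
 * handed out by MMIO offset, so it can be written and read back directly, and
 * must be returned with radeon_scratch_free() when no longer needed:
 *
 *	uint32_t scratch;
 *
 *	if (radeon_scratch_get(rdev, &scratch) == 0) {
 *		WREG32(scratch, 0xCAFEDEAD);
 *		...
 *		radeon_scratch_free(rdev, scratch);
 *	}
 */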
326  
327  /*
328   * GPU doorbell aperture helpers function.
329   */
330  /**
331   * radeon_doorbell_init - Init doorbell driver information.
332   *
333   * @rdev: radeon_device pointer
334   *
335   * Init doorbell driver information (CIK)
336   * Returns 0 on success, error on failure.
337   */
338  static int radeon_doorbell_init(struct radeon_device *rdev)
339  {
340  	/* doorbell bar mapping */
341  	rdev->doorbell.base = pci_resource_start(rdev->pdev, 2);
342  	rdev->doorbell.size = pci_resource_len(rdev->pdev, 2);
343  
344  	rdev->doorbell.num_doorbells = min_t(u32, rdev->doorbell.size / sizeof(u32), RADEON_MAX_DOORBELLS);
345  	if (rdev->doorbell.num_doorbells == 0)
346  		return -EINVAL;
347  
348  	rdev->doorbell.ptr = ioremap(rdev->doorbell.base, rdev->doorbell.num_doorbells * sizeof(u32));
349  	if (rdev->doorbell.ptr == NULL) {
350  		return -ENOMEM;
351  	}
352  	DRM_INFO("doorbell mmio base: 0x%08X\n", (uint32_t)rdev->doorbell.base);
353  	DRM_INFO("doorbell mmio size: %u\n", (unsigned)rdev->doorbell.size);
354  
355  	memset(&rdev->doorbell.used, 0, sizeof(rdev->doorbell.used));
356  
357  	return 0;
358  }
359  
360  /**
361   * radeon_doorbell_fini - Tear down doorbell driver information.
362   *
363   * @rdev: radeon_device pointer
364   *
365   * Tear down doorbell driver information (CIK)
366   */
367  static void radeon_doorbell_fini(struct radeon_device *rdev)
368  {
369  	iounmap(rdev->doorbell.ptr);
370  	rdev->doorbell.ptr = NULL;
371  }
372  
373  /**
374   * radeon_doorbell_get - Allocate a doorbell entry
375   *
376   * @rdev: radeon_device pointer
377   * @doorbell: doorbell index
378   *
379   * Allocate a doorbell for use by the driver (all asics).
380   * Returns 0 on success or -EINVAL on failure.
381   */
382  int radeon_doorbell_get(struct radeon_device *rdev, u32 *doorbell)
383  {
384  	unsigned long offset = find_first_zero_bit(rdev->doorbell.used, rdev->doorbell.num_doorbells);
385  	if (offset < rdev->doorbell.num_doorbells) {
386  		__set_bit(offset, rdev->doorbell.used);
387  		*doorbell = offset;
388  		return 0;
389  	} else {
390  		return -EINVAL;
391  	}
392  }
393  
394  /**
395   * radeon_doorbell_free - Free a doorbell entry
396   *
397   * @rdev: radeon_device pointer
398   * @doorbell: doorbell index
399   *
400   * Free a doorbell allocated for use by the driver (all asics)
401   */
402  void radeon_doorbell_free(struct radeon_device *rdev, u32 doorbell)
403  {
404  	if (doorbell < rdev->doorbell.num_doorbells)
405  		__clear_bit(doorbell, rdev->doorbell.used);
406  }
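
/*
 * Sketch of the allocation pattern (hypothetical caller): the returned index
 * selects one 32-bit slot in the doorbell aperture and is released with
 * radeon_doorbell_free() when the ring using it is torn down:
 *
 *	u32 index;
 *
 *	if (radeon_doorbell_get(rdev, &index) == 0) {
 *		ring->doorbell_index = index;
 *		...
 *		radeon_doorbell_free(rdev, index);
 *	}
 */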
407  
408  /*
409   * radeon_wb_*()
410   * Writeback is the method by which the GPU updates special pages
411   * in memory with the status of certain GPU events (fences, ring pointers,
412   * etc.).
413   */
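
/*
 * For example (illustrative, assuming writeback is enabled), a ring's read
 * pointer can be fetched from its writeback slot instead of doing a slow MMIO
 * register read:
 *
 *	u32 rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs / 4]);
 */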
414  
415  /**
416   * radeon_wb_disable - Disable Writeback
417   *
418   * @rdev: radeon_device pointer
419   *
420   * Disables Writeback (all asics).  Used for suspend.
421   */
422  void radeon_wb_disable(struct radeon_device *rdev)
423  {
424  	rdev->wb.enabled = false;
425  }
426  
427  /**
428   * radeon_wb_fini - Disable Writeback and free memory
429   *
430   * @rdev: radeon_device pointer
431   *
432   * Disables Writeback and frees the Writeback memory (all asics).
433   * Used at driver shutdown.
434   */
435  void radeon_wb_fini(struct radeon_device *rdev)
436  {
437  	radeon_wb_disable(rdev);
438  	if (rdev->wb.wb_obj) {
439  		if (!radeon_bo_reserve(rdev->wb.wb_obj, false)) {
440  			radeon_bo_kunmap(rdev->wb.wb_obj);
441  			radeon_bo_unpin(rdev->wb.wb_obj);
442  			radeon_bo_unreserve(rdev->wb.wb_obj);
443  		}
444  		radeon_bo_unref(&rdev->wb.wb_obj);
445  		rdev->wb.wb = NULL;
446  		rdev->wb.wb_obj = NULL;
447  	}
448  }
449  
450  /**
451   * radeon_wb_init - Init Writeback driver info and allocate memory
452   *
453   * @rdev: radeon_device pointer
454   *
455   * Initializes Writeback and allocates the Writeback memory (all asics).
456   * Used at driver startup.
457   * Returns 0 on success or a negative error code on failure.
458   */
459  int radeon_wb_init(struct radeon_device *rdev)
460  {
461  	int r;
462  
463  	if (rdev->wb.wb_obj == NULL) {
464  		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
465  				     RADEON_GEM_DOMAIN_GTT, 0, NULL, NULL,
466  				     &rdev->wb.wb_obj);
467  		if (r) {
468  			dev_warn(rdev->dev, "(%d) create WB bo failed\n", r);
469  			return r;
470  		}
471  		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
472  		if (unlikely(r != 0)) {
473  			radeon_wb_fini(rdev);
474  			return r;
475  		}
476  		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
477  				&rdev->wb.gpu_addr);
478  		if (r) {
479  			radeon_bo_unreserve(rdev->wb.wb_obj);
480  			dev_warn(rdev->dev, "(%d) pin WB bo failed\n", r);
481  			radeon_wb_fini(rdev);
482  			return r;
483  		}
484  		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
485  		radeon_bo_unreserve(rdev->wb.wb_obj);
486  		if (r) {
487  			dev_warn(rdev->dev, "(%d) map WB bo failed\n", r);
488  			radeon_wb_fini(rdev);
489  			return r;
490  		}
491  	}
492  
493  	/* clear wb memory */
494  	memset((char *)rdev->wb.wb, 0, RADEON_GPU_PAGE_SIZE);
495  	/* disable event_write fences */
496  	rdev->wb.use_event = false;
497  	/* disabled via module param */
498  	if (radeon_no_wb == 1) {
499  		rdev->wb.enabled = false;
500  	} else {
501  		if (rdev->flags & RADEON_IS_AGP) {
502  			/* often unreliable on AGP */
503  			rdev->wb.enabled = false;
504  		} else if (rdev->family < CHIP_R300) {
505  			/* often unreliable on pre-r300 */
506  			rdev->wb.enabled = false;
507  		} else {
508  			rdev->wb.enabled = true;
509  			/* event_write fences are only available on r600+ */
510  			if (rdev->family >= CHIP_R600) {
511  				rdev->wb.use_event = true;
512  			}
513  		}
514  	}
515  	/* always use writeback/events on NI, APUs */
516  	if (rdev->family >= CHIP_PALM) {
517  		rdev->wb.enabled = true;
518  		rdev->wb.use_event = true;
519  	}
520  
521  	dev_info(rdev->dev, "WB %sabled\n", rdev->wb.enabled ? "en" : "dis");
522  
523  	return 0;
524  }
525  
526  /**
527   * radeon_vram_location - try to find VRAM location
528   * @rdev: radeon device structure holding all necessary information
529   * @mc: memory controller structure holding memory information
530   * @base: base address at which to put VRAM
531   *
532   * Function will try to place VRAM at the base address provided
533   * as a parameter (which is so far either the PCI aperture address or,
534   * for IGPs, the TOM base address).
535   *
536   * If there is not enough space to fit the non-visible VRAM in the 32-bit
537   * address space then we limit the VRAM size to the aperture.
538   *
539   * If we are using AGP and if the AGP aperture doesn't allow us to have
540   * room for all the VRAM then we restrict the VRAM to the PCI aperture
541   * size and print a warning.
542   *
543   * This function never fails; the worst case is limiting VRAM.
544   *
545   * Note: GTT start, end, and size should be initialized before calling this
546   * function on AGP platforms.
547   *
548   * Note 1: We don't explicitly enforce VRAM start to be aligned on VRAM size,
549   * this shouldn't be a problem as we are using the PCI aperture as a reference.
550   * Otherwise this would be needed for rv280, all r3xx, and all r4xx, but
551   * not IGP.
552   *
553   * Note 2: we use mc_vram_size because on some boards we need to program the MC
554   * to cover the whole aperture even if the VRAM size is smaller than the
555   * aperture size (Novell bug 204882, along with lots of Ubuntu ones).
556   *
557   * Note 3: when limiting VRAM it's safe to overwrite real_vram_size because
558   * we are not in the case where real_vram_size is smaller than mc_vram_size
559   * (i.e. not affected by the bogus hardware of Novell bug 204882, along with
560   * lots of Ubuntu ones).
561   *
562   * Note 4: the IGP TOM address should be the same as the aperture address; we
563   * don't explicitly check for that, though.
564   *
565   * FIXME: when reducing VRAM size, align the new size to a power of 2.
566   */
567  void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
568  {
569  	uint64_t limit = (uint64_t)radeon_vram_limit << 20;
570  
571  	mc->vram_start = base;
572  	if (mc->mc_vram_size > (rdev->mc.mc_mask - base + 1)) {
573  		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
574  		mc->real_vram_size = mc->aper_size;
575  		mc->mc_vram_size = mc->aper_size;
576  	}
577  	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
578  	if (rdev->flags & RADEON_IS_AGP && mc->vram_end > mc->gtt_start && mc->vram_start <= mc->gtt_end) {
579  		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
580  		mc->real_vram_size = mc->aper_size;
581  		mc->mc_vram_size = mc->aper_size;
582  	}
583  	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
584  	if (limit && limit < mc->real_vram_size)
585  		mc->real_vram_size = limit;
586  	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
587  			mc->mc_vram_size >> 20, mc->vram_start,
588  			mc->vram_end, mc->real_vram_size >> 20);
589  }
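
/*
 * Worked example (illustrative numbers): with a 32-bit mc_mask, base =
 * 0xC0000000, mc_vram_size = 2GB and aper_size = 256MB, the VRAM does not fit
 * in the remaining 1GB of MC address space, so mc_vram_size and real_vram_size
 * are both clamped to the 256MB aperture, giving vram_start = 0xC0000000 and
 * vram_end = 0xCFFFFFFF.
 */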
590  
591  /**
592   * radeon_gtt_location - try to find GTT location
593   * @rdev: radeon device structure holding all necessary information
594   * @mc: memory controller structure holding memory information
595   *
596   * Function will try to place GTT before or after VRAM.
597   *
598   * If the GTT size is bigger than the space left, then we adjust the GTT size.
599   * Thus the function never fails.
600   *
601   * FIXME: when reducing GTT size, align the new size to a power of 2.
602   */
603  void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
604  {
605  	u64 size_af, size_bf;
606  
607  	size_af = ((rdev->mc.mc_mask - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
608  	size_bf = mc->vram_start & ~mc->gtt_base_align;
609  	if (size_bf > size_af) {
610  		if (mc->gtt_size > size_bf) {
611  			dev_warn(rdev->dev, "limiting GTT\n");
612  			mc->gtt_size = size_bf;
613  		}
614  		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
615  	} else {
616  		if (mc->gtt_size > size_af) {
617  			dev_warn(rdev->dev, "limiting GTT\n");
618  			mc->gtt_size = size_af;
619  		}
620  		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
621  	}
622  	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
623  	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
624  			mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
625  }
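
/*
 * Worked example (illustrative numbers): with VRAM at 0x00000000-0x0FFFFFFF
 * (256MB), gtt_base_align = 0 and a requested gtt_size of 512MB, the space
 * after VRAM is larger than the space before it, so the GTT is placed at
 * gtt_start = 0x10000000 with gtt_end = 0x2FFFFFFF.
 */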
626  
627  /*
628   * GPU helpers function.
629   */
630  
631  /*
632   * radeon_device_is_virtual - check if we are running in a virtual environment
633   *
634   * Check if the asic has been passed through to a VM (all asics).
635   * Used at driver startup.
636   * Returns true if virtual or false if not.
637   */
638  bool radeon_device_is_virtual(void)
639  {
640  #ifdef CONFIG_X86
641  	return boot_cpu_has(X86_FEATURE_HYPERVISOR);
642  #else
643  	return false;
644  #endif
645  }
646  
647  /**
648   * radeon_card_posted - check if the hw has already been initialized
649   *
650   * @rdev: radeon_device pointer
651   *
652   * Check if the asic has been initialized (all asics).
653   * Used at driver startup.
654   * Returns true if initialized or false if not.
655   */
656  bool radeon_card_posted(struct radeon_device *rdev)
657  {
658  	uint32_t reg;
659  
660  	/* for pass through, always force asic_init for CI */
661  	if (rdev->family >= CHIP_BONAIRE &&
662  	    radeon_device_is_virtual())
663  		return false;
664  
665  	/* required for EFI mode on macbook2,1 which uses an r5xx asic */
666  	if (efi_enabled(EFI_BOOT) &&
667  	    (rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
668  	    (rdev->family < CHIP_R600))
669  		return false;
670  
671  	if (ASIC_IS_NODCE(rdev))
672  		goto check_memsize;
673  
674  	/* first check CRTCs */
675  	if (ASIC_IS_DCE4(rdev)) {
676  		reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) |
677  			RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
678  			if (rdev->num_crtc >= 4) {
679  				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) |
680  					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
681  			}
682  			if (rdev->num_crtc >= 6) {
683  				reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) |
684  					RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
685  			}
686  		if (reg & EVERGREEN_CRTC_MASTER_EN)
687  			return true;
688  	} else if (ASIC_IS_AVIVO(rdev)) {
689  		reg = RREG32(AVIVO_D1CRTC_CONTROL) |
690  		      RREG32(AVIVO_D2CRTC_CONTROL);
691  		if (reg & AVIVO_CRTC_EN) {
692  			return true;
693  		}
694  	} else {
695  		reg = RREG32(RADEON_CRTC_GEN_CNTL) |
696  		      RREG32(RADEON_CRTC2_GEN_CNTL);
697  		if (reg & RADEON_CRTC_EN) {
698  			return true;
699  		}
700  	}
701  
702  check_memsize:
703  	/* then check MEM_SIZE, in case the crtcs are off */
704  	if (rdev->family >= CHIP_R600)
705  		reg = RREG32(R600_CONFIG_MEMSIZE);
706  	else
707  		reg = RREG32(RADEON_CONFIG_MEMSIZE);
708  
709  	if (reg)
710  		return true;
711  
712  	return false;
713  
714  }
715  
716  /**
717   * radeon_update_bandwidth_info - update display bandwidth params
718   *
719   * @rdev: radeon_device pointer
720   *
721   * Used when sclk/mclk are switched or display modes are set.
722   * params are used to calculate display watermarks (all asics)
723   */
724  void radeon_update_bandwidth_info(struct radeon_device *rdev)
725  {
726  	fixed20_12 a;
727  	u32 sclk = rdev->pm.current_sclk;
728  	u32 mclk = rdev->pm.current_mclk;
729  
730  	/* sclk/mclk in Mhz */
731  	a.full = dfixed_const(100);
732  	rdev->pm.sclk.full = dfixed_const(sclk);
733  	rdev->pm.sclk.full = dfixed_div(rdev->pm.sclk, a);
734  	rdev->pm.mclk.full = dfixed_const(mclk);
735  	rdev->pm.mclk.full = dfixed_div(rdev->pm.mclk, a);
736  
737  	if (rdev->flags & RADEON_IS_IGP) {
738  		a.full = dfixed_const(16);
739  		/* core_bandwidth = sclk(Mhz) * 16 */
740  		rdev->pm.core_bandwidth.full = dfixed_div(rdev->pm.sclk, a);
741  	}
742  }
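
/*
 * Example (illustrative; current_sclk/current_mclk are assumed to be reported
 * in 10 kHz units as elsewhere in the pm code): current_sclk = 60000 means
 * 600 MHz, so after the divide by 100 above rdev->pm.sclk holds the
 * fixed-point (fixed20_12) value 600.0.
 */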
743  
744  /**
745   * radeon_boot_test_post_card - check and possibly initialize the hw
746   *
747   * @rdev: radeon_device pointer
748   *
749   * Check if the asic is initialized and if not, attempt to initialize
750   * it (all asics).
751   * Returns true if initialized or false if not.
752   */
753  bool radeon_boot_test_post_card(struct radeon_device *rdev)
754  {
755  	if (radeon_card_posted(rdev))
756  		return true;
757  
758  	if (rdev->bios) {
759  		DRM_INFO("GPU not posted. posting now...\n");
760  		if (rdev->is_atom_bios)
761  			atom_asic_init(rdev->mode_info.atom_context);
762  		else
763  			radeon_combios_asic_init(rdev_to_drm(rdev));
764  		return true;
765  	} else {
766  		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
767  		return false;
768  	}
769  }
770  
771  /**
772   * radeon_dummy_page_init - init dummy page used by the driver
773   *
774   * @rdev: radeon_device pointer
775   *
776   * Allocate the dummy page used by the driver (all asics).
777   * This dummy page is used by the driver as a filler for gart entries
778   * when pages are taken out of the GART
779   * Returns 0 on success, -ENOMEM on failure.
780   */
781  int radeon_dummy_page_init(struct radeon_device *rdev)
782  {
783  	if (rdev->dummy_page.page)
784  		return 0;
785  	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
786  	if (rdev->dummy_page.page == NULL)
787  		return -ENOMEM;
788  	rdev->dummy_page.addr = dma_map_page(&rdev->pdev->dev, rdev->dummy_page.page,
789  					0, PAGE_SIZE, DMA_BIDIRECTIONAL);
790  	if (dma_mapping_error(&rdev->pdev->dev, rdev->dummy_page.addr)) {
791  		dev_err(&rdev->pdev->dev, "Failed to DMA MAP the dummy page\n");
792  		__free_page(rdev->dummy_page.page);
793  		rdev->dummy_page.page = NULL;
794  		return -ENOMEM;
795  	}
796  	rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
797  							    RADEON_GART_PAGE_DUMMY);
798  	return 0;
799  }
800  
801  /**
802   * radeon_dummy_page_fini - free dummy page used by the driver
803   *
804   * @rdev: radeon_device pointer
805   *
806   * Frees the dummy page used by the driver (all asics).
807   */
808  void radeon_dummy_page_fini(struct radeon_device *rdev)
809  {
810  	if (rdev->dummy_page.page == NULL)
811  		return;
812  	dma_unmap_page(&rdev->pdev->dev, rdev->dummy_page.addr, PAGE_SIZE,
813  		       DMA_BIDIRECTIONAL);
814  	__free_page(rdev->dummy_page.page);
815  	rdev->dummy_page.page = NULL;
816  }
817  
818  
819  /* ATOM accessor methods */
820  /*
821   * ATOM is an interpreted byte code stored in tables in the vbios.  The
822   * driver registers callbacks to access registers and the interpreter
823   * in the driver parses the tables and executes then to program specific
824   * actions (set display modes, asic init, etc.).  See radeon_atombios.c,
825   * atombios.h, and atom.c
826   */
827  
828  /**
829   * cail_pll_read - read PLL register
830   *
831   * @info: atom card_info pointer
832   * @reg: PLL register offset
833   *
834   * Provides a PLL register accessor for the atom interpreter (r4xx+).
835   * Returns the value of the PLL register.
836   */
837  static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
838  {
839  	struct radeon_device *rdev = info->dev->dev_private;
840  	uint32_t r;
841  
842  	r = rdev->pll_rreg(rdev, reg);
843  	return r;
844  }
845  
846  /**
847   * cail_pll_write - write PLL register
848   *
849   * @info: atom card_info pointer
850   * @reg: PLL register offset
851   * @val: value to write to the pll register
852   *
853   * Provides a PLL register accessor for the atom interpreter (r4xx+).
854   */
855  static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
856  {
857  	struct radeon_device *rdev = info->dev->dev_private;
858  
859  	rdev->pll_wreg(rdev, reg, val);
860  }
861  
862  /**
863   * cail_mc_read - read MC (Memory Controller) register
864   *
865   * @info: atom card_info pointer
866   * @reg: MC register offset
867   *
868   * Provides an MC register accessor for the atom interpreter (r4xx+).
869   * Returns the value of the MC register.
870   */
871  static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
872  {
873  	struct radeon_device *rdev = info->dev->dev_private;
874  	uint32_t r;
875  
876  	r = rdev->mc_rreg(rdev, reg);
877  	return r;
878  }
879  
880  /**
881   * cail_mc_write - write MC (Memory Controller) register
882   *
883   * @info: atom card_info pointer
884   * @reg: MC register offset
885   * @val: value to write to the MC register
886   *
887   * Provides a MC register accessor for the atom interpreter (r4xx+).
888   */
889  static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
890  {
891  	struct radeon_device *rdev = info->dev->dev_private;
892  
893  	rdev->mc_wreg(rdev, reg, val);
894  }
895  
896  /**
897   * cail_reg_write - write MMIO register
898   *
899   * @info: atom card_info pointer
900   * @reg: MMIO register offset
901   * @val: value to write to the MMIO register
902   *
903   * Provides an MMIO register accessor for the atom interpreter (r4xx+).
904   */
905  static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
906  {
907  	struct radeon_device *rdev = info->dev->dev_private;
908  
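	/* ATOM register operands are dword indices, hence the *4 to form a byte offset */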
909  	WREG32(reg*4, val);
910  }
911  
912  /**
913   * cail_reg_read - read MMIO register
914   *
915   * @info: atom card_info pointer
916   * @reg: MMIO register offset
917   *
918   * Provides an MMIO register accessor for the atom interpreter (r4xx+).
919   * Returns the value of the MMIO register.
920   */
921  static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
922  {
923  	struct radeon_device *rdev = info->dev->dev_private;
924  	uint32_t r;
925  
926  	r = RREG32(reg*4);
927  	return r;
928  }
929  
930  /**
931   * cail_ioreg_write - write IO register
932   *
933   * @info: atom card_info pointer
934   * @reg: IO register offset
935   * @val: value to write to the IO register
936   *
937   * Provides an IO register accessor for the atom interpreter (r4xx+).
938   */
939  static void cail_ioreg_write(struct card_info *info, uint32_t reg, uint32_t val)
940  {
941  	struct radeon_device *rdev = info->dev->dev_private;
942  
943  	WREG32_IO(reg*4, val);
944  }
945  
946  /**
947   * cail_ioreg_read - read IO register
948   *
949   * @info: atom card_info pointer
950   * @reg: IO register offset
951   *
952   * Provides an IO register accessor for the atom interpreter (r4xx+).
953   * Returns the value of the IO register.
954   */
955  static uint32_t cail_ioreg_read(struct card_info *info, uint32_t reg)
956  {
957  	struct radeon_device *rdev = info->dev->dev_private;
958  	uint32_t r;
959  
960  	r = RREG32_IO(reg*4);
961  	return r;
962  }
963  
964  /**
965   * radeon_atombios_init - init the driver info and callbacks for atombios
966   *
967   * @rdev: radeon_device pointer
968   *
969   * Initializes the driver info and register access callbacks for the
970   * ATOM interpreter (r4xx+).
971   * Returns 0 on success, -ENOMEM on failure.
972   * Called at driver startup.
973   */
974  int radeon_atombios_init(struct radeon_device *rdev)
975  {
976  	struct card_info *atom_card_info =
977  	    kzalloc(sizeof(struct card_info), GFP_KERNEL);
978  
979  	if (!atom_card_info)
980  		return -ENOMEM;
981  
982  	rdev->mode_info.atom_card_info = atom_card_info;
983  	atom_card_info->dev = rdev_to_drm(rdev);
984  	atom_card_info->reg_read = cail_reg_read;
985  	atom_card_info->reg_write = cail_reg_write;
986  	/* needed for iio ops */
987  	if (rdev->rio_mem) {
988  		atom_card_info->ioreg_read = cail_ioreg_read;
989  		atom_card_info->ioreg_write = cail_ioreg_write;
990  	} else {
991  		DRM_ERROR("Unable to find PCI I/O BAR; using MMIO for ATOM IIO\n");
992  		atom_card_info->ioreg_read = cail_reg_read;
993  		atom_card_info->ioreg_write = cail_reg_write;
994  	}
995  	atom_card_info->mc_read = cail_mc_read;
996  	atom_card_info->mc_write = cail_mc_write;
997  	atom_card_info->pll_read = cail_pll_read;
998  	atom_card_info->pll_write = cail_pll_write;
999  
1000  	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
1001  	if (!rdev->mode_info.atom_context) {
1002  		radeon_atombios_fini(rdev);
1003  		return -ENOMEM;
1004  	}
1005  
1006  	mutex_init(&rdev->mode_info.atom_context->mutex);
1007  	mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
1008  	radeon_atom_initialize_bios_scratch_regs(rdev_to_drm(rdev));
1009  	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
1010  	return 0;
1011  }
1012  
1013  /**
1014   * radeon_atombios_fini - free the driver info and callbacks for atombios
1015   *
1016   * @rdev: radeon_device pointer
1017   *
1018   * Frees the driver info and register access callbacks for the ATOM
1019   * interpreter (r4xx+).
1020   * Called at driver shutdown.
1021   */
1022  void radeon_atombios_fini(struct radeon_device *rdev)
1023  {
1024  	if (rdev->mode_info.atom_context) {
1025  		kfree(rdev->mode_info.atom_context->scratch);
1026  		kfree(rdev->mode_info.atom_context->iio);
1027  	}
1028  	kfree(rdev->mode_info.atom_context);
1029  	rdev->mode_info.atom_context = NULL;
1030  	kfree(rdev->mode_info.atom_card_info);
1031  	rdev->mode_info.atom_card_info = NULL;
1032  }
1033  
1034  /* COMBIOS */
1035  /*
1036   * COMBIOS is the bios format prior to ATOM. It provides
1037   * command tables similar to ATOM, but doesn't have a unified
1038   * parser.  See radeon_combios.c
1039   */
1040  
1041  /**
1042   * radeon_combios_init - init the driver info for combios
1043   *
1044   * @rdev: radeon_device pointer
1045   *
1046   * Initializes the driver info for combios (r1xx-r3xx).
1047   * Returns 0 on success.
1048   * Called at driver startup.
1049   */
1050  int radeon_combios_init(struct radeon_device *rdev)
1051  {
1052  	radeon_combios_initialize_bios_scratch_regs(rdev_to_drm(rdev));
1053  	return 0;
1054  }
1055  
1056  /**
1057   * radeon_combios_fini - free the driver info for combios
1058   *
1059   * @rdev: radeon_device pointer
1060   *
1061   * Frees the driver info for combios (r1xx-r3xx).
1062   * Called at driver shutdown.
1063   */
1064  void radeon_combios_fini(struct radeon_device *rdev)
1065  {
1066  }
1067  
1068  /* if we get transitioned to only one device, take VGA back */
1069  /**
1070   * radeon_vga_set_decode - enable/disable vga decode
1071   *
1072   * @pdev: PCI device
1073   * @state: enable/disable vga decode
1074   *
1075   * Enable/disable vga decode (all asics).
1076   * Returns VGA resource flags.
1077   */
1078  static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state)
1079  {
1080  	struct drm_device *dev = pci_get_drvdata(pdev);
1081  	struct radeon_device *rdev = dev->dev_private;
1082  	radeon_vga_set_state(rdev, state);
1083  	if (state)
1084  		return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
1085  		       VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1086  	else
1087  		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
1088  }
1089  
1090  /**
1091   * radeon_gart_size_auto - Determine a sensible default GART size
1092   *                         according to ASIC family.
1093   *
1094   * @family: ASIC family name
1095   */
1096  static int radeon_gart_size_auto(enum radeon_family family)
1097  {
1098  	/* default to a larger gart size on newer asics */
1099  	if (family >= CHIP_TAHITI)
1100  		return 2048;
1101  	else if (family >= CHIP_RV770)
1102  		return 1024;
1103  	else
1104  		return 512;
1105  }
1106  
1107  /**
1108   * radeon_check_arguments - validate module params
1109   *
1110   * @rdev: radeon_device pointer
1111   *
1112   * Validates certain module parameters and updates
1113   * the associated values used by the driver (all asics).
1114   */
1115  static void radeon_check_arguments(struct radeon_device *rdev)
1116  {
1117  	/* vramlimit must be a power of two */
1118  	if (radeon_vram_limit != 0 && !is_power_of_2(radeon_vram_limit)) {
1119  		dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
1120  				radeon_vram_limit);
1121  		radeon_vram_limit = 0;
1122  	}
1123  
1124  	if (radeon_gart_size == -1) {
1125  		radeon_gart_size = radeon_gart_size_auto(rdev->family);
1126  	}
1127  	/* gtt size must be a power of two and greater than or equal to 32M */
1128  	if (radeon_gart_size < 32) {
1129  		dev_warn(rdev->dev, "gart size (%d) too small\n",
1130  				radeon_gart_size);
1131  		radeon_gart_size = radeon_gart_size_auto(rdev->family);
1132  	} else if (!is_power_of_2(radeon_gart_size)) {
1133  		dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
1134  				radeon_gart_size);
1135  		radeon_gart_size = radeon_gart_size_auto(rdev->family);
1136  	}
1137  	rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
1138  
1139  	/* AGP mode can only be -1, 0, 1, 2, 4, 8 */
1140  	switch (radeon_agpmode) {
1141  	case -1:
1142  	case 0:
1143  	case 1:
1144  	case 2:
1145  	case 4:
1146  	case 8:
1147  		break;
1148  	default:
1149  		dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
1150  				"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
1151  		radeon_agpmode = 0;
1152  		break;
1153  	}
1154  
1155  	if (!is_power_of_2(radeon_vm_size)) {
1156  		dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
1157  			 radeon_vm_size);
1158  		radeon_vm_size = 4;
1159  	}
1160  
1161  	if (radeon_vm_size < 1) {
1162  		dev_warn(rdev->dev, "VM size (%d) too small, min is 1GB\n",
1163  			 radeon_vm_size);
1164  		radeon_vm_size = 4;
1165  	}
1166  
1167  	/*
1168  	 * Max GPUVM size for Cayman, SI and CI is 40 bits.
1169  	 */
1170  	if (radeon_vm_size > 1024) {
1171  		dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
1172  			 radeon_vm_size);
1173  		radeon_vm_size = 4;
1174  	}
1175  
1176  	/* defines number of bits in page table versus page directory,
1177  	 * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
1178  	 * page table and the remaining bits are in the page directory */
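	/* Worked example (illustrative): radeon_vm_size = 8 (GB) covers
	 * ilog2(8) + 18 = 21 bits of page index, so the default block size
	 * computed below is 21 - 9 = 12 bits of page table, leaving 9 bits
	 * for a 4KB page directory.
	 */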
1179  	if (radeon_vm_block_size == -1) {
1180  
1181  		/* Total bits covered by PD + PTs */
1182  		unsigned bits = ilog2(radeon_vm_size) + 18;
1183  
1184  		/* Make sure the PD is 4K in size up to an 8GB address space.
1185  		   Above that, split equally between PD and PTs */
1186  		if (radeon_vm_size <= 8)
1187  			radeon_vm_block_size = bits - 9;
1188  		else
1189  			radeon_vm_block_size = (bits + 3) / 2;
1190  
1191  	} else if (radeon_vm_block_size < 9) {
1192  		dev_warn(rdev->dev, "VM page table size (%d) too small\n",
1193  			 radeon_vm_block_size);
1194  		radeon_vm_block_size = 9;
1195  	}
1196  
1197  	if (radeon_vm_block_size > 24 ||
1198  	    (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
1199  		dev_warn(rdev->dev, "VM page table size (%d) too large\n",
1200  			 radeon_vm_block_size);
1201  		radeon_vm_block_size = 9;
1202  	}
1203  }
1204  
1205  /**
1206   * radeon_switcheroo_set_state - set switcheroo state
1207   *
1208   * @pdev: pci dev pointer
1209   * @state: vga_switcheroo state
1210   *
1211   * Callback for the switcheroo driver.  Suspends or resumes
1212   * the asics before or after it is powered up using ACPI methods.
1213   */
1214  static void radeon_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state)
1215  {
1216  	struct drm_device *dev = pci_get_drvdata(pdev);
1217  
1218  	if (radeon_is_px(dev) && state == VGA_SWITCHEROO_OFF)
1219  		return;
1220  
1221  	if (state == VGA_SWITCHEROO_ON) {
1222  		pr_info("radeon: switched on\n");
1223  		/* don't suspend or resume card normally */
1224  		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1225  
1226  		radeon_resume_kms(dev, true, true);
1227  
1228  		dev->switch_power_state = DRM_SWITCH_POWER_ON;
1229  		drm_kms_helper_poll_enable(dev);
1230  	} else {
1231  		pr_info("radeon: switched off\n");
1232  		drm_kms_helper_poll_disable(dev);
1233  		dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
1234  		radeon_suspend_kms(dev, true, true, false);
1235  		dev->switch_power_state = DRM_SWITCH_POWER_OFF;
1236  	}
1237  }
1238  
1239  /**
1240   * radeon_switcheroo_can_switch - see if switcheroo state can change
1241   *
1242   * @pdev: pci dev pointer
1243   *
1244   * Callback for the switcheroo driver.  Check if the switcheroo
1245   * state can be changed.
1246   * Returns true if the state can be changed, false if not.
1247   */
1248  static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
1249  {
1250  	struct drm_device *dev = pci_get_drvdata(pdev);
1251  
1252  	/*
1253  	 * FIXME: open_count is protected by drm_global_mutex but that would lead to
1254  	 * locking inversion with the driver load path. And the access here is
1255  	 * completely racy anyway. So don't bother with locking for now.
1256  	 */
1257  	return atomic_read(&dev->open_count) == 0;
1258  }
1259  
1260  static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
1261  	.set_gpu_state = radeon_switcheroo_set_state,
1262  	.reprobe = NULL,
1263  	.can_switch = radeon_switcheroo_can_switch,
1264  };
1265  
1266  /**
1267   * radeon_device_init - initialize the driver
1268   *
1269   * @rdev: radeon_device pointer
1270   * @ddev: drm dev pointer
1271   * @pdev: pci dev pointer
1272   * @flags: driver flags
1273   *
1274   * Initializes the driver info and hw (all asics).
1275   * Returns 0 for success or an error on failure.
1276   * Called at driver startup.
1277   */
1278  int radeon_device_init(struct radeon_device *rdev,
1279  		       struct drm_device *ddev,
1280  		       struct pci_dev *pdev,
1281  		       uint32_t flags)
1282  {
1283  	int r, i;
1284  	int dma_bits;
1285  	bool runtime = false;
1286  
1287  	rdev->shutdown = false;
1288  	rdev->flags = flags;
1289  	rdev->family = flags & RADEON_FAMILY_MASK;
1290  	rdev->is_atom_bios = false;
1291  	rdev->usec_timeout = RADEON_MAX_USEC_TIMEOUT;
1292  	rdev->mc.gtt_size = 512 * 1024 * 1024;
1293  	rdev->accel_working = false;
1294  	/* set up ring ids */
1295  	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1296  		rdev->ring[i].idx = i;
1297  	}
1298  	rdev->fence_context = dma_fence_context_alloc(RADEON_NUM_RINGS);
1299  
1300  	DRM_INFO("initializing kernel modesetting (%s 0x%04X:0x%04X 0x%04X:0x%04X 0x%02X).\n",
1301  		 radeon_family_name[rdev->family], pdev->vendor, pdev->device,
1302  		 pdev->subsystem_vendor, pdev->subsystem_device, pdev->revision);
1303  
1304  	/* mutex initializations are all done here so we
1305  	 * can call these functions again without locking issues */
1306  	mutex_init(&rdev->ring_lock);
1307  	mutex_init(&rdev->dc_hw_i2c_mutex);
1308  	atomic_set(&rdev->ih.lock, 0);
1309  	mutex_init(&rdev->gem.mutex);
1310  	mutex_init(&rdev->pm.mutex);
1311  	mutex_init(&rdev->gpu_clock_mutex);
1312  	mutex_init(&rdev->srbm_mutex);
1313  	mutex_init(&rdev->audio.component_mutex);
1314  	init_rwsem(&rdev->pm.mclk_lock);
1315  	init_rwsem(&rdev->exclusive_lock);
1316  	init_waitqueue_head(&rdev->irq.vblank_queue);
1317  	r = radeon_gem_init(rdev);
1318  	if (r)
1319  		return r;
1320  
1321  	radeon_check_arguments(rdev);
1322  	/* Adjust VM size here.
1323  	 * Max GPUVM size for cayman+ is 40 bits.
1324  	 */
1325  	rdev->vm_manager.max_pfn = radeon_vm_size << 18;
1326  
1327  	/* Set asic functions */
1328  	r = radeon_asic_init(rdev);
1329  	if (r)
1330  		return r;
1331  
1332  	/* all of the newer IGP chips have an internal GART.
1333  	 * However, some rs4xx report as AGP, so remove that flag here.
1334  	 */
1335  	if ((rdev->family >= CHIP_RS400) &&
1336  	    (rdev->flags & RADEON_IS_IGP)) {
1337  		rdev->flags &= ~RADEON_IS_AGP;
1338  	}
1339  
1340  	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
1341  		radeon_agp_disable(rdev);
1342  	}
1343  
1344  	/* Set the internal MC address mask
1345  	 * This is the max address of the GPU's
1346  	 * internal address space.
1347  	 */
1348  	if (rdev->family >= CHIP_CAYMAN)
1349  		rdev->mc.mc_mask = 0xffffffffffULL; /* 40 bit MC */
1350  	else if (rdev->family >= CHIP_CEDAR)
1351  		rdev->mc.mc_mask = 0xfffffffffULL; /* 36 bit MC */
1352  	else
1353  		rdev->mc.mc_mask = 0xffffffffULL; /* 32 bit MC */
1354  
1355  	/* set DMA mask.
1356  	 * PCIE - can handle 40-bits.
1357  	 * IGP - can handle 40-bits
1358  	 * AGP - generally dma32 is safest
1359  	 * PCI - dma32 for legacy pci gart, 40 bits on newer asics
1360  	 */
1361  	dma_bits = 40;
1362  	if (rdev->flags & RADEON_IS_AGP)
1363  		dma_bits = 32;
1364  	if ((rdev->flags & RADEON_IS_PCI) &&
1365  	    (rdev->family <= CHIP_RS740))
1366  		dma_bits = 32;
1367  #ifdef CONFIG_PPC64
1368  	if (rdev->family == CHIP_CEDAR)
1369  		dma_bits = 32;
1370  #endif
1371  
1372  	r = dma_set_mask_and_coherent(&rdev->pdev->dev, DMA_BIT_MASK(dma_bits));
1373  	if (r) {
1374  		pr_warn("radeon: No suitable DMA available\n");
1375  		return r;
1376  	}
1377  	rdev->need_swiotlb = drm_need_swiotlb(dma_bits);
1378  
1379  	/* Registers mapping */
1380  	/* TODO: block userspace mapping of io register */
1381  	spin_lock_init(&rdev->mmio_idx_lock);
1382  	spin_lock_init(&rdev->smc_idx_lock);
1383  	spin_lock_init(&rdev->pll_idx_lock);
1384  	spin_lock_init(&rdev->mc_idx_lock);
1385  	spin_lock_init(&rdev->pcie_idx_lock);
1386  	spin_lock_init(&rdev->pciep_idx_lock);
1387  	spin_lock_init(&rdev->pif_idx_lock);
1388  	spin_lock_init(&rdev->cg_idx_lock);
1389  	spin_lock_init(&rdev->uvd_idx_lock);
1390  	spin_lock_init(&rdev->rcu_idx_lock);
1391  	spin_lock_init(&rdev->didt_idx_lock);
1392  	spin_lock_init(&rdev->end_idx_lock);
1393  	if (rdev->family >= CHIP_BONAIRE) {
1394  		rdev->rmmio_base = pci_resource_start(rdev->pdev, 5);
1395  		rdev->rmmio_size = pci_resource_len(rdev->pdev, 5);
1396  	} else {
1397  		rdev->rmmio_base = pci_resource_start(rdev->pdev, 2);
1398  		rdev->rmmio_size = pci_resource_len(rdev->pdev, 2);
1399  	}
1400  	rdev->rmmio = ioremap(rdev->rmmio_base, rdev->rmmio_size);
1401  	if (rdev->rmmio == NULL)
1402  		return -ENOMEM;
1403  
1404  	/* doorbell bar mapping */
1405  	if (rdev->family >= CHIP_BONAIRE)
1406  		radeon_doorbell_init(rdev);
1407  
1408  	/* io port mapping */
1409  	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
1410  		if (pci_resource_flags(rdev->pdev, i) & IORESOURCE_IO) {
1411  			rdev->rio_mem_size = pci_resource_len(rdev->pdev, i);
1412  			rdev->rio_mem = pci_iomap(rdev->pdev, i, rdev->rio_mem_size);
1413  			break;
1414  		}
1415  	}
1416  	if (rdev->rio_mem == NULL)
1417  		DRM_ERROR("Unable to find PCI I/O BAR\n");
1418  
1419  	if (rdev->flags & RADEON_IS_PX)
1420  		radeon_device_handle_px_quirks(rdev);
1421  
1422  	/* if we have > 1 VGA cards, then disable the radeon VGA resources */
1423  	/* this will fail for cards that aren't VGA class devices, just
1424  	 * ignore it */
1425  	vga_client_register(rdev->pdev, radeon_vga_set_decode);
1426  
1427  	if (rdev->flags & RADEON_IS_PX)
1428  		runtime = true;
1429  	if (!pci_is_thunderbolt_attached(rdev->pdev))
1430  		vga_switcheroo_register_client(rdev->pdev,
1431  					       &radeon_switcheroo_ops, runtime);
1432  	if (runtime)
1433  		vga_switcheroo_init_domain_pm_ops(rdev->dev, &rdev->vga_pm_domain);
1434  
1435  	r = radeon_init(rdev);
1436  	if (r)
1437  		goto failed;
1438  
1439  	radeon_gem_debugfs_init(rdev);
1440  
1441  	if (rdev->flags & RADEON_IS_AGP && !rdev->accel_working) {
1442  		/* Acceleration is not working on this AGP card; try again
1443  		 * with a fallback to PCI or PCIE GART
1444  		 */
1445  		radeon_asic_reset(rdev);
1446  		radeon_fini(rdev);
1447  		radeon_agp_disable(rdev);
1448  		r = radeon_init(rdev);
1449  		if (r)
1450  			goto failed;
1451  	}
1452  
1453  	radeon_audio_component_init(rdev);
1454  
1455  	r = radeon_ib_ring_tests(rdev);
1456  	if (r)
1457  		DRM_ERROR("ib ring test failed (%d).\n", r);
1458  
1459  	/*
1460  	 * Turks/Thames GPUs will freeze the whole laptop if DPM is not restarted
1461  	 * after the CP ring has chewed on at least one packet. Hence we stop
1462  	 * and restart DPM here, after radeon_ib_ring_tests().
1463  	 */
1464  	if (rdev->pm.dpm_enabled &&
1465  	    (rdev->pm.pm_method == PM_METHOD_DPM) &&
1466  	    (rdev->family == CHIP_TURKS) &&
1467  	    (rdev->flags & RADEON_IS_MOBILITY)) {
1468  		mutex_lock(&rdev->pm.mutex);
1469  		radeon_dpm_disable(rdev);
1470  		radeon_dpm_enable(rdev);
1471  		mutex_unlock(&rdev->pm.mutex);
1472  	}
1473  
1474  	if ((radeon_testing & 1)) {
1475  		if (rdev->accel_working)
1476  			radeon_test_moves(rdev);
1477  		else
1478  			DRM_INFO("radeon: acceleration disabled, skipping move tests\n");
1479  	}
1480  	if ((radeon_testing & 2)) {
1481  		if (rdev->accel_working)
1482  			radeon_test_syncing(rdev);
1483  		else
1484  			DRM_INFO("radeon: acceleration disabled, skipping sync tests\n");
1485  	}
1486  	if (radeon_benchmarking) {
1487  		if (rdev->accel_working)
1488  			radeon_benchmark(rdev, radeon_benchmarking);
1489  		else
1490  			DRM_INFO("radeon: acceleration disabled, skipping benchmarks\n");
1491  	}
1492  	return 0;
1493  
1494  failed:
1495  	/* balance pm_runtime_get_sync() in radeon_driver_unload_kms() */
1496  	if (radeon_is_px(ddev))
1497  		pm_runtime_put_noidle(ddev->dev);
1498  	if (runtime)
1499  		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1500  	return r;
1501  }
1502  
1503  /**
1504   * radeon_device_fini - tear down the driver
1505   *
1506   * @rdev: radeon_device pointer
1507   *
1508   * Tear down the driver info (all asics).
1509   * Called at driver shutdown.
1510   */
1511  void radeon_device_fini(struct radeon_device *rdev)
1512  {
1513  	DRM_INFO("radeon: finishing device.\n");
1514  	rdev->shutdown = true;
1515  	/* evict vram memory */
1516  	radeon_bo_evict_vram(rdev);
1517  	radeon_audio_component_fini(rdev);
1518  	radeon_fini(rdev);
1519  	if (!pci_is_thunderbolt_attached(rdev->pdev))
1520  		vga_switcheroo_unregister_client(rdev->pdev);
1521  	if (rdev->flags & RADEON_IS_PX)
1522  		vga_switcheroo_fini_domain_pm_ops(rdev->dev);
1523  	vga_client_unregister(rdev->pdev);
1524  	if (rdev->rio_mem)
1525  		pci_iounmap(rdev->pdev, rdev->rio_mem);
1526  	rdev->rio_mem = NULL;
1527  	iounmap(rdev->rmmio);
1528  	rdev->rmmio = NULL;
1529  	if (rdev->family >= CHIP_BONAIRE)
1530  		radeon_doorbell_fini(rdev);
1531  }
1532  
1533  
1534  /*
1535   * Suspend & resume.
1536   */
1537  /*
1538   * radeon_suspend_kms - initiate device suspend
1539   *
1540   * Puts the hw in the suspend state (all asics).
1541   * Returns 0 for success or an error on failure.
1542   * Called at driver suspend.
1543   */
1544  int radeon_suspend_kms(struct drm_device *dev, bool suspend,
1545  		       bool fbcon, bool freeze)
1546  {
1547  	struct radeon_device *rdev;
1548  	struct pci_dev *pdev;
1549  	struct drm_crtc *crtc;
1550  	struct drm_connector *connector;
1551  	int i, r;
1552  
1553  	if (dev == NULL || dev->dev_private == NULL) {
1554  		return -ENODEV;
1555  	}
1556  
1557  	rdev = dev->dev_private;
1558  	pdev = to_pci_dev(dev->dev);
1559  
1560  	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1561  		return 0;
1562  
1563  	drm_kms_helper_poll_disable(dev);
1564  
1565  	drm_modeset_lock_all(dev);
1566  	/* turn off display hw */
1567  	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1568  		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
1569  	}
1570  	drm_modeset_unlock_all(dev);
1571  
1572  	/* unpin the front buffers and cursors */
1573  	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1574  		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1575  		struct drm_framebuffer *fb = crtc->primary->fb;
1576  		struct radeon_bo *robj;
1577  
1578  		if (radeon_crtc->cursor_bo) {
1579  			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1580  			r = radeon_bo_reserve(robj, false);
1581  			if (r == 0) {
1582  				radeon_bo_unpin(robj);
1583  				radeon_bo_unreserve(robj);
1584  			}
1585  		}
1586  
1587  		if (fb == NULL || fb->obj[0] == NULL) {
1588  			continue;
1589  		}
1590  		robj = gem_to_radeon_bo(fb->obj[0]);
1591  		/* don't unpin kernel fb objects */
1592  		if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
1593  			r = radeon_bo_reserve(robj, false);
1594  			if (r == 0) {
1595  				radeon_bo_unpin(robj);
1596  				radeon_bo_unreserve(robj);
1597  			}
1598  		}
1599  	}
1600  	/* evict vram memory */
1601  	radeon_bo_evict_vram(rdev);
1602  
1603  	/* wait for gpu to finish processing current batch */
1604  	for (i = 0; i < RADEON_NUM_RINGS; i++) {
1605  		r = radeon_fence_wait_empty(rdev, i);
1606  		if (r) {
1607  			/* delay GPU reset to resume */
1608  			radeon_fence_driver_force_completion(rdev, i);
1609  		} else {
1610  			/* finish executing delayed work */
1611  			flush_delayed_work(&rdev->fence_drv[i].lockup_work);
1612  		}
1613  	}
1614  
1615  	radeon_save_bios_scratch_regs(rdev);
1616  
1617  	radeon_suspend(rdev);
1618  	radeon_hpd_fini(rdev);
1619  	/* evict remaining vram memory
1620  	 * This second call to evict vram is to evict the gart page table
1621  	 * using the CPU.
1622  	 */
1623  	radeon_bo_evict_vram(rdev);
1624  
1625  	radeon_agp_suspend(rdev);
1626  
1627  	pci_save_state(pdev);
1628  	if (freeze && rdev->family >= CHIP_CEDAR && !(rdev->flags & RADEON_IS_IGP)) {
1629  		rdev->asic->asic_reset(rdev, true);
1630  		pci_restore_state(pdev);
1631  	} else if (suspend) {
1632  		/* Shut down the device */
1633  		pci_disable_device(pdev);
1634  		pci_set_power_state(pdev, PCI_D3hot);
1635  	}
1636  
1637  	if (fbcon) {
1638  		console_lock();
1639  		radeon_fbdev_set_suspend(rdev, 1);
1640  		console_unlock();
1641  	}
1642  	return 0;
1643  }
1644  
1645  /*
1646   * radeon_resume_kms - initiate device resume
1647   *
1648   * Bring the hw back to operating state (all asics).
1649   * Returns 0 for success or an error on failure.
1650   * Called at driver resume.
1651   */
1652  int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
1653  {
1654  	struct drm_connector *connector;
1655  	struct radeon_device *rdev = dev->dev_private;
1656  	struct pci_dev *pdev = to_pci_dev(dev->dev);
1657  	struct drm_crtc *crtc;
1658  	int r;
1659  
1660  	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
1661  		return 0;
1662  
1663  	if (fbcon) {
1664  		console_lock();
1665  	}
1666  	if (resume) {
1667  		pci_set_power_state(pdev, PCI_D0);
1668  		pci_restore_state(pdev);
1669  		if (pci_enable_device(pdev)) {
1670  			if (fbcon)
1671  				console_unlock();
1672  			return -1;
1673  		}
1674  	}
1675  	/* resume AGP if in use */
1676  	radeon_agp_resume(rdev);
1677  	radeon_resume(rdev);
1678  
1679  	r = radeon_ib_ring_tests(rdev);
1680  	if (r)
1681  		DRM_ERROR("ib ring test failed (%d).\n", r);
1682  
1683  	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1684  		/* do dpm late init */
1685  		r = radeon_pm_late_init(rdev);
1686  		if (r) {
1687  			rdev->pm.dpm_enabled = false;
1688  			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1689  		}
1690  	} else {
1691  		/* resume old pm late */
1692  		radeon_pm_resume(rdev);
1693  	}
1694  
1695  	radeon_restore_bios_scratch_regs(rdev);
1696  
1697  	/* pin cursors */
1698  	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
1699  		struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
1700  
1701  		if (radeon_crtc->cursor_bo) {
1702  			struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
1703  			r = radeon_bo_reserve(robj, false);
1704  			if (r == 0) {
1705  				/* Only 27 bit offset for legacy cursor */
1706  				r = radeon_bo_pin_restricted(robj,
1707  							     RADEON_GEM_DOMAIN_VRAM,
1708  							     ASIC_IS_AVIVO(rdev) ?
1709  							     0 : 1 << 27,
1710  							     &radeon_crtc->cursor_addr);
1711  				if (r != 0)
1712  					DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
1713  				radeon_bo_unreserve(robj);
1714  			}
1715  		}
1716  	}
1717  
1718  	/* init dig PHYs, disp eng pll */
1719  	if (rdev->is_atom_bios) {
1720  		radeon_atom_encoder_init(rdev);
1721  		radeon_atom_disp_eng_pll_init(rdev);
1722  		/* turn on the BL */
1723  		if (rdev->mode_info.bl_encoder) {
1724  			u8 bl_level = radeon_get_backlight_level(rdev,
1725  								 rdev->mode_info.bl_encoder);
1726  			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1727  						   bl_level);
1728  		}
1729  	}
1730  	/* reset hpd state */
1731  	radeon_hpd_init(rdev);
1732  	/* blat the mode back in */
1733  	if (fbcon) {
1734  		drm_helper_resume_force_mode(dev);
1735  		/* turn on display hw */
1736  		drm_modeset_lock_all(dev);
1737  		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
1738  			drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
1739  		}
1740  		drm_modeset_unlock_all(dev);
1741  	}
1742  
1743  	drm_kms_helper_poll_enable(dev);
1744  
1745  	/* set the power state here in case we are a PX system or headless */
1746  	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1747  		radeon_pm_compute_clocks(rdev);
1748  
1749  	if (fbcon) {
1750  		radeon_fbdev_set_suspend(rdev, 0);
1751  		console_unlock();
1752  	}
1753  
1754  	return 0;
1755  }
1756  
1757  /**
1758   * radeon_gpu_reset - reset the asic
1759   *
1760   * @rdev: radeon device pointer
1761   *
1762   * Attempt to reset the GPU if it has hung (all asics).
1763   * Returns 0 for success or an error on failure.
1764   */
1765  int radeon_gpu_reset(struct radeon_device *rdev)
1766  {
1767  	unsigned ring_sizes[RADEON_NUM_RINGS];
1768  	uint32_t *ring_data[RADEON_NUM_RINGS];
1769  
1770  	bool saved = false;
1771  
1772  	int i, r;
1773  
1774  	down_write(&rdev->exclusive_lock);
1775  
1776  	if (!rdev->needs_reset) {
1777  		up_write(&rdev->exclusive_lock);
1778  		return 0;
1779  	}
1780  
1781  	atomic_inc(&rdev->gpu_reset_counter);
1782  
1783  	radeon_save_bios_scratch_regs(rdev);
1784  	radeon_suspend(rdev);
1785  	radeon_hpd_fini(rdev);
1786  
1787  	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1788  		ring_sizes[i] = radeon_ring_backup(rdev, &rdev->ring[i],
1789  						   &ring_data[i]);
1790  		if (ring_sizes[i]) {
1791  			saved = true;
1792  			dev_info(rdev->dev, "Saved %d dwords of commands "
1793  				 "on ring %d.\n", ring_sizes[i], i);
1794  		}
1795  	}
1796  
1797  	r = radeon_asic_reset(rdev);
1798  	if (!r) {
1799  		dev_info(rdev->dev, "GPU reset succeeded, trying to resume\n");
1800  		radeon_resume(rdev);
1801  	}
1802  
1803  	radeon_restore_bios_scratch_regs(rdev);
1804  
1805  	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
1806  		if (!r && ring_data[i]) {
1807  			radeon_ring_restore(rdev, &rdev->ring[i],
1808  					    ring_sizes[i], ring_data[i]);
1809  		} else {
1810  			radeon_fence_driver_force_completion(rdev, i);
1811  			kfree(ring_data[i]);
1812  		}
1813  	}
1814  
1815  	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
1816  		/* do dpm late init */
1817  		r = radeon_pm_late_init(rdev);
1818  		if (r) {
1819  			rdev->pm.dpm_enabled = false;
1820  			DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
1821  		}
1822  	} else {
1823  		/* resume old pm late */
1824  		radeon_pm_resume(rdev);
1825  	}
1826  
1827  	/* init dig PHYs, disp eng pll */
1828  	if (rdev->is_atom_bios) {
1829  		radeon_atom_encoder_init(rdev);
1830  		radeon_atom_disp_eng_pll_init(rdev);
1831  		/* turn on the BL */
1832  		if (rdev->mode_info.bl_encoder) {
1833  			u8 bl_level = radeon_get_backlight_level(rdev,
1834  								 rdev->mode_info.bl_encoder);
1835  			radeon_set_backlight_level(rdev, rdev->mode_info.bl_encoder,
1836  						   bl_level);
1837  		}
1838  	}
1839  	/* reset hpd state */
1840  	radeon_hpd_init(rdev);
1841  
1842  	rdev->in_reset = true;
1843  	rdev->needs_reset = false;
1844  
1845  	downgrade_write(&rdev->exclusive_lock);
1846  
1847  	drm_helper_resume_force_mode(rdev_to_drm(rdev));
1848  
1849  	/* set the power state here in case we are a PX system or headless */
1850  	if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled)
1851  		radeon_pm_compute_clocks(rdev);
1852  
1853  	if (!r) {
1854  		r = radeon_ib_ring_tests(rdev);
1855  		if (r && saved)
1856  			r = -EAGAIN;
1857  	} else {
1858  		/* bad news, how to tell it to userspace ? */
1859  		dev_info(rdev->dev, "GPU reset failed\n");
1860  	}
1861  
1862  	rdev->needs_reset = r == -EAGAIN;
1863  	rdev->in_reset = false;
1864  
1865  	up_read(&rdev->exclusive_lock);
1866  	return r;
1867  }
1868