1  /*
2   * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
3   * All Rights Reserved.
4   *
5   * Permission is hereby granted, free of charge, to any person obtaining a
6   * copy of this software and associated documentation files (the "Software"),
7   * to deal in the Software without restriction, including without limitation
8   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9   * and/or sell copies of the Software, and to permit persons to whom the
10   * Software is furnished to do so, subject to the following conditions:
11   *
12   * The above copyright notice and this permission notice (including the next
13   * paragraph) shall be included in all copies or substantial portions of the
14   * Software.
15   *
16   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19   * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
20   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22   * OTHER DEALINGS IN THE SOFTWARE.
23   */
24  
25  #include <drm/amdgpu_drm.h>
26  #include <drm/drm_drv.h>
27  #include <drm/drm_fbdev_ttm.h>
28  #include <drm/drm_gem.h>
29  #include <drm/drm_managed.h>
30  #include <drm/drm_pciids.h>
31  #include <drm/drm_probe_helper.h>
32  #include <drm/drm_vblank.h>
33  
34  #include <linux/cc_platform.h>
35  #include <linux/dynamic_debug.h>
36  #include <linux/module.h>
37  #include <linux/mmu_notifier.h>
38  #include <linux/pm_runtime.h>
39  #include <linux/suspend.h>
40  #include <linux/vga_switcheroo.h>
41  
42  #include "amdgpu.h"
43  #include "amdgpu_amdkfd.h"
44  #include "amdgpu_dma_buf.h"
45  #include "amdgpu_drv.h"
46  #include "amdgpu_fdinfo.h"
47  #include "amdgpu_irq.h"
48  #include "amdgpu_psp.h"
49  #include "amdgpu_ras.h"
50  #include "amdgpu_reset.h"
51  #include "amdgpu_sched.h"
52  #include "amdgpu_xgmi.h"
53  #include "../amdxcp/amdgpu_xcp_drv.h"
54  
55  /*
56   * KMS wrapper.
57   * - 3.0.0 - initial driver
58   * - 3.1.0 - allow reading more status registers (GRBM, SRBM, SDMA, CP)
59   * - 3.2.0 - GFX8: Uses EOP_TC_WB_ACTION_EN, so UMDs don't have to do the same
60   *           at the end of IBs.
61   * - 3.3.0 - Add VM support for UVD on supported hardware.
62   * - 3.4.0 - Add AMDGPU_INFO_NUM_EVICTIONS.
63   * - 3.5.0 - Add support for new UVD_NO_OP register.
64   * - 3.6.0 - kmd involves use CONTEXT_CONTROL in ring buffer.
65   * - 3.7.0 - Add support for VCE clock list packet
66   * - 3.8.0 - Add support raster config init in the kernel
67   * - 3.9.0 - Add support for memory query info about VRAM and GTT.
68   * - 3.10.0 - Add support for new fences ioctl, new gem ioctl flags
69   * - 3.11.0 - Add support for sensor query info (clocks, temp, etc).
70   * - 3.12.0 - Add query for double offchip LDS buffers
71   * - 3.13.0 - Add PRT support
72   * - 3.14.0 - Fix race in amdgpu_ctx_get_fence() and note new functionality
73   * - 3.15.0 - Export more gpu info for gfx9
74   * - 3.16.0 - Add reserved vmid support
75   * - 3.17.0 - Add AMDGPU_NUM_VRAM_CPU_PAGE_FAULTS.
76   * - 3.18.0 - Export gpu always on cu bitmap
77   * - 3.19.0 - Add support for UVD MJPEG decode
78   * - 3.20.0 - Add support for local BOs
79   * - 3.21.0 - Add DRM_AMDGPU_FENCE_TO_HANDLE ioctl
80   * - 3.22.0 - Add DRM_AMDGPU_SCHED ioctl
81   * - 3.23.0 - Add query for VRAM lost counter
82   * - 3.24.0 - Add high priority compute support for gfx9
83   * - 3.25.0 - Add support for sensor query info (stable pstate sclk/mclk).
84   * - 3.26.0 - GFX9: Process AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE.
85   * - 3.27.0 - Add new chunk to AMDGPU_CS to enable BO_LIST creation.
86   * - 3.28.0 - Add AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES
87   * - 3.29.0 - Add AMDGPU_IB_FLAG_RESET_GDS_MAX_WAVE_ID
88   * - 3.30.0 - Add AMDGPU_SCHED_OP_CONTEXT_PRIORITY_OVERRIDE.
89   * - 3.31.0 - Add support for per-flip tiling attribute changes with DC
90   * - 3.32.0 - Add syncobj timeline support to AMDGPU_CS.
91   * - 3.33.0 - Fixes for GDS ENOMEM failures in AMDGPU_CS.
92   * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
93   * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
94   * - 3.36.0 - Allow reading more status registers on si/cik
95   * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
96   * - 3.38.0 - Add AMDGPU_IB_FLAG_EMIT_MEM_SYNC
97   * - 3.39.0 - DMABUF implicit sync does a full pipeline sync
98   * - 3.40.0 - Add AMDGPU_IDS_FLAGS_TMZ
99   * - 3.41.0 - Add video codec query
100   * - 3.42.0 - Add 16bpc fixed point display support
101   * - 3.43.0 - Add device hot plug/unplug support
102   * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B
103   * - 3.45.0 - Add context ioctl stable pstate interface
104   * - 3.46.0 - To enable hot plug amdgpu tests in libdrm
105   * - 3.47.0 - Add AMDGPU_GEM_CREATE_DISCARDABLE and AMDGPU_VM_NOALLOC flags
106   * - 3.48.0 - Add IP discovery version info to HW INFO
107   * - 3.49.0 - Add gang submit into CS IOCTL
108   * - 3.50.0 - Update AMDGPU_INFO_DEV_INFO IOCTL for minimum engine and memory clock
109   *            Update AMDGPU_INFO_SENSOR IOCTL for PEAK_PSTATE engine and memory clock
110   *   3.51.0 - Return the PCIe gen and lanes from the INFO ioctl
111   *   3.52.0 - Add AMDGPU_IDS_FLAGS_CONFORMANT_TRUNC_COORD, add device_info fields:
112   *            tcp_cache_size, num_sqc_per_wgp, sqc_data_cache_size, sqc_inst_cache_size,
113   *            gl1c_cache_size, gl2c_cache_size, mall_size, enabled_rb_pipes_mask_hi
114   *   3.53.0 - Support for GFX11 CP GFX shadowing
115   *   3.54.0 - Add AMDGPU_CTX_QUERY2_FLAGS_RESET_IN_PROGRESS support
116   * - 3.55.0 - Add AMDGPU_INFO_GPUVM_FAULT query
117   * - 3.56.0 - Update IB start address and size alignment for decode and encode
118   * - 3.57.0 - Compute tunneling on GFX10+
119   * - 3.58.0 - Add GFX12 DCC support
120   * - 3.59.0 - Cleared VRAM
121   */
122  #define KMS_DRIVER_MAJOR	3
123  #define KMS_DRIVER_MINOR	59
124  #define KMS_DRIVER_PATCHLEVEL	0
125  
126  /*
127   * amdgpu.debug module options. Are all disabled by default
128   */
129  enum AMDGPU_DEBUG_MASK {
130  	AMDGPU_DEBUG_VM = BIT(0),
131  	AMDGPU_DEBUG_LARGEBAR = BIT(1),
132  	AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
133  	AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
134  	AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
135  	AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
136  };
137  
138  unsigned int amdgpu_vram_limit = UINT_MAX;
139  int amdgpu_vis_vram_limit;
140  int amdgpu_gart_size = -1; /* auto */
141  int amdgpu_gtt_size = -1; /* auto */
142  int amdgpu_moverate = -1; /* auto */
143  int amdgpu_audio = -1;
144  int amdgpu_disp_priority;
145  int amdgpu_hw_i2c;
146  int amdgpu_pcie_gen2 = -1;
147  int amdgpu_msi = -1;
148  char amdgpu_lockup_timeout[AMDGPU_MAX_TIMEOUT_PARAM_LENGTH];
149  int amdgpu_dpm = -1;
150  int amdgpu_fw_load_type = -1;
151  int amdgpu_aspm = -1;
152  int amdgpu_runtime_pm = -1;
153  uint amdgpu_ip_block_mask = 0xffffffff;
154  int amdgpu_bapm = -1;
155  int amdgpu_deep_color;
156  int amdgpu_vm_size = -1;
157  int amdgpu_vm_fragment_size = -1;
158  int amdgpu_vm_block_size = -1;
159  int amdgpu_vm_fault_stop;
160  int amdgpu_vm_update_mode = -1;
161  int amdgpu_exp_hw_support;
162  int amdgpu_dc = -1;
163  int amdgpu_sched_jobs = 32;
164  int amdgpu_sched_hw_submission = 2;
165  uint amdgpu_pcie_gen_cap;
166  uint amdgpu_pcie_lane_cap;
167  u64 amdgpu_cg_mask = 0xffffffffffffffff;
168  uint amdgpu_pg_mask = 0xffffffff;
169  uint amdgpu_sdma_phase_quantum = 32;
170  char *amdgpu_disable_cu;
171  char *amdgpu_virtual_display;
172  bool enforce_isolation;
173  
174  /* Specifies the default granularity for SVM, used in buffer
175   * migration and restoration of backing memory when handling
176   * recoverable page faults.
177   *
178   * The value is given as log(numPages(buffer)); for a 2 MiB
179   * buffer it computes to be 9
180   */
181  uint amdgpu_svm_default_granularity = 9;
182  
183  /*
184   * OverDrive(bit 14) disabled by default
185   * GFX DCS(bit 19) disabled by default
186   */
187  uint amdgpu_pp_feature_mask = 0xfff7bfff;
188  uint amdgpu_force_long_training;
189  int amdgpu_lbpw = -1;
190  int amdgpu_compute_multipipe = -1;
191  int amdgpu_gpu_recovery = -1; /* auto */
192  int amdgpu_emu_mode;
193  uint amdgpu_smu_memory_pool_size;
194  int amdgpu_smu_pptable_id = -1;
195  /*
196   * FBC (bit 0) disabled by default
197   * MULTI_MON_PP_MCLK_SWITCH (bit 1) enabled by default
198   *   - With this, for multiple monitors in sync(e.g. with the same model),
199   *     mclk switching will be allowed. And the mclk will be not foced to the
200   *     highest. That helps saving some idle power.
201   * DISABLE_FRACTIONAL_PWM (bit 2) disabled by default
202   * PSR (bit 3) disabled by default
203   * EDP NO POWER SEQUENCING (bit 4) disabled by default
204   */
205  uint amdgpu_dc_feature_mask = 2;
206  uint amdgpu_dc_debug_mask;
207  uint amdgpu_dc_visual_confirm;
208  int amdgpu_async_gfx_ring = 1;
209  int amdgpu_mcbp = -1;
210  int amdgpu_discovery = -1;
211  int amdgpu_mes;
212  int amdgpu_mes_log_enable = 0;
213  int amdgpu_mes_kiq;
214  int amdgpu_uni_mes = 1;
215  int amdgpu_noretry = -1;
216  int amdgpu_force_asic_type = -1;
217  int amdgpu_tmz = -1; /* auto */
218  uint amdgpu_freesync_vid_mode;
219  int amdgpu_reset_method = -1; /* auto */
220  int amdgpu_num_kcq = -1;
221  int amdgpu_smartshift_bias;
222  int amdgpu_use_xgmi_p2p = 1;
223  int amdgpu_vcnfw_log;
224  int amdgpu_sg_display = -1; /* auto */
225  int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
226  int amdgpu_umsch_mm;
227  int amdgpu_seamless = -1; /* auto */
228  uint amdgpu_debug_mask;
229  int amdgpu_agp = -1; /* auto */
230  int amdgpu_wbrf = -1;
231  int amdgpu_damage_clips = -1; /* auto */
232  int amdgpu_umsch_mm_fwlog;
233  
234  static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
235  
236  DECLARE_DYNDBG_CLASSMAP(drm_debug_classes, DD_CLASS_TYPE_DISJOINT_BITS, 0,
237  			"DRM_UT_CORE",
238  			"DRM_UT_DRIVER",
239  			"DRM_UT_KMS",
240  			"DRM_UT_PRIME",
241  			"DRM_UT_ATOMIC",
242  			"DRM_UT_VBL",
243  			"DRM_UT_STATE",
244  			"DRM_UT_LEASE",
245  			"DRM_UT_DP",
246  			"DRM_UT_DRMRES");
247  
248  struct amdgpu_mgpu_info mgpu_info = {
249  	.mutex = __MUTEX_INITIALIZER(mgpu_info.mutex),
250  	.delayed_reset_work = __DELAYED_WORK_INITIALIZER(
251  			mgpu_info.delayed_reset_work,
252  			amdgpu_drv_delayed_reset_work_handler, 0),
253  };
254  int amdgpu_ras_enable = -1;
255  uint amdgpu_ras_mask = 0xffffffff;
256  int amdgpu_bad_page_threshold = -1;
257  struct amdgpu_watchdog_timer amdgpu_watchdog_timer = {
258  	.timeout_fatal_disable = false,
259  	.period = 0x0, /* default to 0x0 (timeout disable) */
260  };
261  
262  /**
263   * DOC: vramlimit (int)
264   * Restrict the total amount of VRAM in MiB for testing.  The default is 0 (Use full VRAM).
265   */
266  MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
267  module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
268  
269  /**
270   * DOC: vis_vramlimit (int)
271   * Restrict the amount of CPU visible VRAM in MiB for testing.  The default is 0 (Use full CPU visible VRAM).
272   */
273  MODULE_PARM_DESC(vis_vramlimit, "Restrict visible VRAM for testing, in megabytes");
274  module_param_named(vis_vramlimit, amdgpu_vis_vram_limit, int, 0444);
275  
276  /**
277   * DOC: gartsize (uint)
278   * Restrict the size of GART (for kernel use) in Mib (32, 64, etc.) for testing.
279   * The default is -1 (The size depends on asic).
280   */
281  MODULE_PARM_DESC(gartsize, "Size of kernel GART to setup in megabytes (32, 64, etc., -1=auto)");
282  module_param_named(gartsize, amdgpu_gart_size, uint, 0600);
283  
284  /**
285   * DOC: gttsize (int)
286   * Restrict the size of GTT domain (for userspace use) in MiB for testing.
287   * The default is -1 (Use 1/2 RAM, minimum value is 3GB).
288   */
289  MODULE_PARM_DESC(gttsize, "Size of the GTT userspace domain in megabytes (-1 = auto)");
290  module_param_named(gttsize, amdgpu_gtt_size, int, 0600);
291  
292  /**
293   * DOC: moverate (int)
294   * Set maximum buffer migration rate in MB/s. The default is -1 (8 MB/s).
295   */
296  MODULE_PARM_DESC(moverate, "Maximum buffer migration rate in MB/s. (32, 64, etc., -1=auto, 0=1=disabled)");
297  module_param_named(moverate, amdgpu_moverate, int, 0600);
298  
299  /**
300   * DOC: audio (int)
301   * Set HDMI/DPAudio. Only affects non-DC display handling. The default is -1 (Enabled), set 0 to disabled it.
302   */
303  MODULE_PARM_DESC(audio, "Audio enable (-1 = auto, 0 = disable, 1 = enable)");
304  module_param_named(audio, amdgpu_audio, int, 0444);
305  
306  /**
307   * DOC: disp_priority (int)
308   * Set display Priority (1 = normal, 2 = high). Only affects non-DC display handling. The default is 0 (auto).
309   */
310  MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
311  module_param_named(disp_priority, amdgpu_disp_priority, int, 0444);
312  
313  /**
314   * DOC: hw_i2c (int)
315   * To enable hw i2c engine. Only affects non-DC display handling. The default is 0 (Disabled).
316   */
317  MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
318  module_param_named(hw_i2c, amdgpu_hw_i2c, int, 0444);
319  
320  /**
321   * DOC: pcie_gen2 (int)
322   * To disable PCIE Gen2/3 mode (0 = disable, 1 = enable). The default is -1 (auto, enabled).
323   */
324  MODULE_PARM_DESC(pcie_gen2, "PCIE Gen2 mode (-1 = auto, 0 = disable, 1 = enable)");
325  module_param_named(pcie_gen2, amdgpu_pcie_gen2, int, 0444);
326  
327  /**
328   * DOC: msi (int)
329   * To disable Message Signaled Interrupts (MSI) functionality (1 = enable, 0 = disable). The default is -1 (auto, enabled).
330   */
331  MODULE_PARM_DESC(msi, "MSI support (1 = enable, 0 = disable, -1 = auto)");
332  module_param_named(msi, amdgpu_msi, int, 0444);
333  
334  /**
335   * DOC: svm_default_granularity (uint)
336   * Used in buffer migration and handling of recoverable page faults
337   */
338  MODULE_PARM_DESC(svm_default_granularity, "SVM's default granularity in log(2^Pages), default 9 = 2^9 = 2 MiB");
339  module_param_named(svm_default_granularity, amdgpu_svm_default_granularity, uint, 0644);
340  
341  /**
342   * DOC: lockup_timeout (string)
343   * Set GPU scheduler timeout value in ms.
344   *
345   * The format can be [Non-Compute] or [GFX,Compute,SDMA,Video]. That is there can be one or
346   * multiple values specified. 0 and negative values are invalidated. They will be adjusted
347   * to the default timeout.
348   *
349   * - With one value specified, the setting will apply to all non-compute jobs.
350   * - With multiple values specified, the first one will be for GFX.
351   *   The second one is for Compute. The third and fourth ones are
352   *   for SDMA and Video.
353   *
354   * By default(with no lockup_timeout settings), the timeout for all non-compute(GFX, SDMA and Video)
355   * jobs is 10000. The timeout for compute is 60000.
356   */
357  MODULE_PARM_DESC(lockup_timeout, "GPU lockup timeout in ms (default: for bare metal 10000 for non-compute jobs and 60000 for compute jobs; "
358  		"for passthrough or sriov, 10000 for all jobs. 0: keep default value. negative: infinity timeout), format: for bare metal [Non-Compute] or [GFX,Compute,SDMA,Video]; "
359  		"for passthrough or sriov [all jobs] or [GFX,Compute,SDMA,Video].");
360  module_param_string(lockup_timeout, amdgpu_lockup_timeout, sizeof(amdgpu_lockup_timeout), 0444);
361  
362  /**
363   * DOC: dpm (int)
364   * Override for dynamic power management setting
365   * (0 = disable, 1 = enable)
366   * The default is -1 (auto).
367   */
368  MODULE_PARM_DESC(dpm, "DPM support (1 = enable, 0 = disable, -1 = auto)");
369  module_param_named(dpm, amdgpu_dpm, int, 0444);
370  
371  /**
372   * DOC: fw_load_type (int)
373   * Set different firmware loading type for debugging, if supported.
374   * Set to 0 to force direct loading if supported by the ASIC.  Set
375   * to -1 to select the default loading mode for the ASIC, as defined
376   * by the driver.  The default is -1 (auto).
377   */
378  MODULE_PARM_DESC(fw_load_type, "firmware loading type (3 = rlc backdoor autoload if supported, 2 = smu load if supported, 1 = psp load, 0 = force direct if supported, -1 = auto)");
379  module_param_named(fw_load_type, amdgpu_fw_load_type, int, 0444);
380  
381  /**
382   * DOC: aspm (int)
383   * To disable ASPM (1 = enable, 0 = disable). The default is -1 (auto, enabled).
384   */
385  MODULE_PARM_DESC(aspm, "ASPM support (1 = enable, 0 = disable, -1 = auto)");
386  module_param_named(aspm, amdgpu_aspm, int, 0444);
387  
388  /**
389   * DOC: runpm (int)
390   * Override for runtime power management control for dGPUs. The amdgpu driver can dynamically power down
391   * the dGPUs when they are idle if supported. The default is -1 (auto enable).
392   * Setting the value to 0 disables this functionality.
393   * Setting the value to -2 is auto enabled with power down when displays are attached.
394   */
395  MODULE_PARM_DESC(runpm, "PX runtime pm (2 = force enable with BAMACO, 1 = force enable with BACO, 0 = disable, -1 = auto, -2 = auto with displays)");
396  module_param_named(runpm, amdgpu_runtime_pm, int, 0444);
397  
398  /**
399   * DOC: ip_block_mask (uint)
400   * Override what IP blocks are enabled on the GPU. Each GPU is a collection of IP blocks (gfx, display, video, etc.).
401   * Use this parameter to disable specific blocks. Note that the IP blocks do not have a fixed index. Some asics may not have
402   * some IPs or may include multiple instances of an IP so the ordering various from asic to asic. See the driver output in
403   * the kernel log for the list of IPs on the asic. The default is 0xffffffff (enable all blocks on a device).
404   */
405  MODULE_PARM_DESC(ip_block_mask, "IP Block Mask (all blocks enabled (default))");
406  module_param_named(ip_block_mask, amdgpu_ip_block_mask, uint, 0444);
407  
408  /**
409   * DOC: bapm (int)
410   * Bidirectional Application Power Management (BAPM) used to dynamically share TDP between CPU and GPU. Set value 0 to disable it.
411   * The default -1 (auto, enabled)
412   */
413  MODULE_PARM_DESC(bapm, "BAPM support (1 = enable, 0 = disable, -1 = auto)");
414  module_param_named(bapm, amdgpu_bapm, int, 0444);
415  
416  /**
417   * DOC: deep_color (int)
418   * Set 1 to enable Deep Color support. Only affects non-DC display handling. The default is 0 (disabled).
419   */
420  MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
421  module_param_named(deep_color, amdgpu_deep_color, int, 0444);
422  
423  /**
424   * DOC: vm_size (int)
425   * Override the size of the GPU's per client virtual address space in GiB.  The default is -1 (automatic for each asic).
426   */
427  MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 64GB)");
428  module_param_named(vm_size, amdgpu_vm_size, int, 0444);
429  
430  /**
431   * DOC: vm_fragment_size (int)
432   * Override VM fragment size in bits (4, 5, etc. 4 = 64K, 9 = 2M). The default is -1 (automatic for each asic).
433   */
434  MODULE_PARM_DESC(vm_fragment_size, "VM fragment size in bits (4, 5, etc. 4 = 64K (default), Max 9 = 2M)");
435  module_param_named(vm_fragment_size, amdgpu_vm_fragment_size, int, 0444);
436  
437  /**
438   * DOC: vm_block_size (int)
439   * Override VM page table size in bits (default depending on vm_size and hw setup). The default is -1 (automatic for each asic).
440   */
441  MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default depending on vm_size)");
442  module_param_named(vm_block_size, amdgpu_vm_block_size, int, 0444);
443  
444  /**
445   * DOC: vm_fault_stop (int)
446   * Stop on VM fault for debugging (0 = never, 1 = print first, 2 = always). The default is 0 (No stop).
447   */
448  MODULE_PARM_DESC(vm_fault_stop, "Stop on VM fault (0 = never (default), 1 = print first, 2 = always)");
449  module_param_named(vm_fault_stop, amdgpu_vm_fault_stop, int, 0444);
450  
451  /**
452   * DOC: vm_update_mode (int)
453   * Override VM update mode. VM updated by using CPU (0 = never, 1 = Graphics only, 2 = Compute only, 3 = Both). The default
454   * is -1 (Only in large BAR(LB) systems Compute VM tables will be updated by CPU, otherwise 0, never).
455   */
456  MODULE_PARM_DESC(vm_update_mode, "VM update using CPU (0 = never (default except for large BAR(LB)), 1 = Graphics only, 2 = Compute only (default for LB), 3 = Both");
457  module_param_named(vm_update_mode, amdgpu_vm_update_mode, int, 0444);
458  
459  /**
460   * DOC: exp_hw_support (int)
461   * Enable experimental hw support (1 = enable). The default is 0 (disabled).
462   */
463  MODULE_PARM_DESC(exp_hw_support, "experimental hw support (1 = enable, 0 = disable (default))");
464  module_param_named(exp_hw_support, amdgpu_exp_hw_support, int, 0444);
465  
466  /**
467   * DOC: dc (int)
468   * Disable/Enable Display Core driver for debugging (1 = enable, 0 = disable). The default is -1 (automatic for each asic).
469   */
470  MODULE_PARM_DESC(dc, "Display Core driver (1 = enable, 0 = disable, -1 = auto (default))");
471  module_param_named(dc, amdgpu_dc, int, 0444);
472  
473  /**
474   * DOC: sched_jobs (int)
475   * Override the max number of jobs supported in the sw queue. The default is 32.
476   */
477  MODULE_PARM_DESC(sched_jobs, "the max number of jobs supported in the sw queue (default 32)");
478  module_param_named(sched_jobs, amdgpu_sched_jobs, int, 0444);
479  
480  /**
481   * DOC: sched_hw_submission (int)
482   * Override the max number of HW submissions. The default is 2.
483   */
484  MODULE_PARM_DESC(sched_hw_submission, "the max number of HW submissions (default 2)");
485  module_param_named(sched_hw_submission, amdgpu_sched_hw_submission, int, 0444);
486  
487  /**
488   * DOC: ppfeaturemask (hexint)
489   * Override power features enabled. See enum PP_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
490   * The default is the current set of stable power features.
491   */
492  MODULE_PARM_DESC(ppfeaturemask, "all power features enabled (default))");
493  module_param_named(ppfeaturemask, amdgpu_pp_feature_mask, hexint, 0444);
494  
495  /**
496   * DOC: forcelongtraining (uint)
497   * Force long memory training in resume.
498   * The default is zero, indicates short training in resume.
499   */
500  MODULE_PARM_DESC(forcelongtraining, "force memory long training");
501  module_param_named(forcelongtraining, amdgpu_force_long_training, uint, 0444);
502  
503  /**
504   * DOC: pcie_gen_cap (uint)
505   * Override PCIE gen speed capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
506   * The default is 0 (automatic for each asic).
507   */
508  MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
509  module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
510  
511  /**
512   * DOC: pcie_lane_cap (uint)
513   * Override PCIE lanes capabilities. See the CAIL flags in drivers/gpu/drm/amd/include/amd_pcie.h.
514   * The default is 0 (automatic for each asic).
515   */
516  MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
517  module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
518  
519  /**
520   * DOC: cg_mask (ullong)
521   * Override Clockgating features enabled on GPU (0 = disable clock gating). See the AMD_CG_SUPPORT flags in
522   * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffffffffffff (all enabled).
523   */
524  MODULE_PARM_DESC(cg_mask, "Clockgating flags mask (0 = disable clock gating)");
525  module_param_named(cg_mask, amdgpu_cg_mask, ullong, 0444);
526  
527  /**
528   * DOC: pg_mask (uint)
529   * Override Powergating features enabled on GPU (0 = disable power gating). See the AMD_PG_SUPPORT flags in
530   * drivers/gpu/drm/amd/include/amd_shared.h. The default is 0xffffffff (all enabled).
531   */
532  MODULE_PARM_DESC(pg_mask, "Powergating flags mask (0 = disable power gating)");
533  module_param_named(pg_mask, amdgpu_pg_mask, uint, 0444);
534  
535  /**
536   * DOC: sdma_phase_quantum (uint)
537   * Override SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change). The default is 32.
538   */
539  MODULE_PARM_DESC(sdma_phase_quantum, "SDMA context switch phase quantum (x 1K GPU clock cycles, 0 = no change (default 32))");
540  module_param_named(sdma_phase_quantum, amdgpu_sdma_phase_quantum, uint, 0444);
541  
542  /**
543   * DOC: disable_cu (charp)
544   * Set to disable CUs (It's set like se.sh.cu,...). The default is NULL.
545   */
546  MODULE_PARM_DESC(disable_cu, "Disable CUs (se.sh.cu,...)");
547  module_param_named(disable_cu, amdgpu_disable_cu, charp, 0444);
548  
549  /**
550   * DOC: virtual_display (charp)
551   * Set to enable virtual display feature. This feature provides a virtual display hardware on headless boards
552   * or in virtualized environments. It will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x. It's the pci address of
553   * the device, plus the number of crtcs to expose. E.g., 0000:26:00.0,4 would enable 4 virtual crtcs on the pci
554   * device at 26:00.0. The default is NULL.
555   */
556  MODULE_PARM_DESC(virtual_display,
557  		 "Enable virtual display feature (the virtual_display will be set like xxxx:xx:xx.x,x;xxxx:xx:xx.x,x)");
558  module_param_named(virtual_display, amdgpu_virtual_display, charp, 0444);
559  
560  /**
561   * DOC: lbpw (int)
562   * Override Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable). The default is -1 (auto, enabled).
563   */
564  MODULE_PARM_DESC(lbpw, "Load Balancing Per Watt (LBPW) support (1 = enable, 0 = disable, -1 = auto)");
565  module_param_named(lbpw, amdgpu_lbpw, int, 0444);
566  
567  MODULE_PARM_DESC(compute_multipipe, "Force compute queues to be spread across pipes (1 = enable, 0 = disable, -1 = auto)");
568  module_param_named(compute_multipipe, amdgpu_compute_multipipe, int, 0444);
569  
570  /**
571   * DOC: gpu_recovery (int)
572   * Set to enable GPU recovery mechanism (1 = enable, 0 = disable). The default is -1 (auto, disabled except SRIOV).
573   */
574  MODULE_PARM_DESC(gpu_recovery, "Enable GPU recovery mechanism, (1 = enable, 0 = disable, -1 = auto)");
575  module_param_named(gpu_recovery, amdgpu_gpu_recovery, int, 0444);
576  
577  /**
578   * DOC: emu_mode (int)
579   * Set value 1 to enable emulation mode. This is only needed when running on an emulator. The default is 0 (disabled).
580   */
581  MODULE_PARM_DESC(emu_mode, "Emulation mode, (1 = enable, 0 = disable)");
582  module_param_named(emu_mode, amdgpu_emu_mode, int, 0444);
583  
584  /**
585   * DOC: ras_enable (int)
586   * Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))
587   */
588  MODULE_PARM_DESC(ras_enable, "Enable RAS features on the GPU (0 = disable, 1 = enable, -1 = auto (default))");
589  module_param_named(ras_enable, amdgpu_ras_enable, int, 0444);
590  
591  /**
592   * DOC: ras_mask (uint)
593   * Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1
594   * See the flags in drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
595   */
596  MODULE_PARM_DESC(ras_mask, "Mask of RAS features to enable (default 0xffffffff), only valid when ras_enable == 1");
597  module_param_named(ras_mask, amdgpu_ras_mask, uint, 0444);
598  
599  /**
600   * DOC: timeout_fatal_disable (bool)
601   * Disable Watchdog timeout fatal error event
602   */
603  MODULE_PARM_DESC(timeout_fatal_disable, "disable watchdog timeout fatal error (false = default)");
604  module_param_named(timeout_fatal_disable, amdgpu_watchdog_timer.timeout_fatal_disable, bool, 0644);
605  
606  /**
607   * DOC: timeout_period (uint)
608   * Modify the watchdog timeout max_cycles as (1 << period)
609   */
610  MODULE_PARM_DESC(timeout_period, "watchdog timeout period (0 = timeout disabled, 1 ~ 0x23 = timeout maxcycles = (1 << period)");
611  module_param_named(timeout_period, amdgpu_watchdog_timer.period, uint, 0644);
612  
613  /**
614   * DOC: si_support (int)
615   * Set SI support driver. This parameter works after set config CONFIG_DRM_AMDGPU_SI. For SI asic, when radeon driver is enabled,
616   * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
617   * otherwise using amdgpu driver.
618   */
619  #ifdef CONFIG_DRM_AMDGPU_SI
620  
621  #if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
622  int amdgpu_si_support;
623  MODULE_PARM_DESC(si_support, "SI support (1 = enabled, 0 = disabled (default))");
624  #else
625  int amdgpu_si_support = 1;
626  MODULE_PARM_DESC(si_support, "SI support (1 = enabled (default), 0 = disabled)");
627  #endif
628  
629  module_param_named(si_support, amdgpu_si_support, int, 0444);
630  #endif
631  
632  /**
633   * DOC: cik_support (int)
634   * Set CIK support driver. This parameter works after set config CONFIG_DRM_AMDGPU_CIK. For CIK asic, when radeon driver is enabled,
635   * set value 0 to use radeon driver, while set value 1 to use amdgpu driver. The default is using radeon driver when it available,
636   * otherwise using amdgpu driver.
637   */
638  #ifdef CONFIG_DRM_AMDGPU_CIK
639  
640  #if IS_ENABLED(CONFIG_DRM_RADEON) || IS_ENABLED(CONFIG_DRM_RADEON_MODULE)
641  int amdgpu_cik_support;
642  MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled, 0 = disabled (default))");
643  #else
644  int amdgpu_cik_support = 1;
645  MODULE_PARM_DESC(cik_support, "CIK support (1 = enabled (default), 0 = disabled)");
646  #endif
647  
648  module_param_named(cik_support, amdgpu_cik_support, int, 0444);
649  #endif
650  
651  /**
652   * DOC: smu_memory_pool_size (uint)
653   * It is used to reserve gtt for smu debug usage, setting value 0 to disable it. The actual size is value * 256MiB.
654   * E.g. 0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte. The default is 0 (disabled).
655   */
656  MODULE_PARM_DESC(smu_memory_pool_size,
657  	"reserve gtt for smu debug usage, 0 = disable,0x1 = 256Mbyte, 0x2 = 512Mbyte, 0x4 = 1 Gbyte, 0x8 = 2GByte");
658  module_param_named(smu_memory_pool_size, amdgpu_smu_memory_pool_size, uint, 0444);
659  
660  /**
661   * DOC: async_gfx_ring (int)
662   * It is used to enable gfx rings that could be configured with different prioritites or equal priorities
663   */
664  MODULE_PARM_DESC(async_gfx_ring,
665  	"Asynchronous GFX rings that could be configured with either different priorities (HP3D ring and LP3D ring), or equal priorities (0 = disabled, 1 = enabled (default))");
666  module_param_named(async_gfx_ring, amdgpu_async_gfx_ring, int, 0444);
667  
668  /**
669   * DOC: mcbp (int)
670   * It is used to enable mid command buffer preemption. (0 = disabled, 1 = enabled, -1 auto (default))
671   */
672  MODULE_PARM_DESC(mcbp,
673  	"Enable Mid-command buffer preemption (0 = disabled, 1 = enabled), -1 = auto (default)");
674  module_param_named(mcbp, amdgpu_mcbp, int, 0444);
675  
676  /**
677   * DOC: discovery (int)
678   * Allow driver to discover hardware IP information from IP Discovery table at the top of VRAM.
679   * (-1 = auto (default), 0 = disabled, 1 = enabled, 2 = use ip_discovery table from file)
680   */
681  MODULE_PARM_DESC(discovery,
682  	"Allow driver to discover hardware IPs from IP Discovery table at the top of VRAM");
683  module_param_named(discovery, amdgpu_discovery, int, 0444);
684  
685  /**
686   * DOC: mes (int)
687   * Enable Micro Engine Scheduler. This is a new hw scheduling engine for gfx, sdma, and compute.
688   * (0 = disabled (default), 1 = enabled)
689   */
690  MODULE_PARM_DESC(mes,
691  	"Enable Micro Engine Scheduler (0 = disabled (default), 1 = enabled)");
692  module_param_named(mes, amdgpu_mes, int, 0444);
693  
694  /**
695   * DOC: mes_log_enable (int)
696   * Enable Micro Engine Scheduler log. This is used to enable/disable MES internal log.
697   * (0 = disabled (default), 1 = enabled)
698   */
699  MODULE_PARM_DESC(mes_log_enable,
700  	"Enable Micro Engine Scheduler log (0 = disabled (default), 1 = enabled)");
701  module_param_named(mes_log_enable, amdgpu_mes_log_enable, int, 0444);
702  
703  /**
704   * DOC: mes_kiq (int)
705   * Enable Micro Engine Scheduler KIQ. This is a new engine pipe for kiq.
706   * (0 = disabled (default), 1 = enabled)
707   */
708  MODULE_PARM_DESC(mes_kiq,
709  	"Enable Micro Engine Scheduler KIQ (0 = disabled (default), 1 = enabled)");
710  module_param_named(mes_kiq, amdgpu_mes_kiq, int, 0444);
711  
712  /**
713   * DOC: uni_mes (int)
714   * Enable Unified Micro Engine Scheduler. This is a new engine pipe for unified scheduler.
715   * (0 = disabled (default), 1 = enabled)
716   */
717  MODULE_PARM_DESC(uni_mes,
718  	"Enable Unified Micro Engine Scheduler (0 = disabled, 1 = enabled(default)");
719  module_param_named(uni_mes, amdgpu_uni_mes, int, 0444);
720  
721  /**
722   * DOC: noretry (int)
723   * Disable XNACK retry in the SQ by default on GFXv9 hardware. On ASICs that
724   * do not support per-process XNACK this also disables retry page faults.
725   * (0 = retry enabled, 1 = retry disabled, -1 auto (default))
726   */
727  MODULE_PARM_DESC(noretry,
728  	"Disable retry faults (0 = retry enabled, 1 = retry disabled, -1 auto (default))");
729  module_param_named(noretry, amdgpu_noretry, int, 0644);
730  
731  /**
732   * DOC: force_asic_type (int)
733   * A non negative value used to specify the asic type for all supported GPUs.
734   */
735  MODULE_PARM_DESC(force_asic_type,
736  	"A non negative value used to specify the asic type for all supported GPUs");
737  module_param_named(force_asic_type, amdgpu_force_asic_type, int, 0444);
738  
739  /**
740   * DOC: use_xgmi_p2p (int)
741   * Enables/disables XGMI P2P interface (0 = disable, 1 = enable).
742   */
743  MODULE_PARM_DESC(use_xgmi_p2p,
744  	"Enable XGMI P2P interface (0 = disable; 1 = enable (default))");
745  module_param_named(use_xgmi_p2p, amdgpu_use_xgmi_p2p, int, 0444);
746  
747  
748  #ifdef CONFIG_HSA_AMD
749  /**
750   * DOC: sched_policy (int)
751   * Set scheduling policy. Default is HWS(hardware scheduling) with over-subscription.
752   * Setting 1 disables over-subscription. Setting 2 disables HWS and statically
753   * assigns queues to HQDs.
754   */
755  int sched_policy = KFD_SCHED_POLICY_HWS;
756  module_param(sched_policy, int, 0444);
757  MODULE_PARM_DESC(sched_policy,
758  	"Scheduling policy (0 = HWS (Default), 1 = HWS without over-subscription, 2 = Non-HWS (Used for debugging only)");
759  
760  /**
761   * DOC: hws_max_conc_proc (int)
762   * Maximum number of processes that HWS can schedule concurrently. The maximum is the
763   * number of VMIDs assigned to the HWS, which is also the default.
764   */
765  int hws_max_conc_proc = -1;
766  module_param(hws_max_conc_proc, int, 0444);
767  MODULE_PARM_DESC(hws_max_conc_proc,
768  	"Max # processes HWS can execute concurrently when sched_policy=0 (0 = no concurrency, #VMIDs for KFD = Maximum(default))");
769  
770  /**
771   * DOC: cwsr_enable (int)
772   * CWSR(compute wave store and resume) allows the GPU to preempt shader execution in
773   * the middle of a compute wave. Default is 1 to enable this feature. Setting 0
774   * disables it.
775   */
776  int cwsr_enable = 1;
777  module_param(cwsr_enable, int, 0444);
778  MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
779  
780  /**
781   * DOC: max_num_of_queues_per_device (int)
782   * Maximum number of queues per device. Valid setting is between 1 and 4096. Default
783   * is 4096.
784   */
785  int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
786  module_param(max_num_of_queues_per_device, int, 0444);
787  MODULE_PARM_DESC(max_num_of_queues_per_device,
788  	"Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
789  
790  /**
791   * DOC: send_sigterm (int)
792   * Send sigterm to HSA process on unhandled exceptions. Default is not to send sigterm
793   * but just print errors on dmesg. Setting 1 enables sending sigterm.
794   */
795  int send_sigterm;
796  module_param(send_sigterm, int, 0444);
797  MODULE_PARM_DESC(send_sigterm,
798  	"Send sigterm to HSA process on unhandled exception (0 = disable, 1 = enable)");
799  
800  /**
801   * DOC: halt_if_hws_hang (int)
802   * Halt if HWS hang is detected. Default value, 0, disables the halt on hang.
803   * Setting 1 enables halt on hang.
804   */
805  int halt_if_hws_hang;
806  module_param(halt_if_hws_hang, int, 0644);
807  MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
808  
809  /**
810   * DOC: hws_gws_support(bool)
811   * Assume that HWS supports GWS barriers regardless of what firmware version
812   * check says. Default value: false (rely on MEC2 firmware version check).
813   */
814  bool hws_gws_support;
815  module_param(hws_gws_support, bool, 0444);
816  MODULE_PARM_DESC(hws_gws_support, "Assume MEC2 FW supports GWS barriers (false = rely on FW version check (Default), true = force supported)");
817  
818  /**
819   * DOC: queue_preemption_timeout_ms (int)
820   * queue preemption timeout in ms (1 = Minimum, 9000 = default)
821   */
822  int queue_preemption_timeout_ms = 9000;
823  module_param(queue_preemption_timeout_ms, int, 0644);
824  MODULE_PARM_DESC(queue_preemption_timeout_ms, "queue preemption timeout in ms (1 = Minimum, 9000 = default)");
825  
826  /**
827   * DOC: debug_evictions(bool)
828   * Enable extra debug messages to help determine the cause of evictions
829   */
830  bool debug_evictions;
831  module_param(debug_evictions, bool, 0644);
832  MODULE_PARM_DESC(debug_evictions, "enable eviction debug messages (false = default)");
833  
834  /**
835   * DOC: no_system_mem_limit(bool)
836   * Disable system memory limit, to support multiple process shared memory
837   */
838  bool no_system_mem_limit;
839  module_param(no_system_mem_limit, bool, 0644);
840  MODULE_PARM_DESC(no_system_mem_limit, "disable system memory limit (false = default)");
841  
842  /**
843   * DOC: no_queue_eviction_on_vm_fault (int)
844   * If set, process queues will not be evicted on gpuvm fault. This is to keep the wavefront context for debugging (0 = queue eviction, 1 = no queue eviction). The default is 0 (queue eviction).
845   */
846  int amdgpu_no_queue_eviction_on_vm_fault;
847  MODULE_PARM_DESC(no_queue_eviction_on_vm_fault, "No queue eviction on VM fault (0 = queue eviction, 1 = no queue eviction)");
848  module_param_named(no_queue_eviction_on_vm_fault, amdgpu_no_queue_eviction_on_vm_fault, int, 0444);
849  #endif
850  
851  /**
852   * DOC: mtype_local (int)
853   */
854  int amdgpu_mtype_local;
855  MODULE_PARM_DESC(mtype_local, "MTYPE for local memory (0 = MTYPE_RW (default), 1 = MTYPE_NC, 2 = MTYPE_CC)");
856  module_param_named(mtype_local, amdgpu_mtype_local, int, 0444);
857  
858  /**
859   * DOC: pcie_p2p (bool)
860   * Enable PCIe P2P (requires large-BAR). Default value: true (on)
861   */
862  #ifdef CONFIG_HSA_AMD_P2P
863  bool pcie_p2p = true;
864  module_param(pcie_p2p, bool, 0444);
865  MODULE_PARM_DESC(pcie_p2p, "Enable PCIe P2P (requires large-BAR). (N = off, Y = on(default))");
866  #endif
867  
868  /**
869   * DOC: dcfeaturemask (uint)
870   * Override display features enabled. See enum DC_FEATURE_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
871   * The default is the current set of stable display features.
872   */
873  MODULE_PARM_DESC(dcfeaturemask, "all stable DC features enabled (default))");
874  module_param_named(dcfeaturemask, amdgpu_dc_feature_mask, uint, 0444);
875  
876  /**
877   * DOC: dcdebugmask (uint)
878   * Override display features enabled. See enum DC_DEBUG_MASK in drivers/gpu/drm/amd/include/amd_shared.h.
879   */
880  MODULE_PARM_DESC(dcdebugmask, "all debug options disabled (default))");
881  module_param_named(dcdebugmask, amdgpu_dc_debug_mask, uint, 0444);
882  
883  MODULE_PARM_DESC(visualconfirm, "Visual confirm (0 = off (default), 1 = MPO, 5 = PSR)");
884  module_param_named(visualconfirm, amdgpu_dc_visual_confirm, uint, 0444);
885  
886  /**
887   * DOC: abmlevel (uint)
888   * Override the default ABM (Adaptive Backlight Management) level used for DC
889   * enabled hardware. Requires DMCU to be supported and loaded.
890   * Valid levels are 0-4. A value of 0 indicates that ABM should be disabled by
891   * default. Values 1-4 control the maximum allowable brightness reduction via
892   * the ABM algorithm, with 1 being the least reduction and 4 being the most
893   * reduction.
894   *
895   * Defaults to -1, or disabled. Userspace can only override this level after
896   * boot if it's set to auto.
897   */
898  int amdgpu_dm_abm_level = -1;
899  MODULE_PARM_DESC(abmlevel,
900  		 "ABM level (0 = off, 1-4 = backlight reduction level, -1 auto (default))");
901  module_param_named(abmlevel, amdgpu_dm_abm_level, int, 0444);
902  
903  int amdgpu_backlight = -1;
904  MODULE_PARM_DESC(backlight, "Backlight control (0 = pwm, 1 = aux, -1 auto (default))");
905  module_param_named(backlight, amdgpu_backlight, bint, 0444);
906  
907  /**
908   * DOC: damageclips (int)
909   * Enable or disable damage clips support. If damage clips support is disabled,
910   * we will force full frame updates, irrespective of what user space sends to
911   * us.
912   *
913   * Defaults to -1 (where it is enabled unless a PSR-SU display is detected).
914   */
915  MODULE_PARM_DESC(damageclips,
916  		 "Damage clips support (0 = disable, 1 = enable, -1 auto (default))");
917  module_param_named(damageclips, amdgpu_damage_clips, int, 0444);
918  
919  /**
920   * DOC: tmz (int)
921   * Trusted Memory Zone (TMZ) is a method to protect data being written
922   * to or read from memory.
923   *
924   * The default value: 0 (off).  TODO: change to auto till it is completed.
925   */
926  MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)");
927  module_param_named(tmz, amdgpu_tmz, int, 0444);
928  
929  /**
930   * DOC: freesync_video (uint)
931   * Enable the optimization to adjust front porch timing to achieve seamless
932   * mode change experience when setting a freesync supported mode for which full
933   * modeset is not needed.
934   *
935   * The Display Core will add a set of modes derived from the base FreeSync
936   * video mode into the corresponding connector's mode list based on commonly
937   * used refresh rates and VRR range of the connected display, when users enable
938   * this feature. From the userspace perspective, they can see a seamless mode
939   * change experience when the change between different refresh rates under the
940   * same resolution. Additionally, userspace applications such as Video playback
941   * can read this modeset list and change the refresh rate based on the video
942   * frame rate. Finally, the userspace can also derive an appropriate mode for a
943   * particular refresh rate based on the FreeSync Mode and add it to the
944   * connector's mode list.
945   *
946   * Note: This is an experimental feature.
947   *
948   * The default value: 0 (off).
949   */
950  MODULE_PARM_DESC(
951  	freesync_video,
952  	"Enable freesync modesetting optimization feature (0 = off (default), 1 = on)");
953  module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444);
954  
955  /**
956   * DOC: reset_method (int)
957   * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco)
958   */
959  MODULE_PARM_DESC(reset_method, "GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco/bamaco)");
960  module_param_named(reset_method, amdgpu_reset_method, int, 0644);
961  
962  /**
963   * DOC: bad_page_threshold (int) Bad page threshold is specifies the
964   * threshold value of faulty pages detected by RAS ECC, which may
965   * result in the GPU entering bad status when the number of total
966   * faulty pages by ECC exceeds the threshold value.
967   */
968  MODULE_PARM_DESC(bad_page_threshold, "Bad page threshold(-1 = ignore threshold (default value), 0 = disable bad page retirement, -2 = driver sets threshold)");
969  module_param_named(bad_page_threshold, amdgpu_bad_page_threshold, int, 0444);
970  
971  MODULE_PARM_DESC(num_kcq, "number of kernel compute queue user want to setup (8 if set to greater than 8 or less than 0, only affect gfx 8+)");
972  module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
973  
974  /**
975   * DOC: vcnfw_log (int)
976   * Enable vcnfw log output for debugging, the default is disabled.
977   */
978  MODULE_PARM_DESC(vcnfw_log, "Enable vcnfw log(0 = disable (default value), 1 = enable)");
979  module_param_named(vcnfw_log, amdgpu_vcnfw_log, int, 0444);
980  
981  /**
982   * DOC: sg_display (int)
983   * Disable S/G (scatter/gather) display (i.e., display from system memory).
984   * This option is only relevant on APUs.  Set this option to 0 to disable
985   * S/G display if you experience flickering or other issues under memory
986   * pressure and report the issue.
987   */
988  MODULE_PARM_DESC(sg_display, "S/G Display (-1 = auto (default), 0 = disable)");
989  module_param_named(sg_display, amdgpu_sg_display, int, 0444);
990  
991  /**
992   * DOC: umsch_mm (int)
993   * Enable Multi Media User Mode Scheduler. This is a HW scheduling engine for VCN and VPE.
994   * (0 = disabled (default), 1 = enabled)
995   */
996  MODULE_PARM_DESC(umsch_mm,
997  	"Enable Multi Media User Mode Scheduler (0 = disabled (default), 1 = enabled)");
998  module_param_named(umsch_mm, amdgpu_umsch_mm, int, 0444);
999  
1000  /**
1001   * DOC: umsch_mm_fwlog (int)
1002   * Enable umschfw log output for debugging, the default is disabled.
1003   */
1004  MODULE_PARM_DESC(umsch_mm_fwlog, "Enable umschfw log(0 = disable (default value), 1 = enable)");
1005  module_param_named(umsch_mm_fwlog, amdgpu_umsch_mm_fwlog, int, 0444);
1006  
1007  /**
1008   * DOC: smu_pptable_id (int)
1009   * Used to override pptable id. id = 0 use VBIOS pptable.
1010   * id > 0 use the soft pptable with specicfied id.
1011   */
1012  MODULE_PARM_DESC(smu_pptable_id,
1013  	"specify pptable id to be used (-1 = auto(default) value, 0 = use pptable from vbios, > 0 = soft pptable id)");
1014  module_param_named(smu_pptable_id, amdgpu_smu_pptable_id, int, 0444);
1015  
1016  /**
1017   * DOC: partition_mode (int)
1018   * Used to override the default SPX mode.
1019   */
1020  MODULE_PARM_DESC(
1021  	user_partt_mode,
1022  	"specify partition mode to be used (-2 = AMDGPU_AUTO_COMPUTE_PARTITION_MODE(default value) \
1023  						0 = AMDGPU_SPX_PARTITION_MODE, \
1024  						1 = AMDGPU_DPX_PARTITION_MODE, \
1025  						2 = AMDGPU_TPX_PARTITION_MODE, \
1026  						3 = AMDGPU_QPX_PARTITION_MODE, \
1027  						4 = AMDGPU_CPX_PARTITION_MODE)");
1028  module_param_named(user_partt_mode, amdgpu_user_partt_mode, uint, 0444);
1029  
1030  
1031  /**
1032   * DOC: enforce_isolation (bool)
1033   * enforce process isolation between graphics and compute via using the same reserved vmid.
1034   */
1035  module_param(enforce_isolation, bool, 0444);
1036  MODULE_PARM_DESC(enforce_isolation, "enforce process isolation between graphics and compute . enforce_isolation = on");
1037  
1038  /**
1039   * DOC: seamless (int)
1040   * Seamless boot will keep the image on the screen during the boot process.
1041   */
1042  MODULE_PARM_DESC(seamless, "Seamless boot (-1 = auto (default), 0 = disable, 1 = enable)");
1043  module_param_named(seamless, amdgpu_seamless, int, 0444);
1044  
1045  /**
1046   * DOC: debug_mask (uint)
1047   * Debug options for amdgpu, work as a binary mask with the following options:
1048   *
1049   * - 0x1: Debug VM handling
1050   * - 0x2: Enable simulating large-bar capability on non-large bar system. This
1051   *   limits the VRAM size reported to ROCm applications to the visible
1052   *   size, usually 256MB.
1053   * - 0x4: Disable GPU soft recovery, always do a full reset
1054   */
1055  MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
1056  module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
1057  
1058  /**
1059   * DOC: agp (int)
1060   * Enable the AGP aperture.  This provides an aperture in the GPU's internal
1061   * address space for direct access to system memory.  Note that these accesses
1062   * are non-snooped, so they are only used for access to uncached memory.
1063   */
1064  MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
1065  module_param_named(agp, amdgpu_agp, int, 0444);
1066  
1067  /**
1068   * DOC: wbrf (int)
1069   * Enable Wifi RFI interference mitigation feature.
1070   * Due to electrical and mechanical constraints there may be likely interference of
1071   * relatively high-powered harmonics of the (G-)DDR memory clocks with local radio
1072   * module frequency bands used by Wifi 6/6e/7. To mitigate the possible RFI interference,
1073   * with this feature enabled, PMFW will use either “shadowed P-State” or “P-State” based
1074   * on active list of frequencies in-use (to be avoided) as part of initial setting or
1075   * P-state transition. However, there may be potential performance impact with this
1076   * feature enabled.
1077   * (0 = disabled, 1 = enabled, -1 = auto (default setting, will be enabled if supported))
1078   */
1079  MODULE_PARM_DESC(wbrf,
1080  	"Enable Wifi RFI interference mitigation (0 = disabled, 1 = enabled, -1 = auto(default)");
1081  module_param_named(wbrf, amdgpu_wbrf, int, 0444);
1082  
1083  /* These devices are not supported by amdgpu.
1084   * They are supported by the mach64, r128, radeon drivers
1085   */
1086  static const u16 amdgpu_unsupported_pciidlist[] = {
1087  	/* mach64 */
1088  	0x4354,
1089  	0x4358,
1090  	0x4554,
1091  	0x4742,
1092  	0x4744,
1093  	0x4749,
1094  	0x474C,
1095  	0x474D,
1096  	0x474E,
1097  	0x474F,
1098  	0x4750,
1099  	0x4751,
1100  	0x4752,
1101  	0x4753,
1102  	0x4754,
1103  	0x4755,
1104  	0x4756,
1105  	0x4757,
1106  	0x4758,
1107  	0x4759,
1108  	0x475A,
1109  	0x4C42,
1110  	0x4C44,
1111  	0x4C47,
1112  	0x4C49,
1113  	0x4C4D,
1114  	0x4C4E,
1115  	0x4C50,
1116  	0x4C51,
1117  	0x4C52,
1118  	0x4C53,
1119  	0x5654,
1120  	0x5655,
1121  	0x5656,
1122  	/* r128 */
1123  	0x4c45,
1124  	0x4c46,
1125  	0x4d46,
1126  	0x4d4c,
1127  	0x5041,
1128  	0x5042,
1129  	0x5043,
1130  	0x5044,
1131  	0x5045,
1132  	0x5046,
1133  	0x5047,
1134  	0x5048,
1135  	0x5049,
1136  	0x504A,
1137  	0x504B,
1138  	0x504C,
1139  	0x504D,
1140  	0x504E,
1141  	0x504F,
1142  	0x5050,
1143  	0x5051,
1144  	0x5052,
1145  	0x5053,
1146  	0x5054,
1147  	0x5055,
1148  	0x5056,
1149  	0x5057,
1150  	0x5058,
1151  	0x5245,
1152  	0x5246,
1153  	0x5247,
1154  	0x524b,
1155  	0x524c,
1156  	0x534d,
1157  	0x5446,
1158  	0x544C,
1159  	0x5452,
1160  	/* radeon */
1161  	0x3150,
1162  	0x3151,
1163  	0x3152,
1164  	0x3154,
1165  	0x3155,
1166  	0x3E50,
1167  	0x3E54,
1168  	0x4136,
1169  	0x4137,
1170  	0x4144,
1171  	0x4145,
1172  	0x4146,
1173  	0x4147,
1174  	0x4148,
1175  	0x4149,
1176  	0x414A,
1177  	0x414B,
1178  	0x4150,
1179  	0x4151,
1180  	0x4152,
1181  	0x4153,
1182  	0x4154,
1183  	0x4155,
1184  	0x4156,
1185  	0x4237,
1186  	0x4242,
1187  	0x4336,
1188  	0x4337,
1189  	0x4437,
1190  	0x4966,
1191  	0x4967,
1192  	0x4A48,
1193  	0x4A49,
1194  	0x4A4A,
1195  	0x4A4B,
1196  	0x4A4C,
1197  	0x4A4D,
1198  	0x4A4E,
1199  	0x4A4F,
1200  	0x4A50,
1201  	0x4A54,
1202  	0x4B48,
1203  	0x4B49,
1204  	0x4B4A,
1205  	0x4B4B,
1206  	0x4B4C,
1207  	0x4C57,
1208  	0x4C58,
1209  	0x4C59,
1210  	0x4C5A,
1211  	0x4C64,
1212  	0x4C66,
1213  	0x4C67,
1214  	0x4E44,
1215  	0x4E45,
1216  	0x4E46,
1217  	0x4E47,
1218  	0x4E48,
1219  	0x4E49,
1220  	0x4E4A,
1221  	0x4E4B,
1222  	0x4E50,
1223  	0x4E51,
1224  	0x4E52,
1225  	0x4E53,
1226  	0x4E54,
1227  	0x4E56,
1228  	0x5144,
1229  	0x5145,
1230  	0x5146,
1231  	0x5147,
1232  	0x5148,
1233  	0x514C,
1234  	0x514D,
1235  	0x5157,
1236  	0x5158,
1237  	0x5159,
1238  	0x515A,
1239  	0x515E,
1240  	0x5460,
1241  	0x5462,
1242  	0x5464,
1243  	0x5548,
1244  	0x5549,
1245  	0x554A,
1246  	0x554B,
1247  	0x554C,
1248  	0x554D,
1249  	0x554E,
1250  	0x554F,
1251  	0x5550,
1252  	0x5551,
1253  	0x5552,
1254  	0x5554,
1255  	0x564A,
1256  	0x564B,
1257  	0x564F,
1258  	0x5652,
1259  	0x5653,
1260  	0x5657,
1261  	0x5834,
1262  	0x5835,
1263  	0x5954,
1264  	0x5955,
1265  	0x5974,
1266  	0x5975,
1267  	0x5960,
1268  	0x5961,
1269  	0x5962,
1270  	0x5964,
1271  	0x5965,
1272  	0x5969,
1273  	0x5a41,
1274  	0x5a42,
1275  	0x5a61,
1276  	0x5a62,
1277  	0x5b60,
1278  	0x5b62,
1279  	0x5b63,
1280  	0x5b64,
1281  	0x5b65,
1282  	0x5c61,
1283  	0x5c63,
1284  	0x5d48,
1285  	0x5d49,
1286  	0x5d4a,
1287  	0x5d4c,
1288  	0x5d4d,
1289  	0x5d4e,
1290  	0x5d4f,
1291  	0x5d50,
1292  	0x5d52,
1293  	0x5d57,
1294  	0x5e48,
1295  	0x5e4a,
1296  	0x5e4b,
1297  	0x5e4c,
1298  	0x5e4d,
1299  	0x5e4f,
1300  	0x6700,
1301  	0x6701,
1302  	0x6702,
1303  	0x6703,
1304  	0x6704,
1305  	0x6705,
1306  	0x6706,
1307  	0x6707,
1308  	0x6708,
1309  	0x6709,
1310  	0x6718,
1311  	0x6719,
1312  	0x671c,
1313  	0x671d,
1314  	0x671f,
1315  	0x6720,
1316  	0x6721,
1317  	0x6722,
1318  	0x6723,
1319  	0x6724,
1320  	0x6725,
1321  	0x6726,
1322  	0x6727,
1323  	0x6728,
1324  	0x6729,
1325  	0x6738,
1326  	0x6739,
1327  	0x673e,
1328  	0x6740,
1329  	0x6741,
1330  	0x6742,
1331  	0x6743,
1332  	0x6744,
1333  	0x6745,
1334  	0x6746,
1335  	0x6747,
1336  	0x6748,
1337  	0x6749,
1338  	0x674A,
1339  	0x6750,
1340  	0x6751,
1341  	0x6758,
1342  	0x6759,
1343  	0x675B,
1344  	0x675D,
1345  	0x675F,
1346  	0x6760,
1347  	0x6761,
1348  	0x6762,
1349  	0x6763,
1350  	0x6764,
1351  	0x6765,
1352  	0x6766,
1353  	0x6767,
1354  	0x6768,
1355  	0x6770,
1356  	0x6771,
1357  	0x6772,
1358  	0x6778,
1359  	0x6779,
1360  	0x677B,
1361  	0x6840,
1362  	0x6841,
1363  	0x6842,
1364  	0x6843,
1365  	0x6849,
1366  	0x684C,
1367  	0x6850,
1368  	0x6858,
1369  	0x6859,
1370  	0x6880,
1371  	0x6888,
1372  	0x6889,
1373  	0x688A,
1374  	0x688C,
1375  	0x688D,
1376  	0x6898,
1377  	0x6899,
1378  	0x689b,
1379  	0x689c,
1380  	0x689d,
1381  	0x689e,
1382  	0x68a0,
1383  	0x68a1,
1384  	0x68a8,
1385  	0x68a9,
1386  	0x68b0,
1387  	0x68b8,
1388  	0x68b9,
1389  	0x68ba,
1390  	0x68be,
1391  	0x68bf,
1392  	0x68c0,
1393  	0x68c1,
1394  	0x68c7,
1395  	0x68c8,
1396  	0x68c9,
1397  	0x68d8,
1398  	0x68d9,
1399  	0x68da,
1400  	0x68de,
1401  	0x68e0,
1402  	0x68e1,
1403  	0x68e4,
1404  	0x68e5,
1405  	0x68e8,
1406  	0x68e9,
1407  	0x68f1,
1408  	0x68f2,
1409  	0x68f8,
1410  	0x68f9,
1411  	0x68fa,
1412  	0x68fe,
1413  	0x7100,
1414  	0x7101,
1415  	0x7102,
1416  	0x7103,
1417  	0x7104,
1418  	0x7105,
1419  	0x7106,
1420  	0x7108,
1421  	0x7109,
1422  	0x710A,
1423  	0x710B,
1424  	0x710C,
1425  	0x710E,
1426  	0x710F,
1427  	0x7140,
1428  	0x7141,
1429  	0x7142,
1430  	0x7143,
1431  	0x7144,
1432  	0x7145,
1433  	0x7146,
1434  	0x7147,
1435  	0x7149,
1436  	0x714A,
1437  	0x714B,
1438  	0x714C,
1439  	0x714D,
1440  	0x714E,
1441  	0x714F,
1442  	0x7151,
1443  	0x7152,
1444  	0x7153,
1445  	0x715E,
1446  	0x715F,
1447  	0x7180,
1448  	0x7181,
1449  	0x7183,
1450  	0x7186,
1451  	0x7187,
1452  	0x7188,
1453  	0x718A,
1454  	0x718B,
1455  	0x718C,
1456  	0x718D,
1457  	0x718F,
1458  	0x7193,
1459  	0x7196,
1460  	0x719B,
1461  	0x719F,
1462  	0x71C0,
1463  	0x71C1,
1464  	0x71C2,
1465  	0x71C3,
1466  	0x71C4,
1467  	0x71C5,
1468  	0x71C6,
1469  	0x71C7,
1470  	0x71CD,
1471  	0x71CE,
1472  	0x71D2,
1473  	0x71D4,
1474  	0x71D5,
1475  	0x71D6,
1476  	0x71DA,
1477  	0x71DE,
1478  	0x7200,
1479  	0x7210,
1480  	0x7211,
1481  	0x7240,
1482  	0x7243,
1483  	0x7244,
1484  	0x7245,
1485  	0x7246,
1486  	0x7247,
1487  	0x7248,
1488  	0x7249,
1489  	0x724A,
1490  	0x724B,
1491  	0x724C,
1492  	0x724D,
1493  	0x724E,
1494  	0x724F,
1495  	0x7280,
1496  	0x7281,
1497  	0x7283,
1498  	0x7284,
1499  	0x7287,
1500  	0x7288,
1501  	0x7289,
1502  	0x728B,
1503  	0x728C,
1504  	0x7290,
1505  	0x7291,
1506  	0x7293,
1507  	0x7297,
1508  	0x7834,
1509  	0x7835,
1510  	0x791e,
1511  	0x791f,
1512  	0x793f,
1513  	0x7941,
1514  	0x7942,
1515  	0x796c,
1516  	0x796d,
1517  	0x796e,
1518  	0x796f,
1519  	0x9400,
1520  	0x9401,
1521  	0x9402,
1522  	0x9403,
1523  	0x9405,
1524  	0x940A,
1525  	0x940B,
1526  	0x940F,
1527  	0x94A0,
1528  	0x94A1,
1529  	0x94A3,
1530  	0x94B1,
1531  	0x94B3,
1532  	0x94B4,
1533  	0x94B5,
1534  	0x94B9,
1535  	0x9440,
1536  	0x9441,
1537  	0x9442,
1538  	0x9443,
1539  	0x9444,
1540  	0x9446,
1541  	0x944A,
1542  	0x944B,
1543  	0x944C,
1544  	0x944E,
1545  	0x9450,
1546  	0x9452,
1547  	0x9456,
1548  	0x945A,
1549  	0x945B,
1550  	0x945E,
1551  	0x9460,
1552  	0x9462,
1553  	0x946A,
1554  	0x946B,
1555  	0x947A,
1556  	0x947B,
1557  	0x9480,
1558  	0x9487,
1559  	0x9488,
1560  	0x9489,
1561  	0x948A,
1562  	0x948F,
1563  	0x9490,
1564  	0x9491,
1565  	0x9495,
1566  	0x9498,
1567  	0x949C,
1568  	0x949E,
1569  	0x949F,
1570  	0x94C0,
1571  	0x94C1,
1572  	0x94C3,
1573  	0x94C4,
1574  	0x94C5,
1575  	0x94C6,
1576  	0x94C7,
1577  	0x94C8,
1578  	0x94C9,
1579  	0x94CB,
1580  	0x94CC,
1581  	0x94CD,
1582  	0x9500,
1583  	0x9501,
1584  	0x9504,
1585  	0x9505,
1586  	0x9506,
1587  	0x9507,
1588  	0x9508,
1589  	0x9509,
1590  	0x950F,
1591  	0x9511,
1592  	0x9515,
1593  	0x9517,
1594  	0x9519,
1595  	0x9540,
1596  	0x9541,
1597  	0x9542,
1598  	0x954E,
1599  	0x954F,
1600  	0x9552,
1601  	0x9553,
1602  	0x9555,
1603  	0x9557,
1604  	0x955f,
1605  	0x9580,
1606  	0x9581,
1607  	0x9583,
1608  	0x9586,
1609  	0x9587,
1610  	0x9588,
1611  	0x9589,
1612  	0x958A,
1613  	0x958B,
1614  	0x958C,
1615  	0x958D,
1616  	0x958E,
1617  	0x958F,
1618  	0x9590,
1619  	0x9591,
1620  	0x9593,
1621  	0x9595,
1622  	0x9596,
1623  	0x9597,
1624  	0x9598,
1625  	0x9599,
1626  	0x959B,
1627  	0x95C0,
1628  	0x95C2,
1629  	0x95C4,
1630  	0x95C5,
1631  	0x95C6,
1632  	0x95C7,
1633  	0x95C9,
1634  	0x95CC,
1635  	0x95CD,
1636  	0x95CE,
1637  	0x95CF,
1638  	0x9610,
1639  	0x9611,
1640  	0x9612,
1641  	0x9613,
1642  	0x9614,
1643  	0x9615,
1644  	0x9616,
1645  	0x9640,
1646  	0x9641,
1647  	0x9642,
1648  	0x9643,
1649  	0x9644,
1650  	0x9645,
1651  	0x9647,
1652  	0x9648,
1653  	0x9649,
1654  	0x964a,
1655  	0x964b,
1656  	0x964c,
1657  	0x964e,
1658  	0x964f,
1659  	0x9710,
1660  	0x9711,
1661  	0x9712,
1662  	0x9713,
1663  	0x9714,
1664  	0x9715,
1665  	0x9802,
1666  	0x9803,
1667  	0x9804,
1668  	0x9805,
1669  	0x9806,
1670  	0x9807,
1671  	0x9808,
1672  	0x9809,
1673  	0x980A,
1674  	0x9900,
1675  	0x9901,
1676  	0x9903,
1677  	0x9904,
1678  	0x9905,
1679  	0x9906,
1680  	0x9907,
1681  	0x9908,
1682  	0x9909,
1683  	0x990A,
1684  	0x990B,
1685  	0x990C,
1686  	0x990D,
1687  	0x990E,
1688  	0x990F,
1689  	0x9910,
1690  	0x9913,
1691  	0x9917,
1692  	0x9918,
1693  	0x9919,
1694  	0x9990,
1695  	0x9991,
1696  	0x9992,
1697  	0x9993,
1698  	0x9994,
1699  	0x9995,
1700  	0x9996,
1701  	0x9997,
1702  	0x9998,
1703  	0x9999,
1704  	0x999A,
1705  	0x999B,
1706  	0x999C,
1707  	0x999D,
1708  	0x99A0,
1709  	0x99A2,
1710  	0x99A4,
1711  	/* radeon secondary ids */
1712  	0x3171,
1713  	0x3e70,
1714  	0x4164,
1715  	0x4165,
1716  	0x4166,
1717  	0x4168,
1718  	0x4170,
1719  	0x4171,
1720  	0x4172,
1721  	0x4173,
1722  	0x496e,
1723  	0x4a69,
1724  	0x4a6a,
1725  	0x4a6b,
1726  	0x4a70,
1727  	0x4a74,
1728  	0x4b69,
1729  	0x4b6b,
1730  	0x4b6c,
1731  	0x4c6e,
1732  	0x4e64,
1733  	0x4e65,
1734  	0x4e66,
1735  	0x4e67,
1736  	0x4e68,
1737  	0x4e69,
1738  	0x4e6a,
1739  	0x4e71,
1740  	0x4f73,
1741  	0x5569,
1742  	0x556b,
1743  	0x556d,
1744  	0x556f,
1745  	0x5571,
1746  	0x5854,
1747  	0x5874,
1748  	0x5940,
1749  	0x5941,
1750  	0x5b70,
1751  	0x5b72,
1752  	0x5b73,
1753  	0x5b74,
1754  	0x5b75,
1755  	0x5d44,
1756  	0x5d45,
1757  	0x5d6d,
1758  	0x5d6f,
1759  	0x5d72,
1760  	0x5d77,
1761  	0x5e6b,
1762  	0x5e6d,
1763  	0x7120,
1764  	0x7124,
1765  	0x7129,
1766  	0x712e,
1767  	0x712f,
1768  	0x7162,
1769  	0x7163,
1770  	0x7166,
1771  	0x7167,
1772  	0x7172,
1773  	0x7173,
1774  	0x71a0,
1775  	0x71a1,
1776  	0x71a3,
1777  	0x71a7,
1778  	0x71bb,
1779  	0x71e0,
1780  	0x71e1,
1781  	0x71e2,
1782  	0x71e6,
1783  	0x71e7,
1784  	0x71f2,
1785  	0x7269,
1786  	0x726b,
1787  	0x726e,
1788  	0x72a0,
1789  	0x72a8,
1790  	0x72b1,
1791  	0x72b3,
1792  	0x793f,
1793  };
1794  
1795  static const struct pci_device_id pciidlist[] = {
1796  #ifdef CONFIG_DRM_AMDGPU_SI
1797  	{0x1002, 0x6780, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1798  	{0x1002, 0x6784, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1799  	{0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1800  	{0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1801  	{0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1802  	{0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1803  	{0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1804  	{0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1805  	{0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1806  	{0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1807  	{0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1808  	{0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1809  	{0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI},
1810  	{0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
1811  	{0x1002, 0x6801, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
1812  	{0x1002, 0x6802, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|AMD_IS_MOBILITY},
1813  	{0x1002, 0x6806, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1814  	{0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1815  	{0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1816  	{0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1817  	{0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1818  	{0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1819  	{0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1820  	{0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1821  	{0x1002, 0x6819, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN},
1822  	{0x1002, 0x6600, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1823  	{0x1002, 0x6601, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1824  	{0x1002, 0x6602, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1825  	{0x1002, 0x6603, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1826  	{0x1002, 0x6604, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1827  	{0x1002, 0x6605, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1828  	{0x1002, 0x6606, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1829  	{0x1002, 0x6607, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1830  	{0x1002, 0x6608, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
1831  	{0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
1832  	{0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
1833  	{0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
1834  	{0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1835  	{0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1836  	{0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1837  	{0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|AMD_IS_MOBILITY},
1838  	{0x1002, 0x6631, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND},
1839  	{0x1002, 0x6820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1840  	{0x1002, 0x6821, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1841  	{0x1002, 0x6822, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1842  	{0x1002, 0x6823, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1843  	{0x1002, 0x6824, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1844  	{0x1002, 0x6825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1845  	{0x1002, 0x6826, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1846  	{0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1847  	{0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1848  	{0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1849  	{0x1002, 0x682A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1850  	{0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1851  	{0x1002, 0x682C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1852  	{0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1853  	{0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1854  	{0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1855  	{0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|AMD_IS_MOBILITY},
1856  	{0x1002, 0x6835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1857  	{0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1858  	{0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1859  	{0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1860  	{0x1002, 0x683B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1861  	{0x1002, 0x683D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1862  	{0x1002, 0x683F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE},
1863  	{0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
1864  	{0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
1865  	{0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
1866  	{0x1002, 0x6665, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
1867  	{0x1002, 0x6667, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
1868  	{0x1002, 0x666F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|AMD_IS_MOBILITY},
1869  #endif
1870  #ifdef CONFIG_DRM_AMDGPU_CIK
1871  	/* Kaveri */
1872  	{0x1002, 0x1304, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1873  	{0x1002, 0x1305, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1874  	{0x1002, 0x1306, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1875  	{0x1002, 0x1307, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1876  	{0x1002, 0x1309, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1877  	{0x1002, 0x130A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1878  	{0x1002, 0x130B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1879  	{0x1002, 0x130C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1880  	{0x1002, 0x130D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1881  	{0x1002, 0x130E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1882  	{0x1002, 0x130F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1883  	{0x1002, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1884  	{0x1002, 0x1311, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1885  	{0x1002, 0x1312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1886  	{0x1002, 0x1313, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1887  	{0x1002, 0x1315, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1888  	{0x1002, 0x1316, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1889  	{0x1002, 0x1317, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1890  	{0x1002, 0x1318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_MOBILITY|AMD_IS_APU},
1891  	{0x1002, 0x131B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1892  	{0x1002, 0x131C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1893  	{0x1002, 0x131D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KAVERI|AMD_IS_APU},
1894  	/* Bonaire */
1895  	{0x1002, 0x6640, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
1896  	{0x1002, 0x6641, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
1897  	{0x1002, 0x6646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
1898  	{0x1002, 0x6647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|AMD_IS_MOBILITY},
1899  	{0x1002, 0x6649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
1900  	{0x1002, 0x6650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
1901  	{0x1002, 0x6651, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
1902  	{0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
1903  	{0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
1904  	{0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
1905  	{0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE},
1906  	/* Hawaii */
1907  	{0x1002, 0x67A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1908  	{0x1002, 0x67A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1909  	{0x1002, 0x67A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1910  	{0x1002, 0x67A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1911  	{0x1002, 0x67A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1912  	{0x1002, 0x67AA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1913  	{0x1002, 0x67B0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1914  	{0x1002, 0x67B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1915  	{0x1002, 0x67B8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1916  	{0x1002, 0x67B9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1917  	{0x1002, 0x67BA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1918  	{0x1002, 0x67BE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAWAII},
1919  	/* Kabini */
1920  	{0x1002, 0x9830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
1921  	{0x1002, 0x9831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1922  	{0x1002, 0x9832, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
1923  	{0x1002, 0x9833, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1924  	{0x1002, 0x9834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
1925  	{0x1002, 0x9835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1926  	{0x1002, 0x9836, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
1927  	{0x1002, 0x9837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1928  	{0x1002, 0x9838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
1929  	{0x1002, 0x9839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
1930  	{0x1002, 0x983a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1931  	{0x1002, 0x983b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_MOBILITY|AMD_IS_APU},
1932  	{0x1002, 0x983c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1933  	{0x1002, 0x983d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1934  	{0x1002, 0x983e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1935  	{0x1002, 0x983f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_KABINI|AMD_IS_APU},
1936  	/* mullins */
1937  	{0x1002, 0x9850, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1938  	{0x1002, 0x9851, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1939  	{0x1002, 0x9852, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1940  	{0x1002, 0x9853, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1941  	{0x1002, 0x9854, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1942  	{0x1002, 0x9855, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1943  	{0x1002, 0x9856, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1944  	{0x1002, 0x9857, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1945  	{0x1002, 0x9858, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1946  	{0x1002, 0x9859, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1947  	{0x1002, 0x985A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1948  	{0x1002, 0x985B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1949  	{0x1002, 0x985C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1950  	{0x1002, 0x985D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1951  	{0x1002, 0x985E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1952  	{0x1002, 0x985F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_MULLINS|AMD_IS_MOBILITY|AMD_IS_APU},
1953  #endif
1954  	/* topaz */
1955  	{0x1002, 0x6900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
1956  	{0x1002, 0x6901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
1957  	{0x1002, 0x6902, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
1958  	{0x1002, 0x6903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
1959  	{0x1002, 0x6907, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TOPAZ},
1960  	/* tonga */
1961  	{0x1002, 0x6920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1962  	{0x1002, 0x6921, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1963  	{0x1002, 0x6928, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1964  	{0x1002, 0x6929, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1965  	{0x1002, 0x692B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1966  	{0x1002, 0x692F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1967  	{0x1002, 0x6930, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1968  	{0x1002, 0x6938, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1969  	{0x1002, 0x6939, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TONGA},
1970  	/* fiji */
1971  	{0x1002, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
1972  	{0x1002, 0x730F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_FIJI},
1973  	/* carrizo */
1974  	{0x1002, 0x9870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
1975  	{0x1002, 0x9874, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
1976  	{0x1002, 0x9875, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
1977  	{0x1002, 0x9876, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
1978  	{0x1002, 0x9877, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CARRIZO|AMD_IS_APU},
1979  	/* stoney */
1980  	{0x1002, 0x98E4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_STONEY|AMD_IS_APU},
1981  	/* Polaris11 */
1982  	{0x1002, 0x67E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1983  	{0x1002, 0x67E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1984  	{0x1002, 0x67E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1985  	{0x1002, 0x67EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1986  	{0x1002, 0x67EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1987  	{0x1002, 0x67FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1988  	{0x1002, 0x67E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1989  	{0x1002, 0x67E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1990  	{0x1002, 0x67E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS11},
1991  	/* Polaris10 */
1992  	{0x1002, 0x67C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
1993  	{0x1002, 0x67C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
1994  	{0x1002, 0x67C2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
1995  	{0x1002, 0x67C4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
1996  	{0x1002, 0x67C7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
1997  	{0x1002, 0x67D0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
1998  	{0x1002, 0x67DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
1999  	{0x1002, 0x67C8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
2000  	{0x1002, 0x67C9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
2001  	{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
2002  	{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
2003  	{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
2004  	{0x1002, 0x6FDF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
2005  	/* Polaris12 */
2006  	{0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2007  	{0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2008  	{0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2009  	{0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2010  	{0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2011  	{0x1002, 0x6995, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2012  	{0x1002, 0x6997, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2013  	{0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
2014  	/* VEGAM */
2015  	{0x1002, 0x694C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
2016  	{0x1002, 0x694E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
2017  	{0x1002, 0x694F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGAM},
2018  	/* Vega 10 */
2019  	{0x1002, 0x6860, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2020  	{0x1002, 0x6861, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2021  	{0x1002, 0x6862, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2022  	{0x1002, 0x6863, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2023  	{0x1002, 0x6864, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2024  	{0x1002, 0x6867, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2025  	{0x1002, 0x6868, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2026  	{0x1002, 0x6869, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2027  	{0x1002, 0x686a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2028  	{0x1002, 0x686b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2029  	{0x1002, 0x686c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2030  	{0x1002, 0x686d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2031  	{0x1002, 0x686e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2032  	{0x1002, 0x686f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2033  	{0x1002, 0x687f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA10},
2034  	/* Vega 12 */
2035  	{0x1002, 0x69A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
2036  	{0x1002, 0x69A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
2037  	{0x1002, 0x69A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
2038  	{0x1002, 0x69A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
2039  	{0x1002, 0x69AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA12},
2040  	/* Vega 20 */
2041  	{0x1002, 0x66A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
2042  	{0x1002, 0x66A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
2043  	{0x1002, 0x66A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
2044  	{0x1002, 0x66A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
2045  	{0x1002, 0x66A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
2046  	{0x1002, 0x66A7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
2047  	{0x1002, 0x66AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VEGA20},
2048  	/* Raven */
2049  	{0x1002, 0x15dd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
2050  	{0x1002, 0x15d8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RAVEN|AMD_IS_APU},
2051  	/* Arcturus */
2052  	{0x1002, 0x738C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
2053  	{0x1002, 0x7388, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
2054  	{0x1002, 0x738E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
2055  	{0x1002, 0x7390, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARCTURUS},
2056  	/* Navi10 */
2057  	{0x1002, 0x7310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2058  	{0x1002, 0x7312, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2059  	{0x1002, 0x7318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2060  	{0x1002, 0x7319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2061  	{0x1002, 0x731A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2062  	{0x1002, 0x731B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2063  	{0x1002, 0x731E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2064  	{0x1002, 0x731F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI10},
2065  	/* Navi14 */
2066  	{0x1002, 0x7340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
2067  	{0x1002, 0x7341, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
2068  	{0x1002, 0x7347, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
2069  	{0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14},
2070  
2071  	/* Renoir */
2072  	{0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
2073  	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
2074  	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
2075  	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
2076  
2077  	/* Navi12 */
2078  	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
2079  	{0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
2080  
2081  	/* Sienna_Cichlid */
2082  	{0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2083  	{0x1002, 0x73A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2084  	{0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2085  	{0x1002, 0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2086  	{0x1002, 0x73A5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2087  	{0x1002, 0x73A8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2088  	{0x1002, 0x73A9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2089  	{0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2090  	{0x1002, 0x73AC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2091  	{0x1002, 0x73AD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2092  	{0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2093  	{0x1002, 0x73AF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2094  	{0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID},
2095  
2096  	/* Yellow Carp */
2097  	{0x1002, 0x164D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
2098  	{0x1002, 0x1681, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_YELLOW_CARP|AMD_IS_APU},
2099  
2100  	/* Navy_Flounder */
2101  	{0x1002, 0x73C0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2102  	{0x1002, 0x73C1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2103  	{0x1002, 0x73C3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2104  	{0x1002, 0x73DA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2105  	{0x1002, 0x73DB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2106  	{0x1002, 0x73DC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2107  	{0x1002, 0x73DD, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2108  	{0x1002, 0x73DE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2109  	{0x1002, 0x73DF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVY_FLOUNDER},
2110  
2111  	/* DIMGREY_CAVEFISH */
2112  	{0x1002, 0x73E0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2113  	{0x1002, 0x73E1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2114  	{0x1002, 0x73E2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2115  	{0x1002, 0x73E3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2116  	{0x1002, 0x73E8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2117  	{0x1002, 0x73E9, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2118  	{0x1002, 0x73EA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2119  	{0x1002, 0x73EB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2120  	{0x1002, 0x73EC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2121  	{0x1002, 0x73ED, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2122  	{0x1002, 0x73EF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2123  	{0x1002, 0x73FF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_DIMGREY_CAVEFISH},
2124  
2125  	/* Aldebaran */
2126  	{0x1002, 0x7408, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
2127  	{0x1002, 0x740C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
2128  	{0x1002, 0x740F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
2129  	{0x1002, 0x7410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ALDEBARAN},
2130  
2131  	/* CYAN_SKILLFISH */
2132  	{0x1002, 0x13FE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
2133  	{0x1002, 0x143F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CYAN_SKILLFISH|AMD_IS_APU},
2134  
2135  	/* BEIGE_GOBY */
2136  	{0x1002, 0x7420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
2137  	{0x1002, 0x7421, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
2138  	{0x1002, 0x7422, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
2139  	{0x1002, 0x7423, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
2140  	{0x1002, 0x7424, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
2141  	{0x1002, 0x743F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BEIGE_GOBY},
2142  
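	/* Catch-all entries: any other AMD (vendor 0x1002) display-class or
	 * processing-accelerator-class device not matched above is bound with
	 * CHIP_IP_DISCOVERY, i.e. the ASIC is identified at probe time via IP
	 * discovery rather than a hard-coded chip type.
	 */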
2143  	{ PCI_DEVICE(0x1002, PCI_ANY_ID),
2144  	  .class = PCI_CLASS_DISPLAY_VGA << 8,
2145  	  .class_mask = 0xffffff,
2146  	  .driver_data = CHIP_IP_DISCOVERY },
2147  
2148  	{ PCI_DEVICE(0x1002, PCI_ANY_ID),
2149  	  .class = PCI_CLASS_DISPLAY_OTHER << 8,
2150  	  .class_mask = 0xffffff,
2151  	  .driver_data = CHIP_IP_DISCOVERY },
2152  
2153  	{ PCI_DEVICE(0x1002, PCI_ANY_ID),
2154  	  .class = PCI_CLASS_ACCELERATOR_PROCESSING << 8,
2155  	  .class_mask = 0xffffff,
2156  	  .driver_data = CHIP_IP_DISCOVERY },
2157  
2158  	{0, 0, 0}
2159  };
2160  
2161  MODULE_DEVICE_TABLE(pci, pciidlist);
2162  
2163  static const struct amdgpu_asic_type_quirk asic_type_quirks[] = {
2164  	/* differentiate between P10 and P11 asics with the same DID */
2165  	{0x67FF, 0xE3, CHIP_POLARIS10},
2166  	{0x67FF, 0xE7, CHIP_POLARIS10},
2167  	{0x67FF, 0xF3, CHIP_POLARIS10},
2168  	{0x67FF, 0xF7, CHIP_POLARIS10},
2169  };
2170  
2171  static const struct drm_driver amdgpu_kms_driver;
2172  
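/* Bring the other PCI functions of the adapter (audio, USB, UCSI) into D0
 * via runtime PM and then let them autosuspend again; see the BACO comment
 * in amdgpu_pci_probe() for why the audio function needs a D0->D3 cycle.
 */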
2173  static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev)
2174  {
2175  	struct pci_dev *p = NULL;
2176  	int i;
2177  
2178  	/* 0 - GPU
2179  	 * 1 - audio
2180  	 * 2 - USB
2181  	 * 3 - UCSI
2182  	 */
2183  	for (i = 1; i < 4; i++) {
2184  		p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus),
2185  						adev->pdev->bus->number, i);
2186  		if (p) {
2187  			pm_runtime_get_sync(&p->dev);
2188  			pm_runtime_mark_last_busy(&p->dev);
2189  			pm_runtime_put_autosuspend(&p->dev);
2190  			pci_dev_put(p);
2191  		}
2192  	}
2193  }
2194  
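/* Apply the amdgpu_debug_mask module parameter: each set bit enables the
 * corresponding per-device debug option and logs that it is active.
 */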
2195  static void amdgpu_init_debug_options(struct amdgpu_device *adev)
2196  {
2197  	if (amdgpu_debug_mask & AMDGPU_DEBUG_VM) {
2198  		pr_info("debug: VM handling debug enabled\n");
2199  		adev->debug_vm = true;
2200  	}
2201  
2202  	if (amdgpu_debug_mask & AMDGPU_DEBUG_LARGEBAR) {
2203  		pr_info("debug: enabled simulating large-bar capability on non-large bar system\n");
2204  		adev->debug_largebar = true;
2205  	}
2206  
2207  	if (amdgpu_debug_mask & AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY) {
2208  		pr_info("debug: soft reset for GPU recovery disabled\n");
2209  		adev->debug_disable_soft_recovery = true;
2210  	}
2211  
2212  	if (amdgpu_debug_mask & AMDGPU_DEBUG_USE_VRAM_FW_BUF) {
2213  		pr_info("debug: place fw in vram for frontdoor loading\n");
2214  		adev->debug_use_vram_fw_buf = true;
2215  	}
2216  
2217  	if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_RAS_ACA) {
2218  		pr_info("debug: enable RAS ACA\n");
2219  		adev->debug_enable_ras_aca = true;
2220  	}
2221  
2222  	if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) {
2223  		pr_info("debug: enable experimental reset features\n");
2224  		adev->debug_exp_resets = true;
2225  	}
2226  }
2227  
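/* Some device IDs are shared between different ASICs.  Look up the
 * (device, revision) pair in asic_type_quirks and, on a match, replace the
 * ASIC type encoded in the probe flags with the quirk entry's type.
 */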
2228  static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
2229  {
2230  	int i;
2231  
2232  	for (i = 0; i < ARRAY_SIZE(asic_type_quirks); i++) {
2233  		if (pdev->device == asic_type_quirks[i].device &&
2234  			pdev->revision == asic_type_quirks[i].revision) {
2235  				flags &= ~AMD_ASIC_MASK;
2236  				flags |= asic_type_quirks[i].type;
2237  				break;
2238  			}
2239  	}
2240  
2241  	return flags;
2242  }
2243  
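/* PCI probe entry point: skip devices owned by radeon, apply ASIC type
 * quirks, honor the SI/CIK support module parameters, allocate the
 * drm/amdgpu device, load the KMS driver, register the DRM device (with
 * retries on -EAGAIN) and finally set up fbdev emulation and runtime PM.
 */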
2244  static int amdgpu_pci_probe(struct pci_dev *pdev,
2245  			    const struct pci_device_id *ent)
2246  {
2247  	struct drm_device *ddev;
2248  	struct amdgpu_device *adev;
2249  	unsigned long flags = ent->driver_data;
2250  	int ret, retry = 0, i;
2251  	bool supports_atomic = false;
2252  
2253  	/* skip devices which are owned by radeon */
2254  	for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
2255  		if (amdgpu_unsupported_pciidlist[i] == pdev->device)
2256  			return -ENODEV;
2257  	}
2258  
2259  	if (amdgpu_aspm == -1 && !pcie_aspm_enabled(pdev))
2260  		amdgpu_aspm = 0;
2261  
2262  	if (amdgpu_virtual_display ||
2263  	    amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK))
2264  		supports_atomic = true;
2265  
2266  	if ((flags & AMD_EXP_HW_SUPPORT) && !amdgpu_exp_hw_support) {
2267  		DRM_INFO("This hardware requires experimental hardware support.\n"
2268  			 "See modparam exp_hw_support\n");
2269  		return -ENODEV;
2270  	}
2271  
2272  	flags = amdgpu_fix_asic_type(pdev, flags);
2273  
2274  	/* Due to hardware bugs, S/G Display on raven requires a 1:1 IOMMU mapping,
2275  	 * however, SME requires an indirect IOMMU mapping because the encryption
2276  	 * bit is beyond the DMA mask of the chip.
2277  	 */
2278  	if (cc_platform_has(CC_ATTR_MEM_ENCRYPT) &&
2279  	    ((flags & AMD_ASIC_MASK) == CHIP_RAVEN)) {
2280  		dev_info(&pdev->dev,
2281  			 "SME is not compatible with RAVEN\n");
2282  		return -ENOTSUPP;
2283  	}
2284  
2285  #ifdef CONFIG_DRM_AMDGPU_SI
2286  	if (!amdgpu_si_support) {
2287  		switch (flags & AMD_ASIC_MASK) {
2288  		case CHIP_TAHITI:
2289  		case CHIP_PITCAIRN:
2290  		case CHIP_VERDE:
2291  		case CHIP_OLAND:
2292  		case CHIP_HAINAN:
2293  			dev_info(&pdev->dev,
2294  				 "SI support provided by radeon.\n");
2295  			dev_info(&pdev->dev,
2296  				 "Use radeon.si_support=0 amdgpu.si_support=1 to override.\n"
2297  				);
2298  			return -ENODEV;
2299  		}
2300  	}
2301  #endif
2302  #ifdef CONFIG_DRM_AMDGPU_CIK
2303  	if (!amdgpu_cik_support) {
2304  		switch (flags & AMD_ASIC_MASK) {
2305  		case CHIP_KAVERI:
2306  		case CHIP_BONAIRE:
2307  		case CHIP_HAWAII:
2308  		case CHIP_KABINI:
2309  		case CHIP_MULLINS:
2310  			dev_info(&pdev->dev,
2311  				 "CIK support provided by radeon.\n");
2312  			dev_info(&pdev->dev,
2313  				 "Use radeon.cik_support=0 amdgpu.cik_support=1 to override.\n"
2314  				);
2315  			return -ENODEV;
2316  		}
2317  	}
2318  #endif
2319  
2320  	adev = devm_drm_dev_alloc(&pdev->dev, &amdgpu_kms_driver, typeof(*adev), ddev);
2321  	if (IS_ERR(adev))
2322  		return PTR_ERR(adev);
2323  
2324  	adev->dev  = &pdev->dev;
2325  	adev->pdev = pdev;
2326  	ddev = adev_to_drm(adev);
2327  
2328  	if (!supports_atomic)
2329  		ddev->driver_features &= ~DRIVER_ATOMIC;
2330  
2331  	ret = pci_enable_device(pdev);
2332  	if (ret)
2333  		return ret;
2334  
2335  	pci_set_drvdata(pdev, ddev);
2336  
2337  	amdgpu_init_debug_options(adev);
2338  
2339  	ret = amdgpu_driver_load_kms(adev, flags);
2340  	if (ret)
2341  		goto err_pci;
2342  
2343  retry_init:
2344  	ret = drm_dev_register(ddev, flags);
2345  	if (ret == -EAGAIN && ++retry <= 3) {
2346  		DRM_INFO("retry init %d\n", retry);
2347  		/* Don't request EX mode too frequently, which is treated as an attack */
2348  		msleep(5000);
2349  		goto retry_init;
2350  	} else if (ret) {
2351  		goto err_pci;
2352  	}
2353  
2354  	ret = amdgpu_xcp_dev_register(adev, ent);
2355  	if (ret)
2356  		goto err_pci;
2357  
2358  	ret = amdgpu_amdkfd_drm_client_create(adev);
2359  	if (ret)
2360  		goto err_pci;
2361  
2362  	/*
2363  	 * 1. don't init fbdev on hw without DCE
2364  	 * 2. don't init fbdev if there are no connectors
2365  	 */
2366  	if (adev->mode_info.mode_config_initialized &&
2367  	    !list_empty(&adev_to_drm(adev)->mode_config.connector_list)) {
2368  		/* select 8 bpp console on low vram cards */
2369  		if (adev->gmc.real_vram_size <= (32*1024*1024))
2370  			drm_fbdev_ttm_setup(adev_to_drm(adev), 8);
2371  		else
2372  			drm_fbdev_ttm_setup(adev_to_drm(adev), 32);
2373  	}
2374  
2375  	ret = amdgpu_debugfs_init(adev);
2376  	if (ret)
2377  		DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
2378  
2379  	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
2380  		/* only need to skip on ATPX */
2381  		if (amdgpu_device_supports_px(ddev))
2382  			dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE);
2383  		/* we want direct complete for BOCO */
2384  		if (amdgpu_device_supports_boco(ddev))
2385  			dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE |
2386  						DPM_FLAG_SMART_SUSPEND |
2387  						DPM_FLAG_MAY_SKIP_RESUME);
2388  		pm_runtime_use_autosuspend(ddev->dev);
2389  		pm_runtime_set_autosuspend_delay(ddev->dev, 5000);
2390  
2391  		pm_runtime_allow(ddev->dev);
2392  
2393  		pm_runtime_mark_last_busy(ddev->dev);
2394  		pm_runtime_put_autosuspend(ddev->dev);
2395  
2396  		pci_wake_from_d3(pdev, TRUE);
2397  
2398  		/*
2399  		 * For runpm implemented via BACO, PMFW will handle the
2400  		 * timing for BACO in and out:
2401  		 *   - put ASIC into BACO state only when both video and
2402  		 *     audio functions are in D3 state.
2403  		 *   - pull ASIC out of BACO state when either video or
2404  		 *     audio function is in D0 state.
2405  		 * Also, at startup, PMFW assumes both functions are in
2406  		 * D0 state.
2407  		 *
2408  		 * So if snd driver was loaded prior to amdgpu driver
2409  		 * and audio function was put into D3 state, there will
2410  		 * be no PMFW-aware D-state transition (D0->D3) on runpm
2411  		 * suspend, and thus BACO will not be kicked in correctly.
2412  		 *
2413  		 * Via amdgpu_get_secondary_funcs(), the audio dev is put
2414  		 * into D0 state. Then there will be a PMFW-aware D-state
2415  		 * transition (D0->D3) on runpm suspend.
2416  		 */
2417  		if (amdgpu_device_supports_baco(ddev) &&
2418  		    !(adev->flags & AMD_IS_APU) &&
2419  		    (adev->asic_type >= CHIP_NAVI10))
2420  			amdgpu_get_secondary_funcs(adev);
2421  	}
2422  
2423  	return 0;
2424  
2425  err_pci:
2426  	pci_disable_device(pdev);
2427  	return ret;
2428  }
2429  
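/* PCI remove: unplug the DRM device from userspace, tear down the KMS state
 * and disable the PCI device, waiting for pending PCIe transactions so no
 * DMA is left in flight.
 */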
2430  static void
2431  amdgpu_pci_remove(struct pci_dev *pdev)
2432  {
2433  	struct drm_device *dev = pci_get_drvdata(pdev);
2434  	struct amdgpu_device *adev = drm_to_adev(dev);
2435  
2436  	amdgpu_xcp_dev_unplug(adev);
2437  	drm_dev_unplug(dev);
2438  
2439  	if (adev->pm.rpm_mode != AMDGPU_RUNPM_NONE) {
2440  		pm_runtime_get_sync(dev->dev);
2441  		pm_runtime_forbid(dev->dev);
2442  	}
2443  
2444  	amdgpu_driver_unload_kms(dev);
2445  
2446  	/*
2447  	 * Flush any in flight DMA operations from device.
2448  	 * Clear the Bus Master Enable bit and then wait on the PCIe Device
2449  	 * Status Transactions Pending bit.
2450  	 */
2451  	pci_disable_device(pdev);
2452  	pci_wait_for_pending_transaction(pdev);
2453  }
2454  
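/* PCI shutdown: quiesce the device by suspending its IP blocks, unless a
 * RAS fatal-error interrupt has already been raised.
 */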
2455  static void
2456  amdgpu_pci_shutdown(struct pci_dev *pdev)
2457  {
2458  	struct drm_device *dev = pci_get_drvdata(pdev);
2459  	struct amdgpu_device *adev = drm_to_adev(dev);
2460  
2461  	if (amdgpu_ras_intr_triggered())
2462  		return;
2463  
2464  	/* If we are running in a VM, make sure the device
2465  	 * is torn down properly on reboot/shutdown.
2466  	 * Unfortunately we can't detect certain
2467  	 * hypervisors, so just do this all the time.
2468  	 */
2469  	if (!amdgpu_passthrough(adev))
2470  		adev->mp1_state = PP_MP1_STATE_UNLOAD;
2471  	amdgpu_device_ip_suspend(adev);
2472  	adev->mp1_state = PP_MP1_STATE_NONE;
2473  }
2474  
2475  /**
2476   * amdgpu_drv_delayed_reset_work_handler - work handler for the deferred reset of devices in the mgpu list
2477   *
2478   * @work: work_struct.
2479   */
2480  static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work)
2481  {
2482  	struct list_head device_list;
2483  	struct amdgpu_device *adev;
2484  	int i, r;
2485  	struct amdgpu_reset_context reset_context;
2486  
2487  	memset(&reset_context, 0, sizeof(reset_context));
2488  
2489  	mutex_lock(&mgpu_info.mutex);
2490  	if (mgpu_info.pending_reset == true) {
2491  		mutex_unlock(&mgpu_info.mutex);
2492  		return;
2493  	}
2494  	mgpu_info.pending_reset = true;
2495  	mutex_unlock(&mgpu_info.mutex);
2496  
2497  	/* Use a common context, just need to make sure full reset is done */
2498  	reset_context.method = AMD_RESET_METHOD_NONE;
2499  	set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2500  
2501  	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2502  		adev = mgpu_info.gpu_ins[i].adev;
2503  		reset_context.reset_req_dev = adev;
2504  		r = amdgpu_device_pre_asic_reset(adev, &reset_context);
2505  		if (r) {
2506  			dev_err(adev->dev, "GPU pre asic reset failed with err %d for drm dev %s ",
2507  				r, adev_to_drm(adev)->unique);
2508  		}
2509  		if (!queue_work(system_unbound_wq, &adev->xgmi_reset_work))
2510  			r = -EALREADY;
2511  	}
2512  	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2513  		adev = mgpu_info.gpu_ins[i].adev;
2514  		flush_work(&adev->xgmi_reset_work);
2515  		adev->gmc.xgmi.pending_reset = false;
2516  	}
2517  
2518  	/* reset function will rebuild the xgmi hive info, clear it now */
2519  	for (i = 0; i < mgpu_info.num_dgpu; i++)
2520  		amdgpu_xgmi_remove_device(mgpu_info.gpu_ins[i].adev);
2521  
2522  	INIT_LIST_HEAD(&device_list);
2523  
2524  	for (i = 0; i < mgpu_info.num_dgpu; i++)
2525  		list_add_tail(&mgpu_info.gpu_ins[i].adev->reset_list, &device_list);
2526  
2527  	/* unregister the GPU first, reset function will add them back */
2528  	list_for_each_entry(adev, &device_list, reset_list)
2529  		amdgpu_unregister_gpu_instance(adev);
2530  
2531  	/* Use a common context, just need to make sure full reset is done */
2532  	set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags);
2533  	set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
2534  	r = amdgpu_do_asic_reset(&device_list, &reset_context);
2535  
2536  	if (r) {
2537  		DRM_ERROR("reinit gpus failure");
2538  		return;
2539  	}
2540  	for (i = 0; i < mgpu_info.num_dgpu; i++) {
2541  		adev = mgpu_info.gpu_ins[i].adev;
2542  		if (!adev->kfd.init_complete) {
2543  			kgd2kfd_init_zone_device(adev);
2544  			amdgpu_amdkfd_device_init(adev);
2545  			amdgpu_amdkfd_drm_client_create(adev);
2546  		}
2547  		amdgpu_ttm_set_buffer_funcs_status(adev, true);
2548  	}
2549  }
2550  
2551  static int amdgpu_pmops_prepare(struct device *dev)
2552  {
2553  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2554  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2555  
2556  	/* Return a positive number here so
2557  	 * DPM_FLAG_SMART_SUSPEND works properly
2558  	 */
2559  	if (amdgpu_device_supports_boco(drm_dev) &&
2560  	    pm_runtime_suspended(dev))
2561  		return 1;
2562  
2563  	/* if neither s3 nor s2idle will be used for the device,
2564  	 * then skip suspend
2565  	 */
2566  	if (!amdgpu_acpi_is_s0ix_active(adev) &&
2567  	    !amdgpu_acpi_is_s3_active(adev))
2568  		return 1;
2569  
2570  	return amdgpu_device_prepare(drm_dev);
2571  }
2572  
2573  static void amdgpu_pmops_complete(struct device *dev)
2574  {
2575  	/* nothing to do */
2576  }
2577  
2578  static int amdgpu_pmops_suspend(struct device *dev)
2579  {
2580  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2581  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2582  
2583  	adev->suspend_complete = false;
2584  	if (amdgpu_acpi_is_s0ix_active(adev))
2585  		adev->in_s0ix = true;
2586  	else if (amdgpu_acpi_is_s3_active(adev))
2587  		adev->in_s3 = true;
2588  	if (!adev->in_s0ix && !adev->in_s3)
2589  		return 0;
2590  	return amdgpu_device_suspend(drm_dev, true);
2591  }
2592  
2593  static int amdgpu_pmops_suspend_noirq(struct device *dev)
2594  {
2595  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2596  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2597  
2598  	adev->suspend_complete = true;
2599  	if (amdgpu_acpi_should_gpu_reset(adev))
2600  		return amdgpu_asic_reset(adev);
2601  
2602  	return 0;
2603  }
2604  
2605  static int amdgpu_pmops_resume(struct device *dev)
2606  {
2607  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2608  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2609  	int r;
2610  
2611  	if (!adev->in_s0ix && !adev->in_s3)
2612  		return 0;
2613  
2614  	/* Avoids register access if device is physically gone */
2615  	if (!pci_device_is_present(adev->pdev))
2616  		adev->no_hw_access = true;
2617  
2618  	r = amdgpu_device_resume(drm_dev, true);
2619  	if (amdgpu_acpi_is_s0ix_active(adev))
2620  		adev->in_s0ix = false;
2621  	else
2622  		adev->in_s3 = false;
2623  	return r;
2624  }
2625  
2626  static int amdgpu_pmops_freeze(struct device *dev)
2627  {
2628  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2629  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2630  	int r;
2631  
2632  	adev->in_s4 = true;
2633  	r = amdgpu_device_suspend(drm_dev, true);
2634  	adev->in_s4 = false;
2635  	if (r)
2636  		return r;
2637  
2638  	if (amdgpu_acpi_should_gpu_reset(adev))
2639  		return amdgpu_asic_reset(adev);
2640  	return 0;
2641  }
2642  
2643  static int amdgpu_pmops_thaw(struct device *dev)
2644  {
2645  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2646  
2647  	return amdgpu_device_resume(drm_dev, true);
2648  }
2649  
2650  static int amdgpu_pmops_poweroff(struct device *dev)
2651  {
2652  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2653  
2654  	return amdgpu_device_suspend(drm_dev, true);
2655  }
2656  
2657  static int amdgpu_pmops_restore(struct device *dev)
2658  {
2659  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2660  
2661  	return amdgpu_device_resume(drm_dev, true);
2662  }
2663  
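/* Return -EBUSY if any display is still in use (a connector is connected, a
 * CRTC is active, or a connector is in DPMS on), so the caller can defer
 * runtime suspend; return 0 when all displays are idle.
 */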
2664  static int amdgpu_runtime_idle_check_display(struct device *dev)
2665  {
2666  	struct pci_dev *pdev = to_pci_dev(dev);
2667  	struct drm_device *drm_dev = pci_get_drvdata(pdev);
2668  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2669  
2670  	if (adev->mode_info.num_crtc) {
2671  		struct drm_connector *list_connector;
2672  		struct drm_connector_list_iter iter;
2673  		int ret = 0;
2674  
2675  		if (amdgpu_runtime_pm != -2) {
2676  			/* XXX: Return busy if any displays are connected to avoid
2677  			 * possible display wakeups after runtime resume due to
2678  			 * hotplug events in case any displays were connected while
2679  			 * the GPU was in suspend.  Remove this once that is fixed.
2680  			 */
2681  			mutex_lock(&drm_dev->mode_config.mutex);
2682  			drm_connector_list_iter_begin(drm_dev, &iter);
2683  			drm_for_each_connector_iter(list_connector, &iter) {
2684  				if (list_connector->status == connector_status_connected) {
2685  					ret = -EBUSY;
2686  					break;
2687  				}
2688  			}
2689  			drm_connector_list_iter_end(&iter);
2690  			mutex_unlock(&drm_dev->mode_config.mutex);
2691  
2692  			if (ret)
2693  				return ret;
2694  		}
2695  
2696  		if (adev->dc_enabled) {
2697  			struct drm_crtc *crtc;
2698  
2699  			drm_for_each_crtc(crtc, drm_dev) {
2700  				drm_modeset_lock(&crtc->mutex, NULL);
2701  				if (crtc->state->active)
2702  					ret = -EBUSY;
2703  				drm_modeset_unlock(&crtc->mutex);
2704  				if (ret < 0)
2705  					break;
2706  			}
2707  		} else {
2708  			mutex_lock(&drm_dev->mode_config.mutex);
2709  			drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
2710  
2711  			drm_connector_list_iter_begin(drm_dev, &iter);
2712  			drm_for_each_connector_iter(list_connector, &iter) {
2713  				if (list_connector->dpms == DRM_MODE_DPMS_ON) {
2714  					ret = -EBUSY;
2715  					break;
2716  				}
2717  			}
2718  
2719  			drm_connector_list_iter_end(&iter);
2720  
2721  			drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
2722  			mutex_unlock(&drm_dev->mode_config.mutex);
2723  		}
2724  		if (ret)
2725  			return ret;
2726  	}
2727  
2728  	return 0;
2729  }
2730  
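/* Runtime suspend: bail out if displays are still in use, wait for all rings
 * to drain, then power the device down using the platform's mechanism
 * (PX/ATPX PCI D3cold, BOCO, or BACO/BAMACO).
 */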
2731  static int amdgpu_pmops_runtime_suspend(struct device *dev)
2732  {
2733  	struct pci_dev *pdev = to_pci_dev(dev);
2734  	struct drm_device *drm_dev = pci_get_drvdata(pdev);
2735  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2736  	int ret, i;
2737  
2738  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
2739  		pm_runtime_forbid(dev);
2740  		return -EBUSY;
2741  	}
2742  
2743  	ret = amdgpu_runtime_idle_check_display(dev);
2744  	if (ret)
2745  		return ret;
2746  
2747  	/* wait for all rings to drain before suspending */
2748  	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
2749  		struct amdgpu_ring *ring = adev->rings[i];
2750  
2751  		if (ring && ring->sched.ready) {
2752  			ret = amdgpu_fence_wait_empty(ring);
2753  			if (ret)
2754  				return -EBUSY;
2755  		}
2756  	}
2757  
2758  	adev->in_runpm = true;
2759  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
2760  		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2761  
2762  	/*
2763  	 * By setting mp1_state as PP_MP1_STATE_UNLOAD, MP1 will do some
2764  	 * proper cleanups and put itself into a state ready for PNP. That
2765  	 * can address some random resume failures observed on BOCO capable
2766  	 * platforms.
2767  	 * TODO: this may also be needed for PX capable platforms.
2768  	 */
2769  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
2770  		adev->mp1_state = PP_MP1_STATE_UNLOAD;
2771  
2772  	ret = amdgpu_device_prepare(drm_dev);
2773  	if (ret)
2774  		return ret;
2775  	ret = amdgpu_device_suspend(drm_dev, false);
2776  	if (ret) {
2777  		adev->in_runpm = false;
2778  		if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
2779  			adev->mp1_state = PP_MP1_STATE_NONE;
2780  		return ret;
2781  	}
2782  
2783  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO)
2784  		adev->mp1_state = PP_MP1_STATE_NONE;
2785  
2786  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
2787  		/* Only need to handle PCI state in the driver for ATPX.
2788  		 * PCI core handles it for _PR3.
2789  		 */
2790  		amdgpu_device_cache_pci_state(pdev);
2791  		pci_disable_device(pdev);
2792  		pci_ignore_hotplug(pdev);
2793  		pci_set_power_state(pdev, PCI_D3cold);
2794  		drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
2795  	} else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
2796  		/* nothing to do */
2797  	} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2798  			(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
2799  		amdgpu_device_baco_enter(drm_dev);
2800  	}
2801  
2802  	dev_dbg(&pdev->dev, "asic/device is runtime suspended\n");
2803  
2804  	return 0;
2805  }
2806  
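/* Runtime resume: restore PCI power/state and re-enable the device for PX,
 * re-enable bus mastering for BOCO, or exit BACO, then resume the device.
 */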
2807  static int amdgpu_pmops_runtime_resume(struct device *dev)
2808  {
2809  	struct pci_dev *pdev = to_pci_dev(dev);
2810  	struct drm_device *drm_dev = pci_get_drvdata(pdev);
2811  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2812  	int ret;
2813  
2814  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE)
2815  		return -EINVAL;
2816  
2817  	/* Avoids register access if device is physically gone */
2818  	if (!pci_device_is_present(adev->pdev))
2819  		adev->no_hw_access = true;
2820  
2821  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX) {
2822  		drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
2823  
2824  		/* Only need to handle PCI state in the driver for ATPX.
2825  		 * PCI core handles it for _PR3.
2826  		 */
2827  		pci_set_power_state(pdev, PCI_D0);
2828  		amdgpu_device_load_pci_state(pdev);
2829  		ret = pci_enable_device(pdev);
2830  		if (ret)
2831  			return ret;
2832  		pci_set_master(pdev);
2833  	} else if (adev->pm.rpm_mode == AMDGPU_RUNPM_BOCO) {
2834  		/* Only need to handle PCI state in the driver for ATPX.
2835  		 * PCI core handles it for _PR3.
2836  		 */
2837  		pci_set_master(pdev);
2838  	} else if ((adev->pm.rpm_mode == AMDGPU_RUNPM_BACO) ||
2839  			(adev->pm.rpm_mode == AMDGPU_RUNPM_BAMACO)) {
2840  		amdgpu_device_baco_exit(drm_dev);
2841  	}
2842  	ret = amdgpu_device_resume(drm_dev, false);
2843  	if (ret) {
2844  		if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
2845  			pci_disable_device(pdev);
2846  		return ret;
2847  	}
2848  
2849  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_PX)
2850  		drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
2851  	adev->in_runpm = false;
2852  	return 0;
2853  }
2854  
2855  static int amdgpu_pmops_runtime_idle(struct device *dev)
2856  {
2857  	struct drm_device *drm_dev = dev_get_drvdata(dev);
2858  	struct amdgpu_device *adev = drm_to_adev(drm_dev);
2859  	int ret;
2860  
2861  	if (adev->pm.rpm_mode == AMDGPU_RUNPM_NONE) {
2862  		pm_runtime_forbid(dev);
2863  		return -EBUSY;
2864  	}
2865  
2866  	ret = amdgpu_runtime_idle_check_display(dev);
2867  
2868  	pm_runtime_mark_last_busy(dev);
2869  	pm_runtime_autosuspend(dev);
2870  	return ret;
2871  }
2872  
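/* Ioctl entry point: take a runtime-PM reference so the GPU is awake while
 * the ioctl runs, forward to drm_ioctl(), then mark the device busy and drop
 * the reference with autosuspend.
 */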
2873  long amdgpu_drm_ioctl(struct file *filp,
2874  		      unsigned int cmd, unsigned long arg)
2875  {
2876  	struct drm_file *file_priv = filp->private_data;
2877  	struct drm_device *dev;
2878  	long ret;
2879  
2880  	dev = file_priv->minor->dev;
2881  	ret = pm_runtime_get_sync(dev->dev);
2882  	if (ret < 0)
2883  		goto out;
2884  
2885  	ret = drm_ioctl(filp, cmd, arg);
2886  
2887  	pm_runtime_mark_last_busy(dev->dev);
2888  out:
2889  	pm_runtime_put_autosuspend(dev->dev);
2890  	return ret;
2891  }
2892  
2893  static const struct dev_pm_ops amdgpu_pm_ops = {
2894  	.prepare = amdgpu_pmops_prepare,
2895  	.complete = amdgpu_pmops_complete,
2896  	.suspend = amdgpu_pmops_suspend,
2897  	.suspend_noirq = amdgpu_pmops_suspend_noirq,
2898  	.resume = amdgpu_pmops_resume,
2899  	.freeze = amdgpu_pmops_freeze,
2900  	.thaw = amdgpu_pmops_thaw,
2901  	.poweroff = amdgpu_pmops_poweroff,
2902  	.restore = amdgpu_pmops_restore,
2903  	.runtime_suspend = amdgpu_pmops_runtime_suspend,
2904  	.runtime_resume = amdgpu_pmops_runtime_resume,
2905  	.runtime_idle = amdgpu_pmops_runtime_idle,
2906  };
2907  
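/* flush() file operation: on close(), wait (bounded by
 * MAX_WAIT_SCHED_ENTITY_Q_EMPTY) for the file's context entities and VM to
 * become idle before the file is released.
 */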
2908  static int amdgpu_flush(struct file *f, fl_owner_t id)
2909  {
2910  	struct drm_file *file_priv = f->private_data;
2911  	struct amdgpu_fpriv *fpriv = file_priv->driver_priv;
2912  	long timeout = MAX_WAIT_SCHED_ENTITY_Q_EMPTY;
2913  
2914  	timeout = amdgpu_ctx_mgr_entity_flush(&fpriv->ctx_mgr, timeout);
2915  	timeout = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
2916  
2917  	return timeout >= 0 ? 0 : timeout;
2918  }
2919  
2920  static const struct file_operations amdgpu_driver_kms_fops = {
2921  	.owner = THIS_MODULE,
2922  	.open = drm_open,
2923  	.flush = amdgpu_flush,
2924  	.release = drm_release,
2925  	.unlocked_ioctl = amdgpu_drm_ioctl,
2926  	.mmap = drm_gem_mmap,
2927  	.poll = drm_poll,
2928  	.read = drm_read,
2929  #ifdef CONFIG_COMPAT
2930  	.compat_ioctl = amdgpu_kms_compat_ioctl,
2931  #endif
2932  #ifdef CONFIG_PROC_FS
2933  	.show_fdinfo = drm_show_fdinfo,
2934  #endif
2935  	.fop_flags = FOP_UNSIGNED_OFFSET,
2936  };
2937  
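/* Map a struct file back to its amdgpu per-file private data, first checking
 * that the file was actually opened against this driver's fops.
 */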
2938  int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
2939  {
2940  	struct drm_file *file;
2941  
2942  	if (!filp)
2943  		return -EINVAL;
2944  
2945  	if (filp->f_op != &amdgpu_driver_kms_fops)
2946  		return -EINVAL;
2947  
2948  	file = filp->private_data;
2949  	*fpriv = file->driver_priv;
2950  	return 0;
2951  }
2952  
2953  const struct drm_ioctl_desc amdgpu_ioctls_kms[] = {
2954  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_CREATE, amdgpu_gem_create_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2955  	DRM_IOCTL_DEF_DRV(AMDGPU_CTX, amdgpu_ctx_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2956  	DRM_IOCTL_DEF_DRV(AMDGPU_VM, amdgpu_vm_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2957  	DRM_IOCTL_DEF_DRV(AMDGPU_SCHED, amdgpu_sched_ioctl, DRM_MASTER),
2958  	DRM_IOCTL_DEF_DRV(AMDGPU_BO_LIST, amdgpu_bo_list_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2959  	DRM_IOCTL_DEF_DRV(AMDGPU_FENCE_TO_HANDLE, amdgpu_cs_fence_to_handle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2960  	/* KMS */
2961  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_MMAP, amdgpu_gem_mmap_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2962  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_WAIT_IDLE, amdgpu_gem_wait_idle_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2963  	DRM_IOCTL_DEF_DRV(AMDGPU_CS, amdgpu_cs_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2964  	DRM_IOCTL_DEF_DRV(AMDGPU_INFO, amdgpu_info_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2965  	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_CS, amdgpu_cs_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2966  	DRM_IOCTL_DEF_DRV(AMDGPU_WAIT_FENCES, amdgpu_cs_wait_fences_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2967  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_METADATA, amdgpu_gem_metadata_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2968  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_VA, amdgpu_gem_va_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2969  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_OP, amdgpu_gem_op_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2970  	DRM_IOCTL_DEF_DRV(AMDGPU_GEM_USERPTR, amdgpu_gem_userptr_ioctl, DRM_AUTH|DRM_RENDER_ALLOW),
2971  };
2972  
static const struct drm_driver amdgpu_kms_driver = {
	.driver_features =
	    DRIVER_ATOMIC |
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.open = amdgpu_driver_open_kms,
	.postclose = amdgpu_driver_postclose_kms,
	.ioctls = amdgpu_ioctls_kms,
	.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
	.dumb_create = amdgpu_mode_dumb_create,
	.dumb_map_offset = amdgpu_mode_dumb_mmap,
	.fops = &amdgpu_driver_kms_fops,
	.release = &amdgpu_driver_release_kms,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = amdgpu_show_fdinfo,
#endif

	.gem_prime_import = amdgpu_gem_prime_import,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = KMS_DRIVER_MAJOR,
	.minor = KMS_DRIVER_MINOR,
	.patchlevel = KMS_DRIVER_PATCHLEVEL,
};

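/*
 * Render-only variant of the driver above (no DRIVER_MODESET/DRIVER_ATOMIC
 * and no fdinfo), used for the per-partition device nodes created through
 * the amdxcp helper driver.
 */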
const struct drm_driver amdgpu_partition_driver = {
	.driver_features =
	    DRIVER_GEM | DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE,
	.open = amdgpu_driver_open_kms,
	.postclose = amdgpu_driver_postclose_kms,
	.ioctls = amdgpu_ioctls_kms,
	.num_ioctls = ARRAY_SIZE(amdgpu_ioctls_kms),
	.dumb_create = amdgpu_mode_dumb_create,
	.dumb_map_offset = amdgpu_mode_dumb_mmap,
	.fops = &amdgpu_driver_kms_fops,
	.release = &amdgpu_driver_release_kms,

	.gem_prime_import = amdgpu_gem_prime_import,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = KMS_DRIVER_MAJOR,
	.minor = KMS_DRIVER_MINOR,
	.patchlevel = KMS_DRIVER_PATCHLEVEL,
};

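/* Hooks into the PCI error recovery (AER) machinery. */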
static struct pci_error_handlers amdgpu_pci_err_handler = {
	.error_detected	= amdgpu_pci_error_detected,
	.mmio_enabled	= amdgpu_pci_mmio_enabled,
	.slot_reset	= amdgpu_pci_slot_reset,
	.resume		= amdgpu_pci_resume,
};

static const struct attribute_group *amdgpu_sysfs_groups[] = {
	&amdgpu_vram_mgr_attr_group,
	&amdgpu_gtt_mgr_attr_group,
	&amdgpu_flash_attr_group,
	NULL,
};

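/*
 * PCI driver glue: device probe/remove/shutdown, power management ops,
 * error recovery hooks and the default sysfs attribute groups.
 */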
static struct pci_driver amdgpu_kms_pci_driver = {
	.name = DRIVER_NAME,
	.id_table = pciidlist,
	.probe = amdgpu_pci_probe,
	.remove = amdgpu_pci_remove,
	.shutdown = amdgpu_pci_shutdown,
	.driver.pm = &amdgpu_pm_ops,
	.err_handler = &amdgpu_pci_err_handler,
	.dev_groups = amdgpu_sysfs_groups,
};

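/*
 * Module init: refuse to load when only firmware drivers are allowed, set up
 * the sync and fence slab caches, register the ATPX handler, detect ACPI
 * features, bring up KFD and finally register the PCI driver.
 */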
static int __init amdgpu_init(void)
{
	int r;

	if (drm_firmware_drivers_only())
		return -EINVAL;

	r = amdgpu_sync_init();
	if (r)
		goto error_sync;

	r = amdgpu_fence_slab_init();
	if (r)
		goto error_fence;

	DRM_INFO("amdgpu kernel modesetting enabled.\n");
	amdgpu_register_atpx_handler();
	amdgpu_acpi_detect();

	/* Ignore KFD init failures. Normal when CONFIG_HSA_AMD is not set. */
	amdgpu_amdkfd_init();

	/* let modprobe override vga console setting */
	return pci_register_driver(&amdgpu_kms_pci_driver);

error_fence:
	amdgpu_sync_fini();

error_sync:
	return r;
}

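/*
 * Module exit: tear everything down in roughly the reverse order of
 * amdgpu_init(), making sure KFD and the PCI driver are gone before the
 * caches are destroyed and outstanding MMU notifiers have finished.
 */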
static void __exit amdgpu_exit(void)
{
	amdgpu_amdkfd_fini();
	pci_unregister_driver(&amdgpu_kms_pci_driver);
	amdgpu_unregister_atpx_handler();
	amdgpu_acpi_release();
	amdgpu_sync_fini();
	amdgpu_fence_slab_fini();
	mmu_notifier_synchronize();
	amdgpu_xcp_drv_release();
}

module_init(amdgpu_init);
module_exit(amdgpu_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");