1  /*
2   * Copyright 2014 Advanced Micro Devices, Inc.
3   *
4   * Permission is hereby granted, free of charge, to any person obtaining a
5   * copy of this software and associated documentation files (the "Software"),
6   * to deal in the Software without restriction, including without limitation
7   * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8   * and/or sell copies of the Software, and to permit persons to whom the
9   * Software is furnished to do so, subject to the following conditions:
10   *
11   * The above copyright notice and this permission notice shall be included in
12   * all copies or substantial portions of the Software.
13   *
14   * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15   * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16   * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17   * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18   * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19   * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20   * OTHER DEALINGS IN THE SOFTWARE.
21   *
22   */
23  
24  #include <linux/firmware.h>
25  #include <linux/slab.h>
26  #include <linux/module.h>
27  
28  #include "amdgpu.h"
29  #include "amdgpu_ucode.h"
30  
31  #define AMDGPU_UCODE_NAME_MAX		(128)
32  
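/* Dump the header fields shared by all amdgpu ucode images at DRM debug level */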
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
34  {
35  	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
36  	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
37  	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
38  	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
39  	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
40  	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
41  	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
42  	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
43  	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
44  		  le32_to_cpu(hdr->ucode_array_offset_bytes));
45  	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
46  }
47  
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
49  {
50  	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
51  	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
52  
53  	DRM_DEBUG("MC\n");
54  	amdgpu_ucode_print_common_hdr(hdr);
55  
56  	if (version_major == 1) {
57  		const struct mc_firmware_header_v1_0 *mc_hdr =
58  			container_of(hdr, struct mc_firmware_header_v1_0, header);
59  
60  		DRM_DEBUG("io_debug_size_bytes: %u\n",
61  			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
62  		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
63  			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
64  	} else {
65  		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
66  	}
67  }
68  
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
70  {
71  	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
72  	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
73  	const struct smc_firmware_header_v1_0 *v1_0_hdr;
74  	const struct smc_firmware_header_v2_0 *v2_0_hdr;
75  	const struct smc_firmware_header_v2_1 *v2_1_hdr;
76  
77  	DRM_DEBUG("SMC\n");
78  	amdgpu_ucode_print_common_hdr(hdr);
79  
80  	if (version_major == 1) {
81  		v1_0_hdr = container_of(hdr, struct smc_firmware_header_v1_0, header);
82  		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(v1_0_hdr->ucode_start_addr));
83  	} else if (version_major == 2) {
84  		switch (version_minor) {
85  		case 0:
86  			v2_0_hdr = container_of(hdr, struct smc_firmware_header_v2_0, v1_0.header);
87  			DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_offset_bytes));
88  			DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_0_hdr->ppt_size_bytes));
89  			break;
90  		case 1:
91  			v2_1_hdr = container_of(hdr, struct smc_firmware_header_v2_1, v1_0.header);
92  			DRM_DEBUG("pptable_count: %u\n", le32_to_cpu(v2_1_hdr->pptable_count));
93  			DRM_DEBUG("pptable_entry_offset: %u\n", le32_to_cpu(v2_1_hdr->pptable_entry_offset));
94  			break;
95  		default:
96  			break;
97  		}
98  
99  	} else {
100  		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
101  	}
102  }
103  
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
105  {
106  	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
107  	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
108  
109  	DRM_DEBUG("GFX\n");
110  	amdgpu_ucode_print_common_hdr(hdr);
111  
112  	if (version_major == 1) {
113  		const struct gfx_firmware_header_v1_0 *gfx_hdr =
114  			container_of(hdr, struct gfx_firmware_header_v1_0, header);
115  
116  		DRM_DEBUG("ucode_feature_version: %u\n",
117  			  le32_to_cpu(gfx_hdr->ucode_feature_version));
118  		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
119  		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
120  	} else if (version_major == 2) {
121  		const struct gfx_firmware_header_v2_0 *gfx_hdr =
122  			container_of(hdr, struct gfx_firmware_header_v2_0, header);
123  
124  		DRM_DEBUG("ucode_feature_version: %u\n",
125  			  le32_to_cpu(gfx_hdr->ucode_feature_version));
126  	} else {
127  		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
128  	}
129  }
130  
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
132  {
133  	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
134  	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
135  
136  	DRM_DEBUG("RLC\n");
137  	amdgpu_ucode_print_common_hdr(hdr);
138  
139  	if (version_major == 1) {
140  		const struct rlc_firmware_header_v1_0 *rlc_hdr =
141  			container_of(hdr, struct rlc_firmware_header_v1_0, header);
142  
143  		DRM_DEBUG("ucode_feature_version: %u\n",
144  			  le32_to_cpu(rlc_hdr->ucode_feature_version));
145  		DRM_DEBUG("save_and_restore_offset: %u\n",
146  			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
147  		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
148  			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
149  		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
150  			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
151  		DRM_DEBUG("master_pkt_description_offset: %u\n",
152  			  le32_to_cpu(rlc_hdr->master_pkt_description_offset));
153  	} else if (version_major == 2) {
154  		const struct rlc_firmware_header_v2_0 *rlc_hdr =
155  			container_of(hdr, struct rlc_firmware_header_v2_0, header);
156  		const struct rlc_firmware_header_v2_1 *rlc_hdr_v2_1 =
157  			container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
158  		const struct rlc_firmware_header_v2_2 *rlc_hdr_v2_2 =
159  			container_of(rlc_hdr_v2_1, struct rlc_firmware_header_v2_2, v2_1);
160  		const struct rlc_firmware_header_v2_3 *rlc_hdr_v2_3 =
161  			container_of(rlc_hdr_v2_2, struct rlc_firmware_header_v2_3, v2_2);
162  		const struct rlc_firmware_header_v2_4 *rlc_hdr_v2_4 =
163  			container_of(rlc_hdr_v2_3, struct rlc_firmware_header_v2_4, v2_3);
164  
165  		switch (version_minor) {
166  		case 0:
167  			/* rlc_hdr v2_0 */
168  			DRM_DEBUG("ucode_feature_version: %u\n",
169  				  le32_to_cpu(rlc_hdr->ucode_feature_version));
170  			DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
171  			DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
172  			DRM_DEBUG("save_and_restore_offset: %u\n",
173  				  le32_to_cpu(rlc_hdr->save_and_restore_offset));
174  			DRM_DEBUG("clear_state_descriptor_offset: %u\n",
175  				  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
176  			DRM_DEBUG("avail_scratch_ram_locations: %u\n",
177  				  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
178  			DRM_DEBUG("reg_restore_list_size: %u\n",
179  				  le32_to_cpu(rlc_hdr->reg_restore_list_size));
180  			DRM_DEBUG("reg_list_format_start: %u\n",
181  				  le32_to_cpu(rlc_hdr->reg_list_format_start));
182  			DRM_DEBUG("reg_list_format_separate_start: %u\n",
183  				  le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
184  			DRM_DEBUG("starting_offsets_start: %u\n",
185  				  le32_to_cpu(rlc_hdr->starting_offsets_start));
186  			DRM_DEBUG("reg_list_format_size_bytes: %u\n",
187  				  le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
188  			DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
189  				  le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
190  			DRM_DEBUG("reg_list_size_bytes: %u\n",
191  				  le32_to_cpu(rlc_hdr->reg_list_size_bytes));
192  			DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
193  				  le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
194  			DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
195  				  le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
196  			DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
197  				  le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
198  			DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
199  				  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
200  			DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
201  				  le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
202  			break;
203  		case 1:
204  			/* rlc_hdr v2_1 */
205  			DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
206  				  le32_to_cpu(rlc_hdr_v2_1->reg_list_format_direct_reg_list_length));
207  			DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
208  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_ucode_ver));
209  			DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
210  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_feature_ver));
			DRM_DEBUG("save_restore_list_cntl_size_bytes: %u\n",
212  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_size_bytes));
213  			DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
214  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_cntl_offset_bytes));
215  			DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
216  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_ucode_ver));
217  			DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
218  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_feature_ver));
			DRM_DEBUG("save_restore_list_gpm_size_bytes: %u\n",
220  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_size_bytes));
221  			DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
222  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_gpm_offset_bytes));
223  			DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
224  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_ucode_ver));
225  			DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
226  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_feature_ver));
			DRM_DEBUG("save_restore_list_srm_size_bytes: %u\n",
228  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_size_bytes));
229  			DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
230  				  le32_to_cpu(rlc_hdr_v2_1->save_restore_list_srm_offset_bytes));
231  			break;
232  		case 2:
233  			/* rlc_hdr v2_2 */
234  			DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
235  				  le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_size_bytes));
236  			DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
237  				  le32_to_cpu(rlc_hdr_v2_2->rlc_iram_ucode_offset_bytes));
238  			DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
239  				  le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_size_bytes));
240  			DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
241  				  le32_to_cpu(rlc_hdr_v2_2->rlc_dram_ucode_offset_bytes));
242  			break;
243  		case 3:
244  			/* rlc_hdr v2_3 */
245  			DRM_DEBUG("rlcp_ucode_version: %u\n",
246  				  le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_version));
247  			DRM_DEBUG("rlcp_ucode_feature_version: %u\n",
248  				  le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_feature_version));
249  			DRM_DEBUG("rlcp_ucode_size_bytes: %u\n",
250  				  le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_size_bytes));
251  			DRM_DEBUG("rlcp_ucode_offset_bytes: %u\n",
252  				  le32_to_cpu(rlc_hdr_v2_3->rlcp_ucode_offset_bytes));
253  			DRM_DEBUG("rlcv_ucode_version: %u\n",
254  				  le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_version));
255  			DRM_DEBUG("rlcv_ucode_feature_version: %u\n",
256  				  le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_feature_version));
257  			DRM_DEBUG("rlcv_ucode_size_bytes: %u\n",
258  				  le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_size_bytes));
259  			DRM_DEBUG("rlcv_ucode_offset_bytes: %u\n",
260  				  le32_to_cpu(rlc_hdr_v2_3->rlcv_ucode_offset_bytes));
261  			break;
262  		case 4:
263  			/* rlc_hdr v2_4 */
			DRM_DEBUG("global_tap_delays_ucode_size_bytes: %u\n",
265  				  le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_size_bytes));
266  			DRM_DEBUG("global_tap_delays_ucode_offset_bytes: %u\n",
267  				  le32_to_cpu(rlc_hdr_v2_4->global_tap_delays_ucode_offset_bytes));
			DRM_DEBUG("se0_tap_delays_ucode_size_bytes: %u\n",
269  				  le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_size_bytes));
270  			DRM_DEBUG("se0_tap_delays_ucode_offset_bytes: %u\n",
271  				  le32_to_cpu(rlc_hdr_v2_4->se0_tap_delays_ucode_offset_bytes));
			DRM_DEBUG("se1_tap_delays_ucode_size_bytes: %u\n",
273  				  le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_size_bytes));
274  			DRM_DEBUG("se1_tap_delays_ucode_offset_bytes: %u\n",
275  				  le32_to_cpu(rlc_hdr_v2_4->se1_tap_delays_ucode_offset_bytes));
			DRM_DEBUG("se2_tap_delays_ucode_size_bytes: %u\n",
277  				  le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_size_bytes));
278  			DRM_DEBUG("se2_tap_delays_ucode_offset_bytes: %u\n",
279  				  le32_to_cpu(rlc_hdr_v2_4->se2_tap_delays_ucode_offset_bytes));
			DRM_DEBUG("se3_tap_delays_ucode_size_bytes: %u\n",
281  				  le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_size_bytes));
282  			DRM_DEBUG("se3_tap_delays_ucode_offset_bytes: %u\n",
283  				  le32_to_cpu(rlc_hdr_v2_4->se3_tap_delays_ucode_offset_bytes));
284  			break;
285  		default:
286  			DRM_ERROR("Unknown RLC v2 ucode: v2.%u\n", version_minor);
287  			break;
288  		}
289  	} else {
290  		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
291  	}
292  }
293  
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
295  {
296  	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
297  	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
298  
299  	DRM_DEBUG("SDMA\n");
300  	amdgpu_ucode_print_common_hdr(hdr);
301  
302  	if (version_major == 1) {
303  		const struct sdma_firmware_header_v1_0 *sdma_hdr =
304  			container_of(hdr, struct sdma_firmware_header_v1_0, header);
305  
306  		DRM_DEBUG("ucode_feature_version: %u\n",
307  			  le32_to_cpu(sdma_hdr->ucode_feature_version));
308  		DRM_DEBUG("ucode_change_version: %u\n",
309  			  le32_to_cpu(sdma_hdr->ucode_change_version));
310  		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
311  		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
312  		if (version_minor >= 1) {
313  			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
314  				container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
315  			DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
316  		}
317  	} else if (version_major == 2) {
318  		const struct sdma_firmware_header_v2_0 *sdma_hdr =
319  			container_of(hdr, struct sdma_firmware_header_v2_0, header);
320  
321  		DRM_DEBUG("ucode_feature_version: %u\n",
322  			  le32_to_cpu(sdma_hdr->ucode_feature_version));
323  		DRM_DEBUG("ctx_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctx_jt_offset));
324  		DRM_DEBUG("ctx_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctx_jt_size));
325  		DRM_DEBUG("ctl_ucode_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_ucode_offset));
326  		DRM_DEBUG("ctl_jt_offset: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_offset));
327  		DRM_DEBUG("ctl_jt_size: %u\n", le32_to_cpu(sdma_hdr->ctl_jt_size));
328  	} else if (version_major == 3) {
329  		const struct sdma_firmware_header_v3_0 *sdma_hdr =
330  			container_of(hdr, struct sdma_firmware_header_v3_0, header);
331  
		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_feature_version));
334  	} else {
335  		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
336  			  version_major, version_minor);
337  	}
338  }
339  
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
341  {
342  	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
343  	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
344  	uint32_t fw_index;
345  	const struct psp_fw_bin_desc *desc;
346  
347  	DRM_DEBUG("PSP\n");
348  	amdgpu_ucode_print_common_hdr(hdr);
349  
350  	if (version_major == 1) {
351  		const struct psp_firmware_header_v1_0 *psp_hdr =
352  			container_of(hdr, struct psp_firmware_header_v1_0, header);
353  
		DRM_DEBUG("sos_fw_version: %u\n",
			  le32_to_cpu(psp_hdr->sos.fw_version));
356  		DRM_DEBUG("sos_offset_bytes: %u\n",
357  			  le32_to_cpu(psp_hdr->sos.offset_bytes));
358  		DRM_DEBUG("sos_size_bytes: %u\n",
359  			  le32_to_cpu(psp_hdr->sos.size_bytes));
360  		if (version_minor == 1) {
361  			const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
362  				container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
363  			DRM_DEBUG("toc_header_version: %u\n",
364  				  le32_to_cpu(psp_hdr_v1_1->toc.fw_version));
365  			DRM_DEBUG("toc_offset_bytes: %u\n",
366  				  le32_to_cpu(psp_hdr_v1_1->toc.offset_bytes));
367  			DRM_DEBUG("toc_size_bytes: %u\n",
368  				  le32_to_cpu(psp_hdr_v1_1->toc.size_bytes));
369  			DRM_DEBUG("kdb_header_version: %u\n",
370  				  le32_to_cpu(psp_hdr_v1_1->kdb.fw_version));
371  			DRM_DEBUG("kdb_offset_bytes: %u\n",
372  				  le32_to_cpu(psp_hdr_v1_1->kdb.offset_bytes));
373  			DRM_DEBUG("kdb_size_bytes: %u\n",
374  				  le32_to_cpu(psp_hdr_v1_1->kdb.size_bytes));
375  		}
376  		if (version_minor == 2) {
377  			const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 =
378  				container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0);
379  			DRM_DEBUG("kdb_header_version: %u\n",
380  				  le32_to_cpu(psp_hdr_v1_2->kdb.fw_version));
381  			DRM_DEBUG("kdb_offset_bytes: %u\n",
382  				  le32_to_cpu(psp_hdr_v1_2->kdb.offset_bytes));
383  			DRM_DEBUG("kdb_size_bytes: %u\n",
384  				  le32_to_cpu(psp_hdr_v1_2->kdb.size_bytes));
385  		}
386  		if (version_minor == 3) {
387  			const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
388  				container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
389  			const struct psp_firmware_header_v1_3 *psp_hdr_v1_3 =
390  				container_of(psp_hdr_v1_1, struct psp_firmware_header_v1_3, v1_1);
391  			DRM_DEBUG("toc_header_version: %u\n",
392  				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.fw_version));
393  			DRM_DEBUG("toc_offset_bytes: %u\n",
394  				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.offset_bytes));
395  			DRM_DEBUG("toc_size_bytes: %u\n",
396  				  le32_to_cpu(psp_hdr_v1_3->v1_1.toc.size_bytes));
397  			DRM_DEBUG("kdb_header_version: %u\n",
398  				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.fw_version));
399  			DRM_DEBUG("kdb_offset_bytes: %u\n",
400  				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.offset_bytes));
401  			DRM_DEBUG("kdb_size_bytes: %u\n",
402  				  le32_to_cpu(psp_hdr_v1_3->v1_1.kdb.size_bytes));
403  			DRM_DEBUG("spl_header_version: %u\n",
404  				  le32_to_cpu(psp_hdr_v1_3->spl.fw_version));
405  			DRM_DEBUG("spl_offset_bytes: %u\n",
406  				  le32_to_cpu(psp_hdr_v1_3->spl.offset_bytes));
407  			DRM_DEBUG("spl_size_bytes: %u\n",
408  				  le32_to_cpu(psp_hdr_v1_3->spl.size_bytes));
409  		}
410  	} else if (version_major == 2) {
411  		const struct psp_firmware_header_v2_0 *psp_hdr_v2_0 =
412  			 container_of(hdr, struct psp_firmware_header_v2_0, header);
413  		for (fw_index = 0; fw_index < le32_to_cpu(psp_hdr_v2_0->psp_fw_bin_count); fw_index++) {
414  			desc = &(psp_hdr_v2_0->psp_fw_bin[fw_index]);
415  			switch (desc->fw_type) {
416  			case PSP_FW_TYPE_PSP_SOS:
417  				DRM_DEBUG("psp_sos_version: %u\n",
418  					  le32_to_cpu(desc->fw_version));
419  				DRM_DEBUG("psp_sos_size_bytes: %u\n",
420  					  le32_to_cpu(desc->size_bytes));
421  				break;
422  			case PSP_FW_TYPE_PSP_SYS_DRV:
423  				DRM_DEBUG("psp_sys_drv_version: %u\n",
424  					  le32_to_cpu(desc->fw_version));
425  				DRM_DEBUG("psp_sys_drv_size_bytes: %u\n",
426  					  le32_to_cpu(desc->size_bytes));
427  				break;
428  			case PSP_FW_TYPE_PSP_KDB:
429  				DRM_DEBUG("psp_kdb_version: %u\n",
430  					  le32_to_cpu(desc->fw_version));
431  				DRM_DEBUG("psp_kdb_size_bytes: %u\n",
432  					  le32_to_cpu(desc->size_bytes));
433  				break;
434  			case PSP_FW_TYPE_PSP_TOC:
435  				DRM_DEBUG("psp_toc_version: %u\n",
436  					  le32_to_cpu(desc->fw_version));
437  				DRM_DEBUG("psp_toc_size_bytes: %u\n",
438  					  le32_to_cpu(desc->size_bytes));
439  				break;
440  			case PSP_FW_TYPE_PSP_SPL:
441  				DRM_DEBUG("psp_spl_version: %u\n",
442  					  le32_to_cpu(desc->fw_version));
443  				DRM_DEBUG("psp_spl_size_bytes: %u\n",
444  					  le32_to_cpu(desc->size_bytes));
445  				break;
446  			case PSP_FW_TYPE_PSP_RL:
447  				DRM_DEBUG("psp_rl_version: %u\n",
448  					  le32_to_cpu(desc->fw_version));
449  				DRM_DEBUG("psp_rl_size_bytes: %u\n",
450  					  le32_to_cpu(desc->size_bytes));
451  				break;
452  			case PSP_FW_TYPE_PSP_SOC_DRV:
453  				DRM_DEBUG("psp_soc_drv_version: %u\n",
454  					  le32_to_cpu(desc->fw_version));
455  				DRM_DEBUG("psp_soc_drv_size_bytes: %u\n",
456  					  le32_to_cpu(desc->size_bytes));
457  				break;
458  			case PSP_FW_TYPE_PSP_INTF_DRV:
459  				DRM_DEBUG("psp_intf_drv_version: %u\n",
460  					  le32_to_cpu(desc->fw_version));
461  				DRM_DEBUG("psp_intf_drv_size_bytes: %u\n",
462  					  le32_to_cpu(desc->size_bytes));
463  				break;
464  			case PSP_FW_TYPE_PSP_DBG_DRV:
465  				DRM_DEBUG("psp_dbg_drv_version: %u\n",
466  					  le32_to_cpu(desc->fw_version));
467  				DRM_DEBUG("psp_dbg_drv_size_bytes: %u\n",
468  					  le32_to_cpu(desc->size_bytes));
469  				break;
470  			case PSP_FW_TYPE_PSP_RAS_DRV:
471  				DRM_DEBUG("psp_ras_drv_version: %u\n",
472  					  le32_to_cpu(desc->fw_version));
473  				DRM_DEBUG("psp_ras_drv_size_bytes: %u\n",
474  					  le32_to_cpu(desc->size_bytes));
475  				break;
476  			default:
477  				DRM_DEBUG("Unsupported PSP fw type: %d\n", desc->fw_type);
478  				break;
479  			}
480  		}
481  	} else {
482  		DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
483  			  version_major, version_minor);
484  	}
485  }
486  
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
488  {
489  	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
490  	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);
491  
492  	DRM_DEBUG("GPU_INFO\n");
493  	amdgpu_ucode_print_common_hdr(hdr);
494  
495  	if (version_major == 1) {
496  		const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
497  			container_of(hdr, struct gpu_info_firmware_header_v1_0, header);
498  
499  		DRM_DEBUG("version_major: %u\n",
500  			  le16_to_cpu(gpu_info_hdr->version_major));
501  		DRM_DEBUG("version_minor: %u\n",
502  			  le16_to_cpu(gpu_info_hdr->version_minor));
503  	} else {
504  		DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
505  	}
506  }
507  
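/*
 * Basic sanity check: the loaded firmware file must be exactly as large as
 * the size_bytes field recorded in its common header.
 */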
static int amdgpu_ucode_validate(const struct firmware *fw)
509  {
510  	const struct common_firmware_header *hdr =
511  		(const struct common_firmware_header *)fw->data;
512  
513  	if (fw->size == le32_to_cpu(hdr->size_bytes))
514  		return 0;
515  
516  	return -EINVAL;
517  }
518  
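/* Check whether a firmware header matches the expected major.minor header version */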
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
520  				uint16_t hdr_major, uint16_t hdr_minor)
521  {
522  	if ((hdr->common.header_version_major == hdr_major) &&
523  		(hdr->common.header_version_minor == hdr_minor))
524  		return true;
525  	return false;
526  }
527  
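/*
 * Map an ASIC and the requested load type to a firmware load method:
 * direct MMIO writes, SMU-assisted load, PSP-assisted load, or the RLC
 * backdoor autoload path.
 */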
528  enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
530  {
531  	switch (adev->asic_type) {
532  #ifdef CONFIG_DRM_AMDGPU_SI
533  	case CHIP_TAHITI:
534  	case CHIP_PITCAIRN:
535  	case CHIP_VERDE:
536  	case CHIP_OLAND:
537  	case CHIP_HAINAN:
538  		return AMDGPU_FW_LOAD_DIRECT;
539  #endif
540  #ifdef CONFIG_DRM_AMDGPU_CIK
541  	case CHIP_BONAIRE:
542  	case CHIP_KAVERI:
543  	case CHIP_KABINI:
544  	case CHIP_HAWAII:
545  	case CHIP_MULLINS:
546  		return AMDGPU_FW_LOAD_DIRECT;
547  #endif
548  	case CHIP_TOPAZ:
549  	case CHIP_TONGA:
550  	case CHIP_FIJI:
551  	case CHIP_CARRIZO:
552  	case CHIP_STONEY:
553  	case CHIP_POLARIS10:
554  	case CHIP_POLARIS11:
555  	case CHIP_POLARIS12:
556  	case CHIP_VEGAM:
557  		return AMDGPU_FW_LOAD_SMU;
558  	case CHIP_CYAN_SKILLFISH:
559  		if (!(load_type &&
560  		      adev->apu_flags & AMD_APU_IS_CYAN_SKILLFISH2))
561  			return AMDGPU_FW_LOAD_DIRECT;
562  		else
563  			return AMDGPU_FW_LOAD_PSP;
564  	default:
565  		if (!load_type)
566  			return AMDGPU_FW_LOAD_DIRECT;
567  		else if (load_type == 3)
568  			return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO;
569  		else
570  			return AMDGPU_FW_LOAD_PSP;
571  	}
572  }
573  
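/* Return a human readable name for a ucode ID, for use in debug and error messages */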
const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
575  {
576  	switch (ucode_id) {
577  	case AMDGPU_UCODE_ID_SDMA0:
578  		return "SDMA0";
579  	case AMDGPU_UCODE_ID_SDMA1:
580  		return "SDMA1";
581  	case AMDGPU_UCODE_ID_SDMA2:
582  		return "SDMA2";
583  	case AMDGPU_UCODE_ID_SDMA3:
584  		return "SDMA3";
585  	case AMDGPU_UCODE_ID_SDMA4:
586  		return "SDMA4";
587  	case AMDGPU_UCODE_ID_SDMA5:
588  		return "SDMA5";
589  	case AMDGPU_UCODE_ID_SDMA6:
590  		return "SDMA6";
591  	case AMDGPU_UCODE_ID_SDMA7:
592  		return "SDMA7";
593  	case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
594  		return "SDMA_CTX";
595  	case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
596  		return "SDMA_CTL";
597  	case AMDGPU_UCODE_ID_CP_CE:
598  		return "CP_CE";
599  	case AMDGPU_UCODE_ID_CP_PFP:
600  		return "CP_PFP";
601  	case AMDGPU_UCODE_ID_CP_ME:
602  		return "CP_ME";
603  	case AMDGPU_UCODE_ID_CP_MEC1:
604  		return "CP_MEC1";
605  	case AMDGPU_UCODE_ID_CP_MEC1_JT:
606  		return "CP_MEC1_JT";
607  	case AMDGPU_UCODE_ID_CP_MEC2:
608  		return "CP_MEC2";
609  	case AMDGPU_UCODE_ID_CP_MEC2_JT:
610  		return "CP_MEC2_JT";
611  	case AMDGPU_UCODE_ID_CP_MES:
612  		return "CP_MES";
613  	case AMDGPU_UCODE_ID_CP_MES_DATA:
614  		return "CP_MES_DATA";
615  	case AMDGPU_UCODE_ID_CP_MES1:
616  		return "CP_MES_KIQ";
617  	case AMDGPU_UCODE_ID_CP_MES1_DATA:
618  		return "CP_MES_KIQ_DATA";
619  	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
620  		return "RLC_RESTORE_LIST_CNTL";
621  	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
622  		return "RLC_RESTORE_LIST_GPM_MEM";
623  	case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
624  		return "RLC_RESTORE_LIST_SRM_MEM";
625  	case AMDGPU_UCODE_ID_RLC_IRAM:
626  		return "RLC_IRAM";
627  	case AMDGPU_UCODE_ID_RLC_DRAM:
628  		return "RLC_DRAM";
629  	case AMDGPU_UCODE_ID_RLC_G:
630  		return "RLC_G";
631  	case AMDGPU_UCODE_ID_RLC_P:
632  		return "RLC_P";
633  	case AMDGPU_UCODE_ID_RLC_V:
634  		return "RLC_V";
635  	case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
636  		return "GLOBAL_TAP_DELAYS";
637  	case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
638  		return "SE0_TAP_DELAYS";
639  	case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
640  		return "SE1_TAP_DELAYS";
641  	case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
642  		return "SE2_TAP_DELAYS";
643  	case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
644  		return "SE3_TAP_DELAYS";
645  	case AMDGPU_UCODE_ID_IMU_I:
646  		return "IMU_I";
647  	case AMDGPU_UCODE_ID_IMU_D:
648  		return "IMU_D";
649  	case AMDGPU_UCODE_ID_STORAGE:
650  		return "STORAGE";
651  	case AMDGPU_UCODE_ID_SMC:
652  		return "SMC";
653  	case AMDGPU_UCODE_ID_PPTABLE:
654  		return "PPTABLE";
655  	case AMDGPU_UCODE_ID_P2S_TABLE:
656  		return "P2STABLE";
657  	case AMDGPU_UCODE_ID_UVD:
658  		return "UVD";
659  	case AMDGPU_UCODE_ID_UVD1:
660  		return "UVD1";
661  	case AMDGPU_UCODE_ID_VCE:
662  		return "VCE";
663  	case AMDGPU_UCODE_ID_VCN:
664  		return "VCN";
665  	case AMDGPU_UCODE_ID_VCN1:
666  		return "VCN1";
667  	case AMDGPU_UCODE_ID_DMCU_ERAM:
668  		return "DMCU_ERAM";
669  	case AMDGPU_UCODE_ID_DMCU_INTV:
670  		return "DMCU_INTV";
671  	case AMDGPU_UCODE_ID_VCN0_RAM:
672  		return "VCN0_RAM";
673  	case AMDGPU_UCODE_ID_VCN1_RAM:
674  		return "VCN1_RAM";
675  	case AMDGPU_UCODE_ID_DMCUB:
676  		return "DMCUB";
677  	case AMDGPU_UCODE_ID_CAP:
678  		return "CAP";
679  	case AMDGPU_UCODE_ID_VPE_CTX:
680  		return "VPE_CTX";
681  	case AMDGPU_UCODE_ID_VPE_CTL:
682  		return "VPE_CTL";
683  	case AMDGPU_UCODE_ID_VPE:
684  		return "VPE";
685  	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
686  		return "UMSCH_MM_UCODE";
687  	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
688  		return "UMSCH_MM_DATA";
689  	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
690  		return "UMSCH_MM_CMD_BUFFER";
691  	case AMDGPU_UCODE_ID_JPEG_RAM:
692  		return "JPEG";
693  	case AMDGPU_UCODE_ID_SDMA_RS64:
694  		return "RS64_SDMA";
695  	case AMDGPU_UCODE_ID_CP_RS64_PFP:
696  		return "RS64_PFP";
697  	case AMDGPU_UCODE_ID_CP_RS64_ME:
698  		return "RS64_ME";
699  	case AMDGPU_UCODE_ID_CP_RS64_MEC:
700  		return "RS64_MEC";
701  	case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
702  		return "RS64_PFP_P0_STACK";
703  	case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
704  		return "RS64_PFP_P1_STACK";
705  	case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
706  		return "RS64_ME_P0_STACK";
707  	case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
708  		return "RS64_ME_P1_STACK";
709  	case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
710  		return "RS64_MEC_P0_STACK";
711  	case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
712  		return "RS64_MEC_P1_STACK";
713  	case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
714  		return "RS64_MEC_P2_STACK";
715  	case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
716  		return "RS64_MEC_P3_STACK";
717  	case AMDGPU_UCODE_ID_ISP:
718  		return "ISP";
719  	default:
720  		return "UNKNOWN UCODE";
721  	}
722  }
723  
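/*
 * A zero firmware version means the ucode was never loaded for this block;
 * report -EINVAL so the corresponding sysfs attribute can be hidden.
 */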
static inline int amdgpu_ucode_is_valid(uint32_t fw_version)
725  {
726  	if (!fw_version)
727  		return -EINVAL;
728  
729  	return 0;
730  }
731  
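/*
 * Generate a read-only sysfs show() function for one firmware version field.
 * Calling the generated function with a NULL buffer only probes whether the
 * version is valid, which amdgpu_ucode_sys_visible() uses to hide attributes
 * for firmware that was never loaded.
 */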
732  #define FW_VERSION_ATTR(name, mode, field)				\
733  static ssize_t show_##name(struct device *dev,				\
734  			   struct device_attribute *attr, char *buf)	\
735  {									\
736  	struct drm_device *ddev = dev_get_drvdata(dev);			\
737  	struct amdgpu_device *adev = drm_to_adev(ddev);			\
738  									\
739  	if (!buf)							\
740  		return amdgpu_ucode_is_valid(adev->field);		\
741  									\
742  	return sysfs_emit(buf, "0x%08x\n", adev->field);		\
743  }									\
744  static DEVICE_ATTR(name, mode, show_##name, NULL)
745  
746  FW_VERSION_ATTR(vce_fw_version, 0444, vce.fw_version);
747  FW_VERSION_ATTR(uvd_fw_version, 0444, uvd.fw_version);
748  FW_VERSION_ATTR(mc_fw_version, 0444, gmc.fw_version);
749  FW_VERSION_ATTR(me_fw_version, 0444, gfx.me_fw_version);
750  FW_VERSION_ATTR(pfp_fw_version, 0444, gfx.pfp_fw_version);
751  FW_VERSION_ATTR(ce_fw_version, 0444, gfx.ce_fw_version);
752  FW_VERSION_ATTR(rlc_fw_version, 0444, gfx.rlc_fw_version);
753  FW_VERSION_ATTR(rlc_srlc_fw_version, 0444, gfx.rlc_srlc_fw_version);
754  FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
755  FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
756  FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
757  FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
758  FW_VERSION_ATTR(imu_fw_version, 0444, gfx.imu_fw_version);
759  FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version);
760  FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version);
761  FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version);
762  FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.xgmi_context.context.bin_desc.fw_version);
763  FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
764  FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
765  FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
766  FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
767  FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
768  FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
769  FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
770  
771  static struct attribute *fw_attrs[] = {
772  	&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
773  	&dev_attr_mc_fw_version.attr, &dev_attr_me_fw_version.attr,
774  	&dev_attr_pfp_fw_version.attr, &dev_attr_ce_fw_version.attr,
775  	&dev_attr_rlc_fw_version.attr, &dev_attr_rlc_srlc_fw_version.attr,
776  	&dev_attr_rlc_srlg_fw_version.attr, &dev_attr_rlc_srls_fw_version.attr,
777  	&dev_attr_mec_fw_version.attr, &dev_attr_mec2_fw_version.attr,
778  	&dev_attr_sos_fw_version.attr, &dev_attr_asd_fw_version.attr,
779  	&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
780  	&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
781  	&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
782  	&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
783  	&dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
784  	NULL
785  };
786  
787  #define to_dev_attr(x) container_of(x, struct device_attribute, attr)
788  
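/* Hide fw_version attributes whose firmware was never loaded (version == 0) */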
static umode_t amdgpu_ucode_sys_visible(struct kobject *kobj,
790  					struct attribute *attr, int idx)
791  {
792  	struct device_attribute *dev_attr = to_dev_attr(attr);
793  	struct device *dev = kobj_to_dev(kobj);
794  
795  	if (dev_attr->show(dev, dev_attr, NULL) == -EINVAL)
796  		return 0;
797  
798  	return attr->mode;
799  }
800  
801  static const struct attribute_group fw_attr_group = {
802  	.name = "fw_version",
803  	.attrs = fw_attrs,
804  	.is_visible = amdgpu_ucode_sys_visible
805  };
806  
int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
808  {
809  	return sysfs_create_group(&adev->dev->kobj, &fw_attr_group);
810  }
811  
void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev)
813  {
814  	sysfs_remove_group(&adev->dev->kobj, &fw_attr_group);
815  }
816  
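/*
 * Copy a single ucode image into the firmware BO at @mc_addr/@kptr.  For
 * PSP-loaded firmware, pick out the sub-image (jump table, stack, data, etc.)
 * that this ucode ID refers to within the firmware file.
 */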
static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
818  				       struct amdgpu_firmware_info *ucode,
819  				       uint64_t mc_addr, void *kptr)
820  {
821  	const struct common_firmware_header *header = NULL;
822  	const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
823  	const struct gfx_firmware_header_v2_0 *cpv2_hdr = NULL;
824  	const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
825  	const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;
826  	const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
827  	const struct sdma_firmware_header_v2_0 *sdma_hdr = NULL;
828  	const struct sdma_firmware_header_v3_0 *sdmav3_hdr = NULL;
829  	const struct imu_firmware_header_v1_0 *imu_hdr = NULL;
830  	const struct vpe_firmware_header_v1_0 *vpe_hdr = NULL;
831  	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr = NULL;
832  	u8 *ucode_addr;
833  
834  	if (!ucode->fw)
835  		return 0;
836  
837  	ucode->mc_addr = mc_addr;
838  	ucode->kaddr = kptr;
839  
840  	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
841  		return 0;
842  
843  	header = (const struct common_firmware_header *)ucode->fw->data;
844  	cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
845  	cpv2_hdr = (const struct gfx_firmware_header_v2_0 *)ucode->fw->data;
846  	dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
847  	dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;
848  	mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
849  	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)ucode->fw->data;
850  	sdmav3_hdr = (const struct sdma_firmware_header_v3_0 *)ucode->fw->data;
851  	imu_hdr = (const struct imu_firmware_header_v1_0 *)ucode->fw->data;
852  	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)ucode->fw->data;
853  	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)ucode->fw->data;
854  
855  	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
856  		switch (ucode->ucode_id) {
857  		case AMDGPU_UCODE_ID_SDMA_UCODE_TH0:
858  			ucode->ucode_size = le32_to_cpu(sdma_hdr->ctx_ucode_size_bytes);
859  			ucode_addr = (u8 *)ucode->fw->data +
860  				le32_to_cpu(sdma_hdr->header.ucode_array_offset_bytes);
861  			break;
862  		case AMDGPU_UCODE_ID_SDMA_UCODE_TH1:
863  			ucode->ucode_size = le32_to_cpu(sdma_hdr->ctl_ucode_size_bytes);
864  			ucode_addr = (u8 *)ucode->fw->data +
865  				le32_to_cpu(sdma_hdr->ctl_ucode_offset);
866  			break;
867  		case AMDGPU_UCODE_ID_SDMA_RS64:
868  			ucode->ucode_size = le32_to_cpu(sdmav3_hdr->ucode_size_bytes);
869  			ucode_addr = (u8 *)ucode->fw->data +
870  				le32_to_cpu(sdmav3_hdr->header.ucode_array_offset_bytes);
871  			break;
872  		case AMDGPU_UCODE_ID_CP_MEC1:
873  		case AMDGPU_UCODE_ID_CP_MEC2:
874  			ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
875  				le32_to_cpu(cp_hdr->jt_size) * 4;
876  			ucode_addr = (u8 *)ucode->fw->data +
877  				le32_to_cpu(header->ucode_array_offset_bytes);
878  			break;
879  		case AMDGPU_UCODE_ID_CP_MEC1_JT:
880  		case AMDGPU_UCODE_ID_CP_MEC2_JT:
881  			ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;
882  			ucode_addr = (u8 *)ucode->fw->data +
883  				le32_to_cpu(header->ucode_array_offset_bytes) +
884  				le32_to_cpu(cp_hdr->jt_offset) * 4;
885  			break;
886  		case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL:
887  			ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
888  			ucode_addr = adev->gfx.rlc.save_restore_list_cntl;
889  			break;
890  		case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM:
891  			ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
892  			ucode_addr = adev->gfx.rlc.save_restore_list_gpm;
893  			break;
894  		case AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM:
895  			ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
896  			ucode_addr = adev->gfx.rlc.save_restore_list_srm;
897  			break;
898  		case AMDGPU_UCODE_ID_RLC_IRAM:
899  			ucode->ucode_size = adev->gfx.rlc.rlc_iram_ucode_size_bytes;
900  			ucode_addr = adev->gfx.rlc.rlc_iram_ucode;
901  			break;
902  		case AMDGPU_UCODE_ID_RLC_DRAM:
903  			ucode->ucode_size = adev->gfx.rlc.rlc_dram_ucode_size_bytes;
904  			ucode_addr = adev->gfx.rlc.rlc_dram_ucode;
905  			break;
906  		case AMDGPU_UCODE_ID_RLC_P:
907  			ucode->ucode_size = adev->gfx.rlc.rlcp_ucode_size_bytes;
908  			ucode_addr = adev->gfx.rlc.rlcp_ucode;
909  			break;
910  		case AMDGPU_UCODE_ID_RLC_V:
911  			ucode->ucode_size = adev->gfx.rlc.rlcv_ucode_size_bytes;
912  			ucode_addr = adev->gfx.rlc.rlcv_ucode;
913  			break;
914  		case AMDGPU_UCODE_ID_GLOBAL_TAP_DELAYS:
915  			ucode->ucode_size = adev->gfx.rlc.global_tap_delays_ucode_size_bytes;
916  			ucode_addr = adev->gfx.rlc.global_tap_delays_ucode;
917  			break;
918  		case AMDGPU_UCODE_ID_SE0_TAP_DELAYS:
919  			ucode->ucode_size = adev->gfx.rlc.se0_tap_delays_ucode_size_bytes;
920  			ucode_addr = adev->gfx.rlc.se0_tap_delays_ucode;
921  			break;
922  		case AMDGPU_UCODE_ID_SE1_TAP_DELAYS:
923  			ucode->ucode_size = adev->gfx.rlc.se1_tap_delays_ucode_size_bytes;
924  			ucode_addr = adev->gfx.rlc.se1_tap_delays_ucode;
925  			break;
926  		case AMDGPU_UCODE_ID_SE2_TAP_DELAYS:
927  			ucode->ucode_size = adev->gfx.rlc.se2_tap_delays_ucode_size_bytes;
928  			ucode_addr = adev->gfx.rlc.se2_tap_delays_ucode;
929  			break;
930  		case AMDGPU_UCODE_ID_SE3_TAP_DELAYS:
931  			ucode->ucode_size = adev->gfx.rlc.se3_tap_delays_ucode_size_bytes;
932  			ucode_addr = adev->gfx.rlc.se3_tap_delays_ucode;
933  			break;
934  		case AMDGPU_UCODE_ID_CP_MES:
935  			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
936  			ucode_addr = (u8 *)ucode->fw->data +
937  				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
938  			break;
939  		case AMDGPU_UCODE_ID_CP_MES_DATA:
940  			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
941  			ucode_addr = (u8 *)ucode->fw->data +
942  				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
943  			break;
944  		case AMDGPU_UCODE_ID_CP_MES1:
945  			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_size_bytes);
946  			ucode_addr = (u8 *)ucode->fw->data +
947  				le32_to_cpu(mes_hdr->mes_ucode_offset_bytes);
948  			break;
949  		case AMDGPU_UCODE_ID_CP_MES1_DATA:
950  			ucode->ucode_size = le32_to_cpu(mes_hdr->mes_ucode_data_size_bytes);
951  			ucode_addr = (u8 *)ucode->fw->data +
952  				le32_to_cpu(mes_hdr->mes_ucode_data_offset_bytes);
953  			break;
954  		case AMDGPU_UCODE_ID_DMCU_ERAM:
955  			ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
956  				le32_to_cpu(dmcu_hdr->intv_size_bytes);
957  			ucode_addr = (u8 *)ucode->fw->data +
958  				le32_to_cpu(header->ucode_array_offset_bytes);
959  			break;
960  		case AMDGPU_UCODE_ID_DMCU_INTV:
961  			ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
962  			ucode_addr = (u8 *)ucode->fw->data +
963  				le32_to_cpu(header->ucode_array_offset_bytes) +
964  				le32_to_cpu(dmcu_hdr->intv_offset_bytes);
965  			break;
966  		case AMDGPU_UCODE_ID_DMCUB:
967  			ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
968  			ucode_addr = (u8 *)ucode->fw->data +
969  				le32_to_cpu(header->ucode_array_offset_bytes);
970  			break;
971  		case AMDGPU_UCODE_ID_PPTABLE:
972  			ucode->ucode_size = ucode->fw->size;
973  			ucode_addr = (u8 *)ucode->fw->data;
974  			break;
975  		case AMDGPU_UCODE_ID_P2S_TABLE:
976  			ucode->ucode_size = ucode->fw->size;
977  			ucode_addr = (u8 *)ucode->fw->data;
978  			break;
979  		case AMDGPU_UCODE_ID_IMU_I:
980  			ucode->ucode_size = le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
981  			ucode_addr = (u8 *)ucode->fw->data +
982  				le32_to_cpu(imu_hdr->header.ucode_array_offset_bytes);
983  			break;
984  		case AMDGPU_UCODE_ID_IMU_D:
985  			ucode->ucode_size = le32_to_cpu(imu_hdr->imu_dram_ucode_size_bytes);
986  			ucode_addr = (u8 *)ucode->fw->data +
987  				le32_to_cpu(imu_hdr->header.ucode_array_offset_bytes) +
988  				le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
989  			break;
990  		case AMDGPU_UCODE_ID_CP_RS64_PFP:
991  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
992  			ucode_addr = (u8 *)ucode->fw->data +
993  				le32_to_cpu(header->ucode_array_offset_bytes);
994  			break;
995  		case AMDGPU_UCODE_ID_CP_RS64_PFP_P0_STACK:
996  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
997  			ucode_addr = (u8 *)ucode->fw->data +
998  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
999  			break;
1000  		case AMDGPU_UCODE_ID_CP_RS64_PFP_P1_STACK:
1001  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1002  			ucode_addr = (u8 *)ucode->fw->data +
1003  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
1004  			break;
1005  		case AMDGPU_UCODE_ID_CP_RS64_ME:
1006  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1007  			ucode_addr = (u8 *)ucode->fw->data +
1008  				le32_to_cpu(header->ucode_array_offset_bytes);
1009  			break;
1010  		case AMDGPU_UCODE_ID_CP_RS64_ME_P0_STACK:
1011  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1012  			ucode_addr = (u8 *)ucode->fw->data +
1013  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
1014  			break;
1015  		case AMDGPU_UCODE_ID_CP_RS64_ME_P1_STACK:
1016  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1017  			ucode_addr = (u8 *)ucode->fw->data +
1018  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
1019  			break;
1020  		case AMDGPU_UCODE_ID_CP_RS64_MEC:
1021  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->ucode_size_bytes);
1022  			ucode_addr = (u8 *)ucode->fw->data +
1023  				le32_to_cpu(header->ucode_array_offset_bytes);
1024  			break;
1025  		case AMDGPU_UCODE_ID_CP_RS64_MEC_P0_STACK:
1026  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1027  			ucode_addr = (u8 *)ucode->fw->data +
1028  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
1029  			break;
1030  		case AMDGPU_UCODE_ID_CP_RS64_MEC_P1_STACK:
1031  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1032  			ucode_addr = (u8 *)ucode->fw->data +
1033  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
1034  			break;
1035  		case AMDGPU_UCODE_ID_CP_RS64_MEC_P2_STACK:
1036  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1037  			ucode_addr = (u8 *)ucode->fw->data +
1038  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
1039  			break;
1040  		case AMDGPU_UCODE_ID_CP_RS64_MEC_P3_STACK:
1041  			ucode->ucode_size = le32_to_cpu(cpv2_hdr->data_size_bytes);
1042  			ucode_addr = (u8 *)ucode->fw->data +
1043  				le32_to_cpu(cpv2_hdr->data_offset_bytes);
1044  			break;
1045  		case AMDGPU_UCODE_ID_VPE_CTX:
1046  			ucode->ucode_size = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes);
1047  			ucode_addr = (u8 *)ucode->fw->data +
1048  				le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes);
1049  			break;
1050  		case AMDGPU_UCODE_ID_VPE_CTL:
1051  			ucode->ucode_size = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes);
1052  			ucode_addr = (u8 *)ucode->fw->data +
1053  				le32_to_cpu(vpe_hdr->ctl_ucode_offset);
1054  			break;
1055  		case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
1056  			ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
1057  			ucode_addr = (u8 *)ucode->fw->data +
1058  				le32_to_cpu(umsch_mm_hdr->header.ucode_array_offset_bytes);
1059  			break;
1060  		case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
1061  			ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);
1062  			ucode_addr = (u8 *)ucode->fw->data +
1063  				le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes);
1064  			break;
1065  		default:
1066  			ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
1067  			ucode_addr = (u8 *)ucode->fw->data +
1068  				le32_to_cpu(header->ucode_array_offset_bytes);
1069  			break;
1070  		}
1071  	} else {
1072  		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
1073  		ucode_addr = (u8 *)ucode->fw->data +
1074  			le32_to_cpu(header->ucode_array_offset_bytes);
1075  	}
1076  
1077  	memcpy(ucode->kaddr, ucode_addr, ucode->ucode_size);
1078  
1079  	return 0;
1080  }
1081  
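/*
 * Copy the CP jump table to a page-aligned location right after the main
 * ucode image in the firmware BO.
 */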
static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
1083  				uint64_t mc_addr, void *kptr)
1084  {
1085  	const struct gfx_firmware_header_v1_0 *header = NULL;
1086  	const struct common_firmware_header *comm_hdr = NULL;
1087  	uint8_t *src_addr = NULL;
1088  	uint8_t *dst_addr = NULL;
1089  
1090  	if (!ucode->fw)
1091  		return 0;
1092  
1093  	comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
1094  	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
1095  	dst_addr = ucode->kaddr +
1096  			   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
1097  			   PAGE_SIZE);
1098  	src_addr = (uint8_t *)ucode->fw->data +
1099  			   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
1100  			   (le32_to_cpu(header->jt_offset) * 4);
1101  	memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);
1102  
1103  	return 0;
1104  }
1105  
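/*
 * Allocate the BO that backs all firmware images.  It lives in VRAM under
 * SR-IOV or when debug_use_vram_fw_buf is set, otherwise in GTT; direct and
 * RLC backdoor load types do not need it.
 */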
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
1107  {
1108  	if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) &&
1109  	    (adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) {
1110  		amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
1111  			(amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
1112  			AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
1113  			&adev->firmware.fw_buf,
1114  			&adev->firmware.fw_buf_mc,
1115  			&adev->firmware.fw_buf_ptr);
1116  		if (!adev->firmware.fw_buf) {
1117  			dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
1118  			return -ENOMEM;
1119  		} else if (amdgpu_sriov_vf(adev)) {
1120  			memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
1121  		}
1122  	}
1123  	return 0;
1124  }
1125  
void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
1127  {
1128  	amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
1129  		&adev->firmware.fw_buf_mc,
1130  		&adev->firmware.fw_buf_ptr);
1131  }
1132  
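/*
 * Place every requested ucode image into the firmware BO at page-aligned
 * offsets, patching in the MEC jump table when firmware is not loaded
 * through the PSP.
 */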
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
1134  {
1135  	uint64_t fw_offset = 0;
1136  	int i;
1137  	struct amdgpu_firmware_info *ucode = NULL;
1138  
	/*
	 * For bare-metal, the ucode is allocated in GTT, so there is no need
	 * to refill the bo on reset/suspend.
	 */
1140  	if (!amdgpu_sriov_vf(adev) && (amdgpu_in_reset(adev) || adev->in_suspend))
1141  		return 0;
	/*
	 * If the SMU loads the firmware, there is no need to add the SMC,
	 * UVD, and VCE ucode info here.
	 */
1146  	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1147  		if (amdgpu_sriov_vf(adev))
1148  			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
1149  		else
1150  			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
1151  	} else {
1152  		adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
1153  	}
1154  
1155  	for (i = 0; i < adev->firmware.max_ucodes; i++) {
1156  		ucode = &adev->firmware.ucode[i];
1157  		if (ucode->fw) {
1158  			amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
1159  						    adev->firmware.fw_buf_ptr + fw_offset);
1160  			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
1161  			    adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
1162  				const struct gfx_firmware_header_v1_0 *cp_hdr;
1163  
1164  				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
1165  				amdgpu_ucode_patch_jt(ucode,  adev->firmware.fw_buf_mc + fw_offset,
1166  						    adev->firmware.fw_buf_ptr + fw_offset);
1167  				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
1168  			}
1169  			fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);
1170  		}
1171  	}
1172  	return 0;
1173  }
1174  
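/*
 * Return the legacy chip-name firmware prefix (e.g. "navi10", "vega20_sdma")
 * for IP versions that predate the generic <ip>_<maj>_<min>_<rev> naming,
 * or NULL if the generic naming should be used.
 */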
static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)
1176  {
1177  	if (block_type == MP0_HWIP) {
1178  		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
1179  		case IP_VERSION(9, 0, 0):
1180  			switch (adev->asic_type) {
1181  			case CHIP_VEGA10:
1182  				return "vega10";
1183  			case CHIP_VEGA12:
1184  				return "vega12";
1185  			default:
1186  				return NULL;
1187  			}
1188  		case IP_VERSION(10, 0, 0):
1189  		case IP_VERSION(10, 0, 1):
1190  			if (adev->asic_type == CHIP_RAVEN) {
1191  				if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1192  					return "raven2";
1193  				else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1194  					return "picasso";
1195  				return "raven";
1196  			}
1197  			break;
1198  		case IP_VERSION(11, 0, 0):
1199  			return "navi10";
1200  		case IP_VERSION(11, 0, 2):
1201  			return "vega20";
1202  		case IP_VERSION(11, 0, 3):
1203  			return "renoir";
1204  		case IP_VERSION(11, 0, 4):
1205  			return "arcturus";
1206  		case IP_VERSION(11, 0, 5):
1207  			return "navi14";
1208  		case IP_VERSION(11, 0, 7):
1209  			return "sienna_cichlid";
1210  		case IP_VERSION(11, 0, 9):
1211  			return "navi12";
1212  		case IP_VERSION(11, 0, 11):
1213  			return "navy_flounder";
1214  		case IP_VERSION(11, 0, 12):
1215  			return "dimgrey_cavefish";
1216  		case IP_VERSION(11, 0, 13):
1217  			return "beige_goby";
1218  		case IP_VERSION(11, 5, 0):
1219  			return "vangogh";
1220  		case IP_VERSION(12, 0, 1):
1221  			return "green_sardine";
1222  		case IP_VERSION(13, 0, 2):
1223  			return "aldebaran";
1224  		case IP_VERSION(13, 0, 1):
1225  		case IP_VERSION(13, 0, 3):
1226  			return "yellow_carp";
1227  		}
1228  	} else if (block_type == MP1_HWIP) {
1229  		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
1230  		case IP_VERSION(9, 0, 0):
1231  		case IP_VERSION(10, 0, 0):
1232  		case IP_VERSION(10, 0, 1):
1233  		case IP_VERSION(11, 0, 2):
1234  			if (adev->asic_type == CHIP_ARCTURUS)
1235  				return "arcturus_smc";
1236  			return NULL;
1237  		case IP_VERSION(11, 0, 0):
1238  			return "navi10_smc";
1239  		case IP_VERSION(11, 0, 5):
1240  			return "navi14_smc";
1241  		case IP_VERSION(11, 0, 9):
1242  			return "navi12_smc";
1243  		case IP_VERSION(11, 0, 7):
1244  			return "sienna_cichlid_smc";
1245  		case IP_VERSION(11, 0, 11):
1246  			return "navy_flounder_smc";
1247  		case IP_VERSION(11, 0, 12):
1248  			return "dimgrey_cavefish_smc";
1249  		case IP_VERSION(11, 0, 13):
1250  			return "beige_goby_smc";
1251  		case IP_VERSION(13, 0, 2):
1252  			return "aldebaran_smc";
1253  		}
1254  	} else if (block_type == SDMA0_HWIP) {
1255  		switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
1256  		case IP_VERSION(4, 0, 0):
1257  			return "vega10_sdma";
1258  		case IP_VERSION(4, 0, 1):
1259  			return "vega12_sdma";
1260  		case IP_VERSION(4, 1, 0):
1261  		case IP_VERSION(4, 1, 1):
1262  			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1263  				return "raven2_sdma";
1264  			else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1265  				return "picasso_sdma";
1266  			return "raven_sdma";
1267  		case IP_VERSION(4, 1, 2):
1268  			if (adev->apu_flags & AMD_APU_IS_RENOIR)
1269  				return "renoir_sdma";
1270  			return "green_sardine_sdma";
1271  		case IP_VERSION(4, 2, 0):
1272  			return "vega20_sdma";
1273  		case IP_VERSION(4, 2, 2):
1274  			return "arcturus_sdma";
1275  		case IP_VERSION(4, 4, 0):
1276  			return "aldebaran_sdma";
1277  		case IP_VERSION(5, 0, 0):
1278  			return "navi10_sdma";
1279  		case IP_VERSION(5, 0, 1):
1280  			return "cyan_skillfish2_sdma";
1281  		case IP_VERSION(5, 0, 2):
1282  			return "navi14_sdma";
1283  		case IP_VERSION(5, 0, 5):
1284  			return "navi12_sdma";
1285  		case IP_VERSION(5, 2, 0):
1286  			return "sienna_cichlid_sdma";
1287  		case IP_VERSION(5, 2, 2):
1288  			return "navy_flounder_sdma";
1289  		case IP_VERSION(5, 2, 4):
1290  			return "dimgrey_cavefish_sdma";
1291  		case IP_VERSION(5, 2, 5):
1292  			return "beige_goby_sdma";
1293  		case IP_VERSION(5, 2, 3):
1294  			return "yellow_carp_sdma";
1295  		case IP_VERSION(5, 2, 1):
1296  			return "vangogh_sdma";
1297  		}
1298  	} else if (block_type == UVD_HWIP) {
1299  		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
1300  		case IP_VERSION(1, 0, 0):
1301  		case IP_VERSION(1, 0, 1):
1302  			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1303  				return "raven2_vcn";
1304  			else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1305  				return "picasso_vcn";
1306  			return "raven_vcn";
1307  		case IP_VERSION(2, 5, 0):
1308  			return "arcturus_vcn";
1309  		case IP_VERSION(2, 2, 0):
1310  			if (adev->apu_flags & AMD_APU_IS_RENOIR)
1311  				return "renoir_vcn";
1312  			return "green_sardine_vcn";
1313  		case IP_VERSION(2, 6, 0):
1314  			return "aldebaran_vcn";
1315  		case IP_VERSION(2, 0, 0):
1316  			return "navi10_vcn";
1317  		case IP_VERSION(2, 0, 2):
1318  			if (adev->asic_type == CHIP_NAVI12)
1319  				return "navi12_vcn";
1320  			return "navi14_vcn";
1321  		case IP_VERSION(3, 0, 0):
1322  		case IP_VERSION(3, 0, 64):
1323  		case IP_VERSION(3, 0, 192):
1324  			if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
1325  			    IP_VERSION(10, 3, 0))
1326  				return "sienna_cichlid_vcn";
1327  			return "navy_flounder_vcn";
1328  		case IP_VERSION(3, 0, 2):
1329  			return "vangogh_vcn";
1330  		case IP_VERSION(3, 0, 16):
1331  			return "dimgrey_cavefish_vcn";
1332  		case IP_VERSION(3, 0, 33):
1333  			return "beige_goby_vcn";
1334  		case IP_VERSION(3, 1, 1):
1335  			return "yellow_carp_vcn";
1336  		}
1337  	} else if (block_type == GC_HWIP) {
1338  		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
1339  		case IP_VERSION(9, 0, 1):
1340  			return "vega10";
1341  		case IP_VERSION(9, 2, 1):
1342  			return "vega12";
1343  		case IP_VERSION(9, 4, 0):
1344  			return "vega20";
1345  		case IP_VERSION(9, 2, 2):
1346  		case IP_VERSION(9, 1, 0):
1347  			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
1348  				return "raven2";
1349  			else if (adev->apu_flags & AMD_APU_IS_PICASSO)
1350  				return "picasso";
1351  			return "raven";
1352  		case IP_VERSION(9, 4, 1):
1353  			return "arcturus";
1354  		case IP_VERSION(9, 3, 0):
1355  			if (adev->apu_flags & AMD_APU_IS_RENOIR)
1356  				return "renoir";
1357  			return "green_sardine";
1358  		case IP_VERSION(9, 4, 2):
1359  			return "aldebaran";
1360  		case IP_VERSION(10, 1, 10):
1361  			return "navi10";
1362  		case IP_VERSION(10, 1, 1):
1363  			return "navi14";
1364  		case IP_VERSION(10, 1, 2):
1365  			return "navi12";
1366  		case IP_VERSION(10, 3, 0):
1367  			return "sienna_cichlid";
1368  		case IP_VERSION(10, 3, 2):
1369  			return "navy_flounder";
1370  		case IP_VERSION(10, 3, 1):
1371  			return "vangogh";
1372  		case IP_VERSION(10, 3, 4):
1373  			return "dimgrey_cavefish";
1374  		case IP_VERSION(10, 3, 5):
1375  			return "beige_goby";
1376  		case IP_VERSION(10, 3, 3):
1377  			return "yellow_carp";
1378  		case IP_VERSION(10, 1, 3):
1379  		case IP_VERSION(10, 1, 4):
1380  			return "cyan_skillfish2";
1381  		}
1382  	}
1383  	return NULL;
1384  }
1385  
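/*
 * Build the firmware file prefix for @block_type into @ucode_prefix: the
 * legacy chip name when one exists, otherwise "<ip>_<maj>_<min>_<rev>".
 */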
void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type, char *ucode_prefix, int len)
1387  {
1388  	int maj, min, rev;
1389  	char *ip_name;
1390  	const char *legacy;
1391  	uint32_t version = amdgpu_ip_version(adev, block_type, 0);
1392  
1393  	legacy = amdgpu_ucode_legacy_naming(adev, block_type);
1394  	if (legacy) {
1395  		snprintf(ucode_prefix, len, "%s", legacy);
1396  		return;
1397  	}
1398  
1399  	switch (block_type) {
1400  	case GC_HWIP:
1401  		ip_name = "gc";
1402  		break;
1403  	case SDMA0_HWIP:
1404  		ip_name = "sdma";
1405  		break;
1406  	case MP0_HWIP:
1407  		ip_name = "psp";
1408  		break;
1409  	case MP1_HWIP:
1410  		ip_name = "smu";
1411  		break;
1412  	case UVD_HWIP:
1413  		ip_name = "vcn";
1414  		break;
1415  	case VPE_HWIP:
1416  		ip_name = "vpe";
1417  		break;
1418  	case ISP_HWIP:
1419  		ip_name = "isp";
1420  		break;
1421  	default:
1422  		BUG();
1423  	}
1424  
1425  	maj = IP_VERSION_MAJ(version);
1426  	min = IP_VERSION_MIN(version);
1427  	rev = IP_VERSION_REV(version);
1428  
1429  	snprintf(ucode_prefix, len, "%s_%d_%d_%d", ip_name, maj, min, rev);
1430  }
1431  
1432  /*
1433   * amdgpu_ucode_request - Fetch and validate amdgpu microcode
1434   *
1435   * @adev: amdgpu device
1436   * @fw: pointer to load firmware to
1437   * @fmt: firmware name format string
1438   * @...: variable arguments
1439   *
1440   * This is a helper that will use request_firmware and amdgpu_ucode_validate
1441   * to load and run basic validation on firmware. If the load fails, remap
1442   * the error code to -ENODEV, so that early_init functions will fail to load.
1443   */
int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
1445  			 const char *fmt, ...)
1446  {
1447  	char fname[AMDGPU_UCODE_NAME_MAX];
1448  	va_list ap;
1449  	int r;
1450  
1451  	va_start(ap, fmt);
1452  	r = vsnprintf(fname, sizeof(fname), fmt, ap);
1453  	va_end(ap);
	if (r >= sizeof(fname)) {
1455  		dev_warn(adev->dev, "amdgpu firmware name buffer overflow\n");
1456  		return -EOVERFLOW;
1457  	}
1458  
1459  	r = request_firmware(fw, fname, adev->dev);
1460  	if (r)
1461  		return -ENODEV;
1462  
1463  	r = amdgpu_ucode_validate(*fw);
1464  	if (r) {
1465  		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fname);
1466  		release_firmware(*fw);
1467  		*fw = NULL;
1468  	}
1469  
1470  	return r;
1471  }
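
/*
 * Illustrative usage only (the firmware member and file name below are
 * examples, not a requirement of this helper):
 *
 *	r = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw,
 *				 "amdgpu/%s_pfp.bin", chip_name);
 *	if (r)
 *		return r;
 */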
1472  
1473  /*
1474   * amdgpu_ucode_release - Release firmware microcode
1475   *
1476   * @fw: pointer to firmware to release
1477   */
void amdgpu_ucode_release(const struct firmware **fw)
1479  {
1480  	release_firmware(*fw);
1481  	*fw = NULL;
1482  }
1483