/*
 * Copyright 2007-8 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_atomfirmware.h"
#include "amdgpu_i2c.h"
#include "amdgpu_display.h"

#include "atom.h"
#include "atom-bits.h"
#include "atombios_encoders.h"
#include "bif/bif_4_1_d.h"

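/* Per-board quirk hook for GPIO_I2C_Info entries; currently a no-op in
 * amdgpu (the equivalent radeon function patched up known-bad VBIOS
 * entries here).
 */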
static void amdgpu_atombios_lookup_i2c_gpio_quirks(struct amdgpu_device *adev,
					  ATOM_GPIO_I2C_ASSIGMENT *gpio,
					  u8 index)
{

}

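/* Translate one ATOM GPIO_I2C_ASSIGMENT entry into the driver's
 * amdgpu_i2c_bus_rec representation (register offsets, bit masks,
 * hw engine capability and the mm_i2c flag for i2c id 0xa0).
 */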
static struct amdgpu_i2c_bus_rec amdgpu_atombios_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
{
	struct amdgpu_i2c_bus_rec i2c;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));

	i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex);
	i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex);
	i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex);
	i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex);
	i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex);
	i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex);
	i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex);
	i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex);
	i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
	i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
	i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
	i2c.en_data_mask = (1 << gpio->ucDataEnShift);
	i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
	i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
	i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
	i2c.a_data_mask = (1 << gpio->ucDataA_Shift);

	if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
		i2c.hw_capable = true;
	else
		i2c.hw_capable = false;

	if (gpio->sucI2cId.ucAccess == 0xa0)
		i2c.mm_i2c = true;
	else
		i2c.mm_i2c = false;

	i2c.i2c_id = gpio->sucI2cId.ucAccess;

	if (i2c.mask_clk_reg)
		i2c.valid = true;
	else
		i2c.valid = false;

	return i2c;
}

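/* Walk the GPIO_I2C_Info data table and return the bus record whose i2c
 * id matches @id; the result has .valid == false if no entry matched.
 */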
struct amdgpu_i2c_bus_rec amdgpu_atombios_lookup_i2c_gpio(struct amdgpu_device *adev,
							  uint8_t id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;

	memset(&i2c, 0, sizeof(struct amdgpu_i2c_bus_rec));
	i2c.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {
			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			if (gpio->sucI2cId.ucAccess == id) {
				i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);
				break;
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}

	return i2c;
}

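/* Register an i2c bus for every valid entry in the GPIO_I2C_Info table;
 * each bus is named with its hex i2c id.
 */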
void amdgpu_atombios_i2c_init(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	ATOM_GPIO_I2C_ASSIGMENT *gpio;
	struct amdgpu_i2c_bus_rec i2c;
	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
	struct _ATOM_GPIO_I2C_INFO *i2c_info;
	uint16_t data_offset, size;
	int i, num_indices;
	char stmp[32];

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_I2C_ASSIGMENT);

		gpio = &i2c_info->asGPIO_Info[0];
		for (i = 0; i < num_indices; i++) {
			amdgpu_atombios_lookup_i2c_gpio_quirks(adev, gpio, i);

			i2c = amdgpu_atombios_get_bus_rec_for_i2c_gpio(gpio);

			if (i2c.valid) {
				snprintf(stmp, sizeof(stmp), "0x%x", i2c.i2c_id);
				adev->i2c_bus[i] = amdgpu_i2c_create(adev_to_drm(adev), &i2c, stmp);
			}
			gpio = (ATOM_GPIO_I2C_ASSIGMENT *)
				((u8 *)gpio + sizeof(ATOM_GPIO_I2C_ASSIGMENT));
		}
	}
}

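/* Look up a pin in the GPIO_Pin_LUT data table by GPIO id and return its
 * register offset, shift and mask; .valid is false if the id is not found.
 */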
struct amdgpu_gpio_rec
amdgpu_atombios_lookup_gpio(struct amdgpu_device *adev,
			    u8 id)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	struct amdgpu_gpio_rec gpio;
	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
	struct _ATOM_GPIO_PIN_LUT *gpio_info;
	ATOM_GPIO_PIN_ASSIGNMENT *pin;
	u16 data_offset, size;
	int i, num_indices;

	memset(&gpio, 0, sizeof(struct amdgpu_gpio_rec));
	gpio.valid = false;

	if (amdgpu_atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
		gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);

		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_GPIO_PIN_ASSIGNMENT);

		pin = gpio_info->asGPIO_Pin;
		for (i = 0; i < num_indices; i++) {
			if (id == pin->ucGPIO_ID) {
				gpio.id = pin->ucGPIO_ID;
				gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex);
				gpio.shift = pin->ucGpioPinBitShift;
				gpio.mask = (1 << pin->ucGpioPinBitShift);
				gpio.valid = true;
				break;
			}
			pin = (ATOM_GPIO_PIN_ASSIGNMENT *)
				((u8 *)pin + sizeof(ATOM_GPIO_PIN_ASSIGNMENT));
		}
	}

	return gpio;
}

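/* Map an HPD GPIO pin to an AMDGPU_HPD_n slot by comparing the pin's
 * register/mask against the DCE HPD GPIO register; pins that don't match
 * map to AMDGPU_HPD_NONE.
 */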
static struct amdgpu_hpd
amdgpu_atombios_get_hpd_info_from_gpio(struct amdgpu_device *adev,
				       struct amdgpu_gpio_rec *gpio)
{
	struct amdgpu_hpd hpd;
	u32 reg;

	memset(&hpd, 0, sizeof(struct amdgpu_hpd));

	reg = amdgpu_display_hpd_get_gpio_reg(adev);

	hpd.gpio = *gpio;
	if (gpio->reg == reg) {
		switch (gpio->mask) {
		case (1 << 0):
			hpd.hpd = AMDGPU_HPD_1;
			break;
		case (1 << 8):
			hpd.hpd = AMDGPU_HPD_2;
			break;
		case (1 << 16):
			hpd.hpd = AMDGPU_HPD_3;
			break;
		case (1 << 24):
			hpd.hpd = AMDGPU_HPD_4;
			break;
		case (1 << 26):
			hpd.hpd = AMDGPU_HPD_5;
			break;
		case (1 << 28):
			hpd.hpd = AMDGPU_HPD_6;
			break;
		default:
			hpd.hpd = AMDGPU_HPD_NONE;
			break;
		}
	} else {
		hpd.hpd = AMDGPU_HPD_NONE;
	}
	return hpd;
}

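/* Index is the ATOM connector object id; value is the corresponding DRM
 * connector type. Entries with no DRM equivalent map to
 * DRM_MODE_CONNECTOR_Unknown and are skipped by the caller.
 */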
static const int object_connector_convert[] = {
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_VGA,
	DRM_MODE_CONNECTOR_Composite,
	DRM_MODE_CONNECTOR_SVIDEO,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_HDMIA,
	DRM_MODE_CONNECTOR_HDMIB,
	DRM_MODE_CONNECTOR_LVDS,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DisplayPort,
	DRM_MODE_CONNECTOR_eDP,
	DRM_MODE_CONNECTOR_Unknown
};

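/* Check whether the VBIOS Object_Header table (rev >= 2) advertises at
 * least one display path, i.e. whether the board has display hardware.
 */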
bool amdgpu_atombios_has_dce_engine_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 size, data_offset;
	u8 frev, crev;
	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
	ATOM_OBJECT_HEADER *obj_header;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return false;

	if (crev < 2)
		return false;

	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usDisplayPathTableOffset));

	if (path_obj->ucNumOfDispPath)
		return true;
	else
		return false;
}

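/* Parse the VBIOS Object_Header display paths and register an encoder and
 * connector (plus any DDC router, DDC bus and HPD pin found in the object
 * records) for each supported, non-TV/CV device. Returns true on success.
 */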
bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 size, data_offset;
	u8 frev, crev;
	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
	ATOM_OBJECT_TABLE *router_obj;
	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
	ATOM_OBJECT_HEADER *obj_header;
	int i, j, k, path_size, device_support;
	int connector_type;
	u16 conn_id, connector_object_id;
	struct amdgpu_i2c_bus_rec ddc_bus;
	struct amdgpu_router router;
	struct amdgpu_gpio_rec gpio;
	struct amdgpu_hpd hpd;

	if (!amdgpu_atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return false;

	if (crev < 2)
		return false;

	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
	router_obj = (ATOM_OBJECT_TABLE *)
		(ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
	device_support = le16_to_cpu(obj_header->usDeviceSupport);

	path_size = 0;
	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
		ATOM_DISPLAY_OBJECT_PATH *path;

		addr += path_size;
		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
		path_size += le16_to_cpu(path->usSize);

		if (device_support & le16_to_cpu(path->usDeviceTag)) {
			uint8_t con_obj_id =
			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
			    >> OBJECT_ID_SHIFT;

			/* Skip TV/CV support */
			if ((le16_to_cpu(path->usDeviceTag) ==
			     ATOM_DEVICE_TV1_SUPPORT) ||
			    (le16_to_cpu(path->usDeviceTag) ==
			     ATOM_DEVICE_CV_SUPPORT))
				continue;

			if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
				DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
					  con_obj_id, le16_to_cpu(path->usDeviceTag));
				continue;
			}

			connector_type =
				object_connector_convert[con_obj_id];
			connector_object_id = con_obj_id;

			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
				continue;

			router.ddc_valid = false;
			router.cd_valid = false;
			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
				uint8_t grph_obj_type =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);

						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
							ATOM_ENCODER_CAP_RECORD *cap_record;
							u16 caps = 0;

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_ENCODER_CAP_RECORD_TYPE:
									cap_record = (ATOM_ENCODER_CAP_RECORD *)
										record;
									caps = le16_to_cpu(cap_record->usEncoderCap);
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
							amdgpu_display_add_encoder(adev, encoder_obj,
										   le16_to_cpu(path->usDeviceTag),
										   caps);
						}
					}
				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);

						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
							ATOM_I2C_RECORD *i2c_record;
							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
							u8 *num_dst_objs = (u8 *)
								((u8 *)router_src_dst_table + 1 +
								 (router_src_dst_table->ucNumberOfSrc * 2));
							u16 *dst_objs = (u16 *)(num_dst_objs + 1);
							int enum_id;

							router.router_id = router_obj_id;
							for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) {
								if (le16_to_cpu(path->usConnObjectId) ==
								    le16_to_cpu(dst_objs[enum_id]))
									break;
							}

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_I2C_RECORD_TYPE:
									i2c_record =
										(ATOM_I2C_RECORD *)
										record;
									i2c_config =
										(ATOM_I2C_ID_CONFIG_ACCESS *)
										&i2c_record->sucI2cId;
									router.i2c_info =
										amdgpu_atombios_lookup_i2c_gpio(adev,
														i2c_config->ucAccess);
									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
									break;
								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
										record;
									router.ddc_valid = true;
									router.ddc_mux_type = ddc_path->ucMuxType;
									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
									break;
								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
										record;
									router.cd_valid = true;
									router.cd_mux_type = cd_path->ucMuxType;
									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
									router.cd_mux_state = cd_path->ucMuxState[enum_id];
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
						}
					}
				}
			}

			/* look up gpio for ddc, hpd */
			ddc_bus.valid = false;
			hpd.hpd = AMDGPU_HPD_NONE;
			if ((le16_to_cpu(path->usDeviceTag) &
			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
					if (le16_to_cpu(path->usConnObjectId) ==
					    le16_to_cpu(con_obj->asObjects[j].usObjectID)) {
						ATOM_COMMON_RECORD_HEADER *record =
							(ATOM_COMMON_RECORD_HEADER *)
							(ctx->bios + data_offset +
							 le16_to_cpu(con_obj->asObjects[j].usRecordOffset));
						ATOM_I2C_RECORD *i2c_record;
						ATOM_HPD_INT_RECORD *hpd_record;
						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;

						while (record->ucRecordSize > 0 &&
						       record->ucRecordType > 0 &&
						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
							switch (record->ucRecordType) {
							case ATOM_I2C_RECORD_TYPE:
								i2c_record =
									(ATOM_I2C_RECORD *)
									record;
								i2c_config =
									(ATOM_I2C_ID_CONFIG_ACCESS *)
									&i2c_record->sucI2cId;
								ddc_bus = amdgpu_atombios_lookup_i2c_gpio(adev,
													  i2c_config->ucAccess);
								break;
							case ATOM_HPD_INT_RECORD_TYPE:
								hpd_record =
									(ATOM_HPD_INT_RECORD *)
									record;
								gpio = amdgpu_atombios_lookup_gpio(adev,
												   hpd_record->ucHPDIntGPIOID);
								hpd = amdgpu_atombios_get_hpd_info_from_gpio(adev, &gpio);
								hpd.plugged_state = hpd_record->ucPlugged_PinState;
								break;
							}
							record = (ATOM_COMMON_RECORD_HEADER *)
								((char *)record + record->ucRecordSize);
						}
						break;
					}
				}
			}

			/* needed for aux chan transactions */
			ddc_bus.hpd = hpd.hpd;

			conn_id = le16_to_cpu(path->usConnObjectId);

			amdgpu_display_add_connector(adev,
						     conn_id,
						     le16_to_cpu(path->usDeviceTag),
						     connector_type, &ddc_bus,
						     connector_object_id,
						     &hpd,
						     &router);
		}
	}

	amdgpu_link_encoder_connector(adev_to_drm(adev));

	return true;
}

union firmware_info {
	ATOM_FIRMWARE_INFO info;
	ATOM_FIRMWARE_INFO_V1_2 info_12;
	ATOM_FIRMWARE_INFO_V1_3 info_13;
	ATOM_FIRMWARE_INFO_V1_4 info_14;
	ATOM_FIRMWARE_INFO_V2_1 info_21;
	ATOM_FIRMWARE_INFO_V2_2 info_22;
};

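/* Populate adev->clock (pixel/engine/memory PLL limits, default clocks,
 * display clock and firmware capability flags) from the FirmwareInfo data
 * table. Returns 0 on success or -EINVAL if the table cannot be parsed.
 */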
int amdgpu_atombios_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		int i;
		struct amdgpu_pll *ppll = &adev->clock.ppll[0];
		struct amdgpu_pll *spll = &adev->clock.spll;
		struct amdgpu_pll *mpll = &adev->clock.mpll;
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		/* pixel clocks */
		ppll->reference_freq =
		    le16_to_cpu(firmware_info->info.usReferenceClock);
		ppll->reference_div = 0;

		ppll->pll_out_min =
			le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
		ppll->pll_out_max =
		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

		ppll->lcd_pll_out_min =
			le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
		if (ppll->lcd_pll_out_min == 0)
			ppll->lcd_pll_out_min = ppll->pll_out_min;
		ppll->lcd_pll_out_max =
			le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
		if (ppll->lcd_pll_out_max == 0)
			ppll->lcd_pll_out_max = ppll->pll_out_max;

		/* 64800 is 648 MHz in the 10 kHz units used here */
		if (ppll->pll_out_min == 0)
			ppll->pll_out_min = 64800;

		ppll->pll_in_min =
		    le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
		ppll->pll_in_max =
		    le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);

		ppll->min_post_div = 2;
		ppll->max_post_div = 0x7f;
		ppll->min_frac_feedback_div = 0;
		ppll->max_frac_feedback_div = 9;
		ppll->min_ref_div = 2;
		ppll->max_ref_div = 0x3ff;
		ppll->min_feedback_div = 4;
		ppll->max_feedback_div = 0xfff;
		ppll->best_vco = 0;

		for (i = 1; i < AMDGPU_MAX_PPLL; i++)
			adev->clock.ppll[i] = *ppll;

		/* system clock */
		spll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
		spll->reference_div = 0;

		spll->pll_out_min =
		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
		spll->pll_out_max =
		    le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);

		/* presumably a guard against BIOSes that report 0 */
		if (spll->pll_out_min == 0)
			spll->pll_out_min = 64800;

		spll->pll_in_min =
		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
		spll->pll_in_max =
		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);

		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;
		spll->best_vco = 0;

		/* memory clock */
		mpll->reference_freq =
			le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
		mpll->reference_div = 0;

		mpll->pll_out_min =
		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
		mpll->pll_out_max =
		    le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);

		/* presumably a guard against BIOSes that report 0 */
		if (mpll->pll_out_min == 0)
			mpll->pll_out_min = 64800;

		mpll->pll_in_min =
		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
		mpll->pll_in_max =
		    le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);

		adev->clock.default_sclk =
		    le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
		adev->clock.default_mclk =
		    le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);

		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;
		mpll->best_vco = 0;

		/* disp clock */
		adev->clock.default_dispclk =
			le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
		/* set a reasonable default for DP */
		if (adev->clock.default_dispclk < 53900) {
			DRM_DEBUG("Changing default dispclk from %d MHz to 600 MHz\n",
				  adev->clock.default_dispclk / 100);
			adev->clock.default_dispclk = 60000;
		} else if (adev->clock.default_dispclk <= 60000) {
			DRM_DEBUG("Changing default dispclk from %d MHz to 625 MHz\n",
				  adev->clock.default_dispclk / 100);
			adev->clock.default_dispclk = 62500;
		}
		adev->clock.dp_extclk =
			le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
		adev->clock.current_dispclk = adev->clock.default_dispclk;

		adev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
		if (adev->clock.max_pixel_clock == 0)
			adev->clock.max_pixel_clock = 40000;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le16_to_cpu(firmware_info->info.usFirmwareCapability.susAccess);

		ret = 0;
	}

	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;

	return ret;
}

union gfx_info {
	ATOM_GFX_INFO_V2_1 info;
};

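/* Fill in the gfx config (shader engine/CU/backend counts) from the
 * GFX_Info data table. Returns 0 on success or -EINVAL if absent.
 */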
int amdgpu_atombios_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, GFX_Info);
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);

		adev->gfx.config.max_shader_engines = gfx_info->info.max_shader_engines;
		adev->gfx.config.max_tile_pipes = gfx_info->info.max_tile_pipes;
		adev->gfx.config.max_cu_per_sh = gfx_info->info.max_cu_per_sh;
		adev->gfx.config.max_sh_per_se = gfx_info->info.max_sh_per_se;
		adev->gfx.config.max_backends_per_se = gfx_info->info.max_backends_per_se;
		adev->gfx.config.max_texture_channel_caches =
			gfx_info->info.max_texture_channel_caches;

		ret = 0;
	}
	return ret;
}

union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_9 info_9;
};

/*
 * Return vram width from integrated system info table, if available,
 * or 0 if not.
 */
int amdgpu_atombios_get_vram_width(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;

	/* get any igp specific overrides */
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 8:
		case 9:
			return igp_info->info_8.ucUMAChannelNumber * 64;
		default:
			return 0;
		}
	}

	return 0;
}

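/* On IGPs the IntegratedSystemInfo table can override the spread spectrum
 * percentage/rate for TMDS, HDMI and LVDS; apply any non-zero overrides
 * to @ss.
 */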
static void amdgpu_atombios_get_igp_ss_overrides(struct amdgpu_device *adev,
						 struct amdgpu_atom_ss *ss,
						 int id)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;
	u16 percentage = 0, rate = 0;

	/* get any igp specific overrides */
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			(mode_info->atom_context->bios + data_offset);
		switch (crev) {
		case 6:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_6.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_6.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_6.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_6.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 7:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_7.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_7.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_7.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_7.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 8:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_8.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_8.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_8.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_8.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		case 9:
			switch (id) {
			case ASIC_INTERNAL_SS_ON_TMDS:
				percentage = le16_to_cpu(igp_info->info_9.usDVISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usDVISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_HDMI:
				percentage = le16_to_cpu(igp_info->info_9.usHDMISSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usHDMISSpreadRateIn10Hz);
				break;
			case ASIC_INTERNAL_SS_ON_LVDS:
				percentage = le16_to_cpu(igp_info->info_9.usLvdsSSPercentage);
				rate = le16_to_cpu(igp_info->info_9.usLvdsSSpreadRateIn10Hz);
				break;
			}
			break;
		default:
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			break;
		}
		if (percentage)
			ss->percentage = percentage;
		if (rate)
			ss->rate = rate;
	}
}

union asic_ss_info {
	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};

union asic_ss_assignment {
	struct _ATOM_ASIC_SS_ASSIGNMENT v1;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V2 v2;
	struct _ATOM_ASIC_SS_ASSIGNMENT_V3 v3;
};

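/* Find the spread spectrum parameters for clock type @id at frequency
 * @clock in the ASIC_InternalSS_Info table (v1/v2/v3 layouts). Returns
 * true and fills @ss if a matching entry exists.
 */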
bool amdgpu_atombios_get_asic_ss_info(struct amdgpu_device *adev,
				      struct amdgpu_atom_ss *ss,
				      int id, u32 clock)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	union asic_ss_info *ss_info;
	union asic_ss_assignment *ss_assign;
	uint8_t frev, crev;
	int i, num_indices;

	if (id == ASIC_INTERNAL_MEMORY_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_MEMORY_CLOCK_SS_SUPPORT))
			return false;
	}
	if (id == ASIC_INTERNAL_ENGINE_SS) {
		if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_ENGINE_CLOCK_SS_SUPPORT))
			return false;
	}

	memset(ss, 0, sizeof(struct amdgpu_atom_ss));
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		ss_info =
			(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);

		switch (frev) {
		case 1:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT);

			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v1.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v1.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v1.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v1.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v1.usSpreadRateInKhz);
					ss->percentage_divider = 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT));
			}
			break;
		case 2:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_2.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v2.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v2.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v2.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v2.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v2.usSpreadRateIn10Hz);
					ss->percentage_divider = 100;
					if ((crev == 2) &&
					    ((id == ASIC_INTERNAL_ENGINE_SS) ||
					     (id == ASIC_INTERNAL_MEMORY_SS)))
						ss->rate /= 100;
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2));
			}
			break;
		case 3:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
			ss_assign = (union asic_ss_assignment *)((u8 *)&ss_info->info_3.asSpreadSpectrum[0]);
			for (i = 0; i < num_indices; i++) {
				if ((ss_assign->v3.ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_assign->v3.ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_assign->v3.usSpreadSpectrumPercentage);
					ss->type = ss_assign->v3.ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_assign->v3.usSpreadRateIn10Hz);
					if (ss_assign->v3.ucSpreadSpectrumMode &
					    SS_MODE_V3_PERCENTAGE_DIV_BY_1000_MASK)
						ss->percentage_divider = 1000;
					else
						ss->percentage_divider = 100;
					if ((id == ASIC_INTERNAL_ENGINE_SS) ||
					    (id == ASIC_INTERNAL_MEMORY_SS))
						ss->rate /= 100;
					if (adev->flags & AMD_IS_APU)
						amdgpu_atombios_get_igp_ss_overrides(adev, ss, id);
					return true;
				}
				ss_assign = (union asic_ss_assignment *)
					((u8 *)ss_assign + sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3));
			}
			break;
		default:
			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
			break;
		}
	}
	return false;
}

union get_clock_dividers {
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS v1;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V2 v2;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V3 v3;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V4 v4;
	struct _COMPUTE_MEMORY_ENGINE_PLL_PARAMETERS_V5 v5;
	struct _COMPUTE_GPU_CLOCK_INPUT_PARAMETERS_V1_6 v6_in;
	struct _COMPUTE_GPU_CLOCK_OUTPUT_PARAMETERS_V1_6 v6_out;
};

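/* Ask the VBIOS ComputeMemoryEnginePLL command table to compute PLL
 * dividers for @clock (in 10 kHz units). The argument/result layout
 * depends on the table revision (crev 2/3/5: r6xx through SI discrete
 * parts, crev 4: fusion APUs, crev 6: CI).
 */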
int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev,
				       u8 clock_type,
				       u32 clock,
				       bool strobe_mode,
				       struct atom_clock_dividers *dividers)
{
	union get_clock_dividers args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryEnginePLL);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(dividers, 0, sizeof(struct atom_clock_dividers));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 2:
	case 3:
	case 5:
		/* r6xx, r7xx, evergreen, ni, si.
		 * TODO: add support for asic_type <= CHIP_RV770 */
		if (clock_type == COMPUTE_ENGINE_PLL_PARAM) {
			args.v3.ulClockParams = cpu_to_le32((clock_type << 24) | clock);

			if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
			    index, (uint32_t *)&args, sizeof(args)))
				return -EINVAL;

			dividers->post_div = args.v3.ucPostDiv;
			dividers->enable_post_div = (args.v3.ucCntlFlag &
						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
			dividers->enable_dithen = (args.v3.ucCntlFlag &
						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
			dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
			dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
			dividers->ref_div = args.v3.ucRefDiv;
			dividers->vco_mode = (args.v3.ucCntlFlag &
					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
		} else {
			/* for SI we use ComputeMemoryClockParam for memory plls */
			if (adev->asic_type >= CHIP_TAHITI)
				return -EINVAL;
			args.v5.ulClockParams = cpu_to_le32((clock_type << 24) | clock);
			if (strobe_mode)
				args.v5.ucInputFlag = ATOM_PLL_INPUT_FLAG_PLL_STROBE_MODE_EN;

			if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
			    index, (uint32_t *)&args, sizeof(args)))
				return -EINVAL;

			dividers->post_div = args.v5.ucPostDiv;
			dividers->enable_post_div = (args.v5.ucCntlFlag &
						     ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
			dividers->enable_dithen = (args.v5.ucCntlFlag &
						   ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
			dividers->whole_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDiv);
			dividers->frac_fb_div = le16_to_cpu(args.v5.ulFbDiv.usFbDivFrac);
			dividers->ref_div = args.v5.ucRefDiv;
			dividers->vco_mode = (args.v5.ucCntlFlag &
					      ATOM_PLL_CNTL_FLAG_MPLL_VCO_MODE) ? 1 : 0;
		}
		break;
	case 4:
		/* fusion */
		args.v4.ulClock = cpu_to_le32(clock);	/* 10 khz */

		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
		    index, (uint32_t *)&args, sizeof(args)))
			return -EINVAL;

		dividers->post_divider = dividers->post_div = args.v4.ucPostDiv;
		dividers->real_clock = le32_to_cpu(args.v4.ulClock);
		break;
	case 6:
		/* CI */
		/* COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK, COMPUTE_GPUCLK_INPUT_FLAG_SCLK */
		args.v6_in.ulClock.ulComputeClockFlag = clock_type;
		args.v6_in.ulClock.ulClockFreq = cpu_to_le32(clock);	/* 10 khz */

		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
		    index, (uint32_t *)&args, sizeof(args)))
			return -EINVAL;

		dividers->whole_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDiv);
		dividers->frac_fb_div = le16_to_cpu(args.v6_out.ulFbDiv.usFbDivFrac);
		dividers->ref_div = args.v6_out.ucPllRefDiv;
		dividers->post_div = args.v6_out.ucPllPostDiv;
		dividers->flags = args.v6_out.ucPllCntlFlag;
		dividers->real_clock = le32_to_cpu(args.v6_out.ulClock.ulClock);
		dividers->post_divider = args.v6_out.ulClock.ucPostDiv;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

#ifdef CONFIG_DRM_AMDGPU_SI
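/* SI only: compute memory PLL dividers via the ComputeMemoryClockParam
 * command table (frev 2, crev 1).
 */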
int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev,
					    u32 clock,
					    bool strobe_mode,
					    struct atom_mpll_param *mpll_param)
{
	COMPUTE_MEMORY_CLOCK_PARAM_PARAMETERS_V2_1 args;
	int index = GetIndexIntoMasterTable(COMMAND, ComputeMemoryClockParam);
	u8 frev, crev;

	memset(&args, 0, sizeof(args));
	memset(mpll_param, 0, sizeof(struct atom_mpll_param));

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (frev) {
	case 2:
		switch (crev) {
		case 1:
			/* SI */
			args.ulClock = cpu_to_le32(clock);	/* 10 khz */
			args.ucInputFlag = 0;
			if (strobe_mode)
				args.ucInputFlag |= MPLL_INPUT_FLAG_STROBE_MODE_EN;

			if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
			    index, (uint32_t *)&args, sizeof(args)))
				return -EINVAL;

			mpll_param->clkfrac = le16_to_cpu(args.ulFbDiv.usFbDivFrac);
			mpll_param->clkf = le16_to_cpu(args.ulFbDiv.usFbDiv);
			mpll_param->post_div = args.ucPostDiv;
			mpll_param->dll_speed = args.ucDllSpeed;
			mpll_param->bwcntl = args.ucBWCntl;
			mpll_param->vco_mode =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_VCO_MODE_MASK);
			mpll_param->yclk_sel =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_BYPASS_DQ_PLL) ? 1 : 0;
			mpll_param->qdr =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_QDR_ENABLE) ? 1 : 0;
			mpll_param->half_rate =
				(args.ucPllCntlFlag & MPLL_CNTL_FLAG_AD_HALF_RATE) ? 1 : 0;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

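/* Program engine clock dependent DRAM timings through the
 * DynamicMemorySettings command table.
 */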
void amdgpu_atombios_set_engine_dram_timings(struct amdgpu_device *adev,
					     u32 eng_clock, u32 mem_clock)
{
	SET_ENGINE_CLOCK_PS_ALLOCATION args;
	int index = GetIndexIntoMasterTable(COMMAND, DynamicMemorySettings);
	u32 tmp;

	memset(&args, 0, sizeof(args));

	tmp = eng_clock & SET_CLOCK_FREQ_MASK;
	tmp |= (COMPUTE_ENGINE_PLL_PARAM << 24);

	args.ulTargetEngineClock = cpu_to_le32(tmp);
	if (mem_clock)
		args.sReserved.ulClock = cpu_to_le32(mem_clock & SET_CLOCK_FREQ_MASK);

	amdgpu_atom_execute_table(adev->mode_info.atom_context, index, (uint32_t *)&args,
		sizeof(args));
}

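/* Read the boot-up VDDC (and, on FirmwareInfo 2.2+, VDDCI and MVDD)
 * voltages from the FirmwareInfo table; missing values are returned as 0.
 */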
void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev,
					  u16 *vddc, u16 *vddci, u16 *mvdd)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	u8 frev, crev;
	u16 data_offset;
	union firmware_info *firmware_info;

	*vddc = 0;
	*vddci = 0;
	*mvdd = 0;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		*vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage);
		if ((frev == 2) && (crev >= 2)) {
			*vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage);
			*mvdd = le16_to_cpu(firmware_info->info_22.usBootUpMVDDCVoltage);
		}
	}
}

union set_voltage {
	struct _SET_VOLTAGE_PS_ALLOCATION alloc;
	struct _SET_VOLTAGE_PARAMETERS v1;
	struct _SET_VOLTAGE_PARAMETERS_V2 v2;
	struct _SET_VOLTAGE_PARAMETERS_V1_3 v3;
};

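/* Query a voltage level through the SetVoltage command table: crev 2
 * returns the maximum supported voltage, crev 3 translates a voltage or
 * leakage id into a real voltage level.
 */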
int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type,
			     u16 voltage_id, u16 *voltage)
{
	union set_voltage args;
	int index = GetIndexIntoMasterTable(COMMAND, SetVoltage);
	u8 frev, crev;

	if (!amdgpu_atom_parse_cmd_header(adev->mode_info.atom_context, index, &frev, &crev))
		return -EINVAL;

	switch (crev) {
	case 1:
		return -EINVAL;
	case 2:
		args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE;
		args.v2.ucVoltageMode = 0;
		args.v2.usVoltageLevel = 0;

		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
		    index, (uint32_t *)&args, sizeof(args)))
			return -EINVAL;

		*voltage = le16_to_cpu(args.v2.usVoltageLevel);
		break;
	case 3:
		args.v3.ucVoltageType = voltage_type;
		args.v3.ucVoltageMode = ATOM_GET_VOLTAGE_LEVEL;
		args.v3.usVoltageLevel = cpu_to_le16(voltage_id);

		if (amdgpu_atom_execute_table(adev->mode_info.atom_context,
		    index, (uint32_t *)&args, sizeof(args)))
			return -EINVAL;

		*voltage = le16_to_cpu(args.v3.usVoltageLevel);
		break;
	default:
		DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
		return -EINVAL;
	}

	return 0;
}

int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev,
						      u16 *voltage,
						      u16 leakage_idx)
{
	return amdgpu_atombios_get_max_vddc(adev, VOLTAGE_TYPE_VDDC, leakage_idx, voltage);
}

union voltage_object_info {
	struct _ATOM_VOLTAGE_OBJECT_INFO v1;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V2 v2;
	struct _ATOM_VOLTAGE_OBJECT_INFO_V3_1 v3;
};

union voltage_object {
	struct _ATOM_VOLTAGE_OBJECT v1;
	struct _ATOM_VOLTAGE_OBJECT_V2 v2;
	union _ATOM_VOLTAGE_OBJECT_V3 v3;
};

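/* Scan a v3 VoltageObjectInfo blob for the object matching the requested
 * voltage type and mode; returns NULL if no such object is present.
 */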
static ATOM_VOLTAGE_OBJECT_V3 *amdgpu_atombios_lookup_voltage_object_v3(ATOM_VOLTAGE_OBJECT_INFO_V3_1 *v3,
									u8 voltage_type, u8 voltage_mode)
{
	u32 size = le16_to_cpu(v3->sHeader.usStructureSize);
	u32 offset = offsetof(ATOM_VOLTAGE_OBJECT_INFO_V3_1, asVoltageObj[0]);
	u8 *start = (u8 *)v3;

	while (offset < size) {
		ATOM_VOLTAGE_OBJECT_V3 *vo = (ATOM_VOLTAGE_OBJECT_V3 *)(start + offset);

		if ((vo->asGpioVoltageObj.sHeader.ucVoltageType == voltage_type) &&
		    (vo->asGpioVoltageObj.sHeader.ucVoltageMode == voltage_mode))
			return vo;
		offset += le16_to_cpu(vo->asGpioVoltageObj.sHeader.usSize);
	}
	return NULL;
}

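/* Fetch the SVD/SVC GPIO ids used for SVI2 voltage control from the
 * VoltageObjectInfo table (v3.1 only).
 */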
int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev,
			      u8 voltage_type,
			      u8 *svd_gpio_id, u8 *svc_gpio_id)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	union voltage_object_info *voltage_info;
	union voltage_object *voltage_object = NULL;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				voltage_object = (union voltage_object *)
					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
								      voltage_type,
								      VOLTAGE_OBJ_SVID2);
				if (voltage_object) {
					*svd_gpio_id = voltage_object->v3.asSVID2Obj.ucSVDGpioId;
					*svc_gpio_id = voltage_object->v3.asSVID2Obj.ucSVCGpioId;
				} else {
					return -EINVAL;
				}
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return -EINVAL;
		}

	}
	return 0;
}

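/* Return true if the given voltage type/mode is controlled via GPIO
 * according to the VoltageObjectInfo table.
 */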
bool
amdgpu_atombios_is_voltage_gpio(struct amdgpu_device *adev,
				u8 voltage_type, u8 voltage_mode)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	union voltage_object_info *voltage_info;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				if (amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
								  voltage_type, voltage_mode))
					return true;
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return false;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return false;
		}

	}
	return false;
}

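/* Build an atom_voltage_table (voltage/smio pairs, mask and phase delay)
 * from the GPIO voltage object for @voltage_type/@voltage_mode.
 */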
int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev,
				      u8 voltage_type, u8 voltage_mode,
				      struct atom_voltage_table *voltage_table)
{
	int index = GetIndexIntoMasterTable(DATA, VoltageObjectInfo);
	u8 frev, crev;
	u16 data_offset, size;
	int i;
	union voltage_object_info *voltage_info;
	union voltage_object *voltage_object = NULL;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		voltage_info = (union voltage_object_info *)
			(adev->mode_info.atom_context->bios + data_offset);

		switch (frev) {
		case 3:
			switch (crev) {
			case 1:
				voltage_object = (union voltage_object *)
					amdgpu_atombios_lookup_voltage_object_v3(&voltage_info->v3,
								      voltage_type, voltage_mode);
				if (voltage_object) {
					ATOM_GPIO_VOLTAGE_OBJECT_V3 *gpio =
						&voltage_object->v3.asGpioVoltageObj;
					VOLTAGE_LUT_ENTRY_V2 *lut;

					if (gpio->ucGpioEntryNum > MAX_VOLTAGE_ENTRIES)
						return -EINVAL;
					lut = &gpio->asVolGpioLut[0];
					for (i = 0; i < gpio->ucGpioEntryNum; i++) {
						voltage_table->entries[i].value =
							le16_to_cpu(lut->usVoltageValue);
						voltage_table->entries[i].smio_low =
							le32_to_cpu(lut->ulVoltageId);
						lut = (VOLTAGE_LUT_ENTRY_V2 *)
							((u8 *)lut + sizeof(VOLTAGE_LUT_ENTRY_V2));
					}
					voltage_table->mask_low = le32_to_cpu(gpio->ulGpioMaskVal);
					voltage_table->count = gpio->ucGpioEntryNum;
					voltage_table->phase_delay = gpio->ucPhaseDelay;
					return 0;
				}
				break;
			default:
				DRM_ERROR("unknown voltage object table\n");
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("unknown voltage object table\n");
			return -EINVAL;
		}
	}
	return -EINVAL;
}

union vram_info {
	struct _ATOM_VRAM_INFO_V3 v1_3;
	struct _ATOM_VRAM_INFO_V4 v1_4;
	struct _ATOM_VRAM_INFO_HEADER_V2_1 v2_1;
};

#define MEM_ID_MASK           0xff000000
#define MEM_ID_SHIFT          24
#define CLOCK_RANGE_MASK      0x00ffffff
#define CLOCK_RANGE_SHIFT     0
#define LOW_NIBBLE_MASK       0xf
#define DATA_EQU_PREV         0
#define DATA_FROM_TABLE       4

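/* Build the MC register table for @module_index from the VRAM_Info v2.1
 * memory clock patch table: first the register index list, then one
 * mc_data row per data block whose memory id matches the module.
 */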
int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev,
				      u8 module_index,
				      struct atom_mc_reg_table *reg_table)
{
	int index = GetIndexIntoMasterTable(DATA, VRAM_Info);
	u8 frev, crev, num_entries, t_mem_id, num_ranges = 0;
	u32 i = 0, j;
	u16 data_offset, size;
	union vram_info *vram_info;

	memset(reg_table, 0, sizeof(struct atom_mc_reg_table));

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		vram_info = (union vram_info *)
			(adev->mode_info.atom_context->bios + data_offset);
		switch (frev) {
		case 1:
			DRM_ERROR("old table version %d, %d\n", frev, crev);
			return -EINVAL;
		case 2:
			switch (crev) {
			case 1:
				if (module_index < vram_info->v2_1.ucNumOfVRAMModule) {
					ATOM_INIT_REG_BLOCK *reg_block =
						(ATOM_INIT_REG_BLOCK *)
						((u8 *)vram_info + le16_to_cpu(vram_info->v2_1.usMemClkPatchTblOffset));
					ATOM_MEMORY_SETTING_DATA_BLOCK *reg_data =
						(ATOM_MEMORY_SETTING_DATA_BLOCK *)
						((u8 *)reg_block + (2 * sizeof(u16)) +
						 le16_to_cpu(reg_block->usRegIndexTblSize));
					ATOM_INIT_REG_INDEX_FORMAT *format = &reg_block->asRegIndexBuf[0];
					num_entries = (u8)((le16_to_cpu(reg_block->usRegIndexTblSize)) /
							   sizeof(ATOM_INIT_REG_INDEX_FORMAT)) - 1;
					if (num_entries > VBIOS_MC_REGISTER_ARRAY_SIZE)
						return -EINVAL;
					/* copy the register index list */
					while (i < num_entries) {
						if (format->ucPreRegDataLength & ACCESS_PLACEHOLDER)
							break;
						reg_table->mc_reg_address[i].s1 =
							(u16)(le16_to_cpu(format->usRegIndex));
						reg_table->mc_reg_address[i].pre_reg_data =
							(u8)(format->ucPreRegDataLength);
						i++;
						format = (ATOM_INIT_REG_INDEX_FORMAT *)
							((u8 *)format + sizeof(ATOM_INIT_REG_INDEX_FORMAT));
					}
					reg_table->last = i;
					/* walk the per-module data blocks, keeping
					 * the clock ranges that match module_index */
					while ((le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK) &&
					       (num_ranges < VBIOS_MAX_AC_TIMING_ENTRIES)) {
						t_mem_id = (u8)((le32_to_cpu(*(u32 *)reg_data) & MEM_ID_MASK)
								>> MEM_ID_SHIFT);
						if (module_index == t_mem_id) {
							reg_table->mc_reg_table_entry[num_ranges].mclk_max =
								(u32)((le32_to_cpu(*(u32 *)reg_data) & CLOCK_RANGE_MASK)
								      >> CLOCK_RANGE_SHIFT);
							for (i = 0, j = 1; i < reg_table->last; i++) {
								if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_FROM_TABLE) {
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										(u32)le32_to_cpu(*((u32 *)reg_data + j));
									j++;
								} else if ((reg_table->mc_reg_address[i].pre_reg_data & LOW_NIBBLE_MASK) == DATA_EQU_PREV) {
									if (i == 0)
										continue;
									reg_table->mc_reg_table_entry[num_ranges].mc_data[i] =
										reg_table->mc_reg_table_entry[num_ranges].mc_data[i - 1];
								}
							}
							num_ranges++;
						}
						reg_data = (ATOM_MEMORY_SETTING_DATA_BLOCK *)
							((u8 *)reg_data + le16_to_cpu(reg_block->usRegDataBlkSize));
					}
					if (le32_to_cpu(*(u32 *)reg_data) != END_OF_REG_DATA_BLOCK)
						return -EINVAL;
					reg_table->num_entries = num_ranges;
				} else
					return -EINVAL;
				break;
			default:
				DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
				return -EINVAL;
			}
			break;
		default:
			DRM_ERROR("Unknown table version %d, %d\n", frev, crev);
			return -EINVAL;
		}
		return 0;
	}
	return -EINVAL;
}
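
/*
 * Example (hypothetical sketch): a memory-clock DPM setup path would
 * typically allocate the table, fill it for the populated VRAM module,
 * and free it again on failure:
 *
 *	struct atom_mc_reg_table *table;
 *	int ret;
 *
 *	table = kzalloc(sizeof(*table), GFP_KERNEL);
 *	if (!table)
 *		return -ENOMEM;
 *	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
 *	if (ret)
 *		kfree(table);
 */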
#endif

bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, GPUVirtualizationInfo);
	u8 frev, crev;
	u16 data_offset, size;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset))
		return true;

	return false;
}

void amdgpu_atombios_scratch_regs_lock(struct amdgpu_device *adev, bool lock)
{
	uint32_t bios_6_scratch;

	bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);

	if (lock) {
		bios_6_scratch |= ATOM_S6_CRITICAL_STATE;
		bios_6_scratch &= ~ATOM_S6_ACC_MODE;
	} else {
		bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE;
		bios_6_scratch |= ATOM_S6_ACC_MODE;
	}

	WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}
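
/*
 * Example (hypothetical sketch): an ASIC reset path would bracket the
 * sequence so the VBIOS knows not to touch the hardware while the
 * driver owns it:
 *
 *	amdgpu_atombios_scratch_regs_lock(adev, true);
 *	... perform the reset sequence ...
 *	amdgpu_atombios_scratch_regs_lock(adev, false);
 */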

static void amdgpu_atombios_scratch_regs_init(struct amdgpu_device *adev)
{
	uint32_t bios_2_scratch, bios_6_scratch;

	adev->bios_scratch_reg_offset = mmBIOS_SCRATCH_0;

	bios_2_scratch = RREG32(adev->bios_scratch_reg_offset + 2);
	bios_6_scratch = RREG32(adev->bios_scratch_reg_offset + 6);

	/* let the bios control the backlight */
	bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE;

	/* tell the bios not to handle mode switching */
	bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH;

	/* clear the vbios dpms state */
	bios_2_scratch &= ~ATOM_S2_DEVICE_DPMS_STATE;

	WREG32(adev->bios_scratch_reg_offset + 2, bios_2_scratch);
	WREG32(adev->bios_scratch_reg_offset + 6, bios_6_scratch);
}

void amdgpu_atombios_scratch_regs_engine_hung(struct amdgpu_device *adev,
					      bool hung)
{
	u32 tmp = RREG32(adev->bios_scratch_reg_offset + 3);

	if (hung)
		tmp |= ATOM_S3_ASIC_GUI_ENGINE_HUNG;
	else
		tmp &= ~ATOM_S3_ASIC_GUI_ENGINE_HUNG;

	WREG32(adev->bios_scratch_reg_offset + 3, tmp);
}

void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev,
						      u32 backlight_level)
{
	u32 tmp = RREG32(adev->bios_scratch_reg_offset + 2);

	tmp &= ~ATOM_S2_CURRENT_BL_LEVEL_MASK;
	tmp |= (backlight_level << ATOM_S2_CURRENT_BL_LEVEL_SHIFT) &
		ATOM_S2_CURRENT_BL_LEVEL_MASK;

	WREG32(adev->bios_scratch_reg_offset + 2, tmp);
}

bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(adev->bios_scratch_reg_offset + 7);

	if (tmp & ATOM_S7_ASIC_INIT_COMPLETE_MASK)
		return false;
	else
		return true;
}
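
/*
 * Example (hypothetical sketch): at resume the driver only needs to
 * re-run the VBIOS init tables when the scratch register says init has
 * not completed yet:
 *
 *	if (amdgpu_atombios_scratch_need_asic_init(adev))
 *		amdgpu_atom_asic_init(adev->mode_info.atom_context);
 */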

/* Atom needs data in little endian format so swap as appropriate when copying
 * data to or from atom. Note that atom operates on dw units.
 *
 * Use to_le=true when sending data to atom and provide at least
 * ALIGN(num_bytes,4) bytes in the dst buffer.
 *
 * Use to_le=false when receiving data from atom and provide ALIGN(num_bytes,4)
 * bytes in the src buffer.
 */
void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le)
{
#ifdef __BIG_ENDIAN
	u32 src_tmp[5], dst_tmp[5];
	int i;
	u8 align_num_bytes = ALIGN(num_bytes, 4);

	if (to_le) {
		memcpy(src_tmp, src, num_bytes);
		for (i = 0; i < align_num_bytes / 4; i++)
			dst_tmp[i] = cpu_to_le32(src_tmp[i]);
		memcpy(dst, dst_tmp, align_num_bytes);
	} else {
		memcpy(src_tmp, src, align_num_bytes);
		for (i = 0; i < align_num_bytes / 4; i++)
			dst_tmp[i] = le32_to_cpu(src_tmp[i]);
		memcpy(dst, dst_tmp, num_bytes);
	}
#else
	memcpy(dst, src, num_bytes);
#endif
}
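
/*
 * Example (hypothetical sketch; msg, base, send_bytes and recv_bytes are
 * illustrative names): an AUX-style transfer would swap on the way into
 * and out of the atom interpreter, with buffers padded to a dword
 * multiple:
 *
 *	u8 msg[16];
 *
 *	amdgpu_atombios_copy_swap(base, msg, send_bytes, true);
 *	... execute the atom table ...
 *	amdgpu_atombios_copy_swap(msg, base, recv_bytes, false);
 */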

static int amdgpu_atombios_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
	uint16_t data_offset;
	int usage_bytes = 0;
	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
	u64 start_addr;
	u64 size;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);

		DRM_DEBUG("atom firmware requested %08x %dkb\n",
			  le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware),
			  le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb));

		start_addr = firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware;
		size = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb;

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
			(uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests a VRAM reservation for SR-IOV;
			 * start address and size are stored in KB units,
			 * so shift left by 10 to convert to bytes. */
			adev->mman.fw_vram_usage_start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->mman.fw_vram_usage_size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

/* ATOM accessor methods */
/*
 * ATOM is an interpreted byte code stored in tables in the vbios.  The
 * driver registers callbacks to access registers and the interpreter
 * in the driver parses the tables and executes them to program specific
 * actions (set display modes, asic init, etc.).  See amdgpu_atombios.c,
 * atombios.h, and atom.c
 */

/**
 * cail_pll_read - read PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Stubbed out on amdgpu; always returns 0.
 */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_pll_write - write PLL register
 *
 * @info: atom card_info pointer
 * @reg: PLL register offset
 * @val: value to write to the pll register
 *
 * Provides a PLL register accessor for the atom interpreter (r4xx+).
 * Stubbed out on amdgpu; writes are ignored.
 */
static void cail_pll_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_mc_read - read MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Stubbed out on amdgpu; always returns 0.
 */
static uint32_t cail_mc_read(struct card_info *info, uint32_t reg)
{
	return 0;
}

/**
 * cail_mc_write - write MC (Memory Controller) register
 *
 * @info: atom card_info pointer
 * @reg: MC register offset
 * @val: value to write to the MC register
 *
 * Provides an MC register accessor for the atom interpreter (r4xx+).
 * Stubbed out on amdgpu; writes are ignored.
 */
static void cail_mc_write(struct card_info *info, uint32_t reg, uint32_t val)
{

}

/**
 * cail_reg_write - write MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 * @val: value to write to the MMIO register
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 */
static void cail_reg_write(struct card_info *info, uint32_t reg, uint32_t val)
{
	struct amdgpu_device *adev = drm_to_adev(info->dev);

	WREG32(reg, val);
}

/**
 * cail_reg_read - read MMIO register
 *
 * @info: atom card_info pointer
 * @reg: MMIO register offset
 *
 * Provides an MMIO register accessor for the atom interpreter (r4xx+).
 * Returns the value of the MMIO register.
 */
static uint32_t cail_reg_read(struct card_info *info, uint32_t reg)
{
	struct amdgpu_device *adev = drm_to_adev(info->dev);
	uint32_t r;

	r = RREG32(reg);
	return r;
}

static ssize_t amdgpu_atombios_get_vbios_version(struct device *dev,
						 struct device_attribute *attr,
						 char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	struct atom_context *ctx = adev->mode_info.atom_context;

	return sysfs_emit(buf, "%s\n", ctx->vbios_pn);
}

static DEVICE_ATTR(vbios_version, 0444, amdgpu_atombios_get_vbios_version,
		   NULL);

static struct attribute *amdgpu_vbios_version_attrs[] = {
	&dev_attr_vbios_version.attr,
	NULL
};

const struct attribute_group amdgpu_vbios_version_attr_group = {
	.attrs = amdgpu_vbios_version_attrs
};

int amdgpu_atombios_sysfs_init(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context)
		return devm_device_add_group(adev->dev,
					     &amdgpu_vbios_version_attr_group);

	return 0;
}
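
/*
 * Example (hypothetical path; the BDF depends on where the GPU sits on
 * the bus): the attribute group is attached to the PCI device, so
 * userspace can read the VBIOS part number with
 *
 *	cat /sys/bus/pci/devices/0000:03:00.0/vbios_version
 */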

/**
 * amdgpu_atombios_fini - free the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Frees the driver info and register access callbacks for the ATOM
 * interpreter (r4xx+).
 * Called at driver shutdown.
 */
void amdgpu_atombios_fini(struct amdgpu_device *adev)
{
	if (adev->mode_info.atom_context) {
		kfree(adev->mode_info.atom_context->scratch);
		kfree(adev->mode_info.atom_context->iio);
	}
	kfree(adev->mode_info.atom_context);
	adev->mode_info.atom_context = NULL;
	kfree(adev->mode_info.atom_card_info);
	adev->mode_info.atom_card_info = NULL;
}

/**
 * amdgpu_atombios_init - init the driver info and callbacks for atombios
 *
 * @adev: amdgpu_device pointer
 *
 * Initializes the driver info and register access callbacks for the
 * ATOM interpreter (r4xx+).
 * Returns 0 on success, -ENOMEM on failure.
 * Called at driver startup.
 */
int amdgpu_atombios_init(struct amdgpu_device *adev)
{
	struct card_info *atom_card_info =
	    kzalloc(sizeof(struct card_info), GFP_KERNEL);

	if (!atom_card_info)
		return -ENOMEM;

	adev->mode_info.atom_card_info = atom_card_info;
	atom_card_info->dev = adev_to_drm(adev);
	atom_card_info->reg_read = cail_reg_read;
	atom_card_info->reg_write = cail_reg_write;
	atom_card_info->mc_read = cail_mc_read;
	atom_card_info->mc_write = cail_mc_write;
	atom_card_info->pll_read = cail_pll_read;
	atom_card_info->pll_write = cail_pll_write;

	adev->mode_info.atom_context = amdgpu_atom_parse(atom_card_info, adev->bios);
	if (!adev->mode_info.atom_context) {
		amdgpu_atombios_fini(adev);
		return -ENOMEM;
	}

	mutex_init(&adev->mode_info.atom_context->mutex);
	if (adev->is_atom_fw) {
		amdgpu_atomfirmware_scratch_regs_init(adev);
		amdgpu_atomfirmware_allocate_fb_scratch(adev);
		/* cache the firmware flags for later use */
		adev->mode_info.firmware_flags =
			amdgpu_atomfirmware_query_firmware_capability(adev);
	} else {
		amdgpu_atombios_scratch_regs_init(adev);
		amdgpu_atombios_allocate_fb_scratch(adev);
	}

	return 0;
}

int amdgpu_atombios_get_data_table(struct amdgpu_device *adev,
				   uint32_t table,
				   uint16_t *size,
				   uint8_t *frev,
				   uint8_t *crev,
				   uint8_t **addr)
{
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
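
/*
 * Example (hypothetical sketch): fetching a data table and checking the
 * revision before touching version-specific fields:
 *
 *	uint16_t size;
 *	uint8_t frev, crev, *addr;
 *	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
 *
 *	if (!amdgpu_atombios_get_data_table(adev, index, &size, &frev,
 *					    &crev, &addr)) {
 *		... addr now points at the FirmwareInfo table in the vbios ...
 *	}
 */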