/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define SWSMU_CODE_LAYER_L2

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_vangogh.h"
#include "vangogh_ppt.h"
#include "smu_v11_5_ppsmc.h"
#include "smu_v11_5_pmfw.h"
#include "smu_cmn.h"
#include "soc15_common.h"
#include "asic_reg/gc/gc_10_3_0_offset.h"
#include "asic_reg/gc/gc_10_3_0_sh_mask.h"
#include <asm/processor.h>

/*
 * DO NOT use these for err/warn/info/debug messages.
 * Use dev_err, dev_warn, dev_info and dev_dbg instead.
 * They are more MGPU friendly.
 */
#undef pr_err
#undef pr_warn
#undef pr_info
#undef pr_debug
// Registers related to GFXOFF
// addressBlock: smuio_smuio_SmuSmuioDec
// base address: 0x5a000
#define mmSMUIO_GFX_MISC_CNTL			0x00c5
#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX		0

//SMUIO_GFX_MISC_CNTL
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT	0x0
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT		0x1
#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK	0x00000001L
#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK		0x00000006L

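/*
 * Aggregate of the DPM-related SMU feature bits; if any of these is
 * enabled in the firmware feature mask, vangogh_is_dpm_running() below
 * reports DPM as active.
 */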
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
	FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
	FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
	FEATURE_MASK(FEATURE_GFX_DPM_BIT))

static struct cmn2asic_msg_mapping vangogh_message_map[SMU_MSG_MAX_COUNT] = {
	MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 0),
	MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 0),
	MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 0),
	MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 0),
	MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
	MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
	MSG_MAP(PowerDownIspByTile, PPSMC_MSG_PowerDownIspByTile, 0),
	MSG_MAP(PowerUpIspByTile, PPSMC_MSG_PowerUpIspByTile, 0),
	MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 0),
	MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0),
	MSG_MAP(RlcPowerNotify, PPSMC_MSG_RlcPowerNotify, 0),
	MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 0),
	MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 0),
	MSG_MAP(ActiveProcessNotify, PPSMC_MSG_ActiveProcessNotify, 0),
	MSG_MAP(SetHardMinIspiclkByFreq, PPSMC_MSG_SetHardMinIspiclkByFreq, 0),
	MSG_MAP(SetHardMinIspxclkByFreq, PPSMC_MSG_SetHardMinIspxclkByFreq, 0),
	MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 0),
	MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 0),
	MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 0),
	MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 0),
	MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 0),
	MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 0),
	MSG_MAP(SetHardMinSocclkByFreq, PPSMC_MSG_SetHardMinSocclkByFreq, 0),
	MSG_MAP(SetSoftMinFclk, PPSMC_MSG_SetSoftMinFclk, 0),
	MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 0),
	MSG_MAP(EnablePostCode, PPSMC_MSG_EnablePostCode, 0),
	MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 0),
	MSG_MAP(GetFclkFrequency, PPSMC_MSG_GetFclkFrequency, 0),
	MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 0),
	MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 0),
	MSG_MAP(SetSoftMaxSocclkByFreq, PPSMC_MSG_SetSoftMaxSocclkByFreq, 0),
	MSG_MAP(SetSoftMaxFclkByFreq, PPSMC_MSG_SetSoftMaxFclkByFreq, 0),
	MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 0),
	MSG_MAP(SetPowerLimitPercentage, PPSMC_MSG_SetPowerLimitPercentage, 0),
	MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
	MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 0),
	MSG_MAP(SetHardMinFclkByFreq, PPSMC_MSG_SetHardMinFclkByFreq, 0),
	MSG_MAP(SetSoftMinSocclkByFreq, PPSMC_MSG_SetSoftMinSocclkByFreq, 0),
	MSG_MAP(PowerUpCvip, PPSMC_MSG_PowerUpCvip, 0),
	MSG_MAP(PowerDownCvip, PPSMC_MSG_PowerDownCvip, 0),
	MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
	MSG_MAP(GetThermalLimit, PPSMC_MSG_GetThermalLimit, 0),
	MSG_MAP(GetCurrentTemperature, PPSMC_MSG_GetCurrentTemperature, 0),
	MSG_MAP(GetCurrentPower, PPSMC_MSG_GetCurrentPower, 0),
	MSG_MAP(GetCurrentVoltage, PPSMC_MSG_GetCurrentVoltage, 0),
	MSG_MAP(GetCurrentCurrent, PPSMC_MSG_GetCurrentCurrent, 0),
	MSG_MAP(GetAverageCpuActivity, PPSMC_MSG_GetAverageCpuActivity, 0),
	MSG_MAP(GetAverageGfxActivity, PPSMC_MSG_GetAverageGfxActivity, 0),
	MSG_MAP(GetAveragePower, PPSMC_MSG_GetAveragePower, 0),
	MSG_MAP(GetAverageTemperature, PPSMC_MSG_GetAverageTemperature, 0),
	MSG_MAP(SetAveragePowerTimeConstant, PPSMC_MSG_SetAveragePowerTimeConstant, 0),
	MSG_MAP(SetAverageActivityTimeConstant, PPSMC_MSG_SetAverageActivityTimeConstant, 0),
	MSG_MAP(SetAverageTemperatureTimeConstant, PPSMC_MSG_SetAverageTemperatureTimeConstant, 0),
	MSG_MAP(SetMitigationEndHysteresis, PPSMC_MSG_SetMitigationEndHysteresis, 0),
	MSG_MAP(GetCurrentFreq, PPSMC_MSG_GetCurrentFreq, 0),
	MSG_MAP(SetReducedPptLimit, PPSMC_MSG_SetReducedPptLimit, 0),
	MSG_MAP(SetReducedThermalLimit, PPSMC_MSG_SetReducedThermalLimit, 0),
	MSG_MAP(DramLogSetDramAddr, PPSMC_MSG_DramLogSetDramAddr, 0),
	MSG_MAP(StartDramLogging, PPSMC_MSG_StartDramLogging, 0),
	MSG_MAP(StopDramLogging, PPSMC_MSG_StopDramLogging, 0),
	MSG_MAP(SetSoftMinCclk, PPSMC_MSG_SetSoftMinCclk, 0),
	MSG_MAP(SetSoftMaxCclk, PPSMC_MSG_SetSoftMaxCclk, 0),
	MSG_MAP(RequestActiveWgp, PPSMC_MSG_RequestActiveWgp, 0),
	MSG_MAP(SetFastPPTLimit, PPSMC_MSG_SetFastPPTLimit, 0),
	MSG_MAP(SetSlowPPTLimit, PPSMC_MSG_SetSlowPPTLimit, 0),
	MSG_MAP(GetFastPPTLimit, PPSMC_MSG_GetFastPPTLimit, 0),
	MSG_MAP(GetSlowPPTLimit, PPSMC_MSG_GetSlowPPTLimit, 0),
	MSG_MAP(GetGfxOffStatus, PPSMC_MSG_GetGfxOffStatus, 0),
	MSG_MAP(GetGfxOffEntryCount, PPSMC_MSG_GetGfxOffEntryCount, 0),
	MSG_MAP(LogGfxOffResidency, PPSMC_MSG_LogGfxOffResidency, 0),
};

static struct cmn2asic_mapping vangogh_feature_mask_map[SMU_FEATURE_COUNT] = {
	FEA_MAP(PPT),
	FEA_MAP(TDC),
	FEA_MAP(THERMAL),
	FEA_MAP(DS_GFXCLK),
	FEA_MAP(DS_SOCCLK),
	FEA_MAP(DS_LCLK),
	FEA_MAP(DS_FCLK),
	FEA_MAP(DS_MP1CLK),
	FEA_MAP(DS_MP0CLK),
	FEA_MAP(ATHUB_PG),
	FEA_MAP(CCLK_DPM),
	FEA_MAP(FAN_CONTROLLER),
	FEA_MAP(ULV),
	FEA_MAP(VCN_DPM),
	FEA_MAP(LCLK_DPM),
	FEA_MAP(SHUBCLK_DPM),
	FEA_MAP(DCFCLK_DPM),
	FEA_MAP(DS_DCFCLK),
	FEA_MAP(S0I2),
	FEA_MAP(SMU_LOW_POWER),
	FEA_MAP(GFX_DEM),
	FEA_MAP(PSI),
	FEA_MAP(PROCHOT),
	FEA_MAP(CPUOFF),
	FEA_MAP(STAPM),
	FEA_MAP(S0I3),
	FEA_MAP(DF_CSTATES),
	FEA_MAP(PERF_LIMIT),
	FEA_MAP(CORE_DLDO),
	FEA_MAP(RSMU_LOW_POWER),
	FEA_MAP(SMN_LOW_POWER),
	FEA_MAP(THM_LOW_POWER),
	FEA_MAP(SMUIO_LOW_POWER),
	FEA_MAP(MP1_LOW_POWER),
	FEA_MAP(DS_VCN),
	FEA_MAP(CPPC),
	FEA_MAP(OS_CSTATES),
	FEA_MAP(ISP_DPM),
	FEA_MAP(A55_DPM),
	FEA_MAP(CVIP_DSP_DPM),
	FEA_MAP(MSMU_LOW_POWER),
	FEA_MAP_REVERSE(SOCCLK),
	FEA_MAP_REVERSE(FCLK),
	FEA_MAP_HALF_REVERSE(GFX),
};

static struct cmn2asic_mapping vangogh_table_map[SMU_TABLE_COUNT] = {
	TAB_MAP_VALID(WATERMARKS),
	TAB_MAP_VALID(SMU_METRICS),
	TAB_MAP_VALID(CUSTOM_DPM),
	TAB_MAP_VALID(DPMCLOCKS),
};

static struct cmn2asic_mapping vangogh_workload_map[PP_SMC_POWER_PROFILE_COUNT] = {
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_FULLSCREEN3D, WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VIDEO, WORKLOAD_PPLIB_VIDEO_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_VR, WORKLOAD_PPLIB_VR_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_COMPUTE, WORKLOAD_PPLIB_COMPUTE_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CUSTOM, WORKLOAD_PPLIB_CUSTOM_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_CAPPED, WORKLOAD_PPLIB_CAPPED_BIT),
	WORKLOAD_MAP(PP_SMC_POWER_PROFILE_UNCAPPED, WORKLOAD_PPLIB_UNCAPPED_BIT),
};

static const uint8_t vangogh_throttler_map[] = {
	[THROTTLER_STATUS_BIT_SPL]	= (SMU_THROTTLER_SPL_BIT),
	[THROTTLER_STATUS_BIT_FPPT]	= (SMU_THROTTLER_FPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT]	= (SMU_THROTTLER_SPPT_BIT),
	[THROTTLER_STATUS_BIT_SPPT_APU]	= (SMU_THROTTLER_SPPT_APU_BIT),
	[THROTTLER_STATUS_BIT_THM_CORE]	= (SMU_THROTTLER_TEMP_CORE_BIT),
	[THROTTLER_STATUS_BIT_THM_GFX]	= (SMU_THROTTLER_TEMP_GPU_BIT),
	[THROTTLER_STATUS_BIT_THM_SOC]	= (SMU_THROTTLER_TEMP_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_VDD]	= (SMU_THROTTLER_TDC_VDD_BIT),
	[THROTTLER_STATUS_BIT_TDC_SOC]	= (SMU_THROTTLER_TDC_SOC_BIT),
	[THROTTLER_STATUS_BIT_TDC_GFX]	= (SMU_THROTTLER_TDC_GFX_BIT),
	[THROTTLER_STATUS_BIT_TDC_CVIP]	= (SMU_THROTTLER_TDC_CVIP_BIT),
};

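/*
 * Allocate the driver-side SMU table buffers and describe the VRAM-backed
 * tables shared with the SMU. The metrics buffer is sized for whichever of
 * the legacy and current SmuMetrics layouts is larger, since either may be
 * used depending on the firmware interface version.
 */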
static int vangogh_tables_init(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;

	SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_PMSTATUSLOG, SMU11_TOOL_SIZE,
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffExt_t),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
	SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)),
		       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

	smu_table->metrics_table = kzalloc(max(sizeof(SmuMetrics_t), sizeof(SmuMetrics_legacy_t)), GFP_KERNEL);
	if (!smu_table->metrics_table)
		goto err0_out;
	smu_table->metrics_time = 0;

	smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_2);
	smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_3));
	smu_table->gpu_metrics_table_size = max(smu_table->gpu_metrics_table_size, sizeof(struct gpu_metrics_v2_4));
	smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
	if (!smu_table->gpu_metrics_table)
		goto err1_out;

	smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
	if (!smu_table->watermarks_table)
		goto err2_out;

	smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
	if (!smu_table->clocks_table)
		goto err3_out;

	return 0;

err3_out:
	kfree(smu_table->watermarks_table);
err2_out:
	kfree(smu_table->gpu_metrics_table);
err1_out:
	kfree(smu_table->metrics_table);
err0_out:
	return -ENOMEM;
}

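/*
 * Read a single metric from the legacy (interface version < 3) SmuMetrics
 * layout, refreshing the cached table via smu_cmn_get_metrics_table()
 * first. Raw firmware values are rescaled to the units callers expect;
 * unknown members report UINT_MAX.
 */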
static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
					       MetricsMember_t member,
					       uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->GfxActivity / 100;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->UvdActivity / 100;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = (metrics->CurrentSocketPower << 8) / 1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->GfxTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->SocTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		memcpy(value, &metrics->CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

static int vangogh_get_smu_metrics_data(struct smu_context *smu,
					MetricsMember_t member,
					uint32_t *value)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, NULL, false);
	if (ret)
		return ret;

	switch (member) {
	case METRICS_CURR_GFXCLK:
		*value = metrics->Current.GfxclkFrequency;
		break;
	case METRICS_AVERAGE_SOCCLK:
		*value = metrics->Current.SocclkFrequency;
		break;
	case METRICS_AVERAGE_VCLK:
		*value = metrics->Current.VclkFrequency;
		break;
	case METRICS_AVERAGE_DCLK:
		*value = metrics->Current.DclkFrequency;
		break;
	case METRICS_CURR_UCLK:
		*value = metrics->Current.MemclkFrequency;
		break;
	case METRICS_AVERAGE_GFXACTIVITY:
		*value = metrics->Current.GfxActivity;
		break;
	case METRICS_AVERAGE_VCNACTIVITY:
		*value = metrics->Current.UvdActivity;
		break;
	case METRICS_AVERAGE_SOCKETPOWER:
		*value = (metrics->Average.CurrentSocketPower << 8) / 1000;
		break;
	case METRICS_CURR_SOCKETPOWER:
		*value = (metrics->Current.CurrentSocketPower << 8) / 1000;
		break;
	case METRICS_TEMPERATURE_EDGE:
		*value = metrics->Current.GfxTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_TEMPERATURE_HOTSPOT:
		*value = metrics->Current.SocTemperature / 100 *
			 SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case METRICS_THROTTLER_STATUS:
		*value = metrics->Current.ThrottlerStatus;
		break;
	case METRICS_VOLTAGE_VDDGFX:
		*value = metrics->Current.Voltage[2];
		break;
	case METRICS_VOLTAGE_VDDSOC:
		*value = metrics->Current.Voltage[1];
		break;
	case METRICS_AVERAGE_CPUCLK:
		memcpy(value, &metrics->Current.CoreFrequency[0],
		       smu->cpu_core_num * sizeof(uint16_t));
		break;
	default:
		*value = UINT_MAX;
		break;
	}

	return ret;
}

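/*
 * Dispatch between the two metrics layouts based on the firmware interface
 * version: versions below 3 use SmuMetrics_legacy_t, newer firmware uses
 * SmuMetrics_t with its Current/Average sub-structures.
 */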
static int vangogh_common_get_smu_metrics_data(struct smu_context *smu,
					       MetricsMember_t member,
					       uint32_t *value)
{
	int ret = 0;

	if (smu->smc_fw_if_version < 0x3)
		ret = vangogh_get_legacy_smu_metrics_data(smu, member, value);
	else
		ret = vangogh_get_smu_metrics_data(smu, member, value);

	return ret;
}

static int vangogh_allocate_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	smu_dpm->dpm_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
				       GFP_KERNEL);
	if (!smu_dpm->dpm_context)
		return -ENOMEM;

	smu_dpm->dpm_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

static int vangogh_init_smc_tables(struct smu_context *smu)
{
	int ret = 0;

	ret = vangogh_tables_init(smu);
	if (ret)
		return ret;

	ret = vangogh_allocate_dpm_context(smu);
	if (ret)
		return ret;

#ifdef CONFIG_X86
	/* AMD x86 APU only */
	smu->cpu_core_num = topology_num_cores_per_package();
#else
	smu->cpu_core_num = 4;
#endif

	return smu_v11_0_init_smc_tables(smu);
}

static int vangogh_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		/* vcn dpm on is a prerequisite for vcn power gate messages */
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn, 0, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

static int vangogh_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
{
	int ret = 0;

	if (enable) {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
		if (ret)
			return ret;
	} else {
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
		if (ret)
			return ret;
	}

	return ret;
}

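/*
 * DPM is considered running if any feature in SMC_DPM_FEATURE is enabled.
 * Always reported as not running while suspended, since the feature state
 * has to be re-initialized on resume.
 */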
static bool vangogh_is_dpm_running(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;
	uint64_t feature_enabled;

	/* we need to re-init after suspend so return false */
	if (adev->in_suspend)
		return false;

	ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
	if (ret)
		return false;

	return !!(feature_enabled & SMC_DPM_FEATURE);
}

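/*
 * Look up the frequency of a given DPM level for one clock domain in the
 * DpmClocks table. Returns -EINVAL for unknown clock types or out-of-range
 * levels.
 */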
static int vangogh_get_dpm_clk_limited(struct smu_context *smu, enum smu_clk_type clk_type,
				       uint32_t dpm_level, uint32_t *freq)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (!clk_table || clk_type >= SMU_CLK_COUNT)
		return -EINVAL;

	switch (clk_type) {
	case SMU_SOCCLK:
		if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->SocClocks[dpm_level];
		break;
	case SMU_VCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].vclk;
		break;
	case SMU_DCLK:
		if (dpm_level >= clk_table->VcnClkLevelsEnabled)
			return -EINVAL;
		*freq = clk_table->VcnClocks[dpm_level].dclk;
		break;
	case SMU_UCLK:
	case SMU_MCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].memclk;
		break;
	case SMU_FCLK:
		if (dpm_level >= clk_table->NumDfPstatesEnabled)
			return -EINVAL;
		*freq = clk_table->DfPstateTable[dpm_level].fclk;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

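/*
 * Back end for the clock-level sysfs interfaces on legacy (interface
 * version < 3) firmware: emits either the overdrive min/max ranges or the
 * per-level frequency list, marking the level that matches the current
 * clock with '*'.
 */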
static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
					   enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_legacy_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, idx, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
				(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
				(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* socclk levels 3 ~ 6 share the same frequency on vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					      cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	default:
		break;
	}

	return size;
}

static int vangogh_print_clk_levels(struct smu_context *smu,
				    enum smu_clk_type clk_type, char *buf)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;
	SmuMetrics_t metrics;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int i, idx, size = 0, ret = 0;
	uint32_t cur_value = 0, value = 0, count = 0;
	bool cur_value_match_level = false;
	uint32_t min, max;

	memset(&metrics, 0, sizeof(metrics));

	ret = smu_cmn_get_metrics_table(smu, &metrics, false);
	if (ret)
		return ret;

	smu_cmn_get_sysfs_buf(&buf, &size);

	switch (clk_type) {
	case SMU_OD_SCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
				(smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
		}
		break;
	case SMU_OD_CCLK:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "CCLK_RANGE in Core%d:\n", smu->cpu_core_id_select);
			size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
				(smu->cpu_actual_soft_min_freq > 0) ? smu->cpu_actual_soft_min_freq : smu->cpu_default_soft_min_freq);
			size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
				(smu->cpu_actual_soft_max_freq > 0) ? smu->cpu_actual_soft_max_freq : smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_OD_RANGE:
		if (smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL) {
			size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
			size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
				smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
			size += sysfs_emit_at(buf, size, "CCLK: %7uMhz %10uMhz\n",
				smu->cpu_default_soft_min_freq, smu->cpu_default_soft_max_freq);
		}
		break;
	case SMU_SOCCLK:
		/* socclk levels 3 ~ 6 share the same frequency on vangogh */
		count = clk_table->NumSocClkLevelsEnabled;
		cur_value = metrics.Current.SocclkFrequency;
		break;
	case SMU_VCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.VclkFrequency;
		break;
	case SMU_DCLK:
		count = clk_table->VcnClkLevelsEnabled;
		cur_value = metrics.Current.DclkFrequency;
		break;
	case SMU_MCLK:
		count = clk_table->NumDfPstatesEnabled;
		cur_value = metrics.Current.MemclkFrequency;
		break;
	case SMU_FCLK:
		count = clk_table->NumDfPstatesEnabled;
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetFclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetGfxclkFrequency, 0, &cur_value);
		if (ret)
			return ret;
		break;
	default:
		break;
	}

	switch (clk_type) {
	case SMU_SOCCLK:
	case SMU_VCLK:
	case SMU_DCLK:
	case SMU_MCLK:
	case SMU_FCLK:
		for (i = 0; i < count; i++) {
			idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
			if (ret)
				return ret;
			if (!value)
				continue;
			size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
					      cur_value == value ? "*" : "");
			if (cur_value == value)
				cur_value_match_level = true;
		}

		if (!cur_value_match_level)
			size += sysfs_emit_at(buf, size, "   %uMhz *\n", cur_value);
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
		max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
		if (cur_value == max)
			i = 2;
		else if (cur_value == min)
			i = 0;
		else
			i = 1;
		size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
				      i == 0 ? "*" : "");
		size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
				      i == 1 ? cur_value : VANGOGH_UMD_PSTATE_STANDARD_GFXCLK,
				      i == 1 ? "*" : "");
		size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
				      i == 2 ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}

static int vangogh_common_print_clk_levels(struct smu_context *smu,
					   enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	if (smu->smc_fw_if_version < 0x3)
		ret = vangogh_print_legacy_clk_levels(smu, clk_type, buf);
	else
		ret = vangogh_print_clk_levels(smu, clk_type, buf);

	return ret;
}

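/*
 * Translate a forced performance level into per-domain DPM level masks.
 * The DF p-state table appears to be ordered fastest-first (note the index
 * reversal for FCLK/MCLK in the print helpers above), so level 0 is the
 * peak memory/fabric clock and NumDfPstatesEnabled - 1 the minimum.
 */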
static int vangogh_get_profiling_clk_mask(struct smu_context *smu,
					  enum amd_dpm_forced_level level,
					  uint32_t *vclk_mask,
					  uint32_t *dclk_mask,
					  uint32_t *mclk_mask,
					  uint32_t *fclk_mask,
					  uint32_t *soc_mask)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		if (mclk_mask)
			*mclk_mask = clk_table->NumDfPstatesEnabled - 1;

		if (fclk_mask)
			*fclk_mask = clk_table->NumDfPstatesEnabled - 1;

		if (soc_mask)
			*soc_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		if (mclk_mask)
			*mclk_mask = 0;

		if (fclk_mask)
			*fclk_mask = 0;

		if (soc_mask)
			*soc_mask = 1;

		if (vclk_mask)
			*vclk_mask = 1;

		if (dclk_mask)
			*dclk_mask = 1;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) {
		if (mclk_mask)
			*mclk_mask = 0;

		if (fclk_mask)
			*fclk_mask = 0;

		if (soc_mask)
			*soc_mask = 1;

		if (vclk_mask)
			*vclk_mask = 1;

		if (dclk_mask)
			*dclk_mask = 1;
	}

	return 0;
}

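/*
 * Check whether DPM is enabled for the feature backing a clock domain;
 * MCLK/UCLK ride on the FCLK DPM feature and VCLK/DCLK on the VCN DPM
 * feature. Unknown clock types are treated as always enabled.
 */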
static bool vangogh_clk_dpm_is_enabled(struct smu_context *smu,
				       enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
	case SMU_FCLK:
		feature_id = SMU_FEATURE_DPM_FCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	case SMU_VCLK:
	case SMU_DCLK:
		feature_id = SMU_FEATURE_VCN_DPM_BIT;
		break;
	default:
		return true;
	}

	if (!smu_cmn_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}

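/*
 * Report the lowest/highest reachable frequencies for a clock domain. If
 * DPM is disabled for that domain, the boot-time clock is returned for
 * both bounds; otherwise the profiling masks select the min/max DPM
 * levels to look up in the DpmClocks table.
 */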
static int vangogh_get_dpm_ultimate_freq(struct smu_context *smu,
					 enum smu_clk_type clk_type,
					 uint32_t *min,
					 uint32_t *max)
{
	int ret = 0;
	uint32_t soc_mask;
	uint32_t vclk_mask;
	uint32_t dclk_mask;
	uint32_t mclk_mask;
	uint32_t fclk_mask;
	uint32_t clock_limit;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_FCLK:
			clock_limit = smu->smu_table.boot_values.fclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		case SMU_VCLK:
			clock_limit = smu->smu_table.boot_values.vclk;
			break;
		case SMU_DCLK:
			clock_limit = smu->smu_table.boot_values.dclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in MHz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;

		return 0;
	}
	if (max) {
		ret = vangogh_get_profiling_clk_mask(smu,
						     AMD_DPM_FORCED_LEVEL_PROFILE_PEAK,
						     &vclk_mask,
						     &dclk_mask,
						     &mclk_mask,
						     &fclk_mask,
						     &soc_mask);
		if (ret)
			goto failed;

		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, max);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, max);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
	if (min) {
		ret = vangogh_get_profiling_clk_mask(smu,
						     AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK,
						     NULL,
						     NULL,
						     &mclk_mask,
						     &fclk_mask,
						     &soc_mask);
		if (ret)
			goto failed;

		vclk_mask = dclk_mask = 0;

		switch (clk_type) {
		case SMU_UCLK:
		case SMU_MCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, mclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_SOCCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, soc_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_FCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, fclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_VCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, vclk_mask, min);
			if (ret)
				goto failed;
			break;
		case SMU_DCLK:
			ret = vangogh_get_dpm_clk_limited(smu, clk_type, dclk_mask, min);
			if (ret)
				goto failed;
			break;
		default:
			ret = -EINVAL;
			goto failed;
		}
	}
failed:
	return ret;
}

static int vangogh_get_power_profile_mode(struct smu_context *smu,
					  char *buf)
{
	uint32_t i, size = 0;
	int16_t workload_type = 0;

	if (!buf)
		return -EINVAL;

	for (i = 0; i < PP_SMC_POWER_PROFILE_COUNT; i++) {
		/*
		 * Convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT.
		 * Not all profile modes are supported on vangogh.
		 */
		workload_type = smu_cmn_to_asic_specific_index(smu,
							       CMN2ASIC_MAPPING_WORKLOAD,
							       i);

		if (workload_type < 0)
			continue;

		size += sysfs_emit_at(buf, size, "%2d %14s%s\n",
				      i, amdgpu_pp_profile_name[i], (i == smu->power_profile_mode) ? "*" : " ");
	}

	return size;
}

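/*
 * Select a power/workload profile. The chosen profile is mapped to its
 * WORKLOAD_PPLIB_*_BIT and sent to firmware as a one-hot mask via
 * ActiveProcessNotify; BOOTUP_DEFAULT and POWERSAVING are accepted but
 * intentionally not forwarded.
 */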
static int vangogh_set_power_profile_mode(struct smu_context *smu, long *input, uint32_t size)
{
	int workload_type, ret;
	uint32_t profile_mode = input[size];

	if (profile_mode >= PP_SMC_POWER_PROFILE_COUNT) {
		dev_err(smu->adev->dev, "Invalid power profile mode %d\n", profile_mode);
		return -EINVAL;
	}

	if (profile_mode == PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT ||
	    profile_mode == PP_SMC_POWER_PROFILE_POWERSAVING)
		return 0;

	/* convert PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_WORKLOAD,
						       profile_mode);
	if (workload_type < 0) {
		dev_dbg(smu->adev->dev, "Unsupported power profile mode %d on VANGOGH\n",
			profile_mode);
		return -EINVAL;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ActiveProcessNotify,
					      1 << workload_type,
					      NULL);
	if (ret) {
		dev_err_once(smu->adev->dev, "Failed to set workload type %d\n",
			     workload_type);
		return ret;
	}

	smu->power_profile_mode = profile_mode;

	return 0;
}

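/*
 * Clamp a clock domain to [min, max] using the per-domain hard-min and
 * soft-max messages. VCLK frequencies are carried in the upper 16 bits of
 * the SetHardMinVcn/SetSoftMaxVcn argument, while DCLK uses the lower
 * 16 bits of the same messages.
 */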
static int vangogh_set_soft_freq_limited_range(struct smu_context *smu,
					       enum smu_clk_type clk_type,
					       uint32_t min,
					       uint32_t max)
{
	int ret = 0;

	if (!vangogh_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	switch (clk_type) {
	case SMU_GFXCLK:
	case SMU_SCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinGfxClk,
						      min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxGfxClk,
						      max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinFclkByFreq,
						      min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxFclkByFreq,
						      max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_SOCCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinSocclkByFreq,
						      min, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxSocclkByFreq,
						      max, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinVcn,
						      min << 16, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxVcn,
						      max << 16, NULL);
		if (ret)
			return ret;
		break;
	case SMU_DCLK:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinVcn,
						      min, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxVcn,
						      max, NULL);
		if (ret)
			return ret;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

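/*
 * Force a clock domain to the span of DPM levels given in @mask: the
 * lowest set bit becomes the hard minimum and the highest set bit the
 * soft maximum.
 */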
static int vangogh_force_clk_levels(struct smu_context *smu,
				    enum smu_clk_type clk_type, uint32_t mask)
{
	uint32_t soft_min_level = 0, soft_max_level = 0;
	uint32_t min_freq = 0, max_freq = 0;
	int ret = 0;

	soft_min_level = mask ? (ffs(mask) - 1) : 0;
	soft_max_level = mask ? (fls(mask) - 1) : 0;

	switch (clk_type) {
	case SMU_SOCCLK:
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						  soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu, clk_type,
						  soft_max_level, &max_freq);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxSocclkByFreq,
						      max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinSocclkByFreq,
						      min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_FCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
						  clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;
		ret = vangogh_get_dpm_clk_limited(smu,
						  clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxFclkByFreq,
						      max_freq, NULL);
		if (ret)
			return ret;
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinFclkByFreq,
						      min_freq, NULL);
		if (ret)
			return ret;
		break;
	case SMU_VCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
						  clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
						  clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinVcn,
						      min_freq << 16, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxVcn,
						      max_freq << 16, NULL);
		if (ret)
			return ret;

		break;
	case SMU_DCLK:
		ret = vangogh_get_dpm_clk_limited(smu,
						  clk_type, soft_min_level, &min_freq);
		if (ret)
			return ret;

		ret = vangogh_get_dpm_clk_limited(smu,
						  clk_type, soft_max_level, &max_freq);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetHardMinVcn,
						      min_freq, NULL);
		if (ret)
			return ret;

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSoftMaxVcn,
						      max_freq, NULL);
		if (ret)
			return ret;

		break;
	default:
		break;
	}

	return ret;
}

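/*
 * Pin SOCCLK, VCLK, DCLK and FCLK to either their highest or lowest
 * reachable frequency by collapsing the soft range to a single value.
 */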
static int vangogh_force_dpm_limit_value(struct smu_context *smu, bool highest)
{
	int ret = 0, i = 0;
	uint32_t min_freq, max_freq, force_freq;
	enum smu_clk_type clk_type;

	enum smu_clk_type clks[] = {
		SMU_SOCCLK,
		SMU_VCLK,
		SMU_DCLK,
		SMU_FCLK,
	};

	for (i = 0; i < ARRAY_SIZE(clks); i++) {
		clk_type = clks[i];
		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
		if (ret)
			return ret;

		force_freq = highest ? max_freq : min_freq;
		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, force_freq, force_freq);
		if (ret)
			return ret;
	}

	return ret;
}

static int vangogh_unforce_dpm_levels(struct smu_context *smu)
{
	int ret = 0, i = 0;
	uint32_t min_freq, max_freq;
	enum smu_clk_type clk_type;

	struct clk_feature_map {
		enum smu_clk_type clk_type;
		uint32_t feature;
	} clk_feature_map[] = {
		{SMU_FCLK, SMU_FEATURE_DPM_FCLK_BIT},
		{SMU_SOCCLK, SMU_FEATURE_DPM_SOCCLK_BIT},
		{SMU_VCLK, SMU_FEATURE_VCN_DPM_BIT},
		{SMU_DCLK, SMU_FEATURE_VCN_DPM_BIT},
	};

	for (i = 0; i < ARRAY_SIZE(clk_feature_map); i++) {
		if (!smu_cmn_feature_is_enabled(smu, clk_feature_map[i].feature))
			continue;

		clk_type = clk_feature_map[i].clk_type;

		ret = vangogh_get_dpm_ultimate_freq(smu, clk_type, &min_freq, &max_freq);
		if (ret)
			return ret;

		ret = vangogh_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
		if (ret)
			return ret;
	}

	return ret;
}

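/* Pin FCLK, SOCCLK, VCLK and DCLK to their peak frequencies. */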
static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
{
	int ret = 0;
	uint32_t socclk_freq = 0, fclk_freq = 0;
	uint32_t vclk_freq = 0, dclk_freq = 0;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_FCLK, NULL, &fclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_FCLK, fclk_freq, fclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_SOCCLK, NULL, &socclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_SOCCLK, socclk_freq, socclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_VCLK, NULL, &vclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_VCLK, vclk_freq, vclk_freq);
	if (ret)
		return ret;

	ret = vangogh_get_dpm_ultimate_freq(smu, SMU_DCLK, NULL, &dclk_freq);
	if (ret)
		return ret;

	ret = vangogh_set_soft_freq_limited_range(smu, SMU_DCLK, dclk_freq, dclk_freq);
	if (ret)
		return ret;

	return ret;
}

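/*
 * Apply a forced performance level: pick the gfxclk hard-min/soft-max
 * window for the level, force the other domains where required, and
 * finally program the per-core CCLK soft range (supported on SMU firmware
 * 0x43f1b00 and newer, with the core index encoded in bits 20+ of the
 * message argument).
 */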
static int vangogh_set_performance_level(struct smu_context *smu,
					 enum amd_dpm_forced_level level)
{
	int ret = 0, i;
	uint32_t soc_mask, mclk_mask, fclk_mask;
	uint32_t vclk_mask = 0, dclk_mask = 0;

	smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
	smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_soft_max_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_force_dpm_limit_value(smu, true);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;

		ret = vangogh_force_dpm_limit_value(smu, false);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_unforce_dpm_levels(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_STANDARD_GFXCLK;

		ret = vangogh_get_profiling_clk_mask(smu, level,
						     &vclk_mask,
						     &dclk_mask,
						     &mclk_mask,
						     &fclk_mask,
						     &soc_mask);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		vangogh_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask);
		vangogh_force_clk_levels(smu, SMU_VCLK, 1 << vclk_mask);
		vangogh_force_clk_levels(smu, SMU_DCLK, 1 << dclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_hard_min_freq;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
		smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;

		ret = vangogh_get_profiling_clk_mask(smu, level,
						     NULL,
						     NULL,
						     &mclk_mask,
						     &fclk_mask,
						     NULL);
		if (ret)
			return ret;

		vangogh_force_clk_levels(smu, SMU_FCLK, 1 << fclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		smu->gfx_actual_hard_min_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;
		smu->gfx_actual_soft_max_freq = VANGOGH_UMD_PSTATE_PEAK_GFXCLK;

		ret = vangogh_set_peak_clock_by_device(smu);
		if (ret)
			return ret;
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
					      smu->gfx_actual_hard_min_freq, NULL);
	if (ret)
		return ret;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
					      smu->gfx_actual_soft_max_freq, NULL);
	if (ret)
		return ret;

	if (smu->adev->pm.fw_version >= 0x43f1b00) {
		for (i = 0; i < smu->cpu_core_num; i++) {
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_min_freq),
							      NULL);
			if (ret)
				return ret;

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
							      ((i << 20)
							       | smu->cpu_actual_soft_max_freq),
							      NULL);
			if (ret)
				return ret;
		}
	}

	return ret;
}

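/*
 * amd_pp sensor read callback. Most sensors are thin wrappers around the
 * metrics table; clock frequencies are rescaled (x100) from the
 * metrics-table units before being returned, and *size reports the
 * payload width in bytes.
 */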
static int vangogh_read_sensor(struct smu_context *smu,
			       enum amd_pp_sensors sensor,
			       void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_GFXACTIVITY,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_LOAD:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_VCNACTIVITY,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_AVG_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_SOCKETPOWER,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_INPUT_POWER:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_SOCKETPOWER,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_TEMPERATURE_EDGE,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_TEMPERATURE_HOTSPOT,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_UCLK,
							  (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_CURR_GFXCLK,
							  (uint32_t *)data);
		*(uint32_t *)data *= 100;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_VOLTAGE_VDDGFX,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDNB:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_VOLTAGE_VDDSOC,
							  (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_CPU_CLK:
		ret = vangogh_common_get_smu_metrics_data(smu,
							  METRICS_AVERAGE_CPUCLK,
							  (uint32_t *)data);
		*size = smu->cpu_core_num * sizeof(uint16_t);
		break;
	default:
		ret = -EOPNOTSUPP;
		break;
	}

	return ret;
}

static int vangogh_get_apu_thermal_limit(struct smu_context *smu, uint32_t *limit)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_GetThermalLimit,
					       0, limit);
}

static int vangogh_set_apu_thermal_limit(struct smu_context *smu, uint32_t limit)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_SetReducedThermalLimit,
					       limit, NULL);
}

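/*
 * Copy the display driver's watermark ranges into the Watermarks_t table
 * (reader sets fill the DCFCLK rows, writer sets the SOCCLK rows) and
 * upload it to the SMU once, the first time valid ranges exist.
 */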
static int vangogh_set_watermarks_table(struct smu_context *smu,
					struct pp_smu_wm_range_sets *clock_ranges)
{
	int i;
	int ret = 0;
	Watermarks_t *table = smu->smu_table.watermarks_table;

	if (!table || !clock_ranges)
		return -EINVAL;

	if (clock_ranges) {
		if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
		    clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
			return -EINVAL;

		for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
			table->WatermarkRow[WM_DCFCLK][i].MinClock =
				clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
			table->WatermarkRow[WM_DCFCLK][i].MaxClock =
				clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
			table->WatermarkRow[WM_DCFCLK][i].MinMclk =
				clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
			table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
				clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;

			table->WatermarkRow[WM_DCFCLK][i].WmSetting =
				clock_ranges->reader_wm_sets[i].wm_inst;
		}

		for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
			table->WatermarkRow[WM_SOCCLK][i].MinClock =
				clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MaxClock =
				clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MinMclk =
				clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
			table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
				clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;

			table->WatermarkRow[WM_SOCCLK][i].WmSetting =
				clock_ranges->writer_wm_sets[i].wm_inst;
		}

		smu->watermarks_bitmap |= WATERMARKS_EXIST;
	}

	/* pass data to smu controller */
	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		ret = smu_cmn_write_watermarks_table(smu);
		if (ret) {
			dev_err(smu->adev->dev, "Failed to update WMTABLE!");
			return ret;
		}
		smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}

	return 0;
}

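/*
 * The gpu_metrics helpers below differ only in the firmware metrics layout
 * they read (legacy vs Current/Average) and the gpu_metrics_v2_x revision
 * they emit; a common dispatcher (not shown in this section) presumably
 * selects among them by firmware version, mirroring
 * vangogh_common_get_smu_metrics_data() above.
 */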
static ssize_t vangogh_get_legacy_gpu_metrics_v2_3(struct smu_context *smu,
						   void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_3 *gpu_metrics =
		(struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
	       &metrics.CoreTemperature[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
	       &metrics.CorePower[0],
	       sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
	       &metrics.CoreFrequency[0],
	       sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}

static ssize_t vangogh_get_legacy_gpu_metrics(struct smu_context *smu,
					      void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_legacy_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.UvdActivity;

	gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Power[0];
	gpu_metrics->average_soc_power = metrics.Power[1];
	gpu_metrics->average_gfx_power = metrics.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}

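/* gpu_metrics v2.3 extends v2.2 with the Average.* temperature view. */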
static ssize_t vangogh_get_gpu_metrics_v2_3(struct smu_context *smu,
					    void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_3 *gpu_metrics =
		(struct gpu_metrics_v2_3 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 3);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
	gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
	memcpy(&gpu_metrics->average_temperature_core[0],
		&metrics.Average.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_3);
}

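/*
 * gpu_metrics v2.4 additionally reports per-rail voltage and current
 * telemetry for the CPU, SoC and GFX domains.
 */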
static ssize_t vangogh_get_gpu_metrics_v2_4(struct smu_context *smu,
					    void **table)
{
	SmuMetrics_t metrics;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_4 *gpu_metrics =
		(struct gpu_metrics_v2_4 *)smu_table->gpu_metrics_table;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 4);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_temperature_gfx = metrics.Average.GfxTemperature;
	gpu_metrics->average_temperature_soc = metrics.Average.SocTemperature;
	memcpy(&gpu_metrics->average_temperature_core[0],
		&metrics.Average.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->average_temperature_l3[0] = metrics.Average.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Average.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Average.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Average.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Average.Power[0];
	gpu_metrics->average_soc_power = metrics.Average.Power[1];
	gpu_metrics->average_gfx_power = metrics.Average.Power[2];

	gpu_metrics->average_cpu_voltage = metrics.Average.Voltage[0];
	gpu_metrics->average_soc_voltage = metrics.Average.Voltage[1];
	gpu_metrics->average_gfx_voltage = metrics.Average.Voltage[2];

	gpu_metrics->average_cpu_current = metrics.Average.Current[0];
	gpu_metrics->average_soc_current = metrics.Average.Current[1];
	gpu_metrics->average_gfx_current = metrics.Average.Current[2];

	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_4);
}

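/* Fill the v2.2 table from the newer Current/Average metrics layout. */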
static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu,
				       void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct gpu_metrics_v2_2 *gpu_metrics =
		(struct gpu_metrics_v2_2 *)smu_table->gpu_metrics_table;
	SmuMetrics_t metrics;
	int ret = 0;

	ret = smu_cmn_get_metrics_table(smu, &metrics, true);
	if (ret)
		return ret;

	smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 2);

	gpu_metrics->temperature_gfx = metrics.Current.GfxTemperature;
	gpu_metrics->temperature_soc = metrics.Current.SocTemperature;
	memcpy(&gpu_metrics->temperature_core[0],
		&metrics.Current.CoreTemperature[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->temperature_l3[0] = metrics.Current.L3Temperature[0];

	gpu_metrics->average_gfx_activity = metrics.Current.GfxActivity;
	gpu_metrics->average_mm_activity = metrics.Current.UvdActivity;

	gpu_metrics->average_socket_power = metrics.Current.CurrentSocketPower;
	gpu_metrics->average_cpu_power = metrics.Current.Power[0];
	gpu_metrics->average_soc_power = metrics.Current.Power[1];
	gpu_metrics->average_gfx_power = metrics.Current.Power[2];
	memcpy(&gpu_metrics->average_core_power[0],
		&metrics.Average.CorePower[0],
		sizeof(uint16_t) * 4);

	gpu_metrics->average_gfxclk_frequency = metrics.Average.GfxclkFrequency;
	gpu_metrics->average_socclk_frequency = metrics.Average.SocclkFrequency;
	gpu_metrics->average_uclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_fclk_frequency = metrics.Average.MemclkFrequency;
	gpu_metrics->average_vclk_frequency = metrics.Average.VclkFrequency;
	gpu_metrics->average_dclk_frequency = metrics.Average.DclkFrequency;

	gpu_metrics->current_gfxclk = metrics.Current.GfxclkFrequency;
	gpu_metrics->current_socclk = metrics.Current.SocclkFrequency;
	gpu_metrics->current_uclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_fclk = metrics.Current.MemclkFrequency;
	gpu_metrics->current_vclk = metrics.Current.VclkFrequency;
	gpu_metrics->current_dclk = metrics.Current.DclkFrequency;

	memcpy(&gpu_metrics->current_coreclk[0],
		&metrics.Current.CoreFrequency[0],
		sizeof(uint16_t) * 4);
	gpu_metrics->current_l3clk[0] = metrics.Current.L3Frequency[0];

	gpu_metrics->throttle_status = metrics.Current.ThrottlerStatus;
	gpu_metrics->indep_throttle_status =
		smu_cmn_get_indep_throttler_status(metrics.Current.ThrottlerStatus,
						   vangogh_throttler_map);

	gpu_metrics->system_clock_counter = ktime_get_boottime_ns();

	*table = (void *)gpu_metrics;

	return sizeof(struct gpu_metrics_v2_2);
}

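/*
 * Select the gpu_metrics layout that matches the running PMFW:
 * program-6 firmware gained the v2.4 fields at version 0x3F0800, other
 * programs switch at 0x043F3E00, and interface versions below 3 still
 * use the legacy SmuMetrics layout.
 */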
static ssize_t vangogh_common_get_gpu_metrics(struct smu_context *smu,
					      void **table)
{
	uint32_t smu_program;
	uint32_t fw_version;
	int ret = 0;

	smu_program = (smu->smc_fw_version >> 24) & 0xff;
	fw_version = smu->smc_fw_version & 0xffffff;
	if (smu_program == 6) {
		if (fw_version >= 0x3F0800)
			ret = vangogh_get_gpu_metrics_v2_4(smu, table);
		else
			ret = vangogh_get_gpu_metrics_v2_3(smu, table);
	} else {
		if (smu->smc_fw_version >= 0x043F3E00) {
			if (smu->smc_fw_if_version < 0x3)
				ret = vangogh_get_legacy_gpu_metrics_v2_3(smu, table);
			else
				ret = vangogh_get_gpu_metrics_v2_3(smu, table);
		} else {
			if (smu->smc_fw_if_version < 0x3)
				ret = vangogh_get_legacy_gpu_metrics(smu, table);
			else
				ret = vangogh_get_gpu_metrics(smu, table);
		}
	}

	return ret;
}

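/*
 * Fine-grain clock control, reached through the pp_od_clk_voltage sysfs
 * node. Illustrative usage from a root shell (the card index is an
 * assumption for the example):
 *   echo "s 0 400"    > /sys/class/drm/card0/device/pp_od_clk_voltage
 *   echo "p 2 0 1400" > /sys/class/drm/card0/device/pp_od_clk_voltage
 *   echo "c"          > /sys/class/drm/card0/device/pp_od_clk_voltage
 * i.e. min sclk 400 MHz, core 2 min cclk 1400 MHz, then commit;
 * "r" restores the defaults.
 */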
static int vangogh_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
				     long input[], uint32_t size)
{
	int ret = 0;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!(smu_dpm_ctx->dpm_level == AMD_DPM_FORCED_LEVEL_MANUAL)) {
		dev_warn(smu->adev->dev,
			 "pp_od_clk_voltage is not accessible if power_dpm_force_performance_level is not in manual mode!\n");
		return -EINVAL;
	}

	switch (type) {
	case PP_OD_EDIT_CCLK_VDDC_TABLE:
		if (size != 3) {
			dev_err(smu->adev->dev, "Input parameter number not correct (should be 3 for processor)\n");
			return -EINVAL;
		}
		if (input[0] >= smu->cpu_core_num) {
			dev_err(smu->adev->dev, "core index is out of range, should be less than %d\n",
				smu->cpu_core_num);
			return -EINVAL;
		}
		smu->cpu_core_id_select = input[0];
		if (input[1] == 0) {
			if (input[2] < smu->cpu_default_soft_min_freq) {
				dev_warn(smu->adev->dev, "Fine grain setting minimum cclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[2], smu->cpu_default_soft_min_freq);
				return -EINVAL;
			}
			smu->cpu_actual_soft_min_freq = input[2];
		} else if (input[1] == 1) {
			if (input[2] > smu->cpu_default_soft_max_freq) {
				dev_warn(smu->adev->dev, "Fine grain setting maximum cclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[2], smu->cpu_default_soft_max_freq);
				return -EINVAL;
			}
			smu->cpu_actual_soft_max_freq = input[2];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (size != 2) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		}

		if (input[0] == 0) {
			if (input[1] < smu->gfx_default_hard_min_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_hard_min_freq);
				return -EINVAL;
			}
			smu->gfx_actual_hard_min_freq = input[1];
		} else if (input[0] == 1) {
			if (input[1] > smu->gfx_default_soft_max_freq) {
				dev_warn(smu->adev->dev,
					 "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
					 input[1], smu->gfx_default_soft_max_freq);
				return -EINVAL;
			}
			smu->gfx_actual_soft_max_freq = input[1];
		} else {
			return -EINVAL;
		}
		break;
	case PP_OD_RESTORE_DEFAULT_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
			smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
			smu->cpu_actual_soft_min_freq = smu->cpu_default_soft_min_freq;
			smu->cpu_actual_soft_max_freq = smu->cpu_default_soft_max_freq;
		}
		break;
	case PP_OD_COMMIT_DPM_TABLE:
		if (size != 0) {
			dev_err(smu->adev->dev, "Input parameter number not correct\n");
			return -EINVAL;
		} else {
			if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
				dev_err(smu->adev->dev,
					"The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
					smu->gfx_actual_hard_min_freq,
					smu->gfx_actual_soft_max_freq);
				return -EINVAL;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
							      smu->gfx_actual_hard_min_freq, NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set hard min sclk failed!");
				return ret;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
							      smu->gfx_actual_soft_max_freq, NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft max sclk failed!");
				return ret;
			}

			if (smu->adev->pm.fw_version < 0x43f1b00) {
				dev_warn(smu->adev->dev, "CPUSoftMax/CPUSoftMin are not supported, please update SBIOS!\n");
				break;
			}

			/* the target core index is packed into the upper bits of the argument */
			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
							      ((smu->cpu_core_id_select << 20)
							       | smu->cpu_actual_soft_min_freq),
							      NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft min cclk failed!");
				return ret;
			}

			ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
							      ((smu->cpu_core_id_select << 20)
							       | smu->cpu_actual_soft_max_freq),
							      NULL);
			if (ret) {
				dev_err(smu->adev->dev, "Set soft max cclk failed!");
				return ret;
			}
		}
		break;
	default:
		return -ENOSYS;
	}

	return ret;
}

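/* Refresh the driver copy of the DPM clock table published by the PMFW. */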
static int vangogh_set_default_dpm_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;

	return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
}

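/*
 * The GFX limits come from the clock table; the CCLK soft limits are
 * fixed at 1400-3500 MHz since the table does not provide them.
 */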
static int vangogh_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
{
	DpmClocks_t *clk_table = smu->smu_table.clocks_table;

	smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
	smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
	smu->gfx_actual_hard_min_freq = 0;
	smu->gfx_actual_soft_max_freq = 0;

	smu->cpu_default_soft_min_freq = 1400;
	smu->cpu_default_soft_max_freq = 3500;
	smu->cpu_actual_soft_min_freq = 0;
	smu->cpu_actual_soft_max_freq = 0;

	return 0;
}

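/* FCLK and MEMCLK share the DF P-state table, including its voltage. */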
static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clocks *clock_table)
{
	DpmClocks_t *table = smu->smu_table.clocks_table;
	int i;

	if (!clock_table || !table)
		return -EINVAL;

	for (i = 0; i < NUM_SOCCLK_DPM_LEVELS; i++) {
		clock_table->SocClocks[i].Freq = table->SocClocks[i];
		clock_table->SocClocks[i].Vol = table->SocVoltage[i];
	}

	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
		clock_table->FClocks[i].Freq = table->DfPstateTable[i].fclk;
		clock_table->FClocks[i].Vol = table->DfPstateTable[i].voltage;
	}

	for (i = 0; i < NUM_FCLK_DPM_LEVELS; i++) {
		clock_table->MemClocks[i].Freq = table->DfPstateTable[i].memclk;
		clock_table->MemClocks[i].Vol = table->DfPstateTable[i].voltage;
	}

	return 0;
}

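/*
 * Tell the PMFW when the RLC is about to be powered off; only firmware
 * 0x43f1700 and newer understands the notification.
 */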
static int vangogh_notify_rlc_state(struct smu_context *smu, bool en)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->pm.fw_version >= 0x43f1700 && !en)
		ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
						      RLC_STATUS_OFF, NULL);

	return ret;
}

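/*
 * Late init: enable GFXOFF when GFX DPM and power gating allow it, then
 * ask the PMFW to keep only the required number of WGPs active.
 */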
static int vangogh_post_smu_init(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t tmp;
	int ret = 0;
	uint8_t aon_bits = 0;
	/* Two CUs in one WGP */
	uint32_t req_active_wgps = adev->gfx.cu_info.number/2;
	uint32_t total_cu = adev->gfx.config.max_cu_per_sh *
		adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* the Allow message is only sent after the Enable message on Vangogh */
	if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
	    (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
		if (ret) {
			dev_err(adev->dev, "Failed to Enable GfxOff!\n");
			return ret;
		}
	} else {
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		dev_info(adev->dev, "GFX DPM or power gating disabled, disabling GFXOFF\n");
	}

	/* if all CUs are active, no need to power off any WGPs */
	if (total_cu == adev->gfx.cu_info.number)
		return 0;

	/*
	 * Calculate the total number of always-on WGP bits across all
	 * SA/SEs in RLC_PG_ALWAYS_ON_WGP_MASK.
	 */
	tmp = RREG32_KIQ(SOC15_REG_OFFSET(GC, 0, mmRLC_PG_ALWAYS_ON_WGP_MASK));
	tmp &= RLC_PG_ALWAYS_ON_WGP_MASK__AON_WGP_MASK_MASK;

	aon_bits = hweight32(tmp) * adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;

	/* never request fewer active WGPs than the AON_WGP_MASK specifies */
	if (aon_bits > req_active_wgps) {
		dev_info(adev->dev, "Number of always on WGPs greater than active WGPs: WGP power save not requested.\n");
		return 0;
	} else {
		return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RequestActiveWgp, req_active_wgps, NULL);
	}
}

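/*
 * Mode resets are requested through GfxDeviceDriverReset; a missing
 * message mapping (-EACCES) means the firmware does not expose it,
 * which is treated as success.
 */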
static int vangogh_mode_reset(struct smu_context *smu, int type)
{
	int ret = 0, index = 0;

	index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
					       SMU_MSG_GfxDeviceDriverReset);
	if (index < 0)
		return index == -EACCES ? 0 : index;

	mutex_lock(&smu->message_lock);

	ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type);

	mutex_unlock(&smu->message_lock);

	mdelay(10);

	return ret;
}

static int vangogh_mode2_reset(struct smu_context *smu)
{
	return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}

/**
 * vangogh_get_gfxoff_status - Get gfxoff status
 *
 * @smu: smu_context pointer
 *
 * Get current gfxoff status
 *
 * Return:
 * * 0 - GFXOFF (default if enabled).
 * * 1 - Transition out of GFX State.
 * * 2 - Not in GFXOFF.
 * * 3 - Transition into GFXOFF.
 */
static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	u32 reg, gfxoff_status;

	reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
	gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
		>> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;

	return gfxoff_status;
}

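/*
 * The PMFW exposes two socket power limits, the "slow" and "fast" PPT
 * limits. The firmware reports both in milliwatts; they are exposed
 * here in watts.
 */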
static int vangogh_get_power_limit(struct smu_context *smu,
				   uint32_t *current_power_limit,
				   uint32_t *default_power_limit,
				   uint32_t *max_power_limit,
				   uint32_t *min_power_limit)
{
	struct smu_11_5_power_context *power_context =
		smu->smu_power.power_context;
	uint32_t ppt_limit;
	int ret = 0;

	/* the PPT limit messages need PMFW 0x43f1e00 or newer */
	if (smu->adev->pm.fw_version < 0x43f1e00)
		return ret;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetSlowPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get slow PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	if (current_power_limit)
		*current_power_limit = ppt_limit / 1000;
	if (default_power_limit)
		*default_power_limit = ppt_limit / 1000;
	if (max_power_limit)
		*max_power_limit = 29;
	if (min_power_limit)
		*min_power_limit = 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetFastPPTLimit, &ppt_limit);
	if (ret) {
		dev_err(smu->adev->dev, "Get fast PPT limit failed!\n");
		return ret;
	}
	/* convert from milliwatt to watt */
	power_context->current_fast_ppt_limit =
		power_context->default_fast_ppt_limit = ppt_limit / 1000;
	power_context->max_fast_ppt_limit = 30;

	return ret;
}

static int vangogh_get_ppt_limit(struct smu_context *smu,
				 uint32_t *ppt_limit,
				 enum smu_ppt_limit_type type,
				 enum smu_ppt_limit_level level)
{
	struct smu_11_5_power_context *power_context =
		smu->smu_power.power_context;

	if (!power_context)
		return -EOPNOTSUPP;

	if (type == SMU_FAST_PPT_LIMIT) {
		switch (level) {
		case SMU_PPT_LIMIT_MAX:
			*ppt_limit = power_context->max_fast_ppt_limit;
			break;
		case SMU_PPT_LIMIT_CURRENT:
			*ppt_limit = power_context->current_fast_ppt_limit;
			break;
		case SMU_PPT_LIMIT_DEFAULT:
			*ppt_limit = power_context->default_fast_ppt_limit;
			break;
		default:
			break;
		}
	}

	return 0;
}

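/*
 * The limit type arrives tagged in the upper byte of @ppt_limit (bit 24
 * and up); the tag is stripped before the value, in watts, is range
 * checked and forwarded to the firmware in milliwatts.
 */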
static int vangogh_set_power_limit(struct smu_context *smu,
				   enum smu_ppt_limit_type limit_type,
				   uint32_t ppt_limit)
{
	struct smu_11_5_power_context *power_context =
		smu->smu_power.power_context;
	int ret = 0;

	if (!smu_cmn_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		dev_err(smu->adev->dev, "Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	switch (limit_type) {
	case SMU_DEFAULT_PPT_LIMIT:
		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetSlowPPTLimit,
						      ppt_limit * 1000, /* convert from watt to milliwatt */
						      NULL);
		if (ret)
			return ret;

		smu->current_power_limit = ppt_limit;
		break;
	case SMU_FAST_PPT_LIMIT:
		ppt_limit &= ~(SMU_FAST_PPT_LIMIT << 24);
		if (ppt_limit > power_context->max_fast_ppt_limit) {
			dev_err(smu->adev->dev,
				"New power limit (%d) is over the max allowed %d\n",
				ppt_limit, power_context->max_fast_ppt_limit);
			return -EINVAL;
		}

		ret = smu_cmn_send_smc_msg_with_param(smu,
						      SMU_MSG_SetFastPPTLimit,
						      ppt_limit * 1000, /* convert from watt to milliwatt */
						      NULL);
		if (ret)
			return ret;

		power_context->current_fast_ppt_limit = ppt_limit;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

/**
 * vangogh_set_gfxoff_residency - start/stop gfxoff residency logging
 *
 * @smu: smu_context pointer
 * @start: start/stop residency log
 *
 * This function will be used to log gfxoff residency
 *
 * Returns standard response codes.
 */
static u32 vangogh_set_gfxoff_residency(struct smu_context *smu, bool start)
{
	int ret = 0;
	u32 residency;
	struct amdgpu_device *adev = smu->adev;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return 0;

	ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_LogGfxOffResidency,
					      start, &residency);
	if (ret)
		return ret;

	/* the firmware returns the accumulated residency when logging stops */
	if (!start)
		adev->gfx.gfx_off_residency = residency;

	return ret;
}

/**
 * vangogh_get_gfxoff_residency - get gfxoff residency
 *
 * @smu: smu_context pointer
 * @residency: placeholder for return value
 *
 * This function will be used to get gfxoff residency.
 *
 * Returns standard response codes.
 */
static u32 vangogh_get_gfxoff_residency(struct smu_context *smu, uint32_t *residency)
{
	struct amdgpu_device *adev = smu->adev;

	*residency = adev->gfx.gfx_off_residency;

	return 0;
}

/**
 * vangogh_get_gfxoff_entrycount - get gfxoff entry count
 *
 * @smu: smu_context pointer
 * @entrycount: placeholder for return value
 *
 * This function will be used to get gfxoff entry count
 *
 * Returns standard response codes.
 */
static u32 vangogh_get_gfxoff_entrycount(struct smu_context *smu, uint64_t *entrycount)
{
	int ret = 0;
	uint32_t value = 0;
	struct amdgpu_device *adev = smu->adev;

	if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
		return 0;

	ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetGfxOffEntryCount, &value);
	if (ret)
		return ret;

	/* include the count cached by the driver */
	*entrycount = value + adev->gfx.gfx_off_entrycount;

	return ret;
}

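/*
 * Dispatch table mixing the Vangogh-specific handlers with the shared
 * smu_v11_0/smu_cmn helpers.
 */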
static const struct pptable_funcs vangogh_ppt_funcs = {
	.check_fw_status = smu_v11_0_check_fw_status,
	.check_fw_version = smu_v11_0_check_fw_version,
	.init_smc_tables = vangogh_init_smc_tables,
	.fini_smc_tables = smu_v11_0_fini_smc_tables,
	.init_power = smu_v11_0_init_power,
	.fini_power = smu_v11_0_fini_power,
	.register_irq_handler = smu_v11_0_register_irq_handler,
	.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
	.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
	.send_smc_msg = smu_cmn_send_smc_msg,
	.dpm_set_vcn_enable = vangogh_dpm_set_vcn_enable,
	.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
	.is_dpm_running = vangogh_is_dpm_running,
	.read_sensor = vangogh_read_sensor,
	.get_apu_thermal_limit = vangogh_get_apu_thermal_limit,
	.set_apu_thermal_limit = vangogh_set_apu_thermal_limit,
	.get_enabled_mask = smu_cmn_get_enabled_mask,
	.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
	.set_watermarks_table = vangogh_set_watermarks_table,
	.set_driver_table_location = smu_v11_0_set_driver_table_location,
	.interrupt_work = smu_v11_0_interrupt_work,
	.get_gpu_metrics = vangogh_common_get_gpu_metrics,
	.od_edit_dpm_table = vangogh_od_edit_dpm_table,
	.print_clk_levels = vangogh_common_print_clk_levels,
	.set_default_dpm_table = vangogh_set_default_dpm_tables,
	.set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
	.notify_rlc_state = vangogh_notify_rlc_state,
	.feature_is_enabled = smu_cmn_feature_is_enabled,
	.set_power_profile_mode = vangogh_set_power_profile_mode,
	.get_power_profile_mode = vangogh_get_power_profile_mode,
	.get_dpm_clock_table = vangogh_get_dpm_clock_table,
	.force_clk_levels = vangogh_force_clk_levels,
	.set_performance_level = vangogh_set_performance_level,
	.post_init = vangogh_post_smu_init,
	.mode2_reset = vangogh_mode2_reset,
	.gfx_off_control = smu_v11_0_gfx_off_control,
	.get_gfx_off_status = vangogh_get_gfxoff_status,
	.get_gfx_off_entrycount = vangogh_get_gfxoff_entrycount,
	.get_gfx_off_residency = vangogh_get_gfxoff_residency,
	.set_gfx_off_residency = vangogh_set_gfxoff_residency,
	.get_ppt_limit = vangogh_get_ppt_limit,
	.get_power_limit = vangogh_get_power_limit,
	.set_power_limit = vangogh_set_power_limit,
	.get_vbios_bootup_values = smu_v11_0_get_vbios_bootup_values,
};

void vangogh_set_ppt_funcs(struct smu_context *smu)
{
	smu->ppt_funcs = &vangogh_ppt_funcs;
	smu->message_map = vangogh_message_map;
	smu->feature_map = vangogh_feature_mask_map;
	smu->table_map = vangogh_table_map;
	smu->workload_map = vangogh_workload_map;
	smu->is_apu = true;
	smu_v11_0_set_smu_mailbox_registers(smu);
}