2 * Copyright 2016 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
30 #include "amd_powerplay.h"
31 #include "vega10_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega10_powertune.h"
38 #include "smu9_driver_if.h"
39 #include "vega10_inc.h"
41 #include "pppcielanes.h"
42 #include "vega10_hwmgr.h"
43 #include "vega10_processpptables.h"
44 #include "vega10_pptable.h"
45 #include "vega10_thermal.h"
48 #include "amd_pcie_helpers.h"
49 #include "cgs_linux.h"
50 #include "ppinterrupt.h"
51 #include "pp_overdriver.h"
53 #define VOLTAGE_SCALE 4
54 #define VOLTAGE_VID_OFFSET_SCALE1 625
55 #define VOLTAGE_VID_OFFSET_SCALE2 100
57 #define HBM_MEMORY_CHANNEL_WIDTH 128
/*
 * Memory channel count for each DF IntLvNumChan encoding (presumably indexed
 * by DF_CS_AON0_DramBaseAddress0.IntLvNumChan -- confirm against DF spec).
 * File-local lookup table: static const so it neither leaks into the global
 * namespace nor can be modified at runtime.
 */
static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};
61 #define MEM_FREQ_LOW_LATENCY 25000
62 #define MEM_FREQ_HIGH_LATENCY 80000
63 #define MEM_LATENCY_HIGH 245
64 #define MEM_LATENCY_LOW 35
65 #define MEM_LATENCY_ERR 0xFFFF
67 #define mmDF_CS_AON0_DramBaseAddress0 0x0044
68 #define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX 0
70 //DF_CS_AON0_DramBaseAddress0
71 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT 0x0
72 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT 0x1
73 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT 0x4
74 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT 0x8
75 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT 0xc
76 #define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK 0x00000001L
77 #define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK 0x00000002L
78 #define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK 0x000000F0L
79 #define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK 0x00000700L
80 #define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK 0xFFFFF000L
81 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
82 enum pp_clock_type type, uint32_t mask);
/* Magic value stamped in pp_hw_power_state::magic to tag Vega10 power states */
const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
86 struct vega10_power_state *cast_phw_vega10_power_state(
87 struct pp_hw_power_state *hw_ps)
89 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
90 "Invalid Powerstate Type!",
93 return (struct vega10_power_state *)hw_ps;
96 const struct vega10_power_state *cast_const_phw_vega10_power_state(
97 const struct pp_hw_power_state *hw_ps)
99 PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
100 "Invalid Powerstate Type!",
103 return (const struct vega10_power_state *)hw_ps;
106 static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
108 struct vega10_hwmgr *data =
109 (struct vega10_hwmgr *)(hwmgr->backend);
111 data->registry_data.sclk_dpm_key_disabled =
112 hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
113 data->registry_data.socclk_dpm_key_disabled =
114 hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
115 data->registry_data.mclk_dpm_key_disabled =
116 hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
117 data->registry_data.pcie_dpm_key_disabled =
118 hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;
120 data->registry_data.dcefclk_dpm_key_disabled =
121 hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;
123 if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
124 data->registry_data.power_containment_support = 1;
125 data->registry_data.enable_pkg_pwr_tracking_feature = 1;
126 data->registry_data.enable_tdc_limit_feature = 1;
129 data->registry_data.clock_stretcher_support =
130 hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;
132 data->registry_data.ulv_support =
133 hwmgr->feature_mask & PP_ULV_MASK ? true : false;
135 data->registry_data.sclk_deep_sleep_support =
136 hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;
138 data->registry_data.disable_water_mark = 0;
140 data->registry_data.fan_control_support = 1;
141 data->registry_data.thermal_support = 1;
142 data->registry_data.fw_ctf_enabled = 1;
144 data->registry_data.avfs_support = 1;
145 data->registry_data.led_dpm_enabled = 1;
147 data->registry_data.vr0hot_enabled = 1;
148 data->registry_data.vr1hot_enabled = 1;
149 data->registry_data.regulator_hot_gpio_support = 1;
151 data->registry_data.didt_support = 1;
152 if (data->registry_data.didt_support) {
153 data->registry_data.didt_mode = 6;
154 data->registry_data.sq_ramping_support = 1;
155 data->registry_data.db_ramping_support = 0;
156 data->registry_data.td_ramping_support = 0;
157 data->registry_data.tcp_ramping_support = 0;
158 data->registry_data.dbr_ramping_support = 0;
159 data->registry_data.edc_didt_support = 1;
160 data->registry_data.gc_didt_support = 0;
161 data->registry_data.psm_didt_support = 0;
164 data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
165 data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
166 data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
167 data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
168 data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
169 data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
170 data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
171 data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
172 data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
173 data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
174 data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
175 data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
176 data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
178 data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
179 data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
180 data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
181 data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
184 static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
186 struct vega10_hwmgr *data =
187 (struct vega10_hwmgr *)(hwmgr->backend);
188 struct phm_ppt_v2_information *table_info =
189 (struct phm_ppt_v2_information *)hwmgr->pptable;
190 struct cgs_system_info sys_info = {0};
193 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
194 PHM_PlatformCaps_SclkDeepSleep);
196 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
197 PHM_PlatformCaps_DynamicPatchPowerState);
199 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
200 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
201 PHM_PlatformCaps_ControlVDDCI);
203 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
204 PHM_PlatformCaps_TablelessHardwareInterface);
206 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
207 PHM_PlatformCaps_EnableSMU7ThermalManagement);
209 sys_info.size = sizeof(struct cgs_system_info);
210 sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
211 result = cgs_query_system_info(hwmgr->device, &sys_info);
213 if (!result && (sys_info.value & AMD_PG_SUPPORT_UVD))
214 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
215 PHM_PlatformCaps_UVDPowerGating);
217 if (!result && (sys_info.value & AMD_PG_SUPPORT_VCE))
218 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
219 PHM_PlatformCaps_VCEPowerGating);
221 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
222 PHM_PlatformCaps_UnTabledHardwareInterface);
224 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
225 PHM_PlatformCaps_FanSpeedInTableIsRPM);
227 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
228 PHM_PlatformCaps_ODFuzzyFanControlSupport);
230 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
231 PHM_PlatformCaps_DynamicPowerManagement);
233 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
234 PHM_PlatformCaps_SMC);
236 /* power tune caps */
237 /* assume disabled */
238 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
239 PHM_PlatformCaps_PowerContainment);
240 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
241 PHM_PlatformCaps_DiDtSupport);
242 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
243 PHM_PlatformCaps_SQRamping);
244 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
245 PHM_PlatformCaps_DBRamping);
246 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
247 PHM_PlatformCaps_TDRamping);
248 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
249 PHM_PlatformCaps_TCPRamping);
250 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
251 PHM_PlatformCaps_DBRRamping);
252 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
253 PHM_PlatformCaps_DiDtEDCEnable);
254 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
255 PHM_PlatformCaps_GCEDC);
256 phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
257 PHM_PlatformCaps_PSM);
259 if (data->registry_data.didt_support) {
260 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
261 if (data->registry_data.sq_ramping_support)
262 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
263 if (data->registry_data.db_ramping_support)
264 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
265 if (data->registry_data.td_ramping_support)
266 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
267 if (data->registry_data.tcp_ramping_support)
268 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
269 if (data->registry_data.dbr_ramping_support)
270 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
271 if (data->registry_data.edc_didt_support)
272 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
273 if (data->registry_data.gc_didt_support)
274 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
275 if (data->registry_data.psm_didt_support)
276 phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
279 if (data->registry_data.power_containment_support)
280 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
281 PHM_PlatformCaps_PowerContainment);
282 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
283 PHM_PlatformCaps_CAC);
285 if (table_info->tdp_table->usClockStretchAmount &&
286 data->registry_data.clock_stretcher_support)
287 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
288 PHM_PlatformCaps_ClockStretcher);
290 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
291 PHM_PlatformCaps_RegulatorHot);
292 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
293 PHM_PlatformCaps_AutomaticDCTransition);
295 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
296 PHM_PlatformCaps_UVDDPM);
297 phm_cap_set(hwmgr->platform_descriptor.platformCaps,
298 PHM_PlatformCaps_VCEDPM);
303 static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
305 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
308 vega10_initialize_power_tune_defaults(hwmgr);
310 for (i = 0; i < GNLD_FEATURES_MAX; i++) {
311 data->smu_features[i].smu_feature_id = 0xffff;
312 data->smu_features[i].smu_feature_bitmap = 1 << i;
313 data->smu_features[i].enabled = false;
314 data->smu_features[i].supported = false;
317 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
318 FEATURE_DPM_PREFETCHER_BIT;
319 data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
320 FEATURE_DPM_GFXCLK_BIT;
321 data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
322 FEATURE_DPM_UCLK_BIT;
323 data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
324 FEATURE_DPM_SOCCLK_BIT;
325 data->smu_features[GNLD_DPM_UVD].smu_feature_id =
327 data->smu_features[GNLD_DPM_VCE].smu_feature_id =
329 data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
330 FEATURE_DPM_MP0CLK_BIT;
331 data->smu_features[GNLD_DPM_LINK].smu_feature_id =
332 FEATURE_DPM_LINK_BIT;
333 data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
334 FEATURE_DPM_DCEFCLK_BIT;
335 data->smu_features[GNLD_ULV].smu_feature_id =
337 data->smu_features[GNLD_AVFS].smu_feature_id =
339 data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
340 FEATURE_DS_GFXCLK_BIT;
341 data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
342 FEATURE_DS_SOCCLK_BIT;
343 data->smu_features[GNLD_DS_LCLK].smu_feature_id =
345 data->smu_features[GNLD_PPT].smu_feature_id =
347 data->smu_features[GNLD_TDC].smu_feature_id =
349 data->smu_features[GNLD_THERMAL].smu_feature_id =
351 data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
352 FEATURE_GFX_PER_CU_CG_BIT;
353 data->smu_features[GNLD_RM].smu_feature_id =
355 data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
356 FEATURE_DS_DCEFCLK_BIT;
357 data->smu_features[GNLD_ACDC].smu_feature_id =
359 data->smu_features[GNLD_VR0HOT].smu_feature_id =
361 data->smu_features[GNLD_VR1HOT].smu_feature_id =
363 data->smu_features[GNLD_FW_CTF].smu_feature_id =
365 data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
366 FEATURE_LED_DISPLAY_BIT;
367 data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
368 FEATURE_FAN_CONTROL_BIT;
369 data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
370 data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
372 if (!data->registry_data.prefetcher_dpm_key_disabled)
373 data->smu_features[GNLD_DPM_PREFETCHER].supported = true;
375 if (!data->registry_data.sclk_dpm_key_disabled)
376 data->smu_features[GNLD_DPM_GFXCLK].supported = true;
378 if (!data->registry_data.mclk_dpm_key_disabled)
379 data->smu_features[GNLD_DPM_UCLK].supported = true;
381 if (!data->registry_data.socclk_dpm_key_disabled)
382 data->smu_features[GNLD_DPM_SOCCLK].supported = true;
384 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
385 PHM_PlatformCaps_UVDDPM))
386 data->smu_features[GNLD_DPM_UVD].supported = true;
388 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
389 PHM_PlatformCaps_VCEDPM))
390 data->smu_features[GNLD_DPM_VCE].supported = true;
392 if (!data->registry_data.pcie_dpm_key_disabled)
393 data->smu_features[GNLD_DPM_LINK].supported = true;
395 if (!data->registry_data.dcefclk_dpm_key_disabled)
396 data->smu_features[GNLD_DPM_DCEFCLK].supported = true;
398 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
399 PHM_PlatformCaps_SclkDeepSleep) &&
400 data->registry_data.sclk_deep_sleep_support) {
401 data->smu_features[GNLD_DS_GFXCLK].supported = true;
402 data->smu_features[GNLD_DS_SOCCLK].supported = true;
403 data->smu_features[GNLD_DS_LCLK].supported = true;
404 data->smu_features[GNLD_DS_DCEFCLK].supported = true;
407 if (data->registry_data.enable_pkg_pwr_tracking_feature)
408 data->smu_features[GNLD_PPT].supported = true;
410 if (data->registry_data.enable_tdc_limit_feature)
411 data->smu_features[GNLD_TDC].supported = true;
413 if (data->registry_data.thermal_support)
414 data->smu_features[GNLD_THERMAL].supported = true;
416 if (data->registry_data.fan_control_support)
417 data->smu_features[GNLD_FAN_CONTROL].supported = true;
419 if (data->registry_data.fw_ctf_enabled)
420 data->smu_features[GNLD_FW_CTF].supported = true;
422 if (data->registry_data.avfs_support)
423 data->smu_features[GNLD_AVFS].supported = true;
425 if (data->registry_data.led_dpm_enabled)
426 data->smu_features[GNLD_LED_DISPLAY].supported = true;
428 if (data->registry_data.vr1hot_enabled)
429 data->smu_features[GNLD_VR1HOT].supported = true;
431 if (data->registry_data.vr0hot_enabled)
432 data->smu_features[GNLD_VR0HOT].supported = true;
434 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetSmuVersion);
435 vega10_read_arg_from_smc(hwmgr->smumgr, &(data->smu_version));
436 /* ACG firmware has major version 5 */
437 if ((data->smu_version & 0xff000000) == 0x5000000)
438 data->smu_features[GNLD_ACG].supported = true;
440 if (data->registry_data.didt_support)
441 data->smu_features[GNLD_DIDT].supported = true;
445 #ifdef PPLIB_VEGA10_EVV_SUPPORT
446 static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
447 phm_ppt_v1_voltage_lookup_table *lookup_table,
448 uint16_t virtual_voltage_id, int32_t *socclk)
452 struct phm_ppt_v2_information *table_info =
453 (struct phm_ppt_v2_information *)(hwmgr->pptable);
455 PP_ASSERT_WITH_CODE(lookup_table->count != 0,
456 "Lookup table is empty",
459 /* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
460 for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
461 voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
462 if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
466 PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
467 "Can't find requested voltage id in vdd_dep_on_socclk table!",
470 *socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;
475 #define ATOM_VIRTUAL_VOLTAGE_ID0 0xff01
477 * Get Leakage VDDC based on leakage ID.
479 * @param hwmgr the address of the powerplay hardware manager.
482 static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
484 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
489 struct phm_ppt_v2_information *table_info =
490 (struct phm_ppt_v2_information *)hwmgr->pptable;
491 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
492 table_info->vdd_dep_on_socclk;
495 for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
496 vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
498 if (!vega10_get_socclk_for_voltage_evv(hwmgr,
499 table_info->vddc_lookup_table, vv_id, &sclk)) {
500 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
501 PHM_PlatformCaps_ClockStretcher)) {
502 for (j = 1; j < socclk_table->count; j++) {
503 if (socclk_table->entries[j].clk == sclk &&
504 socclk_table->entries[j].cks_enable == 0) {
511 PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
512 VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
513 "Error retrieving EVV voltage value!",
517 /* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
518 PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
519 "Invalid VDDC value", result = -EINVAL;);
521 /* the voltage should not be zero nor equal to leakage ID */
522 if (vddc != 0 && vddc != vv_id) {
523 data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
524 data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
525 data->vddc_leakage.count++;
534 * Change virtual leakage voltage to actual value.
536 * @param hwmgr the address of the powerplay hardware manager.
537 * @param pointer to changing voltage
538 * @param pointer to leakage table
540 static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
541 uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
545 /* search for leakage voltage ID 0xff01 ~ 0xff08 */
546 for (index = 0; index < leakage_table->count; index++) {
547 /* if this voltage matches a leakage voltage ID */
548 /* patch with actual leakage voltage */
549 if (leakage_table->leakage_id[index] == *voltage) {
550 *voltage = leakage_table->actual_voltage[index];
555 if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
556 pr_info("Voltage value looks like a Leakage ID \
557 but it's not patched\n");
561 * Patch voltage lookup table by EVV leakages.
563 * @param hwmgr the address of the powerplay hardware manager.
564 * @param pointer to voltage lookup table
565 * @param pointer to leakage table
568 static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
569 phm_ppt_v1_voltage_lookup_table *lookup_table,
570 struct vega10_leakage_voltage *leakage_table)
574 for (i = 0; i < lookup_table->count; i++)
575 vega10_patch_with_vdd_leakage(hwmgr,
576 &lookup_table->entries[i].us_vdd, leakage_table);
581 static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
582 struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
585 vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);
591 static int vega10_patch_voltage_dependency_tables_with_lookup_table(
592 struct pp_hwmgr *hwmgr)
596 struct phm_ppt_v2_information *table_info =
597 (struct phm_ppt_v2_information *)(hwmgr->pptable);
598 struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
599 table_info->vdd_dep_on_socclk;
600 struct phm_ppt_v1_clock_voltage_dependency_table *gfxclk_table =
601 table_info->vdd_dep_on_sclk;
602 struct phm_ppt_v1_clock_voltage_dependency_table *dcefclk_table =
603 table_info->vdd_dep_on_dcefclk;
604 struct phm_ppt_v1_clock_voltage_dependency_table *pixclk_table =
605 table_info->vdd_dep_on_pixclk;
606 struct phm_ppt_v1_clock_voltage_dependency_table *dspclk_table =
607 table_info->vdd_dep_on_dispclk;
608 struct phm_ppt_v1_clock_voltage_dependency_table *phyclk_table =
609 table_info->vdd_dep_on_phyclk;
610 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
611 table_info->vdd_dep_on_mclk;
612 struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
613 table_info->mm_dep_table;
615 for (entry_id = 0; entry_id < socclk_table->count; entry_id++) {
616 voltage_id = socclk_table->entries[entry_id].vddInd;
617 socclk_table->entries[entry_id].vddc =
618 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
621 for (entry_id = 0; entry_id < gfxclk_table->count; entry_id++) {
622 voltage_id = gfxclk_table->entries[entry_id].vddInd;
623 gfxclk_table->entries[entry_id].vddc =
624 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
627 for (entry_id = 0; entry_id < dcefclk_table->count; entry_id++) {
628 voltage_id = dcefclk_table->entries[entry_id].vddInd;
629 dcefclk_table->entries[entry_id].vddc =
630 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
633 for (entry_id = 0; entry_id < pixclk_table->count; entry_id++) {
634 voltage_id = pixclk_table->entries[entry_id].vddInd;
635 pixclk_table->entries[entry_id].vddc =
636 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
639 for (entry_id = 0; entry_id < dspclk_table->count; entry_id++) {
640 voltage_id = dspclk_table->entries[entry_id].vddInd;
641 dspclk_table->entries[entry_id].vddc =
642 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
645 for (entry_id = 0; entry_id < phyclk_table->count; entry_id++) {
646 voltage_id = phyclk_table->entries[entry_id].vddInd;
647 phyclk_table->entries[entry_id].vddc =
648 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
651 for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
652 voltage_id = mclk_table->entries[entry_id].vddInd;
653 mclk_table->entries[entry_id].vddc =
654 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
655 voltage_id = mclk_table->entries[entry_id].vddciInd;
656 mclk_table->entries[entry_id].vddci =
657 table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
658 voltage_id = mclk_table->entries[entry_id].mvddInd;
659 mclk_table->entries[entry_id].mvdd =
660 table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
663 for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
664 voltage_id = mm_table->entries[entry_id].vddcInd;
665 mm_table->entries[entry_id].vddc =
666 table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
673 static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
674 struct phm_ppt_v1_voltage_lookup_table *lookup_table)
676 uint32_t table_size, i, j;
677 struct phm_ppt_v1_voltage_lookup_record tmp_voltage_lookup_record;
679 PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
680 "Lookup table is empty", return -EINVAL);
682 table_size = lookup_table->count;
684 /* Sorting voltages */
685 for (i = 0; i < table_size - 1; i++) {
686 for (j = i + 1; j > 0; j--) {
687 if (lookup_table->entries[j].us_vdd <
688 lookup_table->entries[j - 1].us_vdd) {
689 tmp_voltage_lookup_record = lookup_table->entries[j - 1];
690 lookup_table->entries[j - 1] = lookup_table->entries[j];
691 lookup_table->entries[j] = tmp_voltage_lookup_record;
699 static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
703 struct phm_ppt_v2_information *table_info =
704 (struct phm_ppt_v2_information *)(hwmgr->pptable);
705 #ifdef PPLIB_VEGA10_EVV_SUPPORT
706 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
708 tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
709 table_info->vddc_lookup_table, &(data->vddc_leakage));
713 tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
714 &(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
719 tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
723 tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
730 static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
732 struct phm_ppt_v2_information *table_info =
733 (struct phm_ppt_v2_information *)(hwmgr->pptable);
734 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
735 table_info->vdd_dep_on_socclk;
736 struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
737 table_info->vdd_dep_on_mclk;
739 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
740 "VDD dependency on SCLK table is missing. \
741 This table is mandatory", return -EINVAL);
742 PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
743 "VDD dependency on SCLK table is empty. \
744 This table is mandatory", return -EINVAL);
746 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
747 "VDD dependency on MCLK table is missing. \
748 This table is mandatory", return -EINVAL);
749 PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
750 "VDD dependency on MCLK table is empty. \
751 This table is mandatory", return -EINVAL);
753 table_info->max_clock_voltage_on_ac.sclk =
754 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
755 table_info->max_clock_voltage_on_ac.mclk =
756 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
757 table_info->max_clock_voltage_on_ac.vddc =
758 allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
759 table_info->max_clock_voltage_on_ac.vddci =
760 allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;
762 hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
763 table_info->max_clock_voltage_on_ac.sclk;
764 hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
765 table_info->max_clock_voltage_on_ac.mclk;
766 hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
767 table_info->max_clock_voltage_on_ac.vddc;
768 hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
769 table_info->max_clock_voltage_on_ac.vddci;
774 static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
776 kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
777 hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;
779 kfree(hwmgr->backend);
780 hwmgr->backend = NULL;
785 static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
788 struct vega10_hwmgr *data;
789 uint32_t config_telemetry = 0;
790 struct pp_atomfwctrl_voltage_table vol_table;
791 struct cgs_system_info sys_info = {0};
793 data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
797 hwmgr->backend = data;
799 vega10_set_default_registry_data(hwmgr);
801 data->disable_dpm_mask = 0xff;
802 data->workload_mask = 0xff;
804 /* need to set voltage control types before EVV patching */
805 data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
806 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
807 data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;
810 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
811 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
812 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
813 VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
815 config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
816 (vol_table.telemetry_offset & 0xff);
817 data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
820 kfree(hwmgr->backend);
821 hwmgr->backend = NULL;
822 PP_ASSERT_WITH_CODE(false,
823 "VDDCR_SOC is not SVID2!",
828 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
829 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
830 if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
831 VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
834 ((vol_table.telemetry_slope << 24) & 0xff000000) |
835 ((vol_table.telemetry_offset << 16) & 0xff0000);
836 data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
841 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
842 PHM_PlatformCaps_ControlVDDCI)) {
843 if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
844 VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
845 data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
848 data->config_telemetry = config_telemetry;
850 vega10_set_features_platform_caps(hwmgr);
852 vega10_init_dpm_defaults(hwmgr);
854 #ifdef PPLIB_VEGA10_EVV_SUPPORT
855 /* Get leakage voltage based on leakage ID. */
856 PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
857 "Get EVV Voltage Failed. Abort Driver loading!",
861 /* Patch our voltage dependency table with actual leakage voltage
862 * We need to perform leakage translation before it's used by other functions
864 vega10_complete_dependency_tables(hwmgr);
866 /* Parse pptable data read from VBIOS */
867 vega10_set_private_data_based_on_pptable(hwmgr);
869 data->is_tlu_enabled = false;
871 hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
872 VEGA10_MAX_HARDWARE_POWERLEVELS;
873 hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
874 hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
876 hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
877 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
878 hwmgr->platform_descriptor.clockStep.engineClock = 500;
879 hwmgr->platform_descriptor.clockStep.memoryClock = 500;
881 sys_info.size = sizeof(struct cgs_system_info);
882 sys_info.info_id = CGS_SYSTEM_INFO_GFX_CU_INFO;
883 result = cgs_query_system_info(hwmgr->device, &sys_info);
884 data->total_active_cus = sys_info.value;
885 /* Setup default Overdrive Fan control settings */
886 data->odn_fan_table.target_fan_speed =
887 hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
888 data->odn_fan_table.target_temperature =
889 hwmgr->thermal_controller.
890 advanceFanControlParameters.ucTargetTemperature;
891 data->odn_fan_table.min_performance_clock =
892 hwmgr->thermal_controller.advanceFanControlParameters.
893 ulMinFanSCLKAcousticLimit;
894 data->odn_fan_table.min_fan_limit =
895 hwmgr->thermal_controller.
896 advanceFanControlParameters.usFanPWMMinLimit *
897 hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;
902 static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
904 struct vega10_hwmgr *data =
905 (struct vega10_hwmgr *)(hwmgr->backend);
907 data->low_sclk_interrupt_threshold = 0;
/* Program the DPM-activity LED pin configuration into the SMC pptable.
 * The VBIOS LED-DPM GPIO lookup table supplies a pin bitmask; up to three
 * pin indices are packed into "mask" (one byte per pin) and copied to
 * LedPin0..LedPin2.
 */
static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_voltage_table table;
	/* Query the LED-DPM GPIO lookup table from the ATOM firmware tables. */
	ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
			VOLTAGE_OBJ_GPIO_LUT, &table);
	/* Walk mask_low bit by bit; j counts pins found so far.
	 * NOTE(review): the extraction appears to have dropped the per-bit
	 * test and the j increment inside this loop — verify against upstream.
	 */
	tmp = table.mask_low;
	for (i = 0, j = 0; i < 32; i++) {
		/* Pack pin index i into byte j of the accumulated mask. */
		mask |= (uint32_t)(i << (8 * j));
	/* One byte per LED pin in the SMC state table. */
	pp_table->LedPin0 = (uint8_t)(mask & 0xff);
	pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
	pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
945 static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
947 PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
948 "Failed to init sclk threshold!",
951 PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
952 "Failed to set up led dpm config!",
958 static bool vega10_is_dpm_running(struct pp_hwmgr *hwmgr)
960 uint32_t features_enabled;
962 if (!vega10_get_smc_features(hwmgr->smumgr, &features_enabled)) {
963 if (features_enabled & SMC_DPM_FEATURES)
970 * Remove repeated voltage values and create table with unique values.
972 * @param hwmgr the address of the powerplay hardware manager.
973 * @param vol_table the pointer to changing voltage table
974 * @return 0 in success
977 static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
978 struct pp_atomfwctrl_voltage_table *vol_table)
983 struct pp_atomfwctrl_voltage_table *table;
985 PP_ASSERT_WITH_CODE(vol_table,
986 "Voltage Table empty.", return -EINVAL);
987 table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
993 table->mask_low = vol_table->mask_low;
994 table->phase_delay = vol_table->phase_delay;
996 for (i = 0; i < vol_table->count; i++) {
997 vvalue = vol_table->entries[i].value;
1000 for (j = 0; j < table->count; j++) {
1001 if (vvalue == table->entries[j].value) {
1008 table->entries[table->count].value = vvalue;
1009 table->entries[table->count].smio_low =
1010 vol_table->entries[i].smio_low;
1015 memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
1021 static int vega10_get_mvdd_voltage_table(struct pp_hwmgr *hwmgr,
1022 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1023 struct pp_atomfwctrl_voltage_table *vol_table)
1027 PP_ASSERT_WITH_CODE(dep_table->count,
1028 "Voltage Dependency Table empty.",
1031 vol_table->mask_low = 0;
1032 vol_table->phase_delay = 0;
1033 vol_table->count = dep_table->count;
1035 for (i = 0; i < vol_table->count; i++) {
1036 vol_table->entries[i].value = dep_table->entries[i].mvdd;
1037 vol_table->entries[i].smio_low = 0;
1040 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr,
1042 "Failed to trim MVDD Table!",
1048 static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
1049 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1050 struct pp_atomfwctrl_voltage_table *vol_table)
1054 PP_ASSERT_WITH_CODE(dep_table->count,
1055 "Voltage Dependency Table empty.",
1058 vol_table->mask_low = 0;
1059 vol_table->phase_delay = 0;
1060 vol_table->count = dep_table->count;
1062 for (i = 0; i < dep_table->count; i++) {
1063 vol_table->entries[i].value = dep_table->entries[i].vddci;
1064 vol_table->entries[i].smio_low = 0;
1067 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
1068 "Failed to trim VDDCI table.",
1074 static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
1075 phm_ppt_v1_clock_voltage_dependency_table *dep_table,
1076 struct pp_atomfwctrl_voltage_table *vol_table)
1080 PP_ASSERT_WITH_CODE(dep_table->count,
1081 "Voltage Dependency Table empty.",
1084 vol_table->mask_low = 0;
1085 vol_table->phase_delay = 0;
1086 vol_table->count = dep_table->count;
1088 for (i = 0; i < vol_table->count; i++) {
1089 vol_table->entries[i].value = dep_table->entries[i].vddc;
1090 vol_table->entries[i].smio_low = 0;
1096 /* ---- Voltage Tables ----
1097 * If the voltage table would be bigger than
1098 * what will fit into the state table on
1099 * the SMC keep only the higher entries.
1101 static void vega10_trim_voltage_table_to_fit_state_table(
1102 struct pp_hwmgr *hwmgr,
1103 uint32_t max_vol_steps,
1104 struct pp_atomfwctrl_voltage_table *vol_table)
1106 unsigned int i, diff;
1108 if (vol_table->count <= max_vol_steps)
1111 diff = vol_table->count - max_vol_steps;
1113 for (i = 0; i < max_vol_steps; i++)
1114 vol_table->entries[i] = vol_table->entries[i + diff];
1116 vol_table->count = max_vol_steps;
1120 * Create Voltage Tables.
1122 * @param hwmgr the address of the powerplay hardware manager.
1125 static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
1127 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
1128 struct phm_ppt_v2_information *table_info =
1129 (struct phm_ppt_v2_information *)hwmgr->pptable;
1132 if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1133 data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1134 result = vega10_get_mvdd_voltage_table(hwmgr,
1135 table_info->vdd_dep_on_mclk,
1136 &(data->mvdd_voltage_table));
1137 PP_ASSERT_WITH_CODE(!result,
1138 "Failed to retrieve MVDDC table!",
1142 if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1143 result = vega10_get_vddci_voltage_table(hwmgr,
1144 table_info->vdd_dep_on_mclk,
1145 &(data->vddci_voltage_table));
1146 PP_ASSERT_WITH_CODE(!result,
1147 "Failed to retrieve VDDCI_MEM table!",
1151 if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
1152 data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
1153 result = vega10_get_vdd_voltage_table(hwmgr,
1154 table_info->vdd_dep_on_sclk,
1155 &(data->vddc_voltage_table));
1156 PP_ASSERT_WITH_CODE(!result,
1157 "Failed to retrieve VDDCR_SOC table!",
1161 PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
1162 "Too many voltage values for VDDC. Trimming to fit state table.",
1163 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1164 16, &(data->vddc_voltage_table)));
1166 PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
1167 "Too many voltage values for VDDCI. Trimming to fit state table.",
1168 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1169 16, &(data->vddci_voltage_table)));
1171 PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
1172 "Too many voltage values for MVDD. Trimming to fit state table.",
1173 vega10_trim_voltage_table_to_fit_state_table(hwmgr,
1174 16, &(data->mvdd_voltage_table)));
1181 * @fn vega10_init_dpm_state
1182 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
1184 * @param dpm_state - the address of the DPM Table to initiailize.
1187 static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
1189 dpm_state->soft_min_level = 0xff;
1190 dpm_state->soft_max_level = 0xff;
1191 dpm_state->hard_min_level = 0xff;
1192 dpm_state->hard_max_level = 0xff;
1195 static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
1196 struct vega10_single_dpm_table *dpm_table,
1197 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1201 for (i = 0; i < dep_table->count; i++) {
1202 if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
1203 dep_table->entries[i].clk) {
1204 dpm_table->dpm_levels[dpm_table->count].value =
1205 dep_table->entries[i].clk;
1206 dpm_table->dpm_levels[dpm_table->count].enabled = true;
1211 static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
1213 struct vega10_hwmgr *data =
1214 (struct vega10_hwmgr *)(hwmgr->backend);
1215 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
1216 struct phm_ppt_v2_information *table_info =
1217 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1218 struct phm_ppt_v1_pcie_table *bios_pcie_table =
1219 table_info->pcie_table;
1222 PP_ASSERT_WITH_CODE(bios_pcie_table->count,
1223 "Incorrect number of PCIE States from VBIOS!",
1226 for (i = 0; i < NUM_LINK_LEVELS; i++) {
1227 if (data->registry_data.pcieSpeedOverride)
1228 pcie_table->pcie_gen[i] =
1229 data->registry_data.pcieSpeedOverride;
1231 pcie_table->pcie_gen[i] =
1232 bios_pcie_table->entries[i].gen_speed;
1234 if (data->registry_data.pcieLaneOverride)
1235 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1236 data->registry_data.pcieLaneOverride);
1238 pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
1239 bios_pcie_table->entries[i].lane_width);
1240 if (data->registry_data.pcieClockOverride)
1241 pcie_table->lclk[i] =
1242 data->registry_data.pcieClockOverride;
1244 pcie_table->lclk[i] =
1245 bios_pcie_table->entries[i].pcie_sclk;
1248 pcie_table->count = NUM_LINK_LEVELS;
/*
 * This function is to initialize all DPM state tables
 * for SMU based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct vega10_single_dpm_table *dpm_table;
	/* Per-domain clock/voltage dependency tables from the power-play table. */
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
			table_info->vdd_dep_on_socclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
			table_info->mm_dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
			table_info->vdd_dep_on_dcefclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
			table_info->vdd_dep_on_pixclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
			table_info->vdd_dep_on_dispclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
			table_info->vdd_dep_on_phyclk;
	/* SOCCLK, GFXCLK and MCLK dependency tables are mandatory;
	 * everything below assumes they exist and are non-empty.
	 */
	PP_ASSERT_WITH_CODE(dep_soc_table,
			"SOCCLK dependency table is missing. This table is mandatory",
	PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
			"SOCCLK dependency table is empty. This table is mandatory",
	PP_ASSERT_WITH_CODE(dep_gfx_table,
			"GFXCLK dependency table is missing. This table is mandatory",
	PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
			"GFXCLK dependency table is empty. This table is mandatory",
	PP_ASSERT_WITH_CODE(dep_mclk_table,
			"MCLK dependency table is missing. This table is mandatory",
	/* NOTE(review): this assert message is garbled ("has to have is
	 * missing") — worth fixing upstream; it fires on an empty table.
	 */
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table has to have is missing. This table is mandatory",
	/* Initialize Sclk DPM table based on allow Sclk values */
	data->dpm_table.soc_table.count = 0;
	data->dpm_table.gfx_table.count = 0;
	data->dpm_table.dcef_table.count = 0;
	dpm_table = &(data->dpm_table.soc_table);
	vega10_setup_default_single_dpm_table(hwmgr,
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	dpm_table = &(data->dpm_table.gfx_table);
	vega10_setup_default_single_dpm_table(hwmgr,
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	/* Initialize Mclk DPM table based on allow Mclk values */
	data->dpm_table.mem_table.count = 0;
	dpm_table = &(data->dpm_table.mem_table);
	vega10_setup_default_single_dpm_table(hwmgr,
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	/* ECLK levels come from the multimedia dependency table; only
	 * monotonically non-decreasing clocks are kept, and only level 0
	 * starts out enabled.
	 */
	data->dpm_table.eclk_table.count = 0;
	dpm_table = &(data->dpm_table.eclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].eclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].eclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	/* VCLK/DCLK (UVD) levels, built the same way as ECLK above. */
	data->dpm_table.vclk_table.count = 0;
	data->dpm_table.dclk_table.count = 0;
	dpm_table = &(data->dpm_table.vclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].vclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].vclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	dpm_table = &(data->dpm_table.dclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].dclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].dclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	/* Assume there is no headless Vega10 for now */
	dpm_table = &(data->dpm_table.dcef_table);
	vega10_setup_default_single_dpm_table(hwmgr,
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	dpm_table = &(data->dpm_table.pixel_table);
	vega10_setup_default_single_dpm_table(hwmgr,
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	dpm_table = &(data->dpm_table.display_table);
	vega10_setup_default_single_dpm_table(hwmgr,
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	dpm_table = &(data->dpm_table.phy_table);
	vega10_setup_default_single_dpm_table(hwmgr,
	vega10_init_dpm_state(&(dpm_table->dpm_state));
	vega10_setup_default_pcie_table(hwmgr);
	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega10_dpm_table));
	/* When overdrive (ODN) is supported in AC or DC, seed the ODN
	 * tables from the freshly built default GFX/MEM DPM tables and
	 * their dependency tables.
	 */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinACSupport) ||
		phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODNinDCSupport)) {
		data->odn_dpm_table.odn_core_clock_dpm_levels.
		number_of_performance_levels = data->dpm_table.gfx_table.count;
		for (i = 0; i < data->dpm_table.gfx_table.count; i++) {
			data->odn_dpm_table.odn_core_clock_dpm_levels.
			performance_level_entries[i].clock =
					data->dpm_table.gfx_table.dpm_levels[i].value;
			data->odn_dpm_table.odn_core_clock_dpm_levels.
			performance_level_entries[i].enabled = true;
		data->odn_dpm_table.vdd_dependency_on_sclk.count =
				dep_gfx_table->count;
		for (i = 0; i < dep_gfx_table->count; i++) {
			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].clk =
					dep_gfx_table->entries[i].clk;
			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].vddInd =
					dep_gfx_table->entries[i].vddInd;
			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_enable =
					dep_gfx_table->entries[i].cks_enable;
			data->odn_dpm_table.vdd_dependency_on_sclk.entries[i].cks_voffset =
					dep_gfx_table->entries[i].cks_voffset;
		data->odn_dpm_table.odn_memory_clock_dpm_levels.
		number_of_performance_levels = data->dpm_table.mem_table.count;
		for (i = 0; i < data->dpm_table.mem_table.count; i++) {
			data->odn_dpm_table.odn_memory_clock_dpm_levels.
			performance_level_entries[i].clock =
					data->dpm_table.mem_table.dpm_levels[i].value;
			data->odn_dpm_table.odn_memory_clock_dpm_levels.
			performance_level_entries[i].enabled = true;
		data->odn_dpm_table.vdd_dependency_on_mclk.count = dep_mclk_table->count;
		for (i = 0; i < dep_mclk_table->count; i++) {
			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].clk =
					dep_mclk_table->entries[i].clk;
			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddInd =
					dep_mclk_table->entries[i].vddInd;
			data->odn_dpm_table.vdd_dependency_on_mclk.entries[i].vddci =
					dep_mclk_table->entries[i].vddci;
1466 * @fn vega10_populate_ulv_state
1467 * @brief Function to provide parameters for Utral Low Voltage state to SMC.
1469 * @param hwmgr - the address of the hardware manager.
1472 static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
1474 struct vega10_hwmgr *data =
1475 (struct vega10_hwmgr *)(hwmgr->backend);
1476 struct phm_ppt_v2_information *table_info =
1477 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1479 data->smc_state_table.pp_table.UlvOffsetVid =
1480 (uint8_t)table_info->us_ulv_voltage_offset;
1482 data->smc_state_table.pp_table.UlvSmnclkDid =
1483 (uint8_t)(table_info->us_ulv_smnclk_did);
1484 data->smc_state_table.pp_table.UlvMp1clkDid =
1485 (uint8_t)(table_info->us_ulv_mp1clk_did);
1486 data->smc_state_table.pp_table.UlvGfxclkBypass =
1487 (uint8_t)(table_info->us_ulv_gfxclk_bypass);
1488 data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
1489 (uint8_t)(data->vddc_voltage_table.psi0_enable);
1490 data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
1491 (uint8_t)(data->vddc_voltage_table.psi1_enable);
1496 static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
1497 uint32_t lclock, uint8_t *curr_lclk_did)
1499 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1501 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1503 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1505 "Failed to get LCLK clock settings from VBIOS!",
1508 *curr_lclk_did = dividers.ulDid;
/* Copy the PCIe link table (gen speed, lane count, LCLK divider) into the
 * SMC pptable. Any SMC link levels beyond pcie_table->count are padded by
 * repeating the last real level.
 */
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_pcie_table *pcie_table =
			&(data->dpm_table.pcie_table);
	/* One SMC entry per real PCIe level. */
	for (i = 0; i < pcie_table->count; i++) {
		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];
		result = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[i], &(pp_table->LclkDid[i]));
			pr_info("Populate LClock Level %d Failed!\n", i);
	/* Pad the remaining SMC levels with index j (presumably the last
	 * populated level — the j assignment was dropped by extraction;
	 * verify against upstream).
	 */
	while (i < NUM_LINK_LEVELS) {
		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];
		result = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[j], &(pp_table->LclkDid[i]));
			pr_info("Populate LClock Level %d Failed!\n", i);
/*
 * Populates single SMC GFXSCLK structure using the provided engine clock
 *
 * @param hwmgr the address of the hardware manager
 * @param gfx_clock the GFX clock to use to populate the structure.
 * @param current_gfxclk_level location in PPTable for the SMC GFXCLK structure.
 */
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk =
			table_info->vdd_dep_on_sclk;
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t gfx_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.engineClock;
	/* When an overdrive VDDC update is pending, read from the ODN
	 * dependency table instead of the stock one.
	 */
	if (data->apply_overdrive_next_settings_mask &
			DPMTABLE_OD_UPDATE_VDDC)
		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
				&(data->odn_dpm_table.vdd_dependency_on_sclk);
	PP_ASSERT_WITH_CODE(dep_on_sclk,
			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
	/* Clamp an overdriven SCLK request to the overdrive engine limit. */
	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;
	/* Locate the requested clock in the dependency table. */
	for (i = 0; i < dep_on_sclk->count; i++) {
		if (dep_on_sclk->entries[i].clk == gfx_clock)
	PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
			"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
			gfx_clock, &dividers),
			"Failed to get GFX Clock settings from VBIOS!",
	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
	current_gfxclk_level->FbMult =
			cpu_to_le32(dividers.ulPll_fb_mult);
	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
	current_gfxclk_level->SsFbMult =
			cpu_to_le32(dividers.ulPll_ss_fbsmult);
	current_gfxclk_level->SsSlewFrac =
			cpu_to_le16(dividers.usPll_ss_slew_frac);
	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);
	*acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */
1619 * @brief Populates single SMC SOCCLK structure using the provided clock.
1621 * @param hwmgr - the address of the hardware manager.
1622 * @param soc_clock - the SOC clock to use to populate the structure.
1623 * @param current_socclk_level - location in PPTable for the SMC SOCCLK structure.
1624 * @return 0 on success..
1626 static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
1627 uint32_t soc_clock, uint8_t *current_soc_did,
1628 uint8_t *current_vol_index)
1630 struct phm_ppt_v2_information *table_info =
1631 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1632 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc =
1633 table_info->vdd_dep_on_socclk;
1634 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1637 PP_ASSERT_WITH_CODE(dep_on_soc,
1638 "Invalid SOC_VDD-SOC_CLK Dependency Table!",
1640 for (i = 0; i < dep_on_soc->count; i++) {
1641 if (dep_on_soc->entries[i].clk == soc_clock)
1644 PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
1645 "Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
1647 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1648 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1649 soc_clock, ÷rs),
1650 "Failed to get SOC Clock settings from VBIOS!",
1653 *current_soc_did = (uint8_t)dividers.ulDid;
1654 *current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);
1659 uint16_t vega10_locate_vddc_given_clock(struct pp_hwmgr *hwmgr,
1661 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
1665 for (i = 0; i < dep_table->count; i++) {
1666 if (dep_table->entries[i].clk == clk)
1667 return dep_table->entries[i].vddc;
1670 pr_info("[LocateVddcGivenClock] Cannot locate SOC Vddc for this clock!");
/*
 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states
 *
 * @param hwmgr the address of the hardware manager
 */
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_socclk;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	/* One SMC GFXCLK entry per real DPM level. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->GfxclkLevel[i]),
				&(pp_table->AcgFreqTable[i]));
	/* Pad remaining SMC levels by repeating level j (presumably the last
	 * populated one — the j assignment was dropped by extraction).
	 */
	while (i < NUM_GFXCLK_DPM_LEVELS) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->GfxclkLevel[i]),
				&(pp_table->AcgFreqTable[i]));
	pp_table->GfxclkSlewRate =
			cpu_to_le16(table_info->us_gfxclk_slew_rate);
	/* SOCCLK levels: VID comes from the SOCCLK dependency table,
	 * divider and voltage index from the VBIOS PLL tables.
	 */
	dpm_table = &(data->dpm_table.soc_table);
	for (i = 0; i < dpm_table->count; i++) {
		pp_table->SocVid[i] =
				(uint8_t)convert_to_vid(
				vega10_locate_vddc_given_clock(hwmgr,
					dpm_table->dpm_levels[i].value,
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
	/* Pad remaining SOCCLK levels the same way. */
	while (i < NUM_SOCCLK_DPM_LEVELS) {
		pp_table->SocVid[i] = pp_table->SocVid[j];
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
1746 * @brief Populates single SMC GFXCLK structure using the provided clock.
1748 * @param hwmgr - the address of the hardware manager.
1749 * @param mem_clock - the memory clock to use to populate the structure.
1750 * @return 0 on success..
1752 static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
1753 uint32_t mem_clock, uint8_t *current_mem_vid,
1754 PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
1756 struct vega10_hwmgr *data =
1757 (struct vega10_hwmgr *)(hwmgr->backend);
1758 struct phm_ppt_v2_information *table_info =
1759 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1760 struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk =
1761 table_info->vdd_dep_on_mclk;
1762 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1763 uint32_t mem_max_clock =
1764 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
1767 if (data->apply_overdrive_next_settings_mask &
1768 DPMTABLE_OD_UPDATE_VDDC)
1769 dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
1770 &data->odn_dpm_table.vdd_dependency_on_mclk;
1772 PP_ASSERT_WITH_CODE(dep_on_mclk,
1773 "Invalid SOC_VDD-UCLK Dependency Table!",
1776 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
1777 mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
1779 for (i = 0; i < dep_on_mclk->count; i++) {
1780 if (dep_on_mclk->entries[i].clk == mem_clock)
1783 PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
1784 "Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
1788 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
1789 hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, ÷rs),
1790 "Failed to get UCLK settings from VBIOS!",
1794 (uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
1795 *current_mem_soc_vind =
1796 (uint8_t)(dep_on_mclk->entries[i].vddInd);
1797 current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
1798 current_memclk_level->Did = (uint8_t)(dividers.ulDid);
1800 PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
1801 "Invalid Divider ID!",
/*
 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
 *
 * @param pHwMgr - the address of the hardware manager.
 * @return PP_Result_OK on success.
 */
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table =
			&(data->dpm_table.mem_table);
	uint32_t i, j, reg, mem_channels;
	/* One SMC UCLK entry per real DPM level. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
	/* Pad remaining SMC levels by repeating level j (presumably the last
	 * populated one — the j assignment was dropped by extraction).
	 */
	while (i < NUM_UCLK_DPM_LEVELS) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
	/* Read the HBM interleave channel count from the DF register and
	 * translate it to a physical channel count via channel_number[].
	 */
	reg = soc15_get_register_offset(DF_HWID, 0,
			mmDF_CS_AON0_DramBaseAddress0_BASE_IDX,
			mmDF_CS_AON0_DramBaseAddress0);
	mem_channels = (cgs_read_register(hwmgr->device, reg) &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	pp_table->NumMemoryChannels = cpu_to_le16(mem_channels);
	pp_table->MemoryChannelWidth =
			cpu_to_le16(HBM_MEMORY_CHANNEL_WIDTH *
					channel_number[mem_channels]);
	pp_table->LowestUclkReservedForUlv =
			(uint8_t)(data->lowest_uclk_reserved_for_ulv);
/* Populate the SMC DisplayClockTable rows for one display clock domain
 * (DCEF/DISP/PIX/PHY): frequency in 10 kHz units and the VID derived from
 * the VDDC lookup table. Unused rows are padded out to NUM_DSPCLK_LEVELS.
 */
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
		DSPCLK_e disp_clock)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint16_t clk = 0, vddc = 0;
	/* Select the dependency table for the requested display domain.
	 * NOTE(review): extraction dropped the break statements and the
	 * PIXCLK/PHYCLK case labels here.
	 */
	switch (disp_clock) {
	case DSPCLK_DCEFCLK:
		dep_table = table_info->vdd_dep_on_dcefclk;
	case DSPCLK_DISPCLK:
		dep_table = table_info->vdd_dep_on_dispclk;
		dep_table = table_info->vdd_dep_on_pixclk;
		dep_table = table_info->vdd_dep_on_phyclk;
	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
			"Number Of Entries Exceeded maximum!",
	for (i = 0; i < dep_table->count; i++) {
		/* SMC expects the frequency in 10 kHz units. */
		clk = (uint16_t)(dep_table->entries[i].clk / 100);
		vddc = table_info->vddc_lookup_table->
				entries[dep_table->entries[i].vddInd].us_vdd;
		vid = (uint8_t)convert_to_vid(vddc);
		pp_table->DisplayClockTable[disp_clock][i].Freq =
		pp_table->DisplayClockTable[disp_clock][i].Vid =
	/* Pad the remaining rows of this domain. */
	while (i < NUM_DSPCLK_LEVELS) {
		pp_table->DisplayClockTable[disp_clock][i].Freq =
		pp_table->DisplayClockTable[disp_clock][i].Vid =
1919 static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
1923 for (i = 0; i < DSPCLK_COUNT; i++) {
1924 PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
1925 "Failed to populate Clock in DisplayClockTable!",
1932 static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
1933 uint32_t eclock, uint8_t *current_eclk_did,
1934 uint8_t *current_soc_vol)
1936 struct phm_ppt_v2_information *table_info =
1937 (struct phm_ppt_v2_information *)(hwmgr->pptable);
1938 struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
1939 table_info->mm_dep_table;
1940 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1943 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1944 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1946 "Failed to get ECLK clock settings from VBIOS!",
1949 *current_eclk_did = (uint8_t)dividers.ulDid;
1951 for (i = 0; i < dep_table->count; i++) {
1952 if (dep_table->entries[i].eclk == eclock)
1953 *current_soc_vol = dep_table->entries[i].vddcInd;
/* Fill the SMC VCE (ECLK) level table from the eclk DPM table, padding
 * the remaining SMC levels by repeating an earlier level.
 */
static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
	int result = -EINVAL;
	/* One SMC ECLK entry per real DPM level. */
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
	/* Pad remaining SMC levels with level j (presumably the last
	 * populated one — the j assignment was dropped by extraction).
	 */
	while (i < NUM_VCE_DPM_LEVELS) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
1991 static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
1992 uint32_t vclock, uint8_t *current_vclk_did)
1994 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
1996 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
1997 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
1999 "Failed to get VCLK clock settings from VBIOS!",
2002 *current_vclk_did = (uint8_t)dividers.ulDid;
2007 static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
2008 uint32_t dclock, uint8_t *current_dclk_did)
2010 struct pp_atomfwctrl_clock_dividers_soc15 dividers;
2012 PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
2013 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
2015 "Failed to get DCLK clock settings from VBIOS!",
2018 *current_dclk_did = (uint8_t)dividers.ulDid;
/* Fill the SMC UVD level tables: VCLK and DCLK divider IDs from their
 * DPM tables, then a voltage index per level from the multimedia
 * dependency table. Remaining SMC levels are padded by repetition.
 */
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
	struct vega10_hwmgr *data =
			(struct vega10_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *vclk_dpm_table =
			&(data->dpm_table.vclk_table);
	struct vega10_single_dpm_table *dclk_dpm_table =
			&(data->dpm_table.dclk_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	int result = -EINVAL;
	/* VCLK divider IDs, one per real DPM level. */
	for (i = 0; i < vclk_dpm_table->count; i++) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[i].value,
				&(pp_table->VclkDid[i]));
	/* Pad remaining VCLK levels with level j (presumably the last
	 * populated one — the j assignment was dropped by extraction).
	 */
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[j].value,
				&(pp_table->VclkDid[i]));
	/* DCLK divider IDs, same scheme. */
	for (i = 0; i < dclk_dpm_table->count; i++) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[i].value,
				&(pp_table->DclkDid[i]));
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[j].value,
				&(pp_table->DclkDid[i]));
	/* Voltage index per UVD level: pick the multimedia entry whose
	 * vclk AND dclk both match the DPM levels at the same index.
	 */
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vclk ==
				vclk_dpm_table->dpm_levels[i].value &&
			dep_table->entries[i].dclk ==
				dclk_dpm_table->dpm_levels[i].value)
			pp_table->UvdDpmVoltageIndex[i] =
					dep_table->entries[i].vddcInd;
	/* Pad remaining voltage indices from entry j. */
	while (i < NUM_UVD_DPM_LEVELS) {
		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
2095 static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
2097 struct vega10_hwmgr *data =
2098 (struct vega10_hwmgr *)(hwmgr->backend);
2099 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2100 struct phm_ppt_v2_information *table_info =
2101 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2102 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2103 table_info->vdd_dep_on_sclk;
2106 for (i = 0; i < dep_table->count; i++) {
2107 pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
2108 pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
2109 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
2115 static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
2117 struct vega10_hwmgr *data =
2118 (struct vega10_hwmgr *)(hwmgr->backend);
2119 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2120 struct phm_ppt_v2_information *table_info =
2121 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2122 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
2123 table_info->vdd_dep_on_sclk;
2124 struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
2128 pp_table->MinVoltageVid = (uint8_t)0xff;
2129 pp_table->MaxVoltageVid = (uint8_t)0;
2131 if (data->smu_features[GNLD_AVFS].supported) {
2132 result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
2134 pp_table->MinVoltageVid = (uint8_t)
2135 convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
2136 pp_table->MaxVoltageVid = (uint8_t)
2137 convert_to_vid((uint16_t)(avfs_params.ulMaxVddc));
2139 pp_table->AConstant[0] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant0);
2140 pp_table->AConstant[1] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant1);
2141 pp_table->AConstant[2] = cpu_to_le32(avfs_params.ulMeanNsigmaAcontant2);
2142 pp_table->DC_tol_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2143 pp_table->Platform_mean = cpu_to_le16(avfs_params.usMeanNsigmaPlatformMean);
2144 pp_table->Platform_sigma = cpu_to_le16(avfs_params.usMeanNsigmaDcTolSigma);
2145 pp_table->PSM_Age_CompFactor = cpu_to_le16(avfs_params.usPsmAgeComfactor);
2147 pp_table->BtcGbVdroopTableCksOff.a0 =
2148 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA0);
2149 pp_table->BtcGbVdroopTableCksOff.a0_shift = 20;
2150 pp_table->BtcGbVdroopTableCksOff.a1 =
2151 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA1);
2152 pp_table->BtcGbVdroopTableCksOff.a1_shift = 20;
2153 pp_table->BtcGbVdroopTableCksOff.a2 =
2154 cpu_to_le32(avfs_params.ulGbVdroopTableCksoffA2);
2155 pp_table->BtcGbVdroopTableCksOff.a2_shift = 20;
2157 pp_table->OverrideBtcGbCksOn = avfs_params.ucEnableGbVdroopTableCkson;
2158 pp_table->BtcGbVdroopTableCksOn.a0 =
2159 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA0);
2160 pp_table->BtcGbVdroopTableCksOn.a0_shift = 20;
2161 pp_table->BtcGbVdroopTableCksOn.a1 =
2162 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA1);
2163 pp_table->BtcGbVdroopTableCksOn.a1_shift = 20;
2164 pp_table->BtcGbVdroopTableCksOn.a2 =
2165 cpu_to_le32(avfs_params.ulGbVdroopTableCksonA2);
2166 pp_table->BtcGbVdroopTableCksOn.a2_shift = 20;
2168 pp_table->AvfsGbCksOn.m1 =
2169 cpu_to_le32(avfs_params.ulGbFuseTableCksonM1);
2170 pp_table->AvfsGbCksOn.m2 =
2171 cpu_to_le32(avfs_params.ulGbFuseTableCksonM2);
2172 pp_table->AvfsGbCksOn.b =
2173 cpu_to_le32(avfs_params.ulGbFuseTableCksonB);
2174 pp_table->AvfsGbCksOn.m1_shift = 24;
2175 pp_table->AvfsGbCksOn.m2_shift = 12;
2176 pp_table->AvfsGbCksOn.b_shift = 0;
2178 pp_table->OverrideAvfsGbCksOn =
2179 avfs_params.ucEnableGbFuseTableCkson;
2180 pp_table->AvfsGbCksOff.m1 =
2181 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM1);
2182 pp_table->AvfsGbCksOff.m2 =
2183 cpu_to_le32(avfs_params.ulGbFuseTableCksoffM2);
2184 pp_table->AvfsGbCksOff.b =
2185 cpu_to_le32(avfs_params.ulGbFuseTableCksoffB);
2186 pp_table->AvfsGbCksOff.m1_shift = 24;
2187 pp_table->AvfsGbCksOff.m2_shift = 12;
2188 pp_table->AvfsGbCksOff.b_shift = 0;
2190 for (i = 0; i < dep_table->count; i++)
2191 pp_table->StaticVoltageOffsetVid[i] =
2192 convert_to_vid((uint8_t)(dep_table->entries[i].sclk_offset));
2194 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2195 data->disp_clk_quad_eqn_a) &&
2196 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2197 data->disp_clk_quad_eqn_b)) {
2198 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2199 (int32_t)data->disp_clk_quad_eqn_a;
2200 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2201 (int32_t)data->disp_clk_quad_eqn_b;
2202 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2203 (int32_t)data->disp_clk_quad_eqn_c;
2205 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1 =
2206 (int32_t)avfs_params.ulDispclk2GfxclkM1;
2207 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2 =
2208 (int32_t)avfs_params.ulDispclk2GfxclkM2;
2209 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b =
2210 (int32_t)avfs_params.ulDispclk2GfxclkB;
2213 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m1_shift = 24;
2214 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].m2_shift = 12;
2215 pp_table->DisplayClock2Gfxclk[DSPCLK_DISPCLK].b_shift = 12;
2217 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2218 data->dcef_clk_quad_eqn_a) &&
2219 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2220 data->dcef_clk_quad_eqn_b)) {
2221 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2222 (int32_t)data->dcef_clk_quad_eqn_a;
2223 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2224 (int32_t)data->dcef_clk_quad_eqn_b;
2225 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2226 (int32_t)data->dcef_clk_quad_eqn_c;
2228 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1 =
2229 (int32_t)avfs_params.ulDcefclk2GfxclkM1;
2230 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2 =
2231 (int32_t)avfs_params.ulDcefclk2GfxclkM2;
2232 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b =
2233 (int32_t)avfs_params.ulDcefclk2GfxclkB;
2236 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m1_shift = 24;
2237 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].m2_shift = 12;
2238 pp_table->DisplayClock2Gfxclk[DSPCLK_DCEFCLK].b_shift = 12;
2240 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2241 data->pixel_clk_quad_eqn_a) &&
2242 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2243 data->pixel_clk_quad_eqn_b)) {
2244 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2245 (int32_t)data->pixel_clk_quad_eqn_a;
2246 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2247 (int32_t)data->pixel_clk_quad_eqn_b;
2248 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2249 (int32_t)data->pixel_clk_quad_eqn_c;
2251 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1 =
2252 (int32_t)avfs_params.ulPixelclk2GfxclkM1;
2253 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2 =
2254 (int32_t)avfs_params.ulPixelclk2GfxclkM2;
2255 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b =
2256 (int32_t)avfs_params.ulPixelclk2GfxclkB;
2259 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m1_shift = 24;
2260 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].m2_shift = 12;
2261 pp_table->DisplayClock2Gfxclk[DSPCLK_PIXCLK].b_shift = 12;
2262 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2263 data->phy_clk_quad_eqn_a) &&
2264 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT !=
2265 data->phy_clk_quad_eqn_b)) {
2266 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2267 (int32_t)data->phy_clk_quad_eqn_a;
2268 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2269 (int32_t)data->phy_clk_quad_eqn_b;
2270 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2271 (int32_t)data->phy_clk_quad_eqn_c;
2273 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1 =
2274 (int32_t)avfs_params.ulPhyclk2GfxclkM1;
2275 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2 =
2276 (int32_t)avfs_params.ulPhyclk2GfxclkM2;
2277 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b =
2278 (int32_t)avfs_params.ulPhyclk2GfxclkB;
2281 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m1_shift = 24;
2282 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].m2_shift = 12;
2283 pp_table->DisplayClock2Gfxclk[DSPCLK_PHYCLK].b_shift = 12;
2285 pp_table->AcgBtcGbVdroopTable.a0 = avfs_params.ulAcgGbVdroopTableA0;
2286 pp_table->AcgBtcGbVdroopTable.a0_shift = 20;
2287 pp_table->AcgBtcGbVdroopTable.a1 = avfs_params.ulAcgGbVdroopTableA1;
2288 pp_table->AcgBtcGbVdroopTable.a1_shift = 20;
2289 pp_table->AcgBtcGbVdroopTable.a2 = avfs_params.ulAcgGbVdroopTableA2;
2290 pp_table->AcgBtcGbVdroopTable.a2_shift = 20;
2292 pp_table->AcgAvfsGb.m1 = avfs_params.ulAcgGbFuseTableM1;
2293 pp_table->AcgAvfsGb.m2 = avfs_params.ulAcgGbFuseTableM2;
2294 pp_table->AcgAvfsGb.b = avfs_params.ulAcgGbFuseTableB;
2295 pp_table->AcgAvfsGb.m1_shift = 0;
2296 pp_table->AcgAvfsGb.m2_shift = 0;
2297 pp_table->AcgAvfsGb.b_shift = 0;
2300 data->smu_features[GNLD_AVFS].supported = false;
2307 static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
2309 struct vega10_hwmgr *data =
2310 (struct vega10_hwmgr *)(hwmgr->backend);
2311 uint32_t agc_btc_response;
2313 if (data->smu_features[GNLD_ACG].supported) {
2314 if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
2315 data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
2316 data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;
2318 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_InitializeAcg);
2320 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgBtc);
2321 vega10_read_arg_from_smc(hwmgr->smumgr, &agc_btc_response);
2323 if (1 == agc_btc_response) {
2324 if (1 == data->acg_loop_state)
2325 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInClosedLoop);
2326 else if (2 == data->acg_loop_state)
2327 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_RunAcgInOpenLoop);
2328 if (0 == vega10_enable_smc_features(hwmgr->smumgr, true,
2329 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2330 data->smu_features[GNLD_ACG].enabled = true;
2332 pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
2333 data->smu_features[GNLD_ACG].enabled = false;
2340 static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
2342 struct vega10_hwmgr *data =
2343 (struct vega10_hwmgr *)(hwmgr->backend);
2345 if (data->smu_features[GNLD_ACG].supported) {
2346 if (data->smu_features[GNLD_ACG].enabled) {
2347 if (0 == vega10_enable_smc_features(hwmgr->smumgr, false,
2348 data->smu_features[GNLD_ACG].smu_feature_bitmap))
2349 data->smu_features[GNLD_ACG].enabled = false;
2356 static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
2358 struct vega10_hwmgr *data =
2359 (struct vega10_hwmgr *)(hwmgr->backend);
2360 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2361 struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
2364 result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
2366 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2367 PHM_PlatformCaps_RegulatorHot) &&
2368 (data->registry_data.regulator_hot_gpio_support)) {
2369 pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
2370 pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
2371 pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
2372 pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
2374 pp_table->VR0HotGpio = 0;
2375 pp_table->VR0HotPolarity = 0;
2376 pp_table->VR1HotGpio = 0;
2377 pp_table->VR1HotPolarity = 0;
2380 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2381 PHM_PlatformCaps_AutomaticDCTransition) &&
2382 (data->registry_data.ac_dc_switch_gpio_support)) {
2383 pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
2384 pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
2386 pp_table->AcDcGpio = 0;
2387 pp_table->AcDcPolarity = 0;
2394 static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
2396 struct vega10_hwmgr *data =
2397 (struct vega10_hwmgr *)(hwmgr->backend);
2399 if (data->smu_features[GNLD_AVFS].supported) {
2401 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2403 data->smu_features[GNLD_AVFS].smu_feature_bitmap),
2404 "[avfs_control] Attempt to Enable AVFS feature Failed!",
2406 data->smu_features[GNLD_AVFS].enabled = true;
2408 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2410 data->smu_features[GNLD_AVFS].smu_feature_id),
2411 "[avfs_control] Attempt to Disable AVFS feature Failed!",
2413 data->smu_features[GNLD_AVFS].enabled = false;
2420 static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
2424 uint64_t serial_number = 0;
2425 uint32_t top32, bottom32;
2426 struct phm_fuses_default fuse;
2428 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2429 AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);
2431 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumTop32);
2432 vega10_read_arg_from_smc(hwmgr->smumgr, &top32);
2434 smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_ReadSerialNumBottom32);
2435 vega10_read_arg_from_smc(hwmgr->smumgr, &bottom32);
2437 serial_number = ((uint64_t)bottom32 << 32) | top32;
2439 if (pp_override_get_default_fuse_value(serial_number, vega10_fuses_default, &fuse) == 0) {
2440 avfs_fuse_table->VFT0_b = fuse.VFT0_b;
2441 avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
2442 avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
2443 avfs_fuse_table->VFT1_b = fuse.VFT1_b;
2444 avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
2445 avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
2446 avfs_fuse_table->VFT2_b = fuse.VFT2_b;
2447 avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
2448 avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
2449 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2450 (uint8_t *)avfs_fuse_table, AVFSFUSETABLE);
2451 PP_ASSERT_WITH_CODE(!result,
2452 "Failed to upload FuseOVerride!",
2459 static int vega10_save_default_power_profile(struct pp_hwmgr *hwmgr)
2461 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2462 struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
2465 hwmgr->default_gfx_power_profile.type = AMD_PP_GFX_PROFILE;
2466 hwmgr->default_compute_power_profile.type = AMD_PP_COMPUTE_PROFILE;
2468 /* Optimize compute power profile: Use only highest
2469 * 2 power levels (if more than 2 are available)
2471 if (dpm_table->count > 2)
2472 min_level = dpm_table->count - 2;
2473 else if (dpm_table->count == 2)
2478 hwmgr->default_compute_power_profile.min_sclk =
2479 dpm_table->dpm_levels[min_level].value;
2481 hwmgr->gfx_power_profile = hwmgr->default_gfx_power_profile;
2482 hwmgr->compute_power_profile = hwmgr->default_compute_power_profile;
2488 * Initializes the SMC table and uploads it
2490 * @param hwmgr the address of the powerplay hardware manager.
2491 * @param pInput the pointer to input data (PowerState)
2494 static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
2497 struct vega10_hwmgr *data =
2498 (struct vega10_hwmgr *)(hwmgr->backend);
2499 struct phm_ppt_v2_information *table_info =
2500 (struct phm_ppt_v2_information *)(hwmgr->pptable);
2501 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
2502 struct pp_atomfwctrl_voltage_table voltage_table;
2503 struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
2505 result = vega10_setup_default_dpm_tables(hwmgr);
2506 PP_ASSERT_WITH_CODE(!result,
2507 "Failed to setup default DPM tables!",
2510 pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_VDDC,
2511 VOLTAGE_OBJ_SVID2, &voltage_table);
2512 pp_table->MaxVidStep = voltage_table.max_vid_step;
2514 pp_table->GfxDpmVoltageMode =
2515 (uint8_t)(table_info->uc_gfx_dpm_voltage_mode);
2516 pp_table->SocDpmVoltageMode =
2517 (uint8_t)(table_info->uc_soc_dpm_voltage_mode);
2518 pp_table->UclkDpmVoltageMode =
2519 (uint8_t)(table_info->uc_uclk_dpm_voltage_mode);
2520 pp_table->UvdDpmVoltageMode =
2521 (uint8_t)(table_info->uc_uvd_dpm_voltage_mode);
2522 pp_table->VceDpmVoltageMode =
2523 (uint8_t)(table_info->uc_vce_dpm_voltage_mode);
2524 pp_table->Mp0DpmVoltageMode =
2525 (uint8_t)(table_info->uc_mp0_dpm_voltage_mode);
2527 pp_table->DisplayDpmVoltageMode =
2528 (uint8_t)(table_info->uc_dcef_dpm_voltage_mode);
2530 data->vddc_voltage_table.psi0_enable = voltage_table.psi0_enable;
2531 data->vddc_voltage_table.psi1_enable = voltage_table.psi1_enable;
2533 if (data->registry_data.ulv_support &&
2534 table_info->us_ulv_voltage_offset) {
2535 result = vega10_populate_ulv_state(hwmgr);
2536 PP_ASSERT_WITH_CODE(!result,
2537 "Failed to initialize ULV state!",
2541 result = vega10_populate_smc_link_levels(hwmgr);
2542 PP_ASSERT_WITH_CODE(!result,
2543 "Failed to initialize Link Level!",
2546 result = vega10_populate_all_graphic_levels(hwmgr);
2547 PP_ASSERT_WITH_CODE(!result,
2548 "Failed to initialize Graphics Level!",
2551 result = vega10_populate_all_memory_levels(hwmgr);
2552 PP_ASSERT_WITH_CODE(!result,
2553 "Failed to initialize Memory Level!",
2556 result = vega10_populate_all_display_clock_levels(hwmgr);
2557 PP_ASSERT_WITH_CODE(!result,
2558 "Failed to initialize Display Level!",
2561 result = vega10_populate_smc_vce_levels(hwmgr);
2562 PP_ASSERT_WITH_CODE(!result,
2563 "Failed to initialize VCE Level!",
2566 result = vega10_populate_smc_uvd_levels(hwmgr);
2567 PP_ASSERT_WITH_CODE(!result,
2568 "Failed to initialize UVD Level!",
2571 if (data->registry_data.clock_stretcher_support) {
2572 result = vega10_populate_clock_stretcher_table(hwmgr);
2573 PP_ASSERT_WITH_CODE(!result,
2574 "Failed to populate Clock Stretcher Table!",
2578 result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
2580 data->vbios_boot_state.vddc = boot_up_values.usVddc;
2581 data->vbios_boot_state.vddci = boot_up_values.usVddci;
2582 data->vbios_boot_state.mvddc = boot_up_values.usMvddc;
2583 data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
2584 data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
2585 data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
2586 data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
2587 if (0 != boot_up_values.usVddc) {
2588 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2589 PPSMC_MSG_SetFloorSocVoltage,
2590 (boot_up_values.usVddc * 4));
2591 data->vbios_boot_state.bsoc_vddc_lock = true;
2593 data->vbios_boot_state.bsoc_vddc_lock = false;
2595 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2596 PPSMC_MSG_SetMinDeepSleepDcefclk,
2597 (uint32_t)(data->vbios_boot_state.dcef_clock / 100));
2600 result = vega10_populate_avfs_parameters(hwmgr);
2601 PP_ASSERT_WITH_CODE(!result,
2602 "Failed to initialize AVFS Parameters!",
2605 result = vega10_populate_gpio_parameters(hwmgr);
2606 PP_ASSERT_WITH_CODE(!result,
2607 "Failed to initialize GPIO Parameters!",
2610 pp_table->GfxclkAverageAlpha = (uint8_t)
2611 (data->gfxclk_average_alpha);
2612 pp_table->SocclkAverageAlpha = (uint8_t)
2613 (data->socclk_average_alpha);
2614 pp_table->UclkAverageAlpha = (uint8_t)
2615 (data->uclk_average_alpha);
2616 pp_table->GfxActivityAverageAlpha = (uint8_t)
2617 (data->gfx_activity_average_alpha);
2619 vega10_populate_and_upload_avfs_fuse_override(hwmgr);
2621 result = vega10_copy_table_to_smc(hwmgr->smumgr,
2622 (uint8_t *)pp_table, PPTABLE);
2623 PP_ASSERT_WITH_CODE(!result,
2624 "Failed to upload PPtable!", return result);
2626 result = vega10_avfs_enable(hwmgr, true);
2627 PP_ASSERT_WITH_CODE(!result, "Attempt to enable AVFS feature Failed!",
2629 vega10_acg_enable(hwmgr);
2630 vega10_save_default_power_profile(hwmgr);
2635 static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
2637 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2639 if (data->smu_features[GNLD_THERMAL].supported) {
2640 if (data->smu_features[GNLD_THERMAL].enabled)
2641 pr_info("THERMAL Feature Already enabled!");
2643 PP_ASSERT_WITH_CODE(
2644 !vega10_enable_smc_features(hwmgr->smumgr,
2646 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2647 "Enable THERMAL Feature Failed!",
2649 data->smu_features[GNLD_THERMAL].enabled = true;
2655 static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
2657 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
2659 if (data->smu_features[GNLD_THERMAL].supported) {
2660 if (!data->smu_features[GNLD_THERMAL].enabled)
2661 pr_info("THERMAL Feature Already disabled!");
2663 PP_ASSERT_WITH_CODE(
2664 !vega10_enable_smc_features(hwmgr->smumgr,
2666 data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
2667 "disable THERMAL Feature Failed!",
2669 data->smu_features[GNLD_THERMAL].enabled = false;
2675 static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
2677 struct vega10_hwmgr *data =
2678 (struct vega10_hwmgr *)(hwmgr->backend);
2680 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2681 PHM_PlatformCaps_RegulatorHot)) {
2682 if (data->smu_features[GNLD_VR0HOT].supported) {
2683 PP_ASSERT_WITH_CODE(
2684 !vega10_enable_smc_features(hwmgr->smumgr,
2686 data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
2687 "Attempt to Enable VR0 Hot feature Failed!",
2689 data->smu_features[GNLD_VR0HOT].enabled = true;
2691 if (data->smu_features[GNLD_VR1HOT].supported) {
2692 PP_ASSERT_WITH_CODE(
2693 !vega10_enable_smc_features(hwmgr->smumgr,
2695 data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
2696 "Attempt to Enable VR0 Hot feature Failed!",
2698 data->smu_features[GNLD_VR1HOT].enabled = true;
2705 static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
2707 struct vega10_hwmgr *data =
2708 (struct vega10_hwmgr *)(hwmgr->backend);
2710 if (data->registry_data.ulv_support) {
2711 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2712 true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2713 "Enable ULV Feature Failed!",
2715 data->smu_features[GNLD_ULV].enabled = true;
2721 static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
2723 struct vega10_hwmgr *data =
2724 (struct vega10_hwmgr *)(hwmgr->backend);
2726 if (data->registry_data.ulv_support) {
2727 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2728 false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
2729 "disable ULV Feature Failed!",
2731 data->smu_features[GNLD_ULV].enabled = false;
2737 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2739 struct vega10_hwmgr *data =
2740 (struct vega10_hwmgr *)(hwmgr->backend);
2742 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2743 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2744 true, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2745 "Attempt to Enable DS_GFXCLK Feature Failed!",
2747 data->smu_features[GNLD_DS_GFXCLK].enabled = true;
2750 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2751 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2752 true, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2753 "Attempt to Enable DS_SOCCLK Feature Failed!",
2755 data->smu_features[GNLD_DS_SOCCLK].enabled = true;
2758 if (data->smu_features[GNLD_DS_LCLK].supported) {
2759 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2760 true, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2761 "Attempt to Enable DS_LCLK Feature Failed!",
2763 data->smu_features[GNLD_DS_LCLK].enabled = true;
2766 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2767 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2768 true, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2769 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2771 data->smu_features[GNLD_DS_DCEFCLK].enabled = true;
2777 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr *hwmgr)
2779 struct vega10_hwmgr *data =
2780 (struct vega10_hwmgr *)(hwmgr->backend);
2782 if (data->smu_features[GNLD_DS_GFXCLK].supported) {
2783 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2784 false, data->smu_features[GNLD_DS_GFXCLK].smu_feature_bitmap),
2785 "Attempt to disable DS_GFXCLK Feature Failed!",
2787 data->smu_features[GNLD_DS_GFXCLK].enabled = false;
2790 if (data->smu_features[GNLD_DS_SOCCLK].supported) {
2791 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2792 false, data->smu_features[GNLD_DS_SOCCLK].smu_feature_bitmap),
2793 "Attempt to disable DS_ Feature Failed!",
2795 data->smu_features[GNLD_DS_SOCCLK].enabled = false;
2798 if (data->smu_features[GNLD_DS_LCLK].supported) {
2799 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2800 false, data->smu_features[GNLD_DS_LCLK].smu_feature_bitmap),
2801 "Attempt to disable DS_LCLK Feature Failed!",
2803 data->smu_features[GNLD_DS_LCLK].enabled = false;
2806 if (data->smu_features[GNLD_DS_DCEFCLK].supported) {
2807 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2808 false, data->smu_features[GNLD_DS_DCEFCLK].smu_feature_bitmap),
2809 "Attempt to disable DS_DCEFCLK Feature Failed!",
2811 data->smu_features[GNLD_DS_DCEFCLK].enabled = false;
2817 static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2819 struct vega10_hwmgr *data =
2820 (struct vega10_hwmgr *)(hwmgr->backend);
2821 uint32_t i, feature_mask = 0;
2824 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2825 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2826 false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2827 "Attempt to disable LED DPM feature failed!", return -EINVAL);
2828 data->smu_features[GNLD_LED_DISPLAY].enabled = false;
2831 for (i = 0; i < GNLD_DPM_MAX; i++) {
2832 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2833 if (data->smu_features[i].supported) {
2834 if (data->smu_features[i].enabled) {
2835 feature_mask |= data->smu_features[i].
2837 data->smu_features[i].enabled = false;
2843 vega10_enable_smc_features(hwmgr->smumgr, false, feature_mask);
2849 * @brief Tell SMC to enabled the supported DPMs.
2851 * @param hwmgr - the address of the powerplay hardware manager.
2852 * @Param bitmap - bitmap for the features to enabled.
2853 * @return 0 on at least one DPM is successfully enabled.
2855 static int vega10_start_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
2857 struct vega10_hwmgr *data =
2858 (struct vega10_hwmgr *)(hwmgr->backend);
2859 uint32_t i, feature_mask = 0;
2861 for (i = 0; i < GNLD_DPM_MAX; i++) {
2862 if (data->smu_features[i].smu_feature_bitmap & bitmap) {
2863 if (data->smu_features[i].supported) {
2864 if (!data->smu_features[i].enabled) {
2865 feature_mask |= data->smu_features[i].
2867 data->smu_features[i].enabled = true;
2873 if (vega10_enable_smc_features(hwmgr->smumgr,
2874 true, feature_mask)) {
2875 for (i = 0; i < GNLD_DPM_MAX; i++) {
2876 if (data->smu_features[i].smu_feature_bitmap &
2878 data->smu_features[i].enabled = false;
2882 if(data->smu_features[GNLD_LED_DISPLAY].supported == true){
2883 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2884 true, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
2885 "Attempt to Enable LED DPM feature Failed!", return -EINVAL);
2886 data->smu_features[GNLD_LED_DISPLAY].enabled = true;
2889 if (data->vbios_boot_state.bsoc_vddc_lock) {
2890 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2891 PPSMC_MSG_SetFloorSocVoltage, 0);
2892 data->vbios_boot_state.bsoc_vddc_lock = false;
2895 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2896 PHM_PlatformCaps_Falcon_QuickTransition)) {
2897 if (data->smu_features[GNLD_ACDC].supported) {
2898 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
2899 true, data->smu_features[GNLD_ACDC].smu_feature_bitmap),
2900 "Attempt to Enable DS_GFXCLK Feature Failed!",
2902 data->smu_features[GNLD_ACDC].enabled = true;
2909 static int vega10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
2911 struct vega10_hwmgr *data =
2912 (struct vega10_hwmgr *)(hwmgr->backend);
2913 int tmp_result, result = 0;
2915 tmp_result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2916 PPSMC_MSG_ConfigureTelemetry, data->config_telemetry);
2917 PP_ASSERT_WITH_CODE(!tmp_result,
2918 "Failed to configure telemetry!",
2921 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
2922 PPSMC_MSG_NumOfDisplays, 0);
2924 tmp_result = (!vega10_is_dpm_running(hwmgr)) ? 0 : -1;
2925 PP_ASSERT_WITH_CODE(!tmp_result,
2926 "DPM is already running right , skipping re-enablement!",
2929 tmp_result = vega10_construct_voltage_tables(hwmgr);
2930 PP_ASSERT_WITH_CODE(!tmp_result,
2931 "Failed to contruct voltage tables!",
2932 result = tmp_result);
2934 tmp_result = vega10_init_smc_table(hwmgr);
2935 PP_ASSERT_WITH_CODE(!tmp_result,
2936 "Failed to initialize SMC table!",
2937 result = tmp_result);
2939 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
2940 PHM_PlatformCaps_ThermalController)) {
2941 tmp_result = vega10_enable_thermal_protection(hwmgr);
2942 PP_ASSERT_WITH_CODE(!tmp_result,
2943 "Failed to enable thermal protection!",
2944 result = tmp_result);
2947 tmp_result = vega10_enable_vrhot_feature(hwmgr);
2948 PP_ASSERT_WITH_CODE(!tmp_result,
2949 "Failed to enable VR hot feature!",
2950 result = tmp_result);
2952 tmp_result = vega10_enable_deep_sleep_master_switch(hwmgr);
2953 PP_ASSERT_WITH_CODE(!tmp_result,
2954 "Failed to enable deep sleep master switch!",
2955 result = tmp_result);
2957 tmp_result = vega10_start_dpm(hwmgr, SMC_DPM_FEATURES);
2958 PP_ASSERT_WITH_CODE(!tmp_result,
2959 "Failed to start DPM!", result = tmp_result);
2961 /* enable didt, do not abort if failed didt */
2962 tmp_result = vega10_enable_didt_config(hwmgr);
2963 PP_ASSERT(!tmp_result,
2964 "Failed to enable didt config!");
2966 tmp_result = vega10_enable_power_containment(hwmgr);
2967 PP_ASSERT_WITH_CODE(!tmp_result,
2968 "Failed to enable power containment!",
2969 result = tmp_result);
2971 tmp_result = vega10_power_control_set_level(hwmgr);
2972 PP_ASSERT_WITH_CODE(!tmp_result,
2973 "Failed to power control set level!",
2974 result = tmp_result);
2976 tmp_result = vega10_enable_ulv(hwmgr);
2977 PP_ASSERT_WITH_CODE(!tmp_result,
2978 "Failed to enable ULV!",
2979 result = tmp_result);
2984 static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
2986 return sizeof(struct vega10_power_state);
/* Callback invoked for each ATOM powerplay-table state entry: translate the
 * VBIOS state record (@state) into the generic pp_power_state classification
 * fields and fill in two vega10 performance levels (low, then high) from the
 * SOCCLK/GFXCLK/MCLK dependency tables embedded in @pp_table.
 *
 * NOTE(review): the clock dependency table offsets are resolved by adding a
 * little-endian 16-bit offset to the table base; table validity is presumably
 * checked by the caller — not verified here.
 */
2989 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
2990 void *state, struct pp_power_state *power_state,
2991 void *pp_table, uint32_t classification_flag)
2993 ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
2994 struct vega10_power_state *vega10_power_state =
2995 cast_phw_vega10_power_state(&(power_state->hardware));
2996 struct vega10_performance_level *performance_level;
2997 ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
2998 ATOM_Vega10_POWERPLAYTABLE *powerplay_table =
2999 (ATOM_Vega10_POWERPLAYTABLE *)pp_table;
/* Dependency tables live at 16-bit offsets from the powerplay table base. */
3000 ATOM_Vega10_SOCCLK_Dependency_Table *socclk_dep_table =
3001 (ATOM_Vega10_SOCCLK_Dependency_Table *)
3002 (((unsigned long)powerplay_table) +
3003 le16_to_cpu(powerplay_table->usSocclkDependencyTableOffset));
3004 ATOM_Vega10_GFXCLK_Dependency_Table *gfxclk_dep_table =
3005 (ATOM_Vega10_GFXCLK_Dependency_Table *)
3006 (((unsigned long)powerplay_table) +
3007 le16_to_cpu(powerplay_table->usGfxclkDependencyTableOffset));
3008 ATOM_Vega10_MCLK_Dependency_Table *mclk_dep_table =
3009 (ATOM_Vega10_MCLK_Dependency_Table *)
3010 (((unsigned long)powerplay_table) +
3011 le16_to_cpu(powerplay_table->usMclkDependencyTableOffset));
3014 /* The following fields are not initialized here:
3015 * id orderedList allStatesList
/* UI label comes from the masked/shifted classification word of the entry. */
3017 power_state->classification.ui_label =
3018 (le16_to_cpu(state_entry->usClassification) &
3019 ATOM_PPLIB_CLASSIFICATION_UI_MASK) >>
3020 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT;
3021 power_state->classification.flags = classification_flag;
3022 /* NOTE: There is a classification2 flag in BIOS
3023 * that is not being used right now
3025 power_state->classification.temporary_state = false;
3026 power_state->classification.to_be_deleted = false;
3028 power_state->validation.disallowOnDC =
3029 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3030 ATOM_Vega10_DISALLOW_ON_DC) != 0);
3032 power_state->display.disableFrameModulation = false;
3033 power_state->display.limitRefreshrate = false;
3034 power_state->display.enableVariBright =
3035 ((le32_to_cpu(state_entry->ulCapsAndSettings) &
3036 ATOM_Vega10_ENABLE_VARIBRIGHT) != 0);
3038 power_state->validation.supportedPowerLevels = 0;
3039 power_state->uvd_clocks.VCLK = 0;
3040 power_state->uvd_clocks.DCLK = 0;
3041 power_state->temperatures.min = 0;
3042 power_state->temperatures.max = 0;
/* First (low) performance level; count is bumped before the limit checks. */
3044 performance_level = &(vega10_power_state->performance_levels
3045 [vega10_power_state->performance_level_count++]);
3047 PP_ASSERT_WITH_CODE(
3048 (vega10_power_state->performance_level_count <
3049 NUM_GFXCLK_DPM_LEVELS),
3050 "Performance levels exceeds SMC limit!",
3053 PP_ASSERT_WITH_CODE(
3054 (vega10_power_state->performance_level_count <=
3055 hwmgr->platform_descriptor.
3056 hardwareActivityPerformanceLevels),
3057 "Performance levels exceeds Driver limit!",
3060 /* Performance levels are arranged from low to high. */
3061 performance_level->soc_clock = socclk_dep_table->entries
3062 [state_entry->ucSocClockIndexLow].ulClk;
3063 performance_level->gfx_clock = gfxclk_dep_table->entries
3064 [state_entry->ucGfxClockIndexLow].ulClk;
3065 performance_level->mem_clock = mclk_dep_table->entries
3066 [state_entry->ucMemClockIndexLow].ulMemClk;
/* Second (high) performance level. */
3068 performance_level = &(vega10_power_state->performance_levels
3069 [vega10_power_state->performance_level_count++]);
3070 performance_level->soc_clock = socclk_dep_table->entries
3071 [state_entry->ucSocClockIndexHigh].ulClk;
/* The GFXCLK dependency table has two record layouts, selected by ucRevId:
 * rev 0 uses the original record, rev 1 uses the V2 record format.
 */
3072 if (gfxclk_dep_table->ucRevId == 0) {
3073 performance_level->gfx_clock = gfxclk_dep_table->entries
3074 [state_entry->ucGfxClockIndexHigh].ulClk;
3075 } else if (gfxclk_dep_table->ucRevId == 1) {
3076 patom_record_V2 = (ATOM_Vega10_GFXCLK_Dependency_Record_V2 *)gfxclk_dep_table->entries;
3077 performance_level->gfx_clock = patom_record_V2[state_entry->ucGfxClockIndexHigh].ulClk;
3080 performance_level->mem_clock = mclk_dep_table->entries
3081 [state_entry->ucMemClockIndexHigh].ulMemClk;
/* Load one powerplay-table entry (@entry_index) into @state via the generic
 * table parser, then derive the vega10-specific DC-compatibility flag and
 * mirror the UVD clocks into the hardware state.
 */
3085 static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
3086 unsigned long entry_index, struct pp_power_state *state)
3089 struct vega10_power_state *ps;
3091 state->hardware.magic = PhwVega10_Magic;
3093 ps = cast_phw_vega10_power_state(&state->hardware);
/* The callback above fills classification and performance levels. */
3095 result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
3096 vega10_get_pp_table_entry_callback_func);
3099 * This is the earliest time we have all the dependency table
3100 * and the VBIOS boot state
3102 /* set DC compatible flag if this state supports DC */
3103 if (!state->validation.disallowOnDC)
3104 ps->dc_compatible = true;
3106 ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
3107 ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
/* Hook for patching the VBIOS boot state; no adjustment is needed on
 * Vega10 (no body visible in this view beyond the signature).
 */
3112 static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
3113 struct pp_hw_power_state *hw_ps)
/* Adjust the requested power state before it is programmed: cap clocks for
 * DC power source, honor DAL minimum clocks, apply stable-pstate and
 * overdrive overrides, and decide whether MCLK switching must be disabled
 * (multi-display, frame lock, VR). Operates on exactly two performance
 * levels (low = index 0, high = index 1).
 */
3118 static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
3119 struct pp_power_state *request_ps,
3120 const struct pp_power_state *current_ps)
3122 struct vega10_power_state *vega10_ps =
3123 cast_phw_vega10_power_state(&request_ps->hardware);
3126 struct PP_Clocks minimum_clocks = {0};
3127 bool disable_mclk_switching;
3128 bool disable_mclk_switching_for_frame_lock;
3129 bool disable_mclk_switching_for_vr;
3130 bool force_mclk_high;
3131 struct cgs_display_info info = {0};
3132 const struct phm_clock_and_voltage_limits *max_limits;
3134 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
3135 struct phm_ppt_v2_information *table_info =
3136 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3138 uint32_t stable_pstate_sclk_dpm_percentage;
3139 uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
3142 data->battery_state = (PP_StateUILabel_Battery ==
3143 request_ps->classification.ui_label);
/* NOTE(review): message says "VI" (Volcanic Islands) in Vega10 code —
 * looks like a copy/paste leftover; the invariant itself (2 levels) holds.
 */
3145 if (vega10_ps->performance_level_count != 2)
3146 pr_info("VI should always have 2 performance levels");
3148 max_limits = (PP_PowerSource_AC == hwmgr->power_source) ?
3149 &(hwmgr->dyn_state.max_clock_voltage_on_ac) :
3150 &(hwmgr->dyn_state.max_clock_voltage_on_dc);
3152 /* Cap clock DPM tables at DC MAX if it is in DC. */
3153 if (PP_PowerSource_DC == hwmgr->power_source) {
3154 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3155 if (vega10_ps->performance_levels[i].mem_clock >
3157 vega10_ps->performance_levels[i].mem_clock =
3159 if (vega10_ps->performance_levels[i].gfx_clock >
3161 vega10_ps->performance_levels[i].gfx_clock =
3166 vega10_ps->vce_clks.evclk = hwmgr->vce_arbiter.evclk;
3167 vega10_ps->vce_clks.ecclk = hwmgr->vce_arbiter.ecclk;
3169 cgs_get_active_displays_info(hwmgr->device, &info);
3171 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
/* DAL-requested display minimum clocks. */
3172 minimum_clocks.engineClock = hwmgr->display_config.min_core_set_clock;
3173 minimum_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
3175 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3176 PHM_PlatformCaps_StablePState)) {
/* Registry percentage must be 1..100; fall back to 75% otherwise. */
3177 PP_ASSERT_WITH_CODE(
3178 data->registry_data.stable_pstate_sclk_dpm_percentage >= 1 &&
3179 data->registry_data.stable_pstate_sclk_dpm_percentage <= 100,
3180 "percent sclk value must range from 1% to 100%, setting default value",
3181 stable_pstate_sclk_dpm_percentage = 75);
3183 max_limits = &(hwmgr->dyn_state.max_clock_voltage_on_ac);
3184 stable_pstate_sclk = (max_limits->sclk *
3185 stable_pstate_sclk_dpm_percentage) / 100;
/* Snap the stable-pstate sclk down to the nearest sclk/vdd table entry. */
3187 for (count = table_info->vdd_dep_on_sclk->count - 1;
3188 count >= 0; count--) {
3189 if (stable_pstate_sclk >=
3190 table_info->vdd_dep_on_sclk->entries[count].clk) {
3191 stable_pstate_sclk =
3192 table_info->vdd_dep_on_sclk->entries[count].clk;
3198 stable_pstate_sclk = table_info->vdd_dep_on_sclk->entries[0].clk;
3200 stable_pstate_mclk = max_limits->mclk;
3202 minimum_clocks.engineClock = stable_pstate_sclk;
3203 minimum_clocks.memoryClock = stable_pstate_mclk;
/* Arbiter minimums override whatever was computed above. */
3206 if (minimum_clocks.engineClock < hwmgr->gfx_arbiter.sclk)
3207 minimum_clocks.engineClock = hwmgr->gfx_arbiter.sclk;
3209 if (minimum_clocks.memoryClock < hwmgr->gfx_arbiter.mclk)
3210 minimum_clocks.memoryClock = hwmgr->gfx_arbiter.mclk;
3212 vega10_ps->sclk_threshold = hwmgr->gfx_arbiter.sclk_threshold;
/* Overdrive: clamp to platform limits, then raise the high level. */
3214 if (hwmgr->gfx_arbiter.sclk_over_drive) {
3215 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.sclk_over_drive <=
3216 hwmgr->platform_descriptor.overdriveLimit.engineClock),
3217 "Overdrive sclk exceeds limit",
3218 hwmgr->gfx_arbiter.sclk_over_drive =
3219 hwmgr->platform_descriptor.overdriveLimit.engineClock);
3221 if (hwmgr->gfx_arbiter.sclk_over_drive >= hwmgr->gfx_arbiter.sclk)
3222 vega10_ps->performance_levels[1].gfx_clock =
3223 hwmgr->gfx_arbiter.sclk_over_drive;
3226 if (hwmgr->gfx_arbiter.mclk_over_drive) {
3227 PP_ASSERT_WITH_CODE((hwmgr->gfx_arbiter.mclk_over_drive <=
3228 hwmgr->platform_descriptor.overdriveLimit.memoryClock),
3229 "Overdrive mclk exceeds limit",
3230 hwmgr->gfx_arbiter.mclk_over_drive =
3231 hwmgr->platform_descriptor.overdriveLimit.memoryClock);
3233 if (hwmgr->gfx_arbiter.mclk_over_drive >= hwmgr->gfx_arbiter.mclk)
3234 vega10_ps->performance_levels[1].mem_clock =
3235 hwmgr->gfx_arbiter.mclk_over_drive;
3238 disable_mclk_switching_for_frame_lock = phm_cap_enabled(
3239 hwmgr->platform_descriptor.platformCaps,
3240 PHM_PlatformCaps_DisableMclkSwitchingForFrameLock);
3241 disable_mclk_switching_for_vr = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3242 PHM_PlatformCaps_DisableMclkSwitchForVR);
3243 force_mclk_high = phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3244 PHM_PlatformCaps_ForceMclkHigh);
/* MCLK switching stays enabled with no displays; otherwise any of
 * multi-display / frame-lock / VR (and, presumably, force_mclk_high in
 * the elided continuation) disables it.
 */
3246 if (info.display_count == 0)
3247 disable_mclk_switching = false;
3249 disable_mclk_switching = (info.display_count > 1) ||
3250 disable_mclk_switching_for_frame_lock ||
3251 disable_mclk_switching_for_vr ||
/* Raise the low level up to the (capped) minimum clocks. */
3254 sclk = vega10_ps->performance_levels[0].gfx_clock;
3255 mclk = vega10_ps->performance_levels[0].mem_clock;
3257 if (sclk < minimum_clocks.engineClock)
3258 sclk = (minimum_clocks.engineClock > max_limits->sclk) ?
3259 max_limits->sclk : minimum_clocks.engineClock;
3261 if (mclk < minimum_clocks.memoryClock)
3262 mclk = (minimum_clocks.memoryClock > max_limits->mclk) ?
3263 max_limits->mclk : minimum_clocks.memoryClock;
3265 vega10_ps->performance_levels[0].gfx_clock = sclk;
3266 vega10_ps->performance_levels[0].mem_clock = mclk;
/* Keep level 0 <= level 1 for gfx clock. */
3268 if (vega10_ps->performance_levels[1].gfx_clock <
3269 vega10_ps->performance_levels[0].gfx_clock)
3270 vega10_ps->performance_levels[0].gfx_clock =
3271 vega10_ps->performance_levels[1].gfx_clock;
3273 if (disable_mclk_switching) {
3274 /* Set Mclk the max of level 0 and level 1 */
3275 if (mclk < vega10_ps->performance_levels[1].mem_clock)
3276 mclk = vega10_ps->performance_levels[1].mem_clock;
3278 /* Find the lowest MCLK frequency that is within
3279 * the tolerable latency defined in DAL
3282 for (i = 0; i < data->mclk_latency_table.count; i++) {
3283 if ((data->mclk_latency_table.entries[i].latency <= latency) &&
3284 (data->mclk_latency_table.entries[i].frequency >=
3285 vega10_ps->performance_levels[0].mem_clock) &&
3286 (data->mclk_latency_table.entries[i].frequency <=
3287 vega10_ps->performance_levels[1].mem_clock))
3288 mclk = data->mclk_latency_table.entries[i].frequency;
3290 vega10_ps->performance_levels[0].mem_clock = mclk;
3292 if (vega10_ps->performance_levels[1].mem_clock <
3293 vega10_ps->performance_levels[0].mem_clock)
3294 vega10_ps->performance_levels[0].mem_clock =
3295 vega10_ps->performance_levels[1].mem_clock;
/* Stable pstate pins every level to the single computed sclk/mclk pair. */
3298 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3299 PHM_PlatformCaps_StablePState)) {
3300 for (i = 0; i < vega10_ps->performance_level_count; i++) {
3301 vega10_ps->performance_levels[i].gfx_clock = stable_pstate_sclk;
3302 vega10_ps->performance_levels[i].mem_clock = stable_pstate_mclk;
/* Compare the new power state's highest-level sclk/mclk against the current
 * DPM tables and record in data->need_update_dpm_table which tables (SCLK,
 * MCLK, and their overdrive variants) must be repopulated. The ODN-capable
 * path and the legacy path do the same matching with different flags.
 */
3309 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, const void *input)
3311 const struct phm_set_power_state_input *states =
3312 (const struct phm_set_power_state_input *)input;
3313 const struct vega10_power_state *vega10_ps =
3314 cast_const_phw_vega10_power_state(states->pnew_state);
3315 struct vega10_hwmgr *data =
3316 (struct vega10_hwmgr *)(hwmgr->backend);
3317 struct vega10_single_dpm_table *sclk_table =
3318 &(data->dpm_table.gfx_table);
/* Target clocks come from the highest performance level of the new state. */
3319 uint32_t sclk = vega10_ps->performance_levels
3320 [vega10_ps->performance_level_count - 1].gfx_clock;
3321 struct vega10_single_dpm_table *mclk_table =
3322 &(data->dpm_table.mem_table);
3323 uint32_t mclk = vega10_ps->performance_levels
3324 [vega10_ps->performance_level_count - 1].mem_clock;
3325 struct PP_Clocks min_clocks = {0};
3327 struct cgs_display_info info = {0};
3329 data->need_update_dpm_table = 0;
/* ODN (overdrive-next) capable path. */
3331 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3332 PHM_PlatformCaps_ODNinACSupport) ||
3333 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3334 PHM_PlatformCaps_ODNinDCSupport)) {
3335 for (i = 0; i < sclk_table->count; i++) {
3336 if (sclk == sclk_table->dpm_levels[i].value)
/* sclk not found in table and no pending OD sclk update: check whether a
 * DeepSleep divider change forces an SCLK update anyway.
 */
3340 if (!(data->apply_overdrive_next_settings_mask &
3341 DPMTABLE_OD_UPDATE_SCLK) && i >= sclk_table->count) {
3342 /* Check SCLK in DAL's minimum clocks
3343 * in case DeepSleep divider update is required.
3345 if (data->display_timing.min_clock_in_sr !=
3346 min_clocks.engineClockInSR &&
3347 (min_clocks.engineClockInSR >=
3348 VEGA10_MINIMUM_ENGINE_CLOCK ||
3349 data->display_timing.min_clock_in_sr >=
3350 VEGA10_MINIMUM_ENGINE_CLOCK))
3351 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3354 cgs_get_active_displays_info(hwmgr->device, &info);
/* A change in display count forces an MCLK table update. */
3356 if (data->display_timing.num_existing_displays !=
3358 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
/* Legacy (non-ODN) path: same matching, using the OD_UPDATE flags. */
3360 for (i = 0; i < sclk_table->count; i++) {
3361 if (sclk == sclk_table->dpm_levels[i].value)
3365 if (i >= sclk_table->count)
3366 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3368 /* Check SCLK in DAL's minimum clocks
3369 * in case DeepSleep divider update is required.
3371 if (data->display_timing.min_clock_in_sr !=
3372 min_clocks.engineClockInSR &&
3373 (min_clocks.engineClockInSR >=
3374 VEGA10_MINIMUM_ENGINE_CLOCK ||
3375 data->display_timing.min_clock_in_sr >=
3376 VEGA10_MINIMUM_ENGINE_CLOCK))
3377 data->need_update_dpm_table |= DPMTABLE_UPDATE_SCLK;
3380 for (i = 0; i < mclk_table->count; i++) {
3381 if (mclk == mclk_table->dpm_levels[i].value)
3385 cgs_get_active_displays_info(hwmgr->device, &info);
3387 if (i >= mclk_table->count)
3388 data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
3390 if (data->display_timing.num_existing_displays !=
3391 info.display_count ||
3392 i >= mclk_table->count)
3393 data->need_update_dpm_table |= DPMTABLE_UPDATE_MCLK;
/* Rebuild the SCLK/MCLK DPM tables as flagged by
 * vega10_find_dpm_states_clocks_in_dpm_table() and push them to the SMC.
 * Two mutually exclusive paths:
 *  - ODN path: copy enabled/value pairs straight from the user's ODN table,
 *    then repopulate graphics/memory levels as needed.
 *  - Legacy OD path: scale the intermediate golden-table levels by the
 *    percentage the top level was over/under-clocked, so the whole curve
 *    follows the overdrive setting.
 */
3398 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3399 struct pp_hwmgr *hwmgr, const void *input)
3402 const struct phm_set_power_state_input *states =
3403 (const struct phm_set_power_state_input *)input;
3404 const struct vega10_power_state *vega10_ps =
3405 cast_const_phw_vega10_power_state(states->pnew_state);
3406 struct vega10_hwmgr *data =
3407 (struct vega10_hwmgr *)(hwmgr->backend);
3408 uint32_t sclk = vega10_ps->performance_levels
3409 [vega10_ps->performance_level_count - 1].gfx_clock;
3410 uint32_t mclk = vega10_ps->performance_levels
3411 [vega10_ps->performance_level_count - 1].mem_clock;
3412 struct vega10_dpm_table *dpm_table = &data->dpm_table;
3413 struct vega10_dpm_table *golden_dpm_table =
3414 &data->golden_dpm_table;
3415 uint32_t dpm_count, clock_percent;
3418 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3419 PHM_PlatformCaps_ODNinACSupport) ||
3420 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3421 PHM_PlatformCaps_ODNinDCSupport)) {
/* Nothing pending: skip all repopulation work. */
3423 if (!data->need_update_dpm_table &&
3424 !data->apply_optimized_settings &&
3425 !data->apply_overdrive_next_settings_mask)
/* Mirror the user-defined ODN core-clock levels into the live gfx table. */
3428 if (data->apply_overdrive_next_settings_mask &
3429 DPMTABLE_OD_UPDATE_SCLK) {
3431 dpm_count < dpm_table->gfx_table.count;
3433 dpm_table->gfx_table.dpm_levels[dpm_count].enabled =
3434 data->odn_dpm_table.odn_core_clock_dpm_levels.
3435 performance_level_entries[dpm_count].enabled;
3436 dpm_table->gfx_table.dpm_levels[dpm_count].value =
3437 data->odn_dpm_table.odn_core_clock_dpm_levels.
3438 performance_level_entries[dpm_count].clock;
/* Same mirroring for the ODN memory-clock levels. */
3442 if (data->apply_overdrive_next_settings_mask &
3443 DPMTABLE_OD_UPDATE_MCLK) {
3445 dpm_count < dpm_table->mem_table.count;
3447 dpm_table->mem_table.dpm_levels[dpm_count].enabled =
3448 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3449 performance_level_entries[dpm_count].enabled;
3450 dpm_table->mem_table.dpm_levels[dpm_count].value =
3451 data->odn_dpm_table.odn_memory_clock_dpm_levels.
3452 performance_level_entries[dpm_count].clock;
3456 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_SCLK) ||
3457 data->apply_optimized_settings ||
3458 (data->apply_overdrive_next_settings_mask &
3459 DPMTABLE_OD_UPDATE_SCLK)) {
3460 result = vega10_populate_all_graphic_levels(hwmgr);
3461 PP_ASSERT_WITH_CODE(!result,
3462 "Failed to populate SCLK during \
3463 PopulateNewDPMClocksStates Function!",
3467 if ((data->need_update_dpm_table & DPMTABLE_UPDATE_MCLK) ||
3468 (data->apply_overdrive_next_settings_mask &
3469 DPMTABLE_OD_UPDATE_MCLK)){
3470 result = vega10_populate_all_memory_levels(hwmgr);
3471 PP_ASSERT_WITH_CODE(!result,
3472 "Failed to populate MCLK during \
3473 PopulateNewDPMClocksStates Function!",
/* Legacy (non-ODN) overdrive path starts here. */
3477 if (!data->need_update_dpm_table &&
3478 !data->apply_optimized_settings)
3481 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK &&
3482 data->smu_features[GNLD_DPM_GFXCLK].supported) {
/* New top-level gfx clock (assignment target elided in this view). */
3484 gfx_table.dpm_levels[dpm_table->gfx_table.count - 1].
3486 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3487 PHM_PlatformCaps_OD6PlusinACSupport) ||
3488 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3489 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3490 /* Need to do calculation based on the golden DPM table
3491 * as the Heatmap GPU Clock axis is also based on
3492 * the default values
3494 PP_ASSERT_WITH_CODE(
3495 golden_dpm_table->gfx_table.dpm_levels
3496 [golden_dpm_table->gfx_table.count - 1].value,
/* Scale intermediate levels (skip level 0/1 and the top level) by the
 * percentage the requested sclk deviates from the golden top level.
 */
3500 dpm_count = dpm_table->gfx_table.count < 2 ?
3501 0 : dpm_table->gfx_table.count - 2;
3502 for (i = dpm_count; i > 1; i--) {
3503 if (sclk > golden_dpm_table->gfx_table.dpm_levels
3504 [golden_dpm_table->gfx_table.count - 1].value) {
3506 ((sclk - golden_dpm_table->gfx_table.dpm_levels
3507 [golden_dpm_table->gfx_table.count - 1].value) *
3509 golden_dpm_table->gfx_table.dpm_levels
3510 [golden_dpm_table->gfx_table.count - 1].value;
3512 dpm_table->gfx_table.dpm_levels[i].value =
3513 golden_dpm_table->gfx_table.dpm_levels[i].value +
3514 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3515 clock_percent) / 100;
3516 } else if (golden_dpm_table->
3517 gfx_table.dpm_levels[dpm_table->gfx_table.count-1].value >
3520 ((golden_dpm_table->gfx_table.dpm_levels
3521 [golden_dpm_table->gfx_table.count - 1].value -
3523 golden_dpm_table->gfx_table.dpm_levels
3524 [golden_dpm_table->gfx_table.count-1].value;
3526 dpm_table->gfx_table.dpm_levels[i].value =
3527 golden_dpm_table->gfx_table.dpm_levels[i].value -
3528 (golden_dpm_table->gfx_table.dpm_levels[i].value *
3529 clock_percent) / 100;
3531 dpm_table->gfx_table.dpm_levels[i].value =
3532 golden_dpm_table->gfx_table.dpm_levels[i].value;
/* Same scaling logic for memory clocks. */
3537 if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK &&
3538 data->smu_features[GNLD_DPM_UCLK].supported) {
3540 mem_table.dpm_levels[dpm_table->mem_table.count - 1].
3543 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3544 PHM_PlatformCaps_OD6PlusinACSupport) ||
3545 phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3546 PHM_PlatformCaps_OD6PlusinDCSupport)) {
3548 PP_ASSERT_WITH_CODE(
3549 golden_dpm_table->mem_table.dpm_levels
3550 [golden_dpm_table->mem_table.count - 1].value,
3554 dpm_count = dpm_table->mem_table.count < 2 ?
3555 0 : dpm_table->mem_table.count - 2;
3556 for (i = dpm_count; i > 1; i--) {
3557 if (mclk > golden_dpm_table->mem_table.dpm_levels
3558 [golden_dpm_table->mem_table.count-1].value) {
3559 clock_percent = ((mclk -
3560 golden_dpm_table->mem_table.dpm_levels
3561 [golden_dpm_table->mem_table.count-1].value) *
3563 golden_dpm_table->mem_table.dpm_levels
3564 [golden_dpm_table->mem_table.count-1].value;
3566 dpm_table->mem_table.dpm_levels[i].value =
3567 golden_dpm_table->mem_table.dpm_levels[i].value +
3568 (golden_dpm_table->mem_table.dpm_levels[i].value *
3569 clock_percent) / 100;
3570 } else if (golden_dpm_table->mem_table.dpm_levels
3571 [dpm_table->mem_table.count-1].value > mclk) {
3572 clock_percent = ((golden_dpm_table->mem_table.dpm_levels
3573 [golden_dpm_table->mem_table.count-1].value - mclk) *
3575 golden_dpm_table->mem_table.dpm_levels
3576 [golden_dpm_table->mem_table.count-1].value;
3578 dpm_table->mem_table.dpm_levels[i].value =
3579 golden_dpm_table->mem_table.dpm_levels[i].value -
3580 (golden_dpm_table->mem_table.dpm_levels[i].value *
3581 clock_percent) / 100;
3583 dpm_table->mem_table.dpm_levels[i].value =
3584 golden_dpm_table->mem_table.dpm_levels[i].value;
/* Finally repopulate the SMC level tables for whatever changed. */
3589 if ((data->need_update_dpm_table &
3590 (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK)) ||
3591 data->apply_optimized_settings) {
3592 result = vega10_populate_all_graphic_levels(hwmgr);
3593 PP_ASSERT_WITH_CODE(!result,
3594 "Failed to populate SCLK during \
3595 PopulateNewDPMClocksStates Function!",
3599 if (data->need_update_dpm_table &
3600 (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
3601 result = vega10_populate_all_memory_levels(hwmgr);
3602 PP_ASSERT_WITH_CODE(!result,
3603 "Failed to populate MCLK during \
3604 PopulateNewDPMClocksStates Function!",
/* Enable only the DPM levels whose clock value lies within
 * [low_limit, high_limit]; disable everything outside that range.
 */
3611 static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
3612 struct vega10_single_dpm_table *dpm_table,
3613 uint32_t low_limit, uint32_t high_limit)
3617 for (i = 0; i < dpm_table->count; i++) {
3618 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3619 (dpm_table->dpm_levels[i].value > high_limit))
3620 dpm_table->dpm_levels[i].enabled = false;
3622 dpm_table->dpm_levels[i].enabled = true;
/* Like vega10_trim_single_dpm_states(), but additionally disables any level
 * whose bit is NOT set in @disable_dpm_mask.
 *
 * NOTE(review): despite the parameter name, a CLEAR bit in
 * disable_dpm_mask disables the level (the test is negated) — the mask
 * effectively lists the levels allowed to stay enabled.
 */
3627 static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
3628 struct vega10_single_dpm_table *dpm_table,
3629 uint32_t low_limit, uint32_t high_limit,
3630 uint32_t disable_dpm_mask)
3634 for (i = 0; i < dpm_table->count; i++) {
3635 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3636 (dpm_table->dpm_levels[i].value > high_limit))
3637 dpm_table->dpm_levels[i].enabled = false;
3638 else if (!((1 << i) & disable_dpm_mask))
3639 dpm_table->dpm_levels[i].enabled = false;
3641 dpm_table->dpm_levels[i].enabled = true;
/* Restrict the SOC/GFX/MEM DPM tables to the clock range spanned by the
 * power state's lowest and highest performance levels. With a single-level
 * state the low and high limits are both level 0.
 */
3646 static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
3647 const struct vega10_power_state *vega10_ps)
3649 struct vega10_hwmgr *data =
3650 (struct vega10_hwmgr *)(hwmgr->backend);
3651 uint32_t high_limit_count;
3653 PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
3654 "power state did not have any performance level",
/* Index of the level supplying the upper clock bound. */
3657 high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;
3659 vega10_trim_single_dpm_states(hwmgr,
3660 &(data->dpm_table.soc_table),
3661 vega10_ps->performance_levels[0].soc_clock,
3662 vega10_ps->performance_levels[high_limit_count].soc_clock);
/* GFX additionally honors the user's disable_dpm_mask. */
3664 vega10_trim_single_dpm_states_with_mask(hwmgr,
3665 &(data->dpm_table.gfx_table),
3666 vega10_ps->performance_levels[0].gfx_clock,
3667 vega10_ps->performance_levels[high_limit_count].gfx_clock,
3668 data->disable_dpm_mask);
3670 vega10_trim_single_dpm_states(hwmgr,
3671 &(data->dpm_table.mem_table),
3672 vega10_ps->performance_levels[0].mem_clock,
3673 vega10_ps->performance_levels[high_limit_count].mem_clock);
/* Return the index of the first (lowest-clock) enabled level in @table. */
3678 static uint32_t vega10_find_lowest_dpm_level(
3679 struct vega10_single_dpm_table *table)
3683 for (i = 0; i < table->count; i++) {
3684 if (table->dpm_levels[i].enabled)
/* Return the index of the last (highest-clock) enabled level in @table,
 * scanning from the top down. An oversized table is reported and clamped
 * to the last legal index.
 */
3691 static uint32_t vega10_find_highest_dpm_level(
3692 struct vega10_single_dpm_table *table)
3696 if (table->count <= MAX_REGULAR_DPM_NUMBER) {
3697 for (i = table->count; i > 0; i--) {
3698 if (table->dpm_levels[i - 1].enabled)
3702 pr_info("DPM Table Has Too Many Entries!");
3703 return MAX_REGULAR_DPM_NUMBER - 1;
/* Placeholder for applying DAL's minimum voltage request; no work is
 * visible in this view — callers invoke it before touching soft min/max
 * levels.
 */
3709 static void vega10_apply_dal_minimum_voltage_request(
3710 struct pp_hwmgr *hwmgr)
/* Return the SOC voltage index to pair with the maximum UCLK: the vddInd of
 * the top mclk/vdd dependency entry, plus one.
 */
3715 static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
3717 struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
3718 struct phm_ppt_v2_information *table_info =
3719 (struct phm_ppt_v2_information *)(hwmgr->pptable);
3721 vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;
/* NOTE(review): indexes with NUM_UCLK_DPM_LEVELS - 1, assuming the mclk
 * dependency table has at least that many entries — not checked here.
 */
3723 return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
/* Push the boot-level soft minimums to the SMC. SCLK/MCLK messages are only
 * sent when the cached soft_min_level differs from the desired boot level,
 * avoiding redundant SMC traffic. When the memory boot level is the top
 * UCLK level, a matching SOC soft minimum is sent instead of a UCLK one.
 */
3726 static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
3728 struct vega10_hwmgr *data =
3729 (struct vega10_hwmgr *)(hwmgr->backend);
3730 uint32_t socclk_idx;
3732 vega10_apply_dal_minimum_voltage_request(hwmgr);
3734 if (!data->registry_data.sclk_dpm_key_disabled) {
3735 if (data->smc_state_table.gfx_boot_level !=
3736 data->dpm_table.gfx_table.dpm_state.soft_min_level) {
3737 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3739 PPSMC_MSG_SetSoftMinGfxclkByIndex,
3740 data->smc_state_table.gfx_boot_level),
3741 "Failed to set soft min sclk index!",
/* Cache the level we just programmed so the next call can skip it. */
3743 data->dpm_table.gfx_table.dpm_state.soft_min_level =
3744 data->smc_state_table.gfx_boot_level;
3748 if (!data->registry_data.mclk_dpm_key_disabled) {
3749 if (data->smc_state_table.mem_boot_level !=
3750 data->dpm_table.mem_table.dpm_state.soft_min_level) {
3751 if (data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1) {
3752 socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
3753 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3755 PPSMC_MSG_SetSoftMinSocclkByIndex,
3757 "Failed to set soft min uclk index!",
3760 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3762 PPSMC_MSG_SetSoftMinUclkByIndex,
3763 data->smc_state_table.mem_boot_level),
3764 "Failed to set soft min uclk index!",
3767 data->dpm_table.mem_table.dpm_state.soft_min_level =
3768 data->smc_state_table.mem_boot_level;
/* Push the soft maximum SCLK/MCLK levels to the SMC, mirroring the logic of
 * vega10_upload_dpm_bootup_level(): only send when the cached
 * soft_max_level differs from the desired max level.
 */
3775 static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
3777 struct vega10_hwmgr *data =
3778 (struct vega10_hwmgr *)(hwmgr->backend);
3780 vega10_apply_dal_minimum_voltage_request(hwmgr);
3782 if (!data->registry_data.sclk_dpm_key_disabled) {
3783 if (data->smc_state_table.gfx_max_level !=
3784 data->dpm_table.gfx_table.dpm_state.soft_max_level) {
3785 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3787 PPSMC_MSG_SetSoftMaxGfxclkByIndex,
3788 data->smc_state_table.gfx_max_level),
3789 "Failed to set soft max sclk index!",
/* Cache the programmed level to suppress redundant messages. */
3791 data->dpm_table.gfx_table.dpm_state.soft_max_level =
3792 data->smc_state_table.gfx_max_level;
3796 if (!data->registry_data.mclk_dpm_key_disabled) {
3797 if (data->smc_state_table.mem_max_level !=
3798 data->dpm_table.mem_table.dpm_state.soft_max_level) {
3799 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
3801 PPSMC_MSG_SetSoftMaxUclkByIndex,
3802 data->smc_state_table.mem_max_level),
3803 "Failed to set soft max mclk index!",
3805 data->dpm_table.mem_table.dpm_state.soft_max_level =
3806 data->smc_state_table.mem_max_level;
/* For the new power state: trim the DPM tables to the state's clock range,
 * compute boot (lowest enabled) and max (highest enabled) levels for GFX and
 * MEM, upload the soft min/max to the SMC, and force-enable every level in
 * between so the enable mask is contiguous.
 */
3813 static int vega10_generate_dpm_level_enable_mask(
3814 struct pp_hwmgr *hwmgr, const void *input)
3816 struct vega10_hwmgr *data =
3817 (struct vega10_hwmgr *)(hwmgr->backend);
3818 const struct phm_set_power_state_input *states =
3819 (const struct phm_set_power_state_input *)input;
3820 const struct vega10_power_state *vega10_ps =
3821 cast_const_phw_vega10_power_state(states->pnew_state);
3824 PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
3825 "Attempt to Trim DPM States Failed!",
3828 data->smc_state_table.gfx_boot_level =
3829 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
3830 data->smc_state_table.gfx_max_level =
3831 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
3832 data->smc_state_table.mem_boot_level =
3833 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
3834 data->smc_state_table.mem_max_level =
3835 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
3837 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
3838 "Attempt to upload DPM Bootup Levels Failed!",
3840 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
3841 "Attempt to upload DPM Max Levels Failed!",
/* Make the boot..max range contiguous (loop is exclusive of max_level;
 * presumably the max level itself was already enabled by the trim step).
 */
3843 for(i = data->smc_state_table.gfx_boot_level; i < data->smc_state_table.gfx_max_level; i++)
3844 data->dpm_table.gfx_table.dpm_levels[i].enabled = true;
3847 for(i = data->smc_state_table.mem_boot_level; i < data->smc_state_table.mem_max_level; i++)
3848 data->dpm_table.mem_table.dpm_levels[i].enabled = true;
/* Enable or disable the VCE DPM SMC feature (no-op if the feature is not
 * supported) and track the resulting state in data->smu_features.
 */
3853 int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
3855 struct vega10_hwmgr *data =
3856 (struct vega10_hwmgr *)(hwmgr->backend);
3858 if (data->smu_features[GNLD_DPM_VCE].supported) {
3859 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
3861 data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
3862 "Attempt to Enable/Disable DPM VCE Failed!",
3864 data->smu_features[GNLD_DPM_VCE].enabled = enable;
/* If the low-SCLK throttle notification cap is enabled and the arbiter's
 * threshold changed, cache the new value, store it (little-endian) in the
 * SMC pp_table, and notify the SMC — which also enables the SmcToHost
 * interrupt.
 */
3870 static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
3872 struct vega10_hwmgr *data =
3873 (struct vega10_hwmgr *)(hwmgr->backend);
3875 uint32_t low_sclk_interrupt_threshold = 0;
3877 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
3878 PHM_PlatformCaps_SclkThrottleLowNotification)
3879 && (hwmgr->gfx_arbiter.sclk_threshold !=
3880 data->low_sclk_interrupt_threshold)) {
3881 data->low_sclk_interrupt_threshold =
3882 hwmgr->gfx_arbiter.sclk_threshold;
3883 low_sclk_interrupt_threshold =
3884 data->low_sclk_interrupt_threshold;
3886 data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
3887 cpu_to_le32(low_sclk_interrupt_threshold);
3889 /* This message will also enable SmcToHost Interrupt */
3890 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
3891 PPSMC_MSG_SetLowGfxclkInterruptThreshold,
3892 (uint32_t)low_sclk_interrupt_threshold);
/* Orchestrate a power-state change: detect which DPM tables need updating,
 * repopulate and upload SCLK/MCLK levels, regenerate the enable mask,
 * refresh the SCLK threshold, then upload the whole PPTable to the SMC.
 * Each step's failure is recorded but later steps still run; only the
 * final table upload aborts on failure.
 */
3898 static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
3901 int tmp_result, result = 0;
3902 struct vega10_hwmgr *data =
3903 (struct vega10_hwmgr *)(hwmgr->backend);
3904 PPTable_t *pp_table = &(data->smc_state_table.pp_table);
3906 tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
3907 PP_ASSERT_WITH_CODE(!tmp_result,
3908 "Failed to find DPM states clocks in DPM table!",
3909 result = tmp_result);
3911 tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
3912 PP_ASSERT_WITH_CODE(!tmp_result,
3913 "Failed to populate and upload SCLK MCLK DPM levels!",
3914 result = tmp_result);
3916 tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
3917 PP_ASSERT_WITH_CODE(!tmp_result,
3918 "Failed to generate DPM level enabled mask!",
3919 result = tmp_result);
3921 tmp_result = vega10_update_sclk_threshold(hwmgr);
3922 PP_ASSERT_WITH_CODE(!tmp_result,
3923 "Failed to update SCLK threshold!",
3924 result = tmp_result);
3926 result = vega10_copy_table_to_smc(hwmgr->smumgr,
3927 (uint8_t *)pp_table, PPTABLE);
3928 PP_ASSERT_WITH_CODE(!result,
3929 "Failed to upload PPtable!", return result);
/* Pending optimization/overdrive work has now been consumed. */
3931 data->apply_optimized_settings = false;
3932 data->apply_overdrive_next_settings_mask = 0;
/* Return the requested power state's gfx clock: level 0 when @low is true,
 * otherwise the highest performance level's clock.
 */
3937 static int vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
3939 struct pp_power_state *ps;
3940 struct vega10_power_state *vega10_ps;
3945 ps = hwmgr->request_ps;
3950 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3953 return vega10_ps->performance_levels[0].gfx_clock;
3955 return vega10_ps->performance_levels
3956 [vega10_ps->performance_level_count - 1].gfx_clock;
/* Return the requested power state's memory clock: level 0 when @low is
 * true, otherwise the highest performance level's clock. Mirrors
 * vega10_dpm_get_sclk().
 */
3959 static int vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
3961 struct pp_power_state *ps;
3962 struct vega10_power_state *vega10_ps;
3967 ps = hwmgr->request_ps;
3972 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
3975 return vega10_ps->performance_levels[0].mem_clock;
3977 return vega10_ps->performance_levels
3978 [vega10_ps->performance_level_count-1].mem_clock;
/* Query the current package power from the SMC and store it in @query.
 * The SMC returns an integer watt value; it is shifted left 8 bits to fit
 * the 8.8 fixed-point format of average_gpu_power.
 */
3981 static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
3982 struct pp_gpu_power *query)
3986 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
3987 PPSMC_MSG_GetCurrPkgPwr),
3988 "Failed to get current package power!",
/* NOTE(review): return value of vega10_read_arg_from_smc is ignored here. */
3991 vega10_read_arg_from_smc(hwmgr->smumgr, &value);
3992 /* power value is an integer */
3993 query->average_gpu_power = value << 8;
/* Read one sensor value for the amdgpu sensor interface. Clock sensors ask
 * the SMC for the current DPM level index and translate it through the
 * driver's DPM tables; load is clamped to 100%; UVD/VCE power report the
 * inverse of the power-gated flag; GPU power delegates to
 * vega10_get_gpu_power() after validating the caller's buffer size.
 */
3998 static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
3999 void *value, int *size)
4001 uint32_t sclk_idx, mclk_idx, activity_percent = 0;
4002 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4003 struct vega10_dpm_table *dpm_table = &data->dpm_table;
4007 case AMDGPU_PP_SENSOR_GFX_SCLK:
4008 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentGfxclkIndex);
4010 vega10_read_arg_from_smc(hwmgr->smumgr, &sclk_idx);
4011 *((uint32_t *)value) = dpm_table->gfx_table.dpm_levels[sclk_idx].value;
4015 case AMDGPU_PP_SENSOR_GFX_MCLK:
4016 ret = smum_send_msg_to_smc(hwmgr->smumgr, PPSMC_MSG_GetCurrentUclkIndex);
4018 vega10_read_arg_from_smc(hwmgr->smumgr, &mclk_idx);
4019 *((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
4023 case AMDGPU_PP_SENSOR_GPU_LOAD:
4024 ret = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_GetAverageGfxActivity, 0);
4026 vega10_read_arg_from_smc(hwmgr->smumgr, &activity_percent);
/* SMC may report >100; clamp for the sysfs consumer. */
4027 *((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
4031 case AMDGPU_PP_SENSOR_GPU_TEMP:
4032 *((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
4035 case AMDGPU_PP_SENSOR_UVD_POWER:
4036 *((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
4039 case AMDGPU_PP_SENSOR_VCE_POWER:
4040 *((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
4043 case AMDGPU_PP_SENSOR_GPU_POWER:
4044 if (*size < sizeof(struct pp_gpu_power))
4047 *size = sizeof(struct pp_gpu_power);
4048 ret = vega10_get_gpu_power(hwmgr, (struct pp_gpu_power *)value);
/*
 * vega10_notify_smc_display_change - toggle the SMC's fast-uclk-switch
 * behaviour for display changes (second parameter elided in this view).
 */
4058 static int vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
4061 return smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4062 PPSMC_MSG_SetUclkFastSwitch,
/*
 * vega10_display_clock_voltage_request - forward a display clock request
 * to the SMC.  The request word packs the frequency (MHz) in the upper
 * 16 bits and the DSPCLK_e selector in the lower bits.
 * Returns the smum_send_msg result; unknown clock types are rejected.
 */
4066 int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
4067 struct pp_display_clock_request *clock_req)
4070 enum amd_pp_clock_type clk_type = clock_req->clock_type;
/* Request is in kHz; SMC interface expects MHz. */
4071 uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
4072 DSPCLK_e clk_select = 0;
4073 uint32_t clk_request = 0;
4076 case amd_pp_dcef_clock:
4077 clk_select = DSPCLK_DCEFCLK;
4079 case amd_pp_disp_clock:
4080 clk_select = DSPCLK_DISPCLK;
4082 case amd_pp_pixel_clock:
4083 clk_select = DSPCLK_PIXCLK;
4085 case amd_pp_phy_clock:
4086 clk_select = DSPCLK_PHYCLK;
4089 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
/* Pack frequency (high 16 bits) with the clock selector (low bits). */
4095 clk_request = (clk_freq << 16) | clk_select;
4096 result = smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4097 PPSMC_MSG_RequestDisplayClockByFreq,
/*
 * vega10_get_uclk_index - find the mclk dependency-table index whose clock
 * satisfies @frequency.  Scans for the first entry >= frequency; the
 * return statements are elided in this view (presumably returns that
 * index, with 0 for an empty/NULL table — TODO confirm).
 */
4104 static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
4105 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
4111 if (mclk_table == NULL || mclk_table->count == 0)
4114 count = (uint8_t)(mclk_table->count);
4116 for(i = 0; i < count; i++) {
4117 if(mclk_table->entries[i].clk >= frequency)
/*
 * vega10_notify_smc_display_config_after_ps_adjustment - push the display
 * configuration's minimum clocks to the SMC after a power-state change:
 * 1) toggle fast uclk switching based on the active display count,
 * 2) request a hard-min DCEFCLK matching the config's minimum,
 * 3) set the soft-min UCLK index for the minimum memory clock.
 */
4124 static int vega10_notify_smc_display_config_after_ps_adjustment(
4125 struct pp_hwmgr *hwmgr)
4127 struct vega10_hwmgr *data =
4128 (struct vega10_hwmgr *)(hwmgr->backend);
4129 struct vega10_single_dpm_table *dpm_table =
4130 &data->dpm_table.dcef_table;
4131 struct phm_ppt_v2_information *table_info =
4132 (struct phm_ppt_v2_information *)hwmgr->pptable;
4133 struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
4135 uint32_t num_active_disps = 0;
4136 struct cgs_display_info info = {0};
4137 struct PP_Clocks min_clocks = {0};
4139 struct pp_display_clock_request clock_req;
4141 info.mode_info = NULL;
4143 cgs_get_active_displays_info(hwmgr->device, &info);
4145 num_active_disps = info.display_count;
/* Fast uclk switching is only safe with at most one active display. */
4147 if (num_active_disps > 1)
4148 vega10_notify_smc_display_change(hwmgr, false);
4150 vega10_notify_smc_display_change(hwmgr, true);
4152 min_clocks.dcefClock = hwmgr->display_config.min_dcef_set_clk;
4153 min_clocks.dcefClockInSR = hwmgr->display_config.min_dcef_deep_sleep_set_clk;
4154 min_clocks.memoryClock = hwmgr->display_config.min_mem_set_clock;
/* Look for a DCEF DPM level matching the requested minimum exactly. */
4156 for (i = 0; i < dpm_table->count; i++) {
4157 if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
4161 if (i < dpm_table->count) {
4162 clock_req.clock_type = amd_pp_dcef_clock;
4163 clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value;
4164 if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
/* Also program the deep-sleep DCEFCLK floor (value scaled by 100). */
4165 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc_with_parameter(
4166 hwmgr->smumgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
4167 min_clocks.dcefClockInSR /100),
4168 "Attempt to set divider for DCEFCLK Failed!",);
4170 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
4173 pr_debug("Cannot find requested DCEFCLK!");
/* Memory: translate the min clock to a table index and soft-min it. */
4176 if (min_clocks.memoryClock != 0) {
4177 idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
4178 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
4179 data->dpm_table.mem_table.dpm_state.soft_min_level= idx;
/*
 * vega10_force_dpm_highest - pin GFX and MEM DPM to their highest enabled
 * levels by setting boot and max level to the same (highest) index, then
 * uploading both limits to the SMC.
 */
4185 static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
4187 struct vega10_hwmgr *data =
4188 (struct vega10_hwmgr *)(hwmgr->backend);
/* Chained assignment: boot == max == highest level for each domain. */
4190 data->smc_state_table.gfx_boot_level =
4191 data->smc_state_table.gfx_max_level =
4192 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4193 data->smc_state_table.mem_boot_level =
4194 data->smc_state_table.mem_max_level =
4195 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4197 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4198 "Failed to upload boot level to highest!",
4201 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4202 "Failed to upload dpm max level to highest!",
4208 static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
4210 struct vega10_hwmgr *data =
4211 (struct vega10_hwmgr *)(hwmgr->backend);
4213 data->smc_state_table.gfx_boot_level =
4214 data->smc_state_table.gfx_max_level =
4215 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4216 data->smc_state_table.mem_boot_level =
4217 data->smc_state_table.mem_max_level =
4218 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4220 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4221 "Failed to upload boot level to highest!",
4224 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4225 "Failed to upload dpm max level to highest!",
/*
 * vega10_unforce_dpm_levels - restore automatic DPM: boot level back to
 * the lowest enabled level, max level back to the highest, for both GFX
 * and MEM, then upload both limits to the SMC.
 */
4232 static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
4234 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4236 data->smc_state_table.gfx_boot_level =
4237 vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
4238 data->smc_state_table.gfx_max_level =
4239 vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
4240 data->smc_state_table.mem_boot_level =
4241 vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
4242 data->smc_state_table.mem_max_level =
4243 vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
4245 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4246 "Failed to upload DPM Bootup Levels!",
4249 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4250 "Failed to upload DPM Max Levels!",
/*
 * vega10_get_profiling_clk_mask - pick UMD-pstate level indices for
 * profiling modes.  Defaults to the VEGA10_UMD_PSTATE_* levels when the
 * dependency tables are deep enough; PROFILE_PEAK overrides all masks to
 * the top table entries (MIN_SCLK/MIN_MCLK branches are elided here).
 */
4255 static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
4256 uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
4258 struct phm_ppt_v2_information *table_info =
4259 (struct phm_ppt_v2_information *)(hwmgr->pptable);
/* Only use the canned pstate levels if every table actually has them. */
4261 if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
4262 table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
4263 table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
4264 *sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
4265 *soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
4266 *mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
4269 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
4271 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
4273 } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
/* Peak profiling: highest entry of every dependency table. */
4274 *sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
4275 *soc_mask = table_info->vdd_dep_on_socclk->count - 1;
4276 *mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
/*
 * vega10_set_fan_control_mode - switch between SMC-driven and manual fan
 * control.  NONE forces 100% fan; MANUAL stops the SMC fan controller
 * (when the microcode fan-control cap is set); AUTO hands control back
 * to the SMC (switch/break lines elided in this view).
 */
4281 static int vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
4286 case AMD_FAN_CTRL_NONE:
4287 result = vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
4289 case AMD_FAN_CTRL_MANUAL:
4290 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4291 PHM_PlatformCaps_MicrocodeFanControl))
4292 result = vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
4294 case AMD_FAN_CTRL_AUTO:
4295 result = vega10_fan_ctrl_set_static_mode(hwmgr, mode);
4297 result = vega10_fan_ctrl_start_smc_fan_control(hwmgr);
/*
 * vega10_dpm_force_dpm_level - apply a forced DPM level (high/low/auto,
 * manual, or one of the profiling modes).  Handles entry/exit of profile
 * mode (saving/restoring the previous level and toggling GFX clockgating)
 * and adjusts fan control when entering/leaving PROFILE_PEAK.
 */
4305 static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
4306 enum amd_dpm_forced_level level)
4309 uint32_t sclk_mask = 0;
4310 uint32_t mclk_mask = 0;
4311 uint32_t soc_mask = 0;
/* Bitmask of all profiling levels, used to detect mode transitions. */
4312 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
4313 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
4314 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
4315 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
/* No-op if the requested level is already active. */
4317 if (level == hwmgr->dpm_level)
4320 if (!(hwmgr->dpm_level & profile_mode_mask)) {
4321 /* enter profile mode, save current level, disable gfx cg*/
4322 if (level & profile_mode_mask) {
4323 hwmgr->saved_dpm_level = hwmgr->dpm_level;
4324 cgs_set_clockgating_state(hwmgr->device,
4325 AMD_IP_BLOCK_TYPE_GFX,
4326 AMD_CG_STATE_UNGATE);
4329 /* exit profile mode, restore level, enable gfx cg*/
4330 if (!(level & profile_mode_mask)) {
4331 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
4332 level = hwmgr->saved_dpm_level;
4333 cgs_set_clockgating_state(hwmgr->device,
4334 AMD_IP_BLOCK_TYPE_GFX,
4340 case AMD_DPM_FORCED_LEVEL_HIGH:
4341 ret = vega10_force_dpm_highest(hwmgr);
4344 hwmgr->dpm_level = level;
4346 case AMD_DPM_FORCED_LEVEL_LOW:
4347 ret = vega10_force_dpm_lowest(hwmgr);
4350 hwmgr->dpm_level = level;
4352 case AMD_DPM_FORCED_LEVEL_AUTO:
4353 ret = vega10_unforce_dpm_levels(hwmgr);
4356 hwmgr->dpm_level = level;
/* All profiling levels resolve to clock masks and force those levels. */
4358 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
4359 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
4360 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
4361 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
4362 ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
4365 hwmgr->dpm_level = level;
4366 vega10_force_clock_level(hwmgr, PP_SCLK, 1<<sclk_mask);
4367 vega10_force_clock_level(hwmgr, PP_MCLK, 1<<mclk_mask);
4369 case AMD_DPM_FORCED_LEVEL_MANUAL:
4370 hwmgr->dpm_level = level;
4372 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
/* Peak profiling runs the fan at 100%; restore auto fan on exit. */
4377 if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4378 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
4379 else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK && hwmgr->saved_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
4380 vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
4385 static int vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
4387 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4389 if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
4390 return AMD_FAN_CTRL_MANUAL;
4392 return AMD_FAN_CTRL_AUTO;
/*
 * vega10_get_dal_power_level - report the AC max engine/memory clocks
 * from the powerplay table to DAL.
 */
4395 static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
4396 struct amd_pp_simple_clock_info *info)
4398 struct phm_ppt_v2_information *table_info =
4399 (struct phm_ppt_v2_information *)hwmgr->pptable;
4400 struct phm_clock_and_voltage_limits *max_limits =
4401 &table_info->max_clock_voltage_on_ac;
4403 info->engine_max_clock = max_limits->sclk;
4404 info->memory_max_clock = max_limits->mclk;
/*
 * vega10_get_sclks - fill @clocks with the non-zero entries of the sclk
 * voltage-dependency table (zero-clock entries are skipped).
 */
4409 static void vega10_get_sclks(struct pp_hwmgr *hwmgr,
4410 struct pp_clock_levels_with_latency *clocks)
4412 struct phm_ppt_v2_information *table_info =
4413 (struct phm_ppt_v2_information *)hwmgr->pptable;
4414 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4415 table_info->vdd_dep_on_sclk;
4418 for (i = 0; i < dep_table->count; i++) {
4419 if (dep_table->entries[i].clk) {
4420 clocks->data[clocks->num_levels].clocks_in_khz =
4421 dep_table->entries[i].clk;
4422 clocks->num_levels++;
4428 static uint32_t vega10_get_mem_latency(struct pp_hwmgr *hwmgr,
4431 if (clock >= MEM_FREQ_LOW_LATENCY &&
4432 clock < MEM_FREQ_HIGH_LATENCY)
4433 return MEM_LATENCY_HIGH;
4434 else if (clock >= MEM_FREQ_HIGH_LATENCY)
4435 return MEM_LATENCY_LOW;
4437 return MEM_LATENCY_ERR;
/*
 * vega10_get_memclocks - fill @clocks with the non-zero mclk dependency
 * entries and, in parallel, rebuild the driver's mclk_latency_table so
 * the two stay index-aligned.
 */
4440 static void vega10_get_memclocks(struct pp_hwmgr *hwmgr,
4441 struct pp_clock_levels_with_latency *clocks)
4443 struct phm_ppt_v2_information *table_info =
4444 (struct phm_ppt_v2_information *)hwmgr->pptable;
4445 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4446 table_info->vdd_dep_on_mclk;
4447 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
/* Reset both output tables before repopulating. */
4450 clocks->num_levels = 0;
4451 data->mclk_latency_table.count = 0;
4453 for (i = 0; i < dep_table->count; i++) {
4454 if (dep_table->entries[i].clk) {
/* Write the frequency into both tables via a chained assignment. */
4455 clocks->data[clocks->num_levels].clocks_in_khz =
4456 data->mclk_latency_table.entries
4457 [data->mclk_latency_table.count].frequency =
4458 dep_table->entries[i].clk;
4459 clocks->data[clocks->num_levels].latency_in_us =
4460 data->mclk_latency_table.entries
4461 [data->mclk_latency_table.count].latency =
4462 vega10_get_mem_latency(hwmgr,
4463 dep_table->entries[i].clk);
4464 clocks->num_levels++;
4465 data->mclk_latency_table.count++;
/*
 * vega10_get_dcefclocks - fill @clocks from the dcefclk dependency table;
 * latency is reported as 0 for every level.
 * NOTE(review): unlike vega10_get_memclocks, num_levels is not reset here
 * — presumably the caller zero-initializes @clocks; verify.
 */
4470 static void vega10_get_dcefclocks(struct pp_hwmgr *hwmgr,
4471 struct pp_clock_levels_with_latency *clocks)
4473 struct phm_ppt_v2_information *table_info =
4474 (struct phm_ppt_v2_information *)hwmgr->pptable;
4475 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4476 table_info->vdd_dep_on_dcefclk;
4479 for (i = 0; i < dep_table->count; i++) {
4480 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4481 clocks->data[i].latency_in_us = 0;
4482 clocks->num_levels++;
/*
 * vega10_get_socclocks - fill @clocks from the socclk dependency table;
 * latency is reported as 0 for every level (same caveat as
 * vega10_get_dcefclocks: num_levels is not reset here).
 */
4486 static void vega10_get_socclocks(struct pp_hwmgr *hwmgr,
4487 struct pp_clock_levels_with_latency *clocks)
4489 struct phm_ppt_v2_information *table_info =
4490 (struct phm_ppt_v2_information *)hwmgr->pptable;
4491 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
4492 table_info->vdd_dep_on_socclk;
4495 for (i = 0; i < dep_table->count; i++) {
4496 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4497 clocks->data[i].latency_in_us = 0;
4498 clocks->num_levels++;
/*
 * vega10_get_clock_by_type_with_latency - dispatch a clock/latency query
 * to the per-domain helper for the requested clock type.
 */
4502 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
4503 enum amd_pp_clock_type type,
4504 struct pp_clock_levels_with_latency *clocks)
4507 case amd_pp_sys_clock:
4508 vega10_get_sclks(hwmgr, clocks);
4510 case amd_pp_mem_clock:
4511 vega10_get_memclocks(hwmgr, clocks);
4513 case amd_pp_dcef_clock:
4514 vega10_get_dcefclocks(hwmgr, clocks);
4516 case amd_pp_soc_clock:
4517 vega10_get_socclocks(hwmgr, clocks);
/*
 * vega10_get_clock_by_type_with_voltage - report clock levels plus their
 * VDDC (looked up through vddc_lookup_table via each entry's vddInd) for
 * the requested clock type.
 */
4526 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
4527 enum amd_pp_clock_type type,
4528 struct pp_clock_levels_with_voltage *clocks)
4530 struct phm_ppt_v2_information *table_info =
4531 (struct phm_ppt_v2_information *)hwmgr->pptable;
4532 struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
/* Select which dependency table backs the requested clock type. */
4536 case amd_pp_mem_clock:
4537 dep_table = table_info->vdd_dep_on_mclk;
4539 case amd_pp_dcef_clock:
4540 dep_table = table_info->vdd_dep_on_dcefclk;
4542 case amd_pp_disp_clock:
4543 dep_table = table_info->vdd_dep_on_dispclk;
4545 case amd_pp_pixel_clock:
4546 dep_table = table_info->vdd_dep_on_pixclk;
4548 case amd_pp_phy_clock:
4549 dep_table = table_info->vdd_dep_on_phyclk;
4555 for (i = 0; i < dep_table->count; i++) {
4556 clocks->data[i].clocks_in_khz = dep_table->entries[i].clk;
4557 clocks->data[i].voltage_in_mv = (uint32_t)(table_info->vddc_lookup_table->
4558 entries[dep_table->entries[i].vddInd].us_vdd);
4559 clocks->num_levels++;
/* NOTE(review): loop body may break early (lines elided); this check
 * presumably flags a truncated copy — confirm against the full source. */
4562 if (i < dep_table->count)
/*
 * vega10_set_watermarks_for_clocks_ranges - translate DAL's watermark
 * clock ranges into the SMC Watermarks_t table (DMIF rows keyed by
 * DCEFCLK, MCIF rows keyed by SOCCLK), converting kHz to the SMC's
 * units and storing little-endian 16-bit values.  Marks the table as
 * existing via water_marks_bitmap so it gets uploaded later.
 */
4568 static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
4569 struct pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges)
4571 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4572 Watermarks_t *table = &(data->smc_state_table.water_marks_table);
4576 if (!data->registry_data.disable_water_mark) {
/* DMIF watermark sets: DCEFCLK + UCLK min/max ranges. */
4577 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_dmif; i++) {
4578 table->WatermarkRow[WM_DCEFCLK][i].MinClock =
4579 cpu_to_le16((uint16_t)
4580 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_dcefclk_in_khz) /
4582 table->WatermarkRow[WM_DCEFCLK][i].MaxClock =
4583 cpu_to_le16((uint16_t)
4584 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_dcefclk_in_khz) /
4586 table->WatermarkRow[WM_DCEFCLK][i].MinUclk =
4587 cpu_to_le16((uint16_t)
4588 (wm_with_clock_ranges->wm_sets_dmif[i].wm_min_memclk_in_khz) /
4590 table->WatermarkRow[WM_DCEFCLK][i].MaxUclk =
4591 cpu_to_le16((uint16_t)
4592 (wm_with_clock_ranges->wm_sets_dmif[i].wm_max_memclk_in_khz) /
4594 table->WatermarkRow[WM_DCEFCLK][i].WmSetting = (uint8_t)
4595 wm_with_clock_ranges->wm_sets_dmif[i].wm_set_id;
/* MCIF watermark sets: SOCCLK + UCLK min/max ranges. */
4598 for (i = 0; i < wm_with_clock_ranges->num_wm_sets_mcif; i++) {
4599 table->WatermarkRow[WM_SOCCLK][i].MinClock =
4600 cpu_to_le16((uint16_t)
4601 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_socclk_in_khz) /
4603 table->WatermarkRow[WM_SOCCLK][i].MaxClock =
4604 cpu_to_le16((uint16_t)
4605 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_socclk_in_khz) /
4607 table->WatermarkRow[WM_SOCCLK][i].MinUclk =
4608 cpu_to_le16((uint16_t)
4609 (wm_with_clock_ranges->wm_sets_mcif[i].wm_min_memclk_in_khz) /
4611 table->WatermarkRow[WM_SOCCLK][i].MaxUclk =
4612 cpu_to_le16((uint16_t)
4613 (wm_with_clock_ranges->wm_sets_mcif[i].wm_max_memclk_in_khz) /
4615 table->WatermarkRow[WM_SOCCLK][i].WmSetting = (uint8_t)
4616 wm_with_clock_ranges->wm_sets_mcif[i].wm_set_id;
4618 data->water_marks_bitmap = WaterMarksExist;
/*
 * vega10_force_clock_level - force a DPM domain (SCLK or MCLK) to the
 * levels selected in @mask: boot level = lowest set bit, max level =
 * highest set bit, then upload both limits to the SMC.  Rejected when a
 * non-manual forced level is active.
 */
4624 static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
4625 enum pp_clock_type type, uint32_t mask)
4627 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4630 if (hwmgr->dpm_level & (AMD_DPM_FORCED_LEVEL_AUTO |
4631 AMD_DPM_FORCED_LEVEL_LOW |
4632 AMD_DPM_FORCED_LEVEL_HIGH))
/* SCLK: scan up for the lowest set bit (boot level)... */
4637 for (i = 0; i < 32; i++) {
4638 if (mask & (1 << i))
4641 data->smc_state_table.gfx_boot_level = i;
/* ...and down for the highest set bit (max level). */
4643 for (i = 31; i >= 0; i--) {
4644 if (mask & (1 << i))
4647 data->smc_state_table.gfx_max_level = i;
4649 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4650 "Failed to upload boot level to lowest!",
4653 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4654 "Failed to upload dpm max level to highest!",
/* MCLK: identical bit scans against the memory DPM state. */
4659 for (i = 0; i < 32; i++) {
4660 if (mask & (1 << i))
4663 data->smc_state_table.mem_boot_level = i;
4665 for (i = 31; i >= 0; i--) {
4666 if (mask & (1 << i))
4669 data->smc_state_table.mem_max_level = i;
4671 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
4672 "Failed to upload boot level to lowest!",
4675 PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
4676 "Failed to upload dpm max level to highest!",
/*
 * vega10_print_clock_levels - format the DPM levels of the requested
 * clock type into @buf (sysfs pp_dpm_* style), marking the level the SMC
 * reports as current with a '*'.
 */
4689 static int vega10_print_clock_levels(struct pp_hwmgr *hwmgr,
4690 enum pp_clock_type type, char *buf)
4692 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4693 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
4694 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
4695 struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
4696 int i, now, size = 0;
4700 if (data->registry_data.sclk_dpm_key_disabled)
4703 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4704 PPSMC_MSG_GetCurrentGfxclkIndex),
4705 "Attempt to get current sclk index Failed!",
4707 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4709 "Attempt to read sclk index Failed!",
/* Table values are in 10 kHz units; /100 yields MHz. */
4712 for (i = 0; i < sclk_table->count; i++)
4713 size += sprintf(buf + size, "%d: %uMhz %s\n",
4714 i, sclk_table->dpm_levels[i].value / 100,
4715 (i == now) ? "*" : "");
4718 if (data->registry_data.mclk_dpm_key_disabled)
4721 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4722 PPSMC_MSG_GetCurrentUclkIndex),
4723 "Attempt to get current mclk index Failed!",
4725 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4727 "Attempt to read mclk index Failed!",
4730 for (i = 0; i < mclk_table->count; i++)
4731 size += sprintf(buf + size, "%d: %uMhz %s\n",
4732 i, mclk_table->dpm_levels[i].value / 100,
4733 (i == now) ? "*" : "");
/* NOTE(review): this branch queries the PCIe link index, but the error
 * strings below still say "mclk" — copy/paste from the branch above. */
4736 PP_ASSERT_WITH_CODE(!smum_send_msg_to_smc(hwmgr->smumgr,
4737 PPSMC_MSG_GetCurrentLinkIndex),
4738 "Attempt to get current mclk index Failed!",
4740 PP_ASSERT_WITH_CODE(!vega10_read_arg_from_smc(hwmgr->smumgr,
4742 "Attempt to read mclk index Failed!",
4745 for (i = 0; i < pcie_table->count; i++)
4746 size += sprintf(buf + size, "%d: %s %s\n", i,
4747 (pcie_table->pcie_gen[i] == 0) ? "2.5GB, x1" :
4748 (pcie_table->pcie_gen[i] == 1) ? "5.0GB, x16" :
4749 (pcie_table->pcie_gen[i] == 2) ? "8.0GB, x16" : "",
4750 (i == now) ? "*" : "");
4758 static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
4760 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4762 uint32_t num_turned_on_displays = 1;
4763 Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
4764 struct cgs_display_info info = {0};
4766 if ((data->water_marks_bitmap & WaterMarksExist) &&
4767 !(data->water_marks_bitmap & WaterMarksLoaded)) {
4768 result = vega10_copy_table_to_smc(hwmgr->smumgr,
4769 (uint8_t *)wm_table, WMTABLE);
4770 PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
4771 data->water_marks_bitmap |= WaterMarksLoaded;
4774 if (data->water_marks_bitmap & WaterMarksLoaded) {
4775 cgs_get_active_displays_info(hwmgr->device, &info);
4776 num_turned_on_displays = info.display_count;
4777 smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
4778 PPSMC_MSG_NumOfDisplays, num_turned_on_displays);
/*
 * vega10_enable_disable_uvd_dpm - enable or disable the UVD DPM SMU
 * feature (no-op if the feature is unsupported); tracks the resulting
 * state in smu_features[GNLD_DPM_UVD].enabled.
 */
4784 int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
4786 struct vega10_hwmgr *data =
4787 (struct vega10_hwmgr *)(hwmgr->backend);
4789 if (data->smu_features[GNLD_DPM_UVD].supported) {
4790 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr->smumgr,
4792 data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
4793 "Attempt to Enable/Disable DPM UVD Failed!",
4795 data->smu_features[GNLD_DPM_UVD].enabled = enable;
4800 static int vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
4802 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4804 data->vce_power_gated = bgate;
4805 return vega10_enable_disable_vce_dpm(hwmgr, !bgate);
4808 static int vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
4810 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4812 data->uvd_power_gated = bgate;
4813 return vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
4816 static inline bool vega10_are_power_levels_equal(
4817 const struct vega10_performance_level *pl1,
4818 const struct vega10_performance_level *pl2)
4820 return ((pl1->soc_clock == pl2->soc_clock) &&
4821 (pl1->gfx_clock == pl2->gfx_clock) &&
4822 (pl1->mem_clock == pl2->mem_clock));
/*
 * vega10_check_states_equal - decide whether two hardware power states
 * are equivalent: same level count, per-level clocks equal, then UVD,
 * VCE, and sclk_threshold used as tie-breakers.
 */
4825 static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
4826 const struct pp_hw_power_state *pstate1,
4827 const struct pp_hw_power_state *pstate2, bool *equal)
4829 const struct vega10_power_state *psa;
4830 const struct vega10_power_state *psb;
4833 if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
4836 psa = cast_const_phw_vega10_power_state(pstate1);
4837 psb = cast_const_phw_vega10_power_state(pstate2);
4838 /* If the two states don't even have the same number of performance levels they cannot be the same state. */
4839 if (psa->performance_level_count != psb->performance_level_count) {
4844 for (i = 0; i < psa->performance_level_count; i++) {
4845 if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
4846 /* If we have found even one performance level pair that is different the states are different. */
4852 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
4853 *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
4854 *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
4855 *equal &= (psa->sclk_threshold == psb->sclk_threshold);
/*
 * vega10_check_smc_update_required_for_display_configuration - true when
 * the SMC must be re-notified: the active display count changed, or (with
 * sclk deep sleep enabled) the minimum self-refresh clock changed.
 */
4861 vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
4863 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4864 bool is_update_required = false;
4865 struct cgs_display_info info = {0, 0, NULL};
4867 cgs_get_active_displays_info(hwmgr->device, &info);
4869 if (data->display_timing.num_existing_displays != info.display_count)
4870 is_update_required = true;
4872 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
4873 if (data->display_timing.min_clock_in_sr != hwmgr->display_config.min_core_set_clock_in_sr)
4874 is_update_required = true;
4877 return is_update_required;
/*
 * vega10_disable_dpm_tasks - tear down dynamic power management in
 * dependency order: thermal protection, power containment, DIDT, AVFS,
 * the DPM features themselves, deep sleep, ULV, and ACG.  Each step's
 * failure is logged and folded into the final result, but teardown
 * continues so later steps still run.
 */
4880 static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
4882 int tmp_result, result = 0;
/* Nothing to do if DPM is not currently running. */
4884 tmp_result = (vega10_is_dpm_running(hwmgr)) ? 0 : -1;
4885 PP_ASSERT_WITH_CODE(tmp_result == 0,
4886 "DPM is not running right now, no need to disable DPM!",
4889 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
4890 PHM_PlatformCaps_ThermalController))
4891 vega10_disable_thermal_protection(hwmgr);
4893 tmp_result = vega10_disable_power_containment(hwmgr);
4894 PP_ASSERT_WITH_CODE((tmp_result == 0),
4895 "Failed to disable power containment!", result = tmp_result);
4897 tmp_result = vega10_disable_didt_config(hwmgr);
4898 PP_ASSERT_WITH_CODE((tmp_result == 0),
4899 "Failed to disable didt config!", result = tmp_result);
4901 tmp_result = vega10_avfs_enable(hwmgr, false);
4902 PP_ASSERT_WITH_CODE((tmp_result == 0),
4903 "Failed to disable AVFS!", result = tmp_result);
4905 tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
4906 PP_ASSERT_WITH_CODE((tmp_result == 0),
4907 "Failed to stop DPM!", result = tmp_result);
4909 tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
4910 PP_ASSERT_WITH_CODE((tmp_result == 0),
4911 "Failed to disable deep sleep!", result = tmp_result);
4913 tmp_result = vega10_disable_ulv(hwmgr);
4914 PP_ASSERT_WITH_CODE((tmp_result == 0),
4915 "Failed to disable ulv!", result = tmp_result);
4917 tmp_result = vega10_acg_disable(hwmgr);
4918 PP_ASSERT_WITH_CODE((tmp_result == 0),
4919 "Failed to disable acg!", result = tmp_result);
/*
 * vega10_power_off_asic - disable all DPM tasks and clear the
 * WaterMarksLoaded flag so the watermark table is re-uploaded on the
 * next power-on.
 */
4923 static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
4925 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4928 result = vega10_disable_dpm_tasks(hwmgr);
4929 PP_ASSERT_WITH_CODE((0 == result),
4930 "[disable_dpm_tasks] Failed to disable DPM!",
4932 data->water_marks_bitmap &= ~(WaterMarksLoaded);
/*
 * vega10_find_min_clock_index - find the first enabled GFX and MEM DPM
 * level whose clock meets the requested minimums, writing the indices
 * to @sclk_idx / @mclk_idx (break/assignment lines elided in this view).
 */
4937 static void vega10_find_min_clock_index(struct pp_hwmgr *hwmgr,
4938 uint32_t *sclk_idx, uint32_t *mclk_idx,
4939 uint32_t min_sclk, uint32_t min_mclk)
4941 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4942 struct vega10_dpm_table *dpm_table = &(data->dpm_table);
4945 for (i = 0; i < dpm_table->gfx_table.count; i++) {
4946 if (dpm_table->gfx_table.dpm_levels[i].enabled &&
4947 dpm_table->gfx_table.dpm_levels[i].value >= min_sclk) {
4953 for (i = 0; i < dpm_table->mem_table.count; i++) {
4954 if (dpm_table->mem_table.dpm_levels[i].enabled &&
4955 dpm_table->mem_table.dpm_levels[i].value >= min_mclk) {
/*
 * vega10_set_power_profile_state - program soft-minimum SCLK/MCLK DPM
 * indices for a power profile's minimum clocks.  Only applies while the
 * forced level is AUTO; indices left at the ~0 sentinel (no level found)
 * are skipped.  Note the uint32_t vs ~0 (int -1) comparison relies on
 * the usual arithmetic conversions wrapping ~0 to UINT32_MAX.
 */
4962 static int vega10_set_power_profile_state(struct pp_hwmgr *hwmgr,
4963 struct amd_pp_profile *request)
4965 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
4966 uint32_t sclk_idx = ~0, mclk_idx = ~0;
4968 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_AUTO)
4971 vega10_find_min_clock_index(hwmgr, &sclk_idx, &mclk_idx,
4972 request->min_sclk, request->min_mclk);
4974 if (sclk_idx != ~0) {
4975 if (!data->registry_data.sclk_dpm_key_disabled)
4976 PP_ASSERT_WITH_CODE(
4977 !smum_send_msg_to_smc_with_parameter(
4979 PPSMC_MSG_SetSoftMinGfxclkByIndex,
4981 "Failed to set soft min sclk index!",
4985 if (mclk_idx != ~0) {
4986 if (!data->registry_data.mclk_dpm_key_disabled)
4987 PP_ASSERT_WITH_CODE(
4988 !smum_send_msg_to_smc_with_parameter(
4990 PPSMC_MSG_SetSoftMinUclkByIndex,
4992 "Failed to set soft min mclk index!",
/*
 * vega10_get_sclk_od - report the sclk overdrive as the percentage
 * difference between the current top DPM level and the golden (stock)
 * top level (division by the golden value is on an elided line).
 */
4999 static int vega10_get_sclk_od(struct pp_hwmgr *hwmgr)
5001 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
5002 struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
5003 struct vega10_single_dpm_table *golden_sclk_table =
5004 &(data->golden_dpm_table.gfx_table);
5007 value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
5008 golden_sclk_table->dpm_levels
5009 [golden_sclk_table->count - 1].value) *
5011 golden_sclk_table->dpm_levels
5012 [golden_sclk_table->count - 1].value;
/*
 * vega10_set_sclk_od - apply an sclk overdrive percentage to the request
 * state's top performance level, scaling from the golden top level and
 * clamping to the platform's overdrive engine-clock limit.
 */
5017 static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5019 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
5020 struct vega10_single_dpm_table *golden_sclk_table =
5021 &(data->golden_dpm_table.gfx_table);
5022 struct pp_power_state *ps;
5023 struct vega10_power_state *vega10_ps;
5025 ps = hwmgr->request_ps;
5030 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
/* New top-level gfx clock = golden top * (1 + value/100); the percent
 * arithmetic spans elided lines. */
5032 vega10_ps->performance_levels
5033 [vega10_ps->performance_level_count - 1].gfx_clock =
5034 golden_sclk_table->dpm_levels
5035 [golden_sclk_table->count - 1].value *
5037 golden_sclk_table->dpm_levels
5038 [golden_sclk_table->count - 1].value;
/* Clamp to the board's maximum allowed overdrive engine clock. */
5040 if (vega10_ps->performance_levels
5041 [vega10_ps->performance_level_count - 1].gfx_clock >
5042 hwmgr->platform_descriptor.overdriveLimit.engineClock)
5043 vega10_ps->performance_levels
5044 [vega10_ps->performance_level_count - 1].gfx_clock =
5045 hwmgr->platform_descriptor.overdriveLimit.engineClock;
/*
 * vega10_get_mclk_od - report the mclk overdrive as the percentage
 * difference between the current top DPM level and the golden (stock)
 * top level (division by the golden value is on an elided line).
 */
5050 static int vega10_get_mclk_od(struct pp_hwmgr *hwmgr)
5052 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
5053 struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
5054 struct vega10_single_dpm_table *golden_mclk_table =
5055 &(data->golden_dpm_table.mem_table);
5058 value = (mclk_table->dpm_levels
5059 [mclk_table->count - 1].value -
5060 golden_mclk_table->dpm_levels
5061 [golden_mclk_table->count - 1].value) *
5063 golden_mclk_table->dpm_levels
5064 [golden_mclk_table->count - 1].value;
/*
 * vega10_set_mclk_od - apply an mclk overdrive percentage to the request
 * state's top performance level, scaling from the golden top level and
 * clamping to the platform's overdrive memory-clock limit.
 */
5069 static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
5071 struct vega10_hwmgr *data = (struct vega10_hwmgr *)(hwmgr->backend);
5072 struct vega10_single_dpm_table *golden_mclk_table =
5073 &(data->golden_dpm_table.mem_table);
5074 struct pp_power_state *ps;
5075 struct vega10_power_state *vega10_ps;
5077 ps = hwmgr->request_ps;
5082 vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
/* New top-level mem clock = golden top * (1 + value/100); the percent
 * arithmetic spans elided lines. */
5084 vega10_ps->performance_levels
5085 [vega10_ps->performance_level_count - 1].mem_clock =
5086 golden_mclk_table->dpm_levels
5087 [golden_mclk_table->count - 1].value *
5089 golden_mclk_table->dpm_levels
5090 [golden_mclk_table->count - 1].value;
/* Clamp to the board's maximum allowed overdrive memory clock. */
5092 if (vega10_ps->performance_levels
5093 [vega10_ps->performance_level_count - 1].mem_clock >
5094 hwmgr->platform_descriptor.overdriveLimit.memoryClock)
5095 vega10_ps->performance_levels
5096 [vega10_ps->performance_level_count - 1].mem_clock =
5097 hwmgr->platform_descriptor.overdriveLimit.memoryClock;
/*
 * vega10_hwmgr_funcs - powerplay hwmgr vtable for Vega10, binding the
 * generic pp_hwmgr_func hooks to the implementations in this file (plus
 * the thermal/fan helpers from vega10_thermal).
 */
5102 static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
5103 .backend_init = vega10_hwmgr_backend_init,
5104 .backend_fini = vega10_hwmgr_backend_fini,
5105 .asic_setup = vega10_setup_asic_task,
5106 .dynamic_state_management_enable = vega10_enable_dpm_tasks,
5107 .dynamic_state_management_disable = vega10_disable_dpm_tasks,
5108 .get_num_of_pp_table_entries =
5109 vega10_get_number_of_powerplay_table_entries,
5110 .get_power_state_size = vega10_get_power_state_size,
5111 .get_pp_table_entry = vega10_get_pp_table_entry,
5112 .patch_boot_state = vega10_patch_boot_state,
5113 .apply_state_adjust_rules = vega10_apply_state_adjust_rules,
5114 .power_state_set = vega10_set_power_state_tasks,
5115 .get_sclk = vega10_dpm_get_sclk,
5116 .get_mclk = vega10_dpm_get_mclk,
5117 .notify_smc_display_config_after_ps_adjustment =
5118 vega10_notify_smc_display_config_after_ps_adjustment,
5119 .force_dpm_level = vega10_dpm_force_dpm_level,
5120 .get_temperature = vega10_thermal_get_temperature,
5121 .stop_thermal_controller = vega10_thermal_stop_thermal_controller,
5122 .get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
5123 .get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
5124 .set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
5125 .reset_fan_speed_to_default =
5126 vega10_fan_ctrl_reset_fan_speed_to_default,
5127 .get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
5128 .set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
5129 .uninitialize_thermal_controller =
5130 vega10_thermal_ctrl_uninitialize_thermal_controller,
5131 .set_fan_control_mode = vega10_set_fan_control_mode,
5132 .get_fan_control_mode = vega10_get_fan_control_mode,
5133 .read_sensor = vega10_read_sensor,
5134 .get_dal_power_level = vega10_get_dal_power_level,
5135 .get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
5136 .get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
5137 .set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
5138 .display_clock_voltage_request = vega10_display_clock_voltage_request,
5139 .force_clock_level = vega10_force_clock_level,
5140 .print_clock_levels = vega10_print_clock_levels,
5141 .display_config_changed = vega10_display_configuration_changed_task,
5142 .powergate_uvd = vega10_power_gate_uvd,
5143 .powergate_vce = vega10_power_gate_vce,
5144 .check_states_equal = vega10_check_states_equal,
5145 .check_smc_update_required_for_display_configuration =
5146 vega10_check_smc_update_required_for_display_configuration,
5147 .power_off_asic = vega10_power_off_asic,
5148 .disable_smc_firmware_ctf = vega10_thermal_disable_alert,
5149 .set_power_profile_state = vega10_set_power_profile_state,
5150 .get_sclk_od = vega10_get_sclk_od,
5151 .set_sclk_od = vega10_set_sclk_od,
5152 .get_mclk_od = vega10_get_mclk_od,
5153 .set_mclk_od = vega10_set_mclk_od,
5154 .avfs_control = vega10_avfs_enable,
5157 int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
5159 hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
5160 hwmgr->pptable_func = &vega10_pptable_funcs;
5161 pp_vega10_thermal_initialize(hwmgr);