/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_atombios.h"
#include "amdgpu_dpm.h"
#include "kv_dpm.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_0_d.h"
#include "smu/smu_7_0_0_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#define KV_MAX_DEEPSLEEP_DIVIDER_ID	5
#define KV_MINIMUM_ENGINE_CLOCK		800
#define SMC_RAM_END			0x40000

static const struct amd_pm_funcs kv_dpm_funcs;

static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev);
static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable);
static void kv_init_graphics_levels(struct amdgpu_device *adev);
static int kv_calculate_ds_divider(struct amdgpu_device *adev);
static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev);
static int kv_calculate_dpm_settings(struct amdgpu_device *adev);
static void kv_enable_new_levels(struct amdgpu_device *adev);
static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
					   struct amdgpu_ps *new_rps);
static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level);
static int kv_set_enabled_levels(struct amdgpu_device *adev);
static int kv_force_dpm_highest(struct amdgpu_device *adev);
static int kv_force_dpm_lowest(struct amdgpu_device *adev);
static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *new_rps,
					struct amdgpu_ps *old_rps);
static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);

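/*
 * Voltages are tracked in two encodings: a 2-bit index into the platform's
 * VID mapping table and the 7-bit VID the voltage regulator actually takes.
 * The helpers below translate between them, preferring the BIOS
 * vddc-on-sclk dependency table when one is present.
 */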
static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_2bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		if (vid_2bit < vddc_sclk_table->count)
			return vddc_sclk_table->entries[vid_2bit].v;
		else
			return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_2bit == vid_2bit)
				return vid_mapping_table->entries[i].vid_7bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit;
	}
}

static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev,
				   struct sumo_vid_mapping_table *vid_mapping_table,
				   u32 vid_7bit)
{
	struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 i;

	if (vddc_sclk_table && vddc_sclk_table->count) {
		for (i = 0; i < vddc_sclk_table->count; i++) {
			if (vddc_sclk_table->entries[i].v == vid_7bit)
				return i;
		}
		return vddc_sclk_table->count - 1;
	} else {
		for (i = 0; i < vid_mapping_table->num_entries; i++) {
			if (vid_mapping_table->entries[i].vid_7bit == vid_7bit)
				return vid_mapping_table->entries[i].vid_2bit;
		}
		return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit;
	}
}

static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable)
{
/* This bit selects who handles display phy powergating.
 * Clear the bit to let atom handle it.
 * Set it to let the driver handle it.
 * For now we just let atom handle it.
 */
#if 0
	u32 v = RREG32(mmDOUT_SCRATCH3);

	if (enable)
		v |= 0x4;
	else
		v &= 0xFFFFFFFB;

	WREG32(mmDOUT_SCRATCH3, v);
#endif
}

static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev,
						      struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table,
						      ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i;
	u32 n = 0;
	u32 prev_sclk = 0;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK > prev_sclk) {
			sclk_voltage_mapping_table->entries[n].sclk_frequency =
				table[i].ulSupportedSCLK;
			sclk_voltage_mapping_table->entries[n].vid_2bit =
				table[i].usVoltageIndex;
			prev_sclk = table[i].ulSupportedSCLK;
			n++;
		}
	}

	sclk_voltage_mapping_table->num_max_dpm_entries = n;
}

static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev,
					     struct sumo_vid_mapping_table *vid_mapping_table,
					     ATOM_AVAILABLE_SCLK_LIST *table)
{
	u32 i, j;

	for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) {
		if (table[i].ulSupportedSCLK != 0) {
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit =
				table[i].usVoltageID;
			vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit =
				table[i].usVoltageIndex;
		}
	}

	for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) {
		if (vid_mapping_table->entries[i].vid_7bit == 0) {
			for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) {
				if (vid_mapping_table->entries[j].vid_7bit != 0) {
					vid_mapping_table->entries[i] =
						vid_mapping_table->entries[j];
					vid_mapping_table->entries[j].vid_7bit = 0;
					break;
				}
			}

			if (j == SUMO_MAX_NUMBER_VOLTAGES)
				break;
		}
	}

	vid_mapping_table->num_entries = i;
}

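/*
 * Local CAC tables list (block_id, signal_id, t) tuples for each power
 * block; kv_program_local_cac_table() walks a table until it reads a
 * block_id of 0xffffffff, so each one must end with that sentinel.  Only
 * the sentinel rows are reproduced below.
 */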
static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] =
{
	{ 0xffffffff }
};

static const struct kv_lcac_config_reg sx0_cac_config_reg[] =
{
	{ 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc0_cac_config_reg[] =
{
	{ 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc1_cac_config_reg[] =
{
	{ 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc2_cac_config_reg[] =
{
	{ 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg mc3_cac_config_reg[] =
{
	{ 0xc0400d54, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

static const struct kv_lcac_config_reg cpl_cac_config_reg[] =
{
	{ 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 }
};

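/*
 * Each kv_pt_config_reg entry is an (offset, mask, shift, value, type)
 * tuple; kv_program_pt_config_registers() applies them as read-modify-write
 * updates via the SMC, DIDT or MMIO register space selected by the type
 * field, stopping at the 0xFFFFFFFF terminator.
 */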
static const struct kv_pt_config_reg didt_config_kv[] =
{
	{ 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};

static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps)
{
	struct kv_ps *ps = rps->ps_priv;

	return ps;
}

static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

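/*
 * Program one CNTL register write per (block, signal) pair described by a
 * local CAC table, shifting each field into place through the masks from
 * the matching kv_lcac_config_reg entry.
 */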
static void kv_program_local_cac_table(struct amdgpu_device *adev,
				       const struct kv_lcac_config_values *local_cac_table,
				       const struct kv_lcac_config_reg *local_cac_reg)
{
	u32 i, count, data;
	const struct kv_lcac_config_values *values = local_cac_table;

	while (values->block_id != 0xffffffff) {
		count = values->signal_id;
		for (i = 0; i < count; i++) {
			data = ((values->block_id << local_cac_reg->block_shift) &
				local_cac_reg->block_mask);
			data |= ((i << local_cac_reg->signal_shift) &
				 local_cac_reg->signal_mask);
			data |= ((values->t << local_cac_reg->t_shift) &
				 local_cac_reg->t_mask);
			data |= ((1 << local_cac_reg->enable_shift) &
				 local_cac_reg->enable_mask);
			WREG32_SMC(local_cac_reg->cntl, data);
		}
		values++;
	}
}

static int kv_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct kv_pt_config_reg *cac_config_regs)
{
	const struct kv_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == KV_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case KV_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;
			cache = 0;

			switch (config_regs->type) {
			case KV_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case KV_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
		}
		config_regs++;
	}

	return 0;
}

static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

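/*
 * DIDT programming is bracketed by the RLC safe-mode handshake so the gfx
 * core is quiesced while the config registers and enable bits change.
 */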
static int kv_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping ||
	    pi->caps_db_ramping ||
	    pi->caps_td_ramping ||
	    pi->caps_tcp_ramping) {
		adev->gfx.rlc.funcs->enter_safe_mode(adev);

		if (enable) {
			ret = kv_program_pt_config_registers(adev, didt_config_kv);
			if (ret) {
				adev->gfx.rlc.funcs->exit_safe_mode(adev);
				return ret;
			}
		}

		kv_do_enable_didt(adev, enable);

		adev->gfx.rlc.funcs->exit_safe_mode(adev);
	}

	return 0;
}

static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_cac) {
		WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg);

		WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg);

		WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg);

		WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0);
		kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg);

		WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0);
		WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0);
		kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg);
	}
}

static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac);
			if (ret)
				pi->cac_enabled = false;
			else
				pi->cac_enabled = true;
		} else if (pi->cac_enabled) {
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int kv_process_firmware_header(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);
	if (ret == 0)
		pi->dpm_table_start = tmp;

	ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);
	if (ret == 0)
		pi->soft_regs_start = tmp;

	return ret;
}

static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_voltage_change_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable),
					  &pi->graphics_voltage_change_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_interval(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsInterval),
					  &pi->graphics_interval,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_set_dpm_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel),
					  &pi->graphics_boot_level,
					  sizeof(u8), pi->sram_end);

	return ret;
}

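/*
 * CG_FREQ_TRAN_VOTING_0 appears to select which clients get a vote on
 * SCLK frequency transitions; 0x3FFFC100 is the hardware-specific mask
 * used while DPM is active, and clearing it releases the voting clients.
 */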
static void kv_program_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100);
}

static void kv_clear_vc(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int kv_set_divider_value(struct amdgpu_device *adev,
				u32 index, u32 sclk)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct atom_clock_dividers dividers;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 sclk, false, &dividers);
	if (ret)
		return ret;

	pi->graphics_level[index].SclkDid = (u8)dividers.post_div;
	pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk);

	return 0;
}

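/*
 * The 8-bit VID decodes as 6200 - 25 * vid, i.e. a 1.55 V base stepping
 * down 6.25 mV per count when the result is read in 0.25 mV units (the
 * usual SVI encoding; the unit interpretation is an assumption).
 */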
static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev,
					    u16 voltage)
{
	return 6200 - (voltage * 25);
}

static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev,
					    u32 vid_2bit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 vid_8bit = kv_convert_vid2_to_vid7(adev,
					       &pi->sys_info.vid_mapping_table,
					       vid_2bit);

	return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit);
}

static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t;
	pi->graphics_level[index].MinVddNb =
		cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid));

	return 0;
}

static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].AT = cpu_to_be16((u16)at);

	return 0;
}

static void kv_dpm_power_level_enable(struct amdgpu_device *adev,
				      u32 index, bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0;
}

static void kv_start_dpm(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	amdgpu_kv_smc_dpm_enable(adev, true);
}

static void kv_stop_dpm(struct amdgpu_device *adev)
{
	amdgpu_kv_smc_dpm_enable(adev, false);
}

static void kv_start_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			      SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static void kv_reset_am(struct amdgpu_device *adev)
{
	u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK |
			     SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);

	WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl);
}

static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze)
{
	return amdgpu_kv_notify_message_to_smu(adev, freeze ?
					       PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel);
}

static int kv_force_lowest_valid(struct amdgpu_device *adev)
{
	return kv_force_dpm_lowest(adev);
}

static int kv_unforce_levels(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel);
	else
		return kv_set_enabled_levels(adev);
}

static int kv_update_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 low_sclk_interrupt_t = 0;
	int ret = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}

	return ret;
}

static int kv_program_bootup_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		if (table->num_max_dpm_entries == 0)
			return -EINVAL;

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency == pi->boot_pl.sclk)
				break;
		}

		pi->graphics_boot_level = (u8)i;
		kv_dpm_power_level_enable(adev, i, true);
	}
	return 0;
}

static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->graphics_therm_throttle_enable = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable),
					  &pi->graphics_therm_throttle_enable,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static int kv_upload_dpm_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsLevel),
					  (u8 *)&pi->graphics_level,
					  sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS,
					  pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount),
					  &pi->graphics_dpm_level_count,
					  sizeof(u8), pi->sram_end);

	return ret;
}

static u32 kv_get_clock_difference(u32 a, u32 b)
{
	return (a >= b) ? a - b : b - a;
}

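/*
 * Pick a DFS bypass divider ID for clocks sitting within 2 MHz of one of
 * the well-known rates (clocks here are in 10 kHz units, so 40000 is
 * 400 MHz -- an assumption carried over from the rest of the dpm tables);
 * anything else runs without bypass.
 */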
static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->caps_enable_dfs_bypass) {
		if (kv_get_clock_difference(clk, 40000) < 200)
			return 3;
		else if (kv_get_clock_difference(clk, 30000) < 200)
			return 2;
		else if (kv_get_clock_difference(clk, 20000) < 200)
			return 7;
		else if (kv_get_clock_difference(clk, 15000) < 200)
			return 6;
		else if (kv_get_clock_difference(clk, 10000) < 200)
			return 8;
		else
			return 0;
	} else {
		return 0;
	}
}

static int kv_populate_uvd_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->uvd_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    (pi->high_voltage_t < table->entries[i].v))
			break;

		pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk);
		pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk);
		pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v);

		pi->uvd_level[i].VClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].vclk);
		pi->uvd_level[i].DClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].dclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].vclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].VclkDivider = (u8)dividers.post_div;

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].dclk, false, &dividers);
		if (ret)
			return ret;
		pi->uvd_level[i].DclkDivider = (u8)dividers.post_div;

		pi->uvd_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevelCount),
					  (u8 *)&pi->uvd_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->uvd_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UVDInterval),
					  &pi->uvd_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, UvdLevel),
					  (u8 *)&pi->uvd_level,
					  sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD,
					  pi->sram_end);

	return ret;
}

static int kv_populate_vce_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;
	u32 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;

	if (table == NULL || table->count == 0)
		return 0;

	pi->vce_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk);
		pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->vce_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].evclk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].evclk, false, &dividers);
		if (ret)
			return ret;
		pi->vce_level[i].Divider = (u8)dividers.post_div;

		pi->vce_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevelCount),
					  (u8 *)&pi->vce_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->vce_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VCEInterval),
					  (u8 *)&pi->vce_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, VceLevel),
					  (u8 *)&pi->vce_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE,
					  pi->sram_end);

	return ret;
}

static int kv_populate_samu_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->samu_level_count = 0;
	for (i = 0; i < table->count; i++) {
		if (pi->high_voltage_t &&
		    pi->high_voltage_t < table->entries[i].v)
			break;

		pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		pi->samu_level[i].ClkBypassCntl =
			(u8)kv_get_clk_bypass(adev, table->entries[i].clk);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->samu_level[i].Divider = (u8)dividers.post_div;

		pi->samu_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevelCount),
					  (u8 *)&pi->samu_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->samu_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SAMUInterval),
					  (u8 *)&pi->samu_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, SamuLevel),
					  (u8 *)&pi->samu_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU,
					  pi->sram_end);

	return ret;
}

static int kv_populate_acp_table(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	struct atom_clock_dividers dividers;
	int ret;
	u32 i;

	if (table == NULL || table->count == 0)
		return 0;

	pi->acp_level_count = 0;
	for (i = 0; i < table->count; i++) {
		pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk);
		pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v);

		ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
							 table->entries[i].clk, false, &dividers);
		if (ret)
			return ret;
		pi->acp_level[i].Divider = (u8)dividers.post_div;

		pi->acp_level_count++;
	}

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevelCount),
					  (u8 *)&pi->acp_level_count,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	pi->acp_interval = 1;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, ACPInterval),
					  (u8 *)&pi->acp_interval,
					  sizeof(u8), pi->sram_end);
	if (ret)
		return ret;

	ret = amdgpu_kv_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Fusion_DpmTable, AcpLevel),
					  (u8 *)&pi->acp_level,
					  sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP,
					  pi->sram_end);

	return ret;
}

static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].clk, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].clk, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if (pi->caps_enable_dfs_bypass) {
				if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 3;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 2;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200)
					pi->graphics_level[i].ClkBypassCntl = 7;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 6;
				else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200)
					pi->graphics_level[i].ClkBypassCntl = 8;
				else
					pi->graphics_level[i].ClkBypassCntl = 0;
			} else {
				pi->graphics_level[i].ClkBypassCntl = 0;
			}
		}
	}
}

static int kv_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV);
}

static void kv_reset_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->acp_boot_level = 0xff;
}

static void kv_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void kv_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct kv_ps *new_ps = kv_get_ps(rps);
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}

static void kv_dpm_enable_bapm(void *handle, bool enable)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, enable);
		if (ret)
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
	}
}

static int kv_dpm_enable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	ret = kv_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("kv_process_firmware_header failed\n");
		return ret;
	}
	kv_init_fps_limits(adev);
	kv_init_graphics_levels(adev);
	ret = kv_program_bootup_state(adev);
	if (ret) {
		DRM_ERROR("kv_program_bootup_state failed\n");
		return ret;
	}
	kv_calculate_dfs_bypass_settings(adev);
	ret = kv_upload_dpm_settings(adev);
	if (ret) {
		DRM_ERROR("kv_upload_dpm_settings failed\n");
		return ret;
	}
	ret = kv_populate_uvd_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_uvd_table failed\n");
		return ret;
	}
	ret = kv_populate_vce_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_vce_table failed\n");
		return ret;
	}
	ret = kv_populate_samu_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_samu_table failed\n");
		return ret;
	}
	ret = kv_populate_acp_table(adev);
	if (ret) {
		DRM_ERROR("kv_populate_acp_table failed\n");
		return ret;
	}
	kv_program_vc(adev);

	kv_initialize_hardware_cac_manager(adev);

	kv_start_dpm(adev);
	if (pi->enable_auto_thermal_throttling) {
		ret = kv_enable_auto_thermal_throttling(adev);
		if (ret) {
			DRM_ERROR("kv_enable_auto_thermal_throttling failed\n");
			return ret;
		}
	}
	ret = kv_enable_dpm_voltage_scaling(adev);
	if (ret) {
		DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n");
		return ret;
	}
	ret = kv_set_dpm_interval(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_interval failed\n");
		return ret;
	}
	ret = kv_set_dpm_boot_state(adev);
	if (ret) {
		DRM_ERROR("kv_set_dpm_boot_state failed\n");
		return ret;
	}
	ret = kv_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_ulv failed\n");
		return ret;
	}
	kv_start_am(adev);
	ret = kv_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_didt failed\n");
		return ret;
	}
	ret = kv_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("kv_enable_smc_cac failed\n");
		return ret;
	}

	kv_reset_acp_boot_level(adev);

	ret = amdgpu_kv_smc_bapm_enable(adev, false);
	if (ret) {
		DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
		return ret;
	}

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
		ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("kv_set_thermal_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
	}

	return ret;
}

static void kv_dpm_disable(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	amdgpu_kv_smc_bapm_enable(adev, false);

	if (adev->asic_type == CHIP_MULLINS)
		kv_enable_nb_dpm(adev, false);

	/* powerup blocks */
	kv_dpm_powergate_acp(adev, false);
	kv_dpm_powergate_samu(adev, false);
	if (pi->caps_vce_pg) /* power on the VCE block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
	if (pi->caps_uvd_pg) /* power on the UVD block */
		amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);

	kv_enable_smc_cac(adev, false);
	kv_enable_didt(adev, false);
	kv_clear_vc(adev);
	kv_reset_am(adev);
	kv_enable_ulv(adev, false);
	kv_stop_dpm(adev);

	kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
}

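/*
 * The SMU "soft registers" live in SMC SRAM at the offset announced by the
 * firmware header (pi->soft_regs_start), so reads and writes go through the
 * generic SRAM accessors rather than MMIO.
 */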
static int kv_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset,
					   (u8 *)&value, sizeof(u16), pi->sram_end);
}

static int kv_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static void kv_init_sclk_t(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}

static int kv_init_fps_limits(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		pi->fps_high_t = cpu_to_be16(tmp);
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsHighT),
						  (u8 *)&pi->fps_high_t,
						  sizeof(u16), pi->sram_end);

		tmp = 30;
		pi->fps_low_t = cpu_to_be16(tmp);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, FpsLowT),
						  (u8 *)&pi->fps_low_t,
						  sizeof(u16), pi->sram_end);
	}
	return ret;
}

static void kv_init_powergate_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->uvd_power_gated = false;
	pi->vce_power_gated = false;
	pi->samu_power_gated = false;
	pi->acp_power_gated = false;
}

static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable);
}

static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable);
}

static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable);
}

static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	return amdgpu_kv_notify_message_to_smu(adev, enable ?
					       PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable);
}

static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_uvd_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	int ret;
	u32 mask;

	if (!gate) {
		if (table->count)
			pi->uvd_boot_level = table->count - 1;
		else
			pi->uvd_boot_level = 0;

		if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) {
			mask = 1 << pi->uvd_boot_level;
		} else {
			mask = 0x1f;
		}

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, UvdBootLevel),
						  (uint8_t *)&pi->uvd_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		amdgpu_kv_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 mask);
	}

	return kv_enable_uvd_dpm(adev, !gate);
}

static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk)
{
	u8 i;
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= evclk)
			break;
	}

	return i;
}

static int kv_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	int ret;

	if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
		if (pi->caps_stable_p_state)
			pi->vce_boot_level = table->count - 1;
		else
			pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, VceBootLevel),
						  (u8 *)&pi->vce_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_VCEDPM_SetEnabledMask,
								 (1 << pi->vce_boot_level));
		kv_enable_vce_dpm(adev, true);
	} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
		kv_enable_vce_dpm(adev, false);
	}

	return 0;
}

static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->samu_boot_level = table->count - 1;
		else
			pi->samu_boot_level = 0;

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, SamuBootLevel),
						  (u8 *)&pi->samu_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_SAMUDPM_SetEnabledMask,
								 (1 << pi->samu_boot_level));
	}

	return kv_enable_samu_dpm(adev, !gate);
}

static u8 kv_get_acp_boot_level(struct amdgpu_device *adev)
{
	return 0;
}

static void kv_update_acp_boot_level(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u8 acp_boot_level;

	if (!pi->caps_stable_p_state) {
		acp_boot_level = kv_get_acp_boot_level(adev);
		if (acp_boot_level != pi->acp_boot_level) {
			pi->acp_boot_level = acp_boot_level;
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
		}
	}
}

static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;
	int ret;

	if (!gate) {
		if (pi->caps_stable_p_state)
			pi->acp_boot_level = table->count - 1;
		else
			pi->acp_boot_level = kv_get_acp_boot_level(adev);

		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Fusion_DpmTable, AcpBootLevel),
						  (u8 *)&pi->acp_boot_level,
						  sizeof(u8), pi->sram_end);
		if (ret)
			return ret;

		if (pi->caps_stable_p_state)
			amdgpu_kv_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_ACPDPM_SetEnabledMask,
								 (1 << pi->acp_boot_level));
	}

	return kv_enable_acp_dpm(adev, !gate);
}

static void kv_dpm_powergate_uvd(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->uvd_power_gated = gate;

	if (gate) {
		/* stop the UVD block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_GATE);
		kv_update_uvd_dpm(adev, gate);
		if (pi->caps_uvd_pg)
			/* power off the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF);
	} else {
		if (pi->caps_uvd_pg)
			/* power on the UVD block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
		/* re-init the UVD block */
		kv_update_uvd_dpm(adev, gate);

		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
							     AMD_PG_STATE_UNGATE);
	}
}

static void kv_dpm_powergate_vce(void *handle, bool gate)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret;

	pi->vce_power_gated = gate;

	if (gate) {
		/* stop the VCE block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							     AMD_PG_STATE_GATE);
		kv_enable_vce_dpm(adev, false);
		if (pi->caps_vce_pg) /* power off the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
	} else {
		if (pi->caps_vce_pg) /* power on the VCE block */
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
		kv_enable_vce_dpm(adev, true);
		/* re-init the VCE block */
		ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							     AMD_PG_STATE_UNGATE);
	}
}

static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->samu_power_gated == gate)
		return;

	pi->samu_power_gated = gate;

	if (gate) {
		kv_update_samu_dpm(adev, true);
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF);
	} else {
		if (pi->caps_samu_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON);
		kv_update_samu_dpm(adev, false);
	}
}

static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->acp_power_gated == gate)
		return;

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return;

	pi->acp_power_gated = gate;

	if (gate) {
		kv_update_acp_dpm(adev, true);
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF);
	} else {
		if (pi->caps_acp_pg)
			amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON);
		kv_update_acp_dpm(adev, false);
	}
}

static void kv_set_valid_clock_range(struct amdgpu_device *adev,
				     struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;

	if (table && table->count) {
		for (i = 0; i < pi->graphics_dpm_level_count; i++) {
			if ((table->entries[i].clk >= new_ps->levels[0].sclk) ||
			    (i == (pi->graphics_dpm_level_count - 1))) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) >
			    (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) {
			if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk ||
			    i == (int)(pi->graphics_dpm_level_count - 1)) {
				pi->lowest_valid = i;
				break;
			}
		}

		for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) {
			if (table->entries[i].sclk_frequency <=
			    new_ps->levels[new_ps->num_levels - 1].sclk)
				break;
		}
		pi->highest_valid = i;

		if (pi->lowest_valid > pi->highest_valid) {
			if ((new_ps->levels[0].sclk -
			     table->entries[pi->highest_valid].sclk_frequency) >
			    (table->entries[pi->lowest_valid].sclk_frequency -
			     new_ps->levels[new_ps->num_levels - 1].sclk))
				pi->highest_valid = pi->lowest_valid;
			else
				pi->lowest_valid = pi->highest_valid;
		}
	}
}

static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev,
					 struct amdgpu_ps *new_rps)
{
	struct kv_ps *new_ps = kv_get_ps(new_rps);
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;
	u8 clk_bypass_cntl;

	if (pi->caps_enable_dfs_bypass) {
		clk_bypass_cntl = new_ps->need_dfs_bypass ?
			pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0;
		ret = amdgpu_kv_copy_bytes_to_smc(adev,
						  (pi->dpm_table_start +
						   offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) +
						   (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) +
						   offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)),
						  &clk_bypass_cntl,
						  sizeof(u8), pi->sram_end);
	}

	return ret;
}

static int kv_enable_nb_dpm(struct amdgpu_device *adev,
			    bool enable)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	int ret = 0;

	if (enable) {
		if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable);
			if (ret == 0)
				pi->nb_dpm_enabled = true;
		}
	} else {
		if (pi->enable_nb_dpm && pi->nb_dpm_enabled) {
			ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable);
			if (ret == 0)
				pi->nb_dpm_enabled = false;
		}
	}

	return ret;
}

static int kv_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		ret = kv_force_dpm_highest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		ret = kv_force_dpm_lowest(adev);
		if (ret)
			return ret;
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		ret = kv_unforce_levels(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

static int kv_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	kv_update_requested_ps(adev, new_ps);

	kv_apply_state_adjust_rules(adev,
				    &pi->requested_rps,
				    &pi->current_rps);

	return 0;
}

static int kv_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	if (pi->bapm_enable) {
		ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power);
		if (ret) {
			DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n");
			return ret;
		}
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_force_lowest_valid(adev);
			kv_enable_new_levels(adev);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_unforce_levels(adev);
			kv_set_enabled_levels(adev);
			kv_force_lowest_valid(adev);
			kv_unforce_levels(adev);

			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_sclk_t(adev);
			if (adev->asic_type == CHIP_MULLINS)
				kv_enable_nb_dpm(adev, true);
		}
	} else {
		if (pi->enable_dpm) {
			kv_set_valid_clock_range(adev, new_ps);
			kv_update_dfs_bypass_settings(adev, new_ps);
			ret = kv_calculate_ds_divider(adev);
			if (ret) {
				DRM_ERROR("kv_calculate_ds_divider failed\n");
				return ret;
			}
			kv_calculate_nbps_level_settings(adev);
			kv_calculate_dpm_settings(adev);
			kv_freeze_sclk_dpm(adev, true);
			kv_upload_dpm_settings(adev);
			kv_program_nbps_index_settings(adev, new_ps);
			kv_freeze_sclk_dpm(adev, false);
			kv_set_enabled_levels(adev);
			ret = kv_update_vce_dpm(adev, new_ps, old_ps);
			if (ret) {
				DRM_ERROR("kv_update_vce_dpm failed\n");
				return ret;
			}
			kv_update_acp_boot_level(adev);
			kv_update_sclk_t(adev);
			kv_enable_nb_dpm(adev, true);
		}
	}

	return 0;
}

static void kv_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	kv_update_current_ps(adev, new_ps);
}

static void kv_dpm_setup_asic(struct amdgpu_device *adev)
{
	sumo_take_smu_control(adev, true);
	kv_init_powergate_state(adev);
	kv_init_sclk_t(adev);
}

static void kv_dpm_reset_asic(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
		kv_force_lowest_valid(adev);
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_upload_dpm_settings(adev);
		kv_force_lowest_valid(adev);
		kv_unforce_levels(adev);
	} else {
		kv_init_graphics_levels(adev);
		kv_program_bootup_state(adev);
		kv_freeze_sclk_dpm(adev, true);
		kv_upload_dpm_settings(adev);
		kv_freeze_sclk_dpm(adev, false);
		kv_set_enabled_level(adev, pi->graphics_boot_level);
	}
}

static void kv_construct_max_power_limits_table(struct amdgpu_device *adev,
						struct amdgpu_clock_and_voltage_limits *table)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) {
		int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1;
		table->sclk =
			pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency;
		table->vddc =
			kv_convert_2bit_index_to_voltage(adev,
				pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit);
	}

	table->mclk = pi->sys_info.nbp_memory_clock[0];
}

static void kv_patch_voltage_values(struct amdgpu_device *adev)
{
	int i;
	struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
		&adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
	struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *samu_table =
		&adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table;
	struct amdgpu_clock_voltage_dependency_table *acp_table =
		&adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table;

	if (uvd_table->count) {
		for (i = 0; i < uvd_table->count; i++)
			uvd_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 uvd_table->entries[i].v);
	}

	if (vce_table->count) {
		for (i = 0; i < vce_table->count; i++)
			vce_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 vce_table->entries[i].v);
	}

	if (samu_table->count) {
		for (i = 0; i < samu_table->count; i++)
			samu_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 samu_table->entries[i].v);
	}

	if (acp_table->count) {
		for (i = 0; i < acp_table->count; i++)
			acp_table->entries[i].v =
				kv_convert_8bit_index_to_voltage(adev,
								 acp_table->entries[i].v);
	}
}

static void kv_construct_boot_state(struct amdgpu_device *adev)
{
	struct kv_power_info *pi = kv_get_pi(adev);

	pi->boot_pl.sclk = pi->sys_info.bootup_sclk;
	pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index;
	pi->boot_pl.ds_divider_index = 0;
	pi->boot_pl.ss_divider_index = 0;
	pi->boot_pl.allow_gnb_slow = 1;
	pi->boot_pl.force_nbp_state = 0;
	pi->boot_pl.display_wm = 0;
	pi->boot_pl.vce_wm = 0;
}

static int kv_force_dpm_highest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static int kv_force_dpm_lowest(struct amdgpu_device *adev)
{
	int ret;
	u32 enable_mask, i;

	ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask);
	if (ret)
		return ret;

	for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
		if (enable_mask & (1 << i))
			break;
	}

	if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
		return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i);
	else
		return kv_set_enabled_level(adev, i);
}

static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	u32 i;
	u32 temp;
	u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	if (!pi->caps_sclk_ds)
		return 0;

	for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) {
		temp = sclk >> i;
		if (temp >= min)
			break;
	}

	return (u8)i;
}

static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit)
{
	struct kv_power_info *pi = kv_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	int i;

	if (table && table->count) {
		for (i = table->count - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	} else {
		struct sumo_sclk_voltage_mapping_table *table =
			&pi->sys_info.sclk_voltage_mapping_table;

		for (i = table->num_max_dpm_entries - 1; i >= 0; i--) {
			if (pi->high_voltage_t &&
			    (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <=
			     pi->high_voltage_t)) {
				*limit = i;
				return 0;
			}
		}
	}

	*limit = 0;
	return 0;
}

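/*
 * Clamp the requested power state against the platform limits: force the
 * stable-P-state sclk when requested, raise per-level sclks for active VCE,
 * cap levels that exceed the high-voltage threshold, and pick the NB
 * P-state ranges from memory clock, display count and battery state.
 */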
2193 static void kv_apply_state_adjust_rules(struct amdgpu_device *adev,
2194 struct amdgpu_ps *new_rps,
2195 struct amdgpu_ps *old_rps)
2197 struct kv_ps *ps = kv_get_ps(new_rps);
2198 struct kv_power_info *pi = kv_get_pi(adev);
2199 u32 min_sclk = 10000; /* ??? */
2203 struct amdgpu_clock_voltage_dependency_table *table =
2204 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2205 u32 stable_p_state_sclk = 0;
2206 struct amdgpu_clock_and_voltage_limits *max_limits =
2207 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2209 if (new_rps->vce_active) {
2210 new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
2211 new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
2217 mclk = max_limits->mclk;
2220 if (pi->caps_stable_p_state) {
2221 stable_p_state_sclk = (max_limits->sclk * 75) / 100;
2223 for (i = table->count - 1; i >= 0; i--) {
2224 if (stable_p_state_sclk >= table->entries[i].clk) {
2225 stable_p_state_sclk = table->entries[i].clk;
2231 stable_p_state_sclk = table->entries[0].clk;
2233 sclk = stable_p_state_sclk;
2236 if (new_rps->vce_active) {
2237 if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
2238 sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
2241 ps->need_dfs_bypass = true;
2243 for (i = 0; i < ps->num_levels; i++) {
2244 if (ps->levels[i].sclk < sclk)
2245 ps->levels[i].sclk = sclk;
2248 if (table && table->count) {
2249 for (i = 0; i < ps->num_levels; i++) {
2250 if (pi->high_voltage_t &&
2251 (pi->high_voltage_t <
2252 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
2253 kv_get_high_voltage_limit(adev, &limit);
2254 ps->levels[i].sclk = table->entries[limit].clk;
2258 struct sumo_sclk_voltage_mapping_table *table =
2259 &pi->sys_info.sclk_voltage_mapping_table;
2261 for (i = 0; i < ps->num_levels; i++) {
2262 if (pi->high_voltage_t &&
2263 (pi->high_voltage_t <
2264 kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) {
2265 kv_get_high_voltage_limit(adev, &limit);
2266 ps->levels[i].sclk = table->entries[limit].sclk_frequency;
2271 if (pi->caps_stable_p_state) {
2272 for (i = 0; i < ps->num_levels; i++) {
2273 ps->levels[i].sclk = stable_p_state_sclk;
2277 pi->video_start = new_rps->dclk || new_rps->vclk ||
2278 new_rps->evclk || new_rps->ecclk;
2280 if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2281 ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
2282 pi->battery_state = true;
2284 pi->battery_state = false;
2286 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
2287 ps->dpm0_pg_nb_ps_lo = 0x1;
2288 ps->dpm0_pg_nb_ps_hi = 0x0;
2289 ps->dpmx_nb_ps_lo = 0x1;
2290 ps->dpmx_nb_ps_hi = 0x0;
2292 ps->dpm0_pg_nb_ps_lo = 0x3;
2293 ps->dpm0_pg_nb_ps_hi = 0x0;
2294 ps->dpmx_nb_ps_lo = 0x3;
2295 ps->dpmx_nb_ps_hi = 0x0;
2297 if (pi->sys_info.nb_dpm_enable) {
2298 force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2299 pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) ||
2300 pi->disable_nb_ps3_in_battery;
2301 ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3;
2302 ps->dpm0_pg_nb_ps_hi = 0x2;
2303 ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3;
2304 ps->dpmx_nb_ps_hi = 0x2;
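2306 /* Presumably gates whether the SMU may pick this level while thermal
2307  * throttling; only the EnabledForThrottle flag is touched. */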
2309 static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev,
2310 u32 index, bool enable)
2312 struct kv_power_info *pi = kv_get_pi(adev);
2314 pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0;
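2315 /* Recompute the deep-sleep divider ID for every level in the valid
2316  * range. */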
2317 static int kv_calculate_ds_divider(struct amdgpu_device *adev)
2319 struct kv_power_info *pi = kv_get_pi(adev);
2320 u32 sclk_in_sr = 10000; /* minimum sclk in self-refresh, likely 100 MHz in 10 kHz units */
2323 if (pi->lowest_valid > pi->highest_valid)
2326 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2327 pi->graphics_level[i].DeepSleepDivId =
2328 kv_get_sleep_divider_id_from_clock(adev,
2329 be32_to_cpu(pi->graphics_level[i].SclkFrequency),
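2332 /* Derive per-level GnbSlow/ForceNbPs1/UpH settings from NB DPM
2333  * availability, memory-clock pressure, active display count, and
2334  * battery state; Kabini/Mullins have their own variant. */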
2335 static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev)
2337 struct kv_power_info *pi = kv_get_pi(adev);
2340 struct amdgpu_clock_and_voltage_limits *max_limits =
2341 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
2342 u32 mclk = max_limits->mclk;
2344 if (pi->lowest_valid > pi->highest_valid)
2347 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) {
2348 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2349 pi->graphics_level[i].GnbSlow = 1;
2350 pi->graphics_level[i].ForceNbPs1 = 0;
2351 pi->graphics_level[i].UpH = 0;
2354 if (!pi->sys_info.nb_dpm_enable)
2357 force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) ||
2358 (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start);
2361 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2362 pi->graphics_level[i].GnbSlow = 0;
2364 if (pi->battery_state)
2365 pi->graphics_level[0].ForceNbPs1 = 1;
2367 pi->graphics_level[1].GnbSlow = 0;
2368 pi->graphics_level[2].GnbSlow = 0;
2369 pi->graphics_level[3].GnbSlow = 0;
2370 pi->graphics_level[4].GnbSlow = 0;
2373 for (i = pi->lowest_valid; i <= pi->highest_valid; i++) {
2374 pi->graphics_level[i].GnbSlow = 1;
2375 pi->graphics_level[i].ForceNbPs1 = 0;
2376 pi->graphics_level[i].UpH = 0;
2379 if (pi->sys_info.nb_dpm_enable && pi->battery_state) {
2380 pi->graphics_level[pi->lowest_valid].UpH = 0x28;
2381 pi->graphics_level[pi->lowest_valid].GnbSlow = 0;
2382 if (pi->lowest_valid != pi->highest_valid)
2383 pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1;
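2386 /* Only the highest valid level sets the display watermark; the rest
2387  * leave DisplayWatermark at 0. */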
2389 static int kv_calculate_dpm_settings(struct amdgpu_device *adev)
2391 struct kv_power_info *pi = kv_get_pi(adev);
2394 if (pi->lowest_valid > pi->highest_valid)
2397 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2398 pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 1 : 0;
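2400 /* Build the graphics level table from the BIOS vddc/sclk dependency
2401  * table, falling back to the sumo sclk/voltage mapping table. */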
2403 static void kv_init_graphics_levels(struct amdgpu_device *adev)
2405 struct kv_power_info *pi = kv_get_pi(adev);
2407 struct amdgpu_clock_voltage_dependency_table *table =
2408 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
2410 if (table && table->count) {
2413 pi->graphics_dpm_level_count = 0;
2414 for (i = 0; i < table->count; i++) {
2415 if (pi->high_voltage_t &&
2416 (pi->high_voltage_t <
2417 kv_convert_8bit_index_to_voltage(adev, table->entries[i].v)))
2420 kv_set_divider_value(adev, i, table->entries[i].clk);
2421 vid_2bit = kv_convert_vid7_to_vid2(adev,
2422 &pi->sys_info.vid_mapping_table,
2423 table->entries[i].v);
2424 kv_set_vid(adev, i, vid_2bit);
2425 kv_set_at(adev, i, pi->at[i]);
2426 kv_dpm_power_level_enabled_for_throttle(adev, i, true);
2427 pi->graphics_dpm_level_count++;
2430 struct sumo_sclk_voltage_mapping_table *table =
2431 &pi->sys_info.sclk_voltage_mapping_table;
2433 pi->graphics_dpm_level_count = 0;
2434 for (i = 0; i < table->num_max_dpm_entries; i++) {
2435 if (pi->high_voltage_t &&
2436 pi->high_voltage_t <
2437 kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit))
2440 kv_set_divider_value(adev, i, table->entries[i].sclk_frequency);
2441 kv_set_vid(adev, i, table->entries[i].vid_2bit);
2442 kv_set_at(adev, i, pi->at[i]);
2443 kv_dpm_power_level_enabled_for_throttle(adev, i, true);
2444 pi->graphics_dpm_level_count++;
2448 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++)
2449 kv_dpm_power_level_enable(adev, i, false);
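2450 /* Enable every graphics level inside the [lowest_valid, highest_valid]
2451  * window. */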
2452 static void kv_enable_new_levels(struct amdgpu_device *adev)
2454 struct kv_power_info *pi = kv_get_pi(adev);
2457 for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) {
2458 if (i >= pi->lowest_valid && i <= pi->highest_valid)
2459 kv_dpm_power_level_enable(adev, i, true);
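2461 /* Collapse the SMU's SCLK DPM enabled mask to a single level. */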
2463 static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level)
2465 u32 new_mask = (1 << level);
2467 return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
2468 PPSMC_MSG_SCLKDPM_SetEnabledMask,
2472 static int kv_set_enabled_levels(struct amdgpu_device *adev)
2474 struct kv_power_info *pi = kv_get_pi(adev);
2475 u32 i, new_mask = 0;
2477 for (i = pi->lowest_valid; i <= pi->highest_valid; i++)
2478 new_mask |= (1 << i);
2480 return amdgpu_kv_send_msg_to_smc_with_parameter(adev,
2481 PPSMC_MSG_SCLKDPM_SetEnabledMask,
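2483 /* Push the chosen NB p-state lo/hi indices into NB_DPM_CONFIG_1;
2484  * Kabini/Mullins return early and skip this. */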
2485 static void kv_program_nbps_index_settings(struct amdgpu_device *adev,
2486 struct amdgpu_ps *new_rps)
2488 struct kv_ps *new_ps = kv_get_ps(new_rps);
2489 struct kv_power_info *pi = kv_get_pi(adev);
2492 if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS)
2495 if (pi->sys_info.nb_dpm_enable) {
2496 nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1);
2497 nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK |
2498 NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK |
2499 NB_DPM_CONFIG_1__DpmXNbPsLo_MASK |
2500 NB_DPM_CONFIG_1__DpmXNbPsHi_MASK);
2501 nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) |
2502 (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) |
2503 (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) |
2504 (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT);
2505 WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1);
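2506 /* Program thermal interrupt thresholds; the DIG_THERM fields take
2507  * degrees C offset by +49, the inverse of the -49 applied when reading
2508  * the temperature in kv_dpm_get_temp(). */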
2509 static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
2510 int min_temp, int max_temp)
2512 int low_temp = 0 * 1000;
2513 int high_temp = 255 * 1000;
2516 if (low_temp < min_temp)
2517 low_temp = min_temp;
2518 if (high_temp > max_temp)
2519 high_temp = max_temp;
2520 if (high_temp < low_temp) {
2521 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
2525 tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
2526 tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK |
2527 CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK);
2528 tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) |
2529 ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT);
2530 WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp);
2532 adev->pm.dpm.thermal.min_temp = low_temp;
2533 adev->pm.dpm.thermal.max_temp = high_temp;
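2536 /* Revision-specific layouts of the ATOM IntegratedSystemInfo table
2537  * (members of union igp_info); only the v1.8 layout is consumed below. */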
2539 struct _ATOM_INTEGRATED_SYSTEM_INFO info;
2540 struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
2541 struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5;
2542 struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6;
2543 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7;
2544 struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8;
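2545 /* Pull bootup clocks, HTC thermal limits, NB p-state clocks, and the
2546  * DFS-bypass capability out of the v1.8 integrated-info table. */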
2547 static int kv_parse_sys_info_table(struct amdgpu_device *adev)
2549 struct kv_power_info *pi = kv_get_pi(adev);
2550 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2551 int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
2552 union igp_info *igp_info;
2557 if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
2558 &frev, &crev, &data_offset)) {
2559 igp_info = (union igp_info *)(mode_info->atom_context->bios +
2563 DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
2566 pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock);
2567 pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock);
2568 pi->sys_info.bootup_nb_voltage_index =
2569 le16_to_cpu(igp_info->info_8.usBootUpNBVoltage);
2570 if (igp_info->info_8.ucHtcTmpLmt == 0)
2571 pi->sys_info.htc_tmp_lmt = 203; /* default HTC temperature limit when the BIOS leaves it unset */
2573 pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt;
2574 if (igp_info->info_8.ucHtcHystLmt == 0)
2575 pi->sys_info.htc_hyst_lmt = 5; /* default HTC hysteresis */
2577 pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt;
2578 if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) {
2579 DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n");
2582 if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3))
2583 pi->sys_info.nb_dpm_enable = true;
2585 pi->sys_info.nb_dpm_enable = false;
2587 for (i = 0; i < KV_NUM_NBPSTATES; i++) {
2588 pi->sys_info.nbp_memory_clock[i] =
2589 le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]);
2590 pi->sys_info.nbp_n_clock[i] =
2591 le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]);
2593 if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) &
2594 SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS)
2595 pi->caps_enable_dfs_bypass = true;
2597 sumo_construct_sclk_voltage_mapping_table(adev,
2598 &pi->sys_info.sclk_voltage_mapping_table,
2599 igp_info->info_8.sAvail_SCLK);
2601 sumo_construct_vid_mapping_table(adev,
2602 &pi->sys_info.vid_mapping_table,
2603 igp_info->info_8.sAvail_SCLK);
2605 kv_construct_max_power_limits_table(adev,
2606 &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
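2609 /* Revision-specific layouts of the ATOM PowerPlayInfo table (members
2610  * of union power_info). */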
2612 struct _ATOM_POWERPLAY_INFO info;
2613 struct _ATOM_POWERPLAY_INFO_V2 info_2;
2614 struct _ATOM_POWERPLAY_INFO_V3 info_3;
2615 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2616 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2617 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2620 union pplib_clock_info {
2621 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2622 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2623 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2624 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2627 union pplib_power_state {
2628 struct _ATOM_PPLIB_STATE v1;
2629 struct _ATOM_PPLIB_STATE_V2 v2;
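2630 /* The boot state gets its first level replaced by the constructed
2631  * boot power level. */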
2632 static void kv_patch_boot_state(struct amdgpu_device *adev,
2635 struct kv_power_info *pi = kv_get_pi(adev);
2638 ps->levels[0] = pi->boot_pl;
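2639 /* Decode classification, caps, and UVD clocks from the non-clock info
2640  * and latch the boot and UVD power states as they are found. */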
2641 static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev,
2642 struct amdgpu_ps *rps,
2643 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2646 struct kv_ps *ps = kv_get_ps(rps);
2648 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2649 rps->class = le16_to_cpu(non_clock_info->usClassification);
2650 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2652 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2653 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2654 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2660 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2661 adev->pm.dpm.boot_ps = rps;
2662 kv_patch_boot_state(adev, ps);
2664 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2665 adev->pm.dpm.uvd_ps = rps;
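2666 /* Decode one sumo-format clock level: sclk is split across low/high
2667  * fields; deep-sleep dividers default to 5 when sclk DS is enabled. */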
2668 static void kv_parse_pplib_clock_info(struct amdgpu_device *adev,
2669 struct amdgpu_ps *rps, int index,
2670 union pplib_clock_info *clock_info)
2672 struct kv_power_info *pi = kv_get_pi(adev);
2673 struct kv_ps *ps = kv_get_ps(rps);
2674 struct kv_pl *pl = &ps->levels[index];
2677 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2678 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2680 pl->vddc_index = clock_info->sumo.vddcIndex;
2682 ps->num_levels = index + 1;
2684 if (pi->caps_sclk_ds) {
2685 pl->ds_divider_index = 5;
2686 pl->ss_divider_index = 5;
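2687 /* Walk the PPLib state array: allocate a kv_ps per state, decode up to
2688  * SUMO_MAX_HARDWARE_POWERLEVELS clock levels for each, then mirror the
2689  * engine clocks into the VCE state table. */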
2690 static int kv_parse_power_table(struct amdgpu_device *adev)
2692 struct amdgpu_mode_info *mode_info = &adev->mode_info;
2693 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2694 union pplib_power_state *power_state;
2695 int i, j, k, non_clock_array_index, clock_array_index;
2696 union pplib_clock_info *clock_info;
2697 struct _StateArray *state_array;
2698 struct _ClockInfoArray *clock_info_array;
2699 struct _NonClockInfoArray *non_clock_info_array;
2700 union power_info *power_info;
2701 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2704 u8 *power_state_offset;
2707 if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
2708 &frev, &crev, &data_offset))
2710 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2712 amdgpu_add_thermal_controller(adev);
2714 state_array = (struct _StateArray *)
2715 (mode_info->atom_context->bios + data_offset +
2716 le16_to_cpu(power_info->pplib.usStateArrayOffset));
2717 clock_info_array = (struct _ClockInfoArray *)
2718 (mode_info->atom_context->bios + data_offset +
2719 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
2720 non_clock_info_array = (struct _NonClockInfoArray *)
2721 (mode_info->atom_context->bios + data_offset +
2722 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
2724 adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
2725 sizeof(struct amdgpu_ps),
2727 if (!adev->pm.dpm.ps)
2729 power_state_offset = (u8 *)state_array->states;
2730 for (i = 0; i < state_array->ucNumEntries; i++) {
2732 power_state = (union pplib_power_state *)power_state_offset;
2733 non_clock_array_index = power_state->v2.nonClockInfoIndex;
2734 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2735 &non_clock_info_array->nonClockInfo[non_clock_array_index];
2736 ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL);
2738 kfree(adev->pm.dpm.ps);
2741 adev->pm.dpm.ps[i].ps_priv = ps;
2743 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
2744 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
2745 clock_array_index = idx[j];
2746 if (clock_array_index >= clock_info_array->ucNumEntries)
2748 if (k >= SUMO_MAX_HARDWARE_POWERLEVELS)
2750 clock_info = (union pplib_clock_info *)
2751 ((u8 *)&clock_info_array->clockInfo[0] +
2752 (clock_array_index * clock_info_array->ucEntrySize));
2753 kv_parse_pplib_clock_info(adev,
2754 &adev->pm.dpm.ps[i], k,
2758 kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
2760 non_clock_info_array->ucEntrySize);
2761 power_state_offset += 2 + power_state->v2.ucNumDPMLevels; /* v2 header is 2 bytes plus one clock-info index per level */
2763 adev->pm.dpm.num_ps = state_array->ucNumEntries;
2765 /* fill in the vce power states */
2766 for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
2768 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
2769 clock_info = (union pplib_clock_info *)
2770 &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
2771 sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
2772 sclk |= clock_info->sumo.ucEngineClockHigh << 16;
2773 adev->pm.dpm.vce_states[i].sclk = sclk;
2774 adev->pm.dpm.vce_states[i].mclk = 0;
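2776 /* Allocate kv_power_info and pick the default feature caps (power
2777  * containment, CAC, deep sleep, powergating) before parsing the BIOS
2778  * system-info and power tables. */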
2780 static int kv_dpm_init(struct amdgpu_device *adev)
2782 struct kv_power_info *pi;
2785 pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL);
2788 adev->pm.dpm.priv = pi;
2790 ret = amdgpu_get_platform_caps(adev);
2794 ret = amdgpu_parse_extended_power_table(adev);
2798 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
2799 pi->at[i] = TRINITY_AT_DFLT;
2801 pi->sram_end = SMC_RAM_END;
2803 pi->enable_nb_dpm = true;
2805 pi->caps_power_containment = true;
2806 pi->caps_cac = true;
2807 pi->enable_didt = false;
2808 if (pi->enable_didt) {
2809 pi->caps_sq_ramping = true;
2810 pi->caps_db_ramping = true;
2811 pi->caps_td_ramping = true;
2812 pi->caps_tcp_ramping = true;
2815 if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
2816 pi->caps_sclk_ds = true;
2818 pi->caps_sclk_ds = false;
2820 pi->enable_auto_thermal_throttling = true;
2821 pi->disable_nb_ps3_in_battery = false;
2822 if (amdgpu_bapm == 0)
2823 pi->bapm_enable = false;
2825 pi->bapm_enable = true;
2826 pi->voltage_drop_t = 0;
2827 pi->caps_sclk_throttle_low_notification = false;
2828 pi->caps_fps = false; /* enabling this may be viable, but it is left off by default */
2829 pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
2830 pi->caps_uvd_dpm = true;
2831 pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
2832 pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
2833 pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
2834 pi->caps_stable_p_state = false;
2836 ret = kv_parse_sys_info_table(adev);
2840 kv_patch_voltage_values(adev);
2841 kv_construct_boot_state(adev);
2843 ret = kv_parse_power_table(adev);
2847 pi->enable_dpm = true;
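2850 /* debugfs hook: decode the SMU's current sclk index into sclk and vddc
2851  * and report UVD/VCE power-gate status. */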
2853 kv_dpm_debugfs_print_current_performance_level(void *handle,
2856 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2857 struct kv_power_info *pi = kv_get_pi(adev);
2859 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
2860 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
2861 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
2865 if (current_index >= SMU__NUM_SCLK_DPM_STATE) {
2866 seq_printf(m, "invalid dpm profile %d\n", current_index);
2868 sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency);
2869 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) &
2870 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >>
2871 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT;
2872 vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp);
2873 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
2874 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
2875 seq_printf(m, "power level %d sclk: %u vddc: %u\n",
2876 current_index, sclk, vddc);
2881 kv_dpm_print_power_state(void *handle, void *request_ps)
2884 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
2885 struct kv_ps *ps = kv_get_ps(rps);
2886 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2888 amdgpu_dpm_print_class_info(rps->class, rps->class2);
2889 amdgpu_dpm_print_cap_info(rps->caps);
2890 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2891 for (i = 0; i < ps->num_levels; i++) {
2892 struct kv_pl *pl = &ps->levels[i];
2893 printk("\t\tpower level %d sclk: %u vddc: %u\n",
2895 kv_convert_8bit_index_to_voltage(adev, pl->vddc_index));
2897 amdgpu_dpm_print_ps_status(adev, rps);
2900 static void kv_dpm_fini(struct amdgpu_device *adev)
2904 for (i = 0; i < adev->pm.dpm.num_ps; i++) {
2905 kfree(adev->pm.dpm.ps[i].ps_priv);
2907 kfree(adev->pm.dpm.ps);
2908 kfree(adev->pm.dpm.priv);
2909 amdgpu_free_extended_power_table(adev);
2912 static void kv_dpm_display_configuration_changed(void *handle)
2917 static u32 kv_dpm_get_sclk(void *handle, bool low)
2919 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2920 struct kv_power_info *pi = kv_get_pi(adev);
2921 struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps);
2924 return requested_state->levels[0].sclk;
2926 return requested_state->levels[requested_state->num_levels - 1].sclk;
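2927 /* KV is an APU with UMA memory, so the bootup UMA clock is reported. */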
2929 static u32 kv_dpm_get_mclk(void *handle, bool low)
2931 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2932 struct kv_power_info *pi = kv_get_pi(adev);
2934 return pi->sys_info.bootup_uma_clk;
2937 /* get temperature in millidegrees */
2938 static int kv_dpm_get_temp(void *handle)
2941 int actual_temp = 0;
2942 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2944 temp = RREG32_SMC(0xC0300E0C); /* raw SMC thermal status register; no symbolic define is used here */
2947 actual_temp = (temp / 8) - 49; /* register units to degrees C, undoing the +49 programming offset */
2951 actual_temp = actual_temp * 1000; /* degrees C to millidegrees */
2956 static int kv_dpm_early_init(void *handle)
2958 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2960 adev->powerplay.pp_funcs = &kv_dpm_funcs;
2961 adev->powerplay.pp_handle = adev;
2962 kv_dpm_set_irq_funcs(adev);
2967 static int kv_dpm_late_init(void *handle)
2969 /* power down unused blocks for now */
2970 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2972 if (!adev->pm.dpm_enabled)
2975 kv_dpm_powergate_acp(adev, true);
2976 kv_dpm_powergate_samu(adev, true);
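2978 /* src_ids 230 and 231 are the thermal low-to-high and high-to-low
2979  * triggers handled in kv_dpm_process_interrupt(). */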
2981 static int kv_dpm_sw_init(void *handle)
2984 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
2986 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
2987 &adev->pm.dpm.thermal.irq);
2991 ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
2992 &adev->pm.dpm.thermal.irq);
2996 /* default to balanced state */
2997 adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
2998 adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
2999 adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
3000 adev->pm.default_sclk = adev->clock.default_sclk;
3001 adev->pm.default_mclk = adev->clock.default_mclk;
3002 adev->pm.current_sclk = adev->clock.default_sclk;
3003 adev->pm.current_mclk = adev->clock.default_mclk;
3004 adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
3006 if (amdgpu_dpm == 0)
3009 INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
3010 mutex_lock(&adev->pm.mutex);
3011 ret = kv_dpm_init(adev);
3014 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
3015 if (amdgpu_dpm == 1)
3016 amdgpu_pm_print_power_states(adev);
3017 mutex_unlock(&adev->pm.mutex);
3018 DRM_INFO("amdgpu: dpm initialized\n");
3024 mutex_unlock(&adev->pm.mutex);
3025 DRM_ERROR("amdgpu: dpm initialization failed\n");
3029 static int kv_dpm_sw_fini(void *handle)
3031 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3033 flush_work(&adev->pm.dpm.thermal.work);
3035 mutex_lock(&adev->pm.mutex);
3037 mutex_unlock(&adev->pm.mutex);
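3039 /* hw init enables DPM; on failure dpm_enabled stays false so the rest
3040  * of the driver skips the DPM paths. */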
3042 static int kv_dpm_hw_init(void *handle)
3045 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3050 mutex_lock(&adev->pm.mutex);
3051 kv_dpm_setup_asic(adev);
3052 ret = kv_dpm_enable(adev);
3054 adev->pm.dpm_enabled = false;
3056 adev->pm.dpm_enabled = true;
3057 mutex_unlock(&adev->pm.mutex);
3058 amdgpu_pm_compute_clocks(adev);
3062 static int kv_dpm_hw_fini(void *handle)
3064 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3066 if (adev->pm.dpm_enabled) {
3067 mutex_lock(&adev->pm.mutex);
3068 kv_dpm_disable(adev);
3069 mutex_unlock(&adev->pm.mutex);
3075 static int kv_dpm_suspend(void *handle)
3077 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3079 if (adev->pm.dpm_enabled) {
3080 mutex_lock(&adev->pm.mutex);
3082 kv_dpm_disable(adev);
3083 /* reset the power state */
3084 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
3085 mutex_unlock(&adev->pm.mutex);
3090 static int kv_dpm_resume(void *handle)
3093 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3095 if (adev->pm.dpm_enabled) {
3096 /* asic init will reset to the boot state */
3097 mutex_lock(&adev->pm.mutex);
3098 kv_dpm_setup_asic(adev);
3099 ret = kv_dpm_enable(adev);
3101 adev->pm.dpm_enabled = false;
3103 adev->pm.dpm_enabled = true;
3104 mutex_unlock(&adev->pm.mutex);
3105 if (adev->pm.dpm_enabled)
3106 amdgpu_pm_compute_clocks(adev);
3111 static bool kv_dpm_is_idle(void *handle)
3116 static int kv_dpm_wait_for_idle(void *handle)
3122 static int kv_dpm_soft_reset(void *handle)
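3124 /* The THERM_INTH/INTL "MASK" bits apparently act as enables here:
3125  * IRQ_STATE_ENABLE sets the bit and IRQ_STATE_DISABLE clears it. */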
3127 static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev,
3128 struct amdgpu_irq_src *src,
3130 enum amdgpu_interrupt_state state)
3135 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
3137 case AMDGPU_IRQ_STATE_DISABLE:
3138 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3139 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
3140 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
3142 case AMDGPU_IRQ_STATE_ENABLE:
3143 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3144 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
3145 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
3152 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
3154 case AMDGPU_IRQ_STATE_DISABLE:
3155 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3156 cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
3157 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
3159 case AMDGPU_IRQ_STATE_ENABLE:
3160 cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL);
3161 cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
3162 WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int);
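3171 /* Map the thermal IH src_ids onto the high_to_low flag and queue the
3172  * thermal work item. */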
3175 static int kv_dpm_process_interrupt(struct amdgpu_device *adev,
3176 struct amdgpu_irq_src *source,
3177 struct amdgpu_iv_entry *entry)
3179 bool queue_thermal = false;
3184 switch (entry->src_id) {
3185 case 230: /* thermal low to high */
3186 DRM_DEBUG("IH: thermal low to high\n");
3187 adev->pm.dpm.thermal.high_to_low = false;
3188 queue_thermal = true;
3190 case 231: /* thermal high to low */
3191 DRM_DEBUG("IH: thermal high to low\n");
3192 adev->pm.dpm.thermal.high_to_low = true;
3193 queue_thermal = true;
3200 schedule_work(&adev->pm.dpm.thermal.work);
3205 static int kv_dpm_set_clockgating_state(void *handle,
3206 enum amd_clockgating_state state)
3211 static int kv_dpm_set_powergating_state(void *handle,
3212 enum amd_powergating_state state)
3217 static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1,
3218 const struct kv_pl *kv_cpl2)
3220 return ((kv_cpl1->sclk == kv_cpl2->sclk) &&
3221 (kv_cpl1->vddc_index == kv_cpl2->vddc_index) &&
3222 (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) &&
3223 (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state));
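3224 /* States match when every power level is equal; UVD/VCE clocks then
3225  * break the tie. */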
3226 static int kv_check_state_equal(void *handle,
3231 struct kv_ps *kv_cps;
3232 struct kv_ps *kv_rps;
3234 struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
3235 struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
3236 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3238 if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
3241 kv_cps = kv_get_ps(cps);
3242 kv_rps = kv_get_ps(rps);
3244 if (kv_cps == NULL) {
3249 if (kv_cps->num_levels != kv_rps->num_levels) {
3254 for (i = 0; i < kv_cps->num_levels; i++) {
3255 if (!kv_are_power_levels_equal(&(kv_cps->levels[i]),
3256 &(kv_rps->levels[i]))) {
3262 /* If all performance levels are the same, try to use the UVD clocks to break the tie. */
3263 *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
3264 *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
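3266 /* Generic read_sensor hook: reports the current sclk, decoded from the
3267  * SMU's current profile index, and the GPU temperature. */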
3269 static int kv_dpm_read_sensor(void *handle, int idx,
3270 void *value, int *size)
3272 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3273 struct kv_power_info *pi = kv_get_pi(adev);
3276 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
3277 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
3278 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
3280 /* size must be at least 4 bytes for all sensors */
3285 case AMDGPU_PP_SENSOR_GFX_SCLK:
3286 if (pl_index < SMU__NUM_SCLK_DPM_STATE) {
3288 pi->graphics_level[pl_index].SclkFrequency);
3289 *((uint32_t *)value) = sclk;
3294 case AMDGPU_PP_SENSOR_GPU_TEMP:
3295 *((uint32_t *)value) = kv_dpm_get_temp(adev);
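3298 /* Route powergating requests from the SMU layer to the UVD/VCE gate
3299  * helpers; other block types are ignored. */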
3303 static int kv_set_powergating_by_smu(void *handle,
3304 uint32_t block_type, bool gate)
3306 switch (block_type) {
3307 case AMD_IP_BLOCK_TYPE_UVD:
3308 kv_dpm_powergate_uvd(handle, gate);
3310 case AMD_IP_BLOCK_TYPE_VCE:
3311 kv_dpm_powergate_vce(handle, gate);
3319 static const struct amd_ip_funcs kv_dpm_ip_funcs = {
3321 .early_init = kv_dpm_early_init,
3322 .late_init = kv_dpm_late_init,
3323 .sw_init = kv_dpm_sw_init,
3324 .sw_fini = kv_dpm_sw_fini,
3325 .hw_init = kv_dpm_hw_init,
3326 .hw_fini = kv_dpm_hw_fini,
3327 .suspend = kv_dpm_suspend,
3328 .resume = kv_dpm_resume,
3329 .is_idle = kv_dpm_is_idle,
3330 .wait_for_idle = kv_dpm_wait_for_idle,
3331 .soft_reset = kv_dpm_soft_reset,
3332 .set_clockgating_state = kv_dpm_set_clockgating_state,
3333 .set_powergating_state = kv_dpm_set_powergating_state,
3336 const struct amdgpu_ip_block_version kv_smu_ip_block =
3338 .type = AMD_IP_BLOCK_TYPE_SMC,
3342 .funcs = &kv_dpm_ip_funcs,
3345 static const struct amd_pm_funcs kv_dpm_funcs = {
3346 .pre_set_power_state = &kv_dpm_pre_set_power_state,
3347 .set_power_state = &kv_dpm_set_power_state,
3348 .post_set_power_state = &kv_dpm_post_set_power_state,
3349 .display_configuration_changed = &kv_dpm_display_configuration_changed,
3350 .get_sclk = &kv_dpm_get_sclk,
3351 .get_mclk = &kv_dpm_get_mclk,
3352 .print_power_state = &kv_dpm_print_power_state,
3353 .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level,
3354 .force_performance_level = &kv_dpm_force_performance_level,
3355 .set_powergating_by_smu = kv_set_powergating_by_smu,
3356 .enable_bapm = &kv_dpm_enable_bapm,
3357 .get_vce_clock_state = amdgpu_get_vce_clock_state,
3358 .check_state_equal = kv_check_state_equal,
3359 .read_sensor = &kv_dpm_read_sensor,
3362 static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
3363 .set = kv_dpm_set_interrupt_state,
3364 .process = kv_dpm_process_interrupt,
3367 static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev)
3369 adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
3370 adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs;