/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"

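/*
 * Dump the UI and internal classification flags of a power state to the
 * kernel log; class carries the ATOM_PPLIB_CLASSIFICATION_* bits and class2
 * the ATOM_PPLIB_CLASSIFICATION2_* bits.
 */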
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

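/* Dump the ATOM_PPLIB_* capability flags of a power state to the kernel log. */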
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

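/*
 * Print whether the given power state is the current (c), requested (r)
 * and/or boot (b) state.
 */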
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

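/*
 * Returns the vblank duration of the first active display in microseconds,
 * or 0xffffffff when no display is enabled, so callers treat "no display"
 * as an unbounded vblank.
 */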
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

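/* Returns the vertical refresh rate of the first active display, or 0. */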
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

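/*
 * Split i_c = (i * r_c) / 100 into an exponent *u and a scaled value
 * *p = i_c / 4^*u, with *u sized from the bits of i_c above p_b; used by
 * the per-asic dpm code when encoding ramping parameters.
 */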
void amdgpu_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			      u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}

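/*
 * Split a target activity t with hysteresis h across the fl..fh frequency
 * span into low/high thresholds *tl/*th; returns -EINVAL when the
 * frequency range is empty or inverted.
 */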
int amdgpu_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}

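/* True if the classification flags mark a UVD (video decode) power state. */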
bool amdgpu_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}

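/*
 * True for sensor types read through the GPU's internal thermal controller;
 * the ADT7473/EMC2103 combo parts need special handling and report false.
 */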
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

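/*
 * Copy an ATOM clock/voltage dependency table out of the vbios into the
 * driver's representation; returns -ENOMEM if the allocation fails.
 */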
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

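/*
 * Cache the PPLib platform caps and the backbias/voltage response times
 * from the PowerPlayInfo table; returns -EINVAL if the table header cannot
 * be parsed.
 */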
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

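/*
 * Parse the optional PPLib sub-tables (fan, clock/voltage dependencies,
 * CAC, PPM, VCE/UVD/SAMU/ACP limits, PowerTune) out of the vbios into
 * adev->pm.dpm; on error, tables allocated so far are released via
 * amdgpu_free_extended_power_table().
 */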
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

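/*
 * Release everything allocated by amdgpu_parse_extended_power_table();
 * kfree(NULL) is a no-op, so a partially parsed state is fine.
 */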
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

/* indexed by the ATOM_PP_THERMALCONTROLLER_* values of controller->ucType */
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kabini/Kaveri/Mullins",
};

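/*
 * Parse the thermal controller out of the PowerPlay table, record its type
 * and fan parameters, and register an i2c device for external fan/thermal
 * chips.
 */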
void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

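/*
 * Pick a PCIe gen: an explicit asic_gen wins; otherwise take the highest
 * gen that both default_gen and the system speed mask (DRM_PCIE_SPEED_*)
 * allow.
 */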
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

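/*
 * Returns asic_lanes when it names a valid PCIe width, otherwise falls
 * back to default_lanes.
 */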
u16 amdgpu_get_pcie_lane_support(struct amdgpu_device *adev,
				 u16 asic_lanes,
				 u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}

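/*
 * Encode a PCIe lane count into the hardware link-width encoding
 * (1/2/4/8/12/16 -> 1/2/3/4/5/6); anything else encodes to 0.
 */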
u8 amdgpu_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}

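/* Look up a VCE clock state parsed from the vbios; NULL if idx is out of range. */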
struct amd_vce_state*
amdgpu_get_vce_clock_state(struct amdgpu_device *adev, unsigned idx)
{
	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}