/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
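/* Decode a power state's UI class and internal classification flags
 * and print them for debugging.
 */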
void r600_dpm_print_class_info(u32 class, u32 class2)
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
	if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
	if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
	pr_cont(" limited_pwr");
	if (class & ATOM_PPLIB_CLASSIFICATION_REST)
	if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
	if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
	if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
	if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
	pr_cont(" limited_pwr2");
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)

void r600_dpm_print_cap_info(u32 caps)
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
	pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)

void r600_dpm_print_ps_status(struct radeon_device *rdev,
	struct radeon_ps *rps)
	if (rps == rdev->pm.dpm.current_ps)
	if (rps == rdev->pm.dpm.requested_ps)
	if (rps == rdev->pm.dpm.boot_ps)
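/* Return the vblank interval of the active display in microseconds,
 * or 0xffffffff if no CRTC is currently enabled.
 */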
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
	radeon_crtc = to_radeon_crtc(crtc);
	if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
	radeon_crtc->hw_mode.crtc_htotal *
	(radeon_crtc->hw_mode.crtc_vblank_end -
	radeon_crtc->hw_mode.crtc_vdisplay +
	(radeon_crtc->v_border * 2));
	vblank_time_us = vblank_in_pixels * 1000 / radeon_crtc->hw_mode.clock;
	return vblank_time_us;
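/* Return the vertical refresh rate of the first enabled CRTC. */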
u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	if (rdev->num_crtc && rdev->mode_info.mode_config_initialized) {
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
	radeon_crtc = to_radeon_crtc(crtc);
	if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
	vrefresh = drm_mode_vrefresh(&radeon_crtc->hw_mode);
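/* Split the cycle count i * r_c / 100 into a period *p that fits in p_b bits
 * and a power-of-four unit *u, so that the count is roughly *p << (2 * *u).
 */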
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
	i_c = (i * r_c) / 100;
	*p = i_c / (1 << (2 * (*u)));
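/* Derive low/high activity thresholds *tl and *th around the target t from
 * the hysteresis h and the high/low frequencies fh/fl; the frequencies must
 * be non-zero and ordered.
 */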
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
	if ((fl == 0) || (fh == 0) || (fl > fh))
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	ah = ((a * t) + 5000) / 10000;
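/* Enable or disable dynamic GFX clock gating; the disable path requests the
 * clock back from the RLC via CG_RLC_REQ_AND_RSP before touching GRBM power.
 */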
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
	WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
	WREG32(CG_RLC_REQ_AND_RSP, 0x2);
	for (i = 0; i < rdev->usec_timeout; i++) {
	if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
	WREG32(CG_RLC_REQ_AND_RSP, 0x0);
	WREG32(GRBM_PWR_CNTL, 0x1);
	RREG32(GRBM_PWR_CNTL);

void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
	WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);

void r600_enable_acpi_pm(struct radeon_device *rdev)
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
	WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)

void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
	WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
	WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);

void r600_wait_for_spll_change(struct radeon_device *rdev)
	for (i = 0; i < rdev->usec_timeout; i++) {
	if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)

void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
	WREG32(CG_BSP, BSP(p) | BSU(u));

void r600_set_at(struct radeon_device *rdev,
	u32 l_to_m, u32 m_to_h,
	u32 h_to_m, u32 m_to_l)
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));

void r600_set_tc(struct radeon_device *rdev,
	u32 index, u32 u_t, u32 d_t)
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));

void r600_select_td(struct radeon_device *rdev,
	if (td == R600_TD_AUTO)
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
	WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)

void r600_set_tpu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);

void r600_set_tpc(struct radeon_device *rdev, u32 c)
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);

void r600_set_sstu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);

void r600_set_sst(struct radeon_device *rdev, u32 t)
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);

void r600_set_git(struct radeon_device *rdev, u32 t)
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);

void r600_set_fctu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);

void r600_set_fct(struct radeon_device *rdev, u32 t)
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);

void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);

void r600_engine_clock_entry_enable(struct radeon_device *rdev,
	u32 index, bool enable)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
	STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
	0, ~STEP_0_SPLL_ENTRY_VALID);

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
	u32 index, bool enable)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
	STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
	0, ~STEP_0_SPLL_STEP_ENABLE);

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
	u32 index, bool enable)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
	STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
	0, ~STEP_0_POST_DIV_EN);

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
	u32 index, u32 divider)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
	STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
	u32 index, u32 divider)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
	STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
	u32 index, u32 divider)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
	STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
	u32 index, u32 step_time)
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
	STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);

void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);

void r600_voltage_control_enable_pins(struct radeon_device *rdev,
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
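/* Program the GPIO voltage pins for a power level; the per-level CTXSW
 * registers appear to be indexed in reverse order relative to
 * enum r600_power_level, hence ix = 3 - (3 & index).
 */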
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
	enum r600_power_level index, u64 pins)
	u32 ix = 3 - (3 & index);
	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);
	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);

void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
	gpio = RREG32(GPIOPAD_MASK);
	WREG32(GPIOPAD_MASK, gpio);
	gpio = RREG32(GPIOPAD_EN);
	WREG32(GPIOPAD_EN, gpio);
	gpio = RREG32(GPIOPAD_A);
	WREG32(GPIOPAD_A, gpio);
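/* Per-level CTXSW_PROFILE_INDEX helpers: enable a power level and select its
 * voltage, memory clock, engine clock, display watermark and PCIe gen2
 * settings.
 */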
void r600_power_level_enable(struct radeon_device *rdev,
	enum r600_power_level index, bool enable)
	u32 ix = 3 - (3 & index);
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
	~CTXSW_FREQ_STATE_ENABLE);
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
	~CTXSW_FREQ_STATE_ENABLE);

void r600_power_level_set_voltage_index(struct radeon_device *rdev,
	enum r600_power_level index, u32 voltage_index)
	u32 ix = 3 - (3 & index);
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
	CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
	enum r600_power_level index, u32 mem_clock_index)
	u32 ix = 3 - (3 & index);
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
	CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
	enum r600_power_level index, u32 eng_clock_index)
	u32 ix = 3 - (3 & index);
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
	CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);

void r600_power_level_set_watermark_id(struct radeon_device *rdev,
	enum r600_power_level index,
	enum r600_display_watermark watermark_id)
	u32 ix = 3 - (3 & index);
	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
	tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
	enum r600_power_level index, bool compatible)
	u32 ix = 3 - (3 & index);
	tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);

enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;

void r600_power_level_set_enter_index(struct radeon_device *rdev,
	enum r600_power_level index)
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
	~DYN_PWR_ENTER_INDEX_MASK);

void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
	enum r600_power_level index)
	for (i = 0; i < rdev->usec_timeout; i++) {
	if (r600_power_level_get_target_index(rdev) != index)
	for (i = 0; i < rdev->usec_timeout; i++) {
	if (r600_power_level_get_current_index(rdev) != index)

void r600_wait_for_power_level(struct radeon_device *rdev,
	enum r600_power_level index)
	for (i = 0; i < rdev->usec_timeout; i++) {
	if (r600_power_level_get_target_index(rdev) == index)
	for (i = 0; i < rdev->usec_timeout; i++) {
	if (r600_power_level_get_current_index(rdev) == index)
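/* Start dynamic power management: disable sclk/mclk control, enable global
 * dynamic PM, wait for vblank, toggle SPLL bypass twice, then re-enable
 * sclk and mclk control.
 */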
void r600_start_dpm(struct radeon_device *rdev)
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);
	r600_dynamicpm_enable(rdev, true);
	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);
	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);
	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);

void r600_stop_dpm(struct radeon_device *rdev)
	r600_dynamicpm_enable(rdev, false);

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)

void r600_dpm_post_set_power_state(struct radeon_device *rdev)

bool r600_is_uvd_state(u32 class, u32 class2)
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
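/* Clamp the requested range to 0..255 C, program the thermal interrupt
 * thresholds and the DPM thermal limit, and cache the result in
 * rdev->pm.dpm.thermal.
 */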
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
	int min_temp, int max_temp)
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	if (low_temp < min_temp)
	if (high_temp > max_temp)
	high_temp = max_temp;
	if (high_temp < low_temp) {
	DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
	return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:

int r600_dpm_late_enable(struct radeon_device *rdev)
	if (rdev->irq.installed &&
	r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
	ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	rdev->irq.dpm_thermal = true;
	radeon_irq_set(rdev);
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
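/* Copy an ATOM clock/voltage dependency table from the vbios into a
 * driver-allocated radeon_clock_voltage_dependency_table.
 */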
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
	u32 size = atom_table->ucNumEntries *
	sizeof(struct radeon_clock_voltage_dependency_entry);
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
	radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
	(entry->ucClockHigh << 16);
	radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
	entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
	((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	radeon_table->count = atom_table->ucNumEntries;
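/* Read the PowerPlay table header from the ATOM BIOS and cache the platform
 * caps and backbias/voltage response times.
 */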
int r600_get_platform_caps(struct radeon_device *rdev)
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
	&frev, &crev, &data_offset))
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
	rdev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	rdev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	rdev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
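/* Parse the extended PowerPlay tables from the vbios: fan table, clock/voltage
 * dependency tables, DC clock/voltage limits, phase shedding limits, TDP and
 * CAC leakage data, and the VCE/UVD/SAMU/ACP/PPM/PowerTune tables, allocating
 * the matching dyn_state tables as it goes.
 */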
int r600_parse_extended_power_table(struct radeon_device *rdev)
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
	&frev, &crev, &data_offset))
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
	if (power_info->pplib3.usFanTableOffset) {
	fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib3.usFanTableOffset));
	rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
	rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
	rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
	rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
	rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
	rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
	rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
	if (fan_info->fan.ucFanTableFormat >= 2)
	rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
	rdev->pm.dpm.fan.t_max = 10900;
	rdev->pm.dpm.fan.cycle_delay = 100000;
	if (fan_info->fan.ucFanTableFormat >= 3) {
	rdev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
	rdev->pm.dpm.fan.default_max_fan_pwm =
	le16_to_cpu(fan_info->fan3.usFanPWMMax);
	rdev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
	rdev->pm.dpm.fan.fan_output_sensitivity =
	le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
	rdev->pm.dpm.fan.ucode_fan_control = true;
	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
	if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
	dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
	ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
	if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
	dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
	ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
	if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
	dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
	ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
	kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
	if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
	dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
	ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
	kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
	kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
	if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
	ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
	(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
	if (clk_v->ucNumEntries) {
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
	le16_to_cpu(clk_v->entries[0].usSclkLow) |
	(clk_v->entries[0].ucSclkHigh << 16);
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
	le16_to_cpu(clk_v->entries[0].usMclkLow) |
	(clk_v->entries[0].ucMclkHigh << 16);
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
	le16_to_cpu(clk_v->entries[0].usVddc);
	rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
	le16_to_cpu(clk_v->entries[0].usVddci);
	if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
	ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
	(ATOM_PPLIB_PhaseSheddingLimits_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
	ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
	rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
	kcalloc(psl->ucNumEntries,
	sizeof(struct radeon_phase_shedding_limits_entry),
	if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
	r600_free_extended_power_table(rdev);
	entry = &psl->entries[0];
	for (i = 0; i < psl->ucNumEntries; i++) {
	rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
	le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
	rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
	le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
	rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
	le16_to_cpu(entry->usVoltage);
	entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
	((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
	rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
	rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
	rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
	rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
	rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
	if (rdev->pm.dpm.tdp_od_limit)
	rdev->pm.dpm.power_control = true;
	rdev->pm.dpm.power_control = false;
	rdev->pm.dpm.tdp_adjustment = 0;
	rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
	rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
	rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
	if (power_info->pplib5.usCACLeakageTableOffset) {
	ATOM_PPLIB_CAC_Leakage_Table *cac_table =
	(ATOM_PPLIB_CAC_Leakage_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
	ATOM_PPLIB_CAC_Leakage_Record *entry;
	u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
	rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
	r600_free_extended_power_table(rdev);
	entry = &cac_table->entries[0];
	for (i = 0; i < cac_table->ucNumEntries; i++) {
	if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
	rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
	le16_to_cpu(entry->usVddc1);
	rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
	le16_to_cpu(entry->usVddc2);
	rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
	le16_to_cpu(entry->usVddc3);
	rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
	le16_to_cpu(entry->usVddc);
	rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
	le32_to_cpu(entry->ulLeakageValue);
	entry = (ATOM_PPLIB_CAC_Leakage_Record *)
	((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
	rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
	ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
	ext_hdr->usVCETableOffset) {
	VCEClockInfoArray *array = (VCEClockInfoArray *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
	ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
	(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
	1 + array->ucNumEntries * sizeof(VCEClockInfo));
	ATOM_PPLIB_VCE_State_Table *states =
	(ATOM_PPLIB_VCE_State_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
	1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
	1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
	ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
	ATOM_PPLIB_VCE_State_Record *state_entry;
	VCEClockInfo *vce_clk;
	u32 size = limits->numEntries *
	sizeof(struct radeon_vce_clock_voltage_dependency_entry);
	rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
	kzalloc(size, GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
	r600_free_extended_power_table(rdev);
	rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
	entry = &limits->entries[0];
	state_entry = &states->entries[0];
	for (i = 0; i < limits->numEntries; i++) {
	vce_clk = (VCEClockInfo *)
	((u8 *)&array->entries[0] +
	(entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
	rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
	le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
	rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
	le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
	rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
	le16_to_cpu(entry->usVoltage);
	entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
	((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
	for (i = 0; i < states->numEntries; i++) {
	if (i >= RADEON_MAX_VCE_LEVELS)
	vce_clk = (VCEClockInfo *)
	((u8 *)&array->entries[0] +
	(state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
	rdev->pm.dpm.vce_states[i].evclk =
	le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
	rdev->pm.dpm.vce_states[i].ecclk =
	le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
	rdev->pm.dpm.vce_states[i].clk_idx =
	state_entry->ucClockInfoIndex & 0x3f;
	rdev->pm.dpm.vce_states[i].pstate =
	(state_entry->ucClockInfoIndex & 0xc0) >> 6;
	state_entry = (ATOM_PPLIB_VCE_State_Record *)
	((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
	ext_hdr->usUVDTableOffset) {
	UVDClockInfoArray *array = (UVDClockInfoArray *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
	ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
	(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
	1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
	ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
	u32 size = limits->numEntries *
	sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
	rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
	kzalloc(size, GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
	r600_free_extended_power_table(rdev);
	rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
	entry = &limits->entries[0];
	for (i = 0; i < limits->numEntries; i++) {
	UVDClockInfo *uvd_clk = (UVDClockInfo *)
	((u8 *)&array->entries[0] +
	(entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
	rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
	le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
	rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
	le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
	rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
	le16_to_cpu(entry->usVoltage);
	entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
	((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
	ext_hdr->usSAMUTableOffset) {
	ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
	(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
	ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
	u32 size = limits->numEntries *
	sizeof(struct radeon_clock_voltage_dependency_entry);
	rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
	kzalloc(size, GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
	r600_free_extended_power_table(rdev);
	rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
	entry = &limits->entries[0];
	for (i = 0; i < limits->numEntries; i++) {
	rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
	le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
	rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
	le16_to_cpu(entry->usVoltage);
	entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
	((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
	ext_hdr->usPPMTableOffset) {
	ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usPPMTableOffset));
	rdev->pm.dpm.dyn_state.ppm_table =
	kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.ppm_table) {
	r600_free_extended_power_table(rdev);
	rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
	rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
	le16_to_cpu(ppm->usCpuCoreNumber);
	rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
	le32_to_cpu(ppm->ulPlatformTDP);
	rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
	le32_to_cpu(ppm->ulSmallACPlatformTDP);
	rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
	le32_to_cpu(ppm->ulPlatformTDC);
	rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
	le32_to_cpu(ppm->ulSmallACPlatformTDC);
	rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
	le32_to_cpu(ppm->ulApuTDP);
	rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
	le32_to_cpu(ppm->ulDGpuTDP);
	rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
	le32_to_cpu(ppm->ulDGpuUlvPower);
	rdev->pm.dpm.dyn_state.ppm_table->tj_max =
	le32_to_cpu(ppm->ulTjmax);
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
	ext_hdr->usACPTableOffset) {
	ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
	(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
	ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
	u32 size = limits->numEntries *
	sizeof(struct radeon_clock_voltage_dependency_entry);
	rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
	kzalloc(size, GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
	r600_free_extended_power_table(rdev);
	rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
	entry = &limits->entries[0];
	for (i = 0; i < limits->numEntries; i++) {
	rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
	le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
	rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
	le16_to_cpu(entry->usVoltage);
	entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
	((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
	if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
	ext_hdr->usPowerTuneTableOffset) {
	u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
	ATOM_PowerTune_Table *pt;
	rdev->pm.dpm.dyn_state.cac_tdp_table =
	kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
	if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
	r600_free_extended_power_table(rdev);
	ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
	rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
	le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
	pt = &ppt->power_tune_table;
	ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
	(mode_info->atom_context->bios + data_offset +
	le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
	rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
	pt = &ppt->power_tune_table;
	rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
	rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
	le16_to_cpu(pt->usConfigurableTDP);
	rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
	rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
	le16_to_cpu(pt->usBatteryPowerLimit);
	rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
	le16_to_cpu(pt->usSmallPowerLimit);
	rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
	le16_to_cpu(pt->usLowCACLeakage);
	rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
	le16_to_cpu(pt->usHighCACLeakage);
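/* Free every dynamically allocated dyn_state table created by
 * r600_parse_extended_power_table().
 */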
void r600_free_extended_power_table(struct radeon_device *rdev)
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;
	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
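/* Pick a PCIe gen: honour an explicit asic_gen request, otherwise choose the
 * highest speed out of default_gen that the system link speed mask supports.
 */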
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
	enum radeon_pcie_gen asic_gen,
	enum radeon_pcie_gen default_gen)
	case RADEON_PCIE_GEN1:
	return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
	return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
	return RADEON_PCIE_GEN3;
	if ((sys_mask & RADEON_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
	return RADEON_PCIE_GEN3;
	else if ((sys_mask & RADEON_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
	return RADEON_PCIE_GEN2;
	return RADEON_PCIE_GEN1;
	return RADEON_PCIE_GEN1;
u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
	switch (asic_lanes) {
	return default_lanes;
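/* Encode a PCIe lane count (1/2/4/8/12/16) into its register field value. */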
u8 r600_encode_pci_lane_width(u32 lanes)
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };
	return encoded_lanes[lanes];