/* GNU Linux-libre 4.9-gnu1: drivers/gpu/drm/amd/amdgpu/ci_dpm.c */
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include "amd_pcie.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

/*(DEBLOBBED)*/

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

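/*
 * Per-ASIC PowerTune defaults. The leading scalar fields are the SVI load
 * line, TDC throttle and ambient-temperature-base parameters used by the
 * ci_populate_*() helpers below; the two arrays are the BAPMTI_R/BAPMTI_RC
 * thermal impedance coefficients consumed by
 * ci_populate_bapm_parameters_in_dpm_table().
 */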
static const struct ci_pt_defaults defaults_hawaii_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

#if 0
static const struct ci_pt_defaults defaults_bonaire_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};
#endif

static const struct ci_pt_defaults defaults_saturn_xt =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

#if 0
static const struct ci_pt_defaults defaults_saturn_pro =
{
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
#endif

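/*
 * DIDT (di/dt current-slew limiting) register programming table, consumed by
 * ci_program_pt_config_registers(). Each entry is { offset, mask, shift,
 * value, type }; the list is terminated by an offset of 0xFFFFFFFF.
 */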
static const struct ci_pt_config_reg didt_config_ci[] =
{
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF }
};

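/* The memory module index lives in bits 23:16 of BIOS_SCRATCH_4 (set up by the VBIOS). */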
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
        return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}

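/*
 * Copy the MC arbitration (DRAM timing) registers from one register set to
 * another and then ask the MC to switch to the destination set. Only the
 * F0 and F1 sets are handled here.
 */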
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
                                       u32 arb_freq_src, u32 arb_freq_dest)
{
        u32 mc_arb_dram_timing;
        u32 mc_arb_dram_timing2;
        u32 burst_time;
        u32 mc_cg_config;

        switch (arb_freq_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
                         MC_ARB_BURST_TIME__STATE0__SHIFT;
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
                         MC_ARB_BURST_TIME__STATE1__SHIFT;
                break;
        default:
                return -EINVAL;
        }

        switch (arb_freq_dest) {
        case MC_CG_ARB_FREQ_F0:
                WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE0_MASK);
                break;
        case MC_CG_ARB_FREQ_F1:
                WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
                        ~MC_ARB_BURST_TIME__STATE1_MASK);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
        WREG32(mmMC_CG_CONFIG, mc_cg_config);
        WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
                ~MC_ARB_CG__CG_ARB_REQ_MASK);

        return 0;
}

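/*
 * Map a memory clock (in 10 kHz units, so 80000 == 800 MHz) to one of the 16
 * MC parameter table indices used by the SMC. The DDR3 variant below uses
 * fixed 50 MHz bins; the GDDR5 variant bins differently depending on whether
 * the memory PLL runs in strobe mode.
 */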
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
        u8 mc_para_index;

        if (memory_clock < 10000)
                mc_para_index = 0;
        else if (memory_clock >= 80000)
                mc_para_index = 0x0f;
        else
                mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
        return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
        u8 mc_para_index;

        if (strobe_mode) {
                if (memory_clock < 12500)
                        mc_para_index = 0x00;
                else if (memory_clock > 47500)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 10000) / 2500);
        } else {
                if (memory_clock < 65000)
                        mc_para_index = 0x00;
                else if (memory_clock > 135000)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 60000) / 5000);
        }
        return mc_para_index;
}

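/*
 * The SMC state table can hold only max_voltage_steps entries, so drop the
 * lowest-voltage entries and keep the top of the table.
 */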
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
                                                     u32 max_voltage_steps,
                                                     struct atom_voltage_table *voltage_table)
{
        unsigned int i, diff;

        if (voltage_table->count <= max_voltage_steps)
                return;

        diff = voltage_table->count - max_voltage_steps;

        for (i = 0; i < max_voltage_steps; i++)
                voltage_table->entries[i] = voltage_table->entries[i + diff];

        voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
                                         struct atom_voltage_table_entry *voltage_table,
                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
                                       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                             PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);

static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = adev->pm.dpm.priv;

        return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
        struct ci_ps *ps = rps->ps_priv;

        return ps;
}

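/*
 * Pick the PowerTune defaults that match the PCI device ID (Bonaire, Saturn
 * and Hawaii variants) and set the baseline power-containment capabilities;
 * BAPM is kept disabled on Hawaii.
 */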
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        switch (adev->pdev->device) {
        case 0x6649:
        case 0x6650:
        case 0x6651:
        case 0x6658:
        case 0x665C:
        case 0x665D:
        default:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        case 0x6640:
        case 0x6641:
        case 0x6646:
        case 0x6647:
                pi->powertune_defaults = &defaults_saturn_xt;
                break;
        case 0x67B8:
        case 0x67B0:
                pi->powertune_defaults = &defaults_hawaii_xt;
                break;
        case 0x67BA:
        case 0x67B1:
                pi->powertune_defaults = &defaults_hawaii_pro;
                break;
        case 0x67A0:
        case 0x67A1:
        case 0x67A2:
        case 0x67A8:
        case 0x67A9:
        case 0x67AA:
        case 0x67B9:
        case 0x67BE:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        }

        pi->dte_tj_offset = 0;

        pi->caps_power_containment = true;
        pi->caps_cac = false;
        pi->caps_sq_ramping = false;
        pi->caps_db_ramping = false;
        pi->caps_td_ramping = false;
        pi->caps_tcp_ramping = false;

        if (pi->caps_power_containment) {
                pi->caps_cac = true;
                if (adev->asic_type == CHIP_HAWAII)
                        pi->enable_bapm_feature = false;
                else
                        pi->enable_bapm_feature = true;
                pi->enable_tdc_limit_feature = true;
                pi->enable_pkg_pwr_tracking_feature = true;
        }
}

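/*
 * Convert a VDDC value in mV to an SVI VID: VID 0 corresponds to 1.55 V
 * (6200 / VOLTAGE_SCALE) and each VID step is 6.25 mV (25 / VOLTAGE_SCALE).
 */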
static u8 ci_convert_to_vid(u16 vddc)
{
        return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}

static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
        u32 i;

        if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
            adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
                return -EINVAL;

        for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
                        hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
                        hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
                } else {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
                        hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
                }
        }
        return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *vid = pi->smc_powertune_table.VddCVid;
        u32 i;

        if (pi->vddc_voltage_table.count > 8)
                return -EINVAL;

        for (i = 0; i < pi->vddc_voltage_table.count; i++)
                vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

        return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

        pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
        pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
        pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
        pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

        return 0;
}

static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        u16 tdc_limit;

        tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
        pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
        pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
                pt_defaults->tdc_vddc_throttle_release_limit_perc;
        pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

        return 0;
}

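/*
 * "dw8" is the PM fuse dword holding TdcWaterfallCtl. The SRAM read mostly
 * validates that the fuse table is reachable; on success the value read is
 * overwritten with the per-ASIC default anyway.
 */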
static int ci_populate_dw8(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        int ret;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                     SMU7_FIRMWARE_HEADER_LOCATION +
                                     offsetof(SMU7_Firmware_Header, PmFuseTable) +
                                     offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
                                     (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
                                     pi->sram_end);
        if (ret)
                return -EINVAL;
        else
                pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

        return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
            (adev->pm.dpm.fan.fan_output_sensitivity == 0))
                adev->pm.dpm.fan.fan_output_sensitivity =
                        adev->pm.dpm.fan.default_fan_output_sensitivity;

        pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
                cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

        return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        int i, min, max;

        min = max = hi_vid[0];
        for (i = 0; i < 8; i++) {
                if (0 != hi_vid[i]) {
                        if (min > hi_vid[i])
                                min = hi_vid[i];
                        if (max < hi_vid[i])
                                max = hi_vid[i];
                }

                if (0 != lo_vid[i]) {
                        if (min > lo_vid[i])
                                min = lo_vid[i];
                        if (max < lo_vid[i])
                                max = lo_vid[i];
                }
        }

        if ((min == 0) || (max == 0))
                return -EINVAL;
        pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
        pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

        return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u16 hi_sidd, lo_sidd;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;

        hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
        lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

        pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
        pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

        return 0;
}

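/*
 * Fill the BAPM/DTE section of the SMC dpm table: TDP limits in 8.8 fixed
 * point watts, Tj limits in degrees C, and the BAPMTI_R/RC thermal impedance
 * matrices from the per-ASIC defaults. Multi-byte fields are byte-swapped
 * for the big-endian SMC.
 */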
static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
        int i, j, k;
        const u16 *def1;
        const u16 *def2;

        dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
        dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

        dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
        dpm_table->GpuTjMax =
                (u8)(pi->thermal_temp_setting.temperature_high / 1000);
        dpm_table->GpuTjHyst = 8;

        dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

        if (ppm) {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
        } else {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
        }

        dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
        def1 = pt_defaults->bapmti_r;
        def2 = pt_defaults->bapmti_rc;

        for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
                for (j = 0; j < SMU7_DTE_SOURCES; j++) {
                        for (k = 0; k < SMU7_DTE_SINKS; k++) {
                                dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
                                dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
                                def1++;
                                def2++;
                        }
                }
        }

        return 0;
}

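/*
 * Locate the PM fuse table through the SMC firmware header, populate each
 * fuse sub-table on the host side and then upload the whole
 * SMU7_Discrete_PmFuses structure to SMC SRAM in one copy.
 */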
static int ci_populate_pm_base(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 pm_fuse_table_offset;
        int ret;

        if (pi->caps_power_containment) {
                ret = amdgpu_ci_read_smc_sram_dword(adev,
                                             SMU7_FIRMWARE_HEADER_LOCATION +
                                             offsetof(SMU7_Firmware_Header, PmFuseTable),
                                             &pm_fuse_table_offset, pi->sram_end);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_vid_sidd(adev);
                if (ret)
                        return ret;
                ret = ci_populate_vddc_vid(adev);
                if (ret)
                        return ret;
                ret = ci_populate_svi_load_line(adev);
                if (ret)
                        return ret;
                ret = ci_populate_tdc_limit(adev);
                if (ret)
                        return ret;
                ret = ci_populate_dw8(adev);
                if (ret)
                        return ret;
                ret = ci_populate_fuzzy_fan(adev);
                if (ret)
                        return ret;
                ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
                if (ret)
                        return ret;
                ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
                                           (u8 *)&pi->smc_powertune_table,
                                           sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
                if (ret)
                        return ret;
        }

        return 0;
}

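/*
 * Toggle the DIDT_CTRL_EN bit for each block (SQ/DB/TD/TCP) whose ramping
 * capability is enabled.
 */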
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 data;

        if (pi->caps_sq_ramping) {
                data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
                if (enable)
                        data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
        }

        if (pi->caps_db_ramping) {
                data = RREG32_DIDT(ixDIDT_DB_CTRL0);
                if (enable)
                        data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_DB_CTRL0, data);
        }

        if (pi->caps_td_ramping) {
                data = RREG32_DIDT(ixDIDT_TD_CTRL0);
                if (enable)
                        data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TD_CTRL0, data);
        }

        if (pi->caps_tcp_ramping) {
                data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
                if (enable)
                        data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
        }
}

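/*
 * Walk a ci_pt_config_reg list, applying each mask/shift/value to the MMIO,
 * SMC-indirect or DIDT-indirect register space it names. CACHE-type entries
 * only accumulate bits that are OR-ed into the next real register write;
 * the list ends at an offset of 0xFFFFFFFF.
 */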
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
                                          const struct ci_pt_config_reg *cac_config_regs)
{
        const struct ci_pt_config_reg *config_regs = cac_config_regs;
        u32 data;
        u32 cache = 0;

        if (config_regs == NULL)
                return -EINVAL;

        while (config_regs->offset != 0xFFFFFFFF) {
                if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
                        cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                } else {
                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                data = RREG32_SMC(config_regs->offset);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                data = RREG32_DIDT(config_regs->offset);
                                break;
                        default:
                                data = RREG32(config_regs->offset);
                                break;
                        }

                        data &= ~config_regs->mask;
                        data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                        data |= cache;

                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                WREG32_SMC(config_regs->offset, data);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                WREG32_DIDT(config_regs->offset, data);
                                break;
                        default:
                                WREG32(config_regs->offset, data);
                                break;
                        }
                        cache = 0;
                }
                config_regs++;
        }
        return 0;
}

static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret;

        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
                adev->gfx.rlc.funcs->enter_safe_mode(adev);

                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
                                adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }

                ci_do_enable_didt(adev, enable);

                adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }

        return 0;
}

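/*
 * Enable or disable the SMC power-containment features (DTE/BAPM, TDC limit,
 * package power limit) selected in ci_initialize_powertune_defaults() and
 * record which ones actually took effect in power_containment_features.
 */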
static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (enable) {
                pi->power_containment_features = 0;
                if (pi->caps_power_containment) {
                        if (pi->enable_bapm_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
                        }

                        if (pi->enable_tdc_limit_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
                        }

                        if (pi->enable_pkg_pwr_tracking_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
                                if (smc_result != PPSMC_Result_OK) {
                                        ret = -EINVAL;
                                } else {
                                        struct amdgpu_cac_tdp_table *cac_tdp_table =
                                                adev->pm.dpm.dyn_state.cac_tdp_table;
                                        u32 default_pwr_limit =
                                                (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

                                        ci_set_power_limit(adev, default_pwr_limit);
                                }
                        }
                }
        } else {
                if (pi->caps_power_containment && pi->power_containment_features) {
                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
                        pi->power_containment_features = 0;
                }
        }

        return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (pi->caps_cac) {
                if (enable) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
                        if (smc_result != PPSMC_Result_OK) {
                                ret = -EINVAL;
                                pi->cac_enabled = false;
                        } else {
                                pi->cac_enabled = true;
                        }
                } else if (pi->cac_enabled) {
                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
                        pi->cac_enabled = false;
                }
        }

        return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
                                            bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result = PPSMC_Result_OK;

        if (pi->thermal_sclk_dpm_enabled) {
                if (enable)
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
                else
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
        }

        if (smc_result == PPSMC_Result_OK)
                return 0;
        else
                return -EINVAL;
}

static int ci_power_control_set_level(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        s32 adjust_percent;
        s32 target_tdp;
        int ret = 0;
        bool adjust_polarity = false; /* ??? */

        if (pi->caps_power_containment) {
                adjust_percent = adjust_polarity ?
                        adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
                target_tdp = ((100 + adjust_percent) *
                              (s32)cac_tdp_table->configurable_tdp) / 100;

                ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
        }

        return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (pi->uvd_power_gated == gate)
                return;

        pi->uvd_power_gated = gate;

        ci_update_uvd_dpm(adev, gate);
}

static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

        return vblank_time < switch_limit;
}

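/*
 * Adjust a requested power state before it is programmed: pick VCE clocks,
 * clamp levels to the AC/DC limits, force the highest mclk when memory
 * clock switching must stay disabled (multiple active CRTCs or a vblank
 * period too short to hide the switch), and keep the levels monotonic.
 */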
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
                                        struct amdgpu_ps *rps)
{
        struct ci_ps *ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
        int i;

        if (rps->vce_active) {
                rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
                rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
        } else {
                rps->evclk = 0;
                rps->ecclk = 0;
        }

        if ((adev->pm.dpm.new_active_crtc_count > 1) ||
            ci_dpm_vblank_too_short(adev))
                disable_mclk_switching = true;
        else
                disable_mclk_switching = false;

        if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        if (adev->pm.dpm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (adev->pm.dpm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
                        if (ps->performance_levels[i].sclk > max_limits->sclk)
                                ps->performance_levels[i].sclk = max_limits->sclk;
                }
        }

        /* XXX validate the min clocks required for display */

        if (disable_mclk_switching) {
                mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
                sclk = ps->performance_levels[0].sclk;
        } else {
                mclk = ps->performance_levels[0].mclk;
                sclk = ps->performance_levels[0].sclk;
        }

        if (rps->vce_active) {
                if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
                        sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
                if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
                        mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
        }

        ps->performance_levels[0].sclk = sclk;
        ps->performance_levels[0].mclk = mclk;

        if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
                ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

        if (disable_mclk_switching) {
                if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
                        ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
        } else {
                if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
                        ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
        }
}

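/*
 * Clamp the requested thermal interrupt range to 0..255 C and program the
 * high/low trip points (in degrees C) into CG_THERMAL_INT.
 */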
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
        u32 tmp;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        tmp = RREG32_SMC(ixCG_THERMAL_INT);
        tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
        tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
                ((low_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT);
        WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
        /* XXX: need to figure out how to handle this properly */
        tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
        tmp &= DIG_THERM_DPM_MASK;
        tmp |= DIG_THERM_DPM(high_temp / 1000);
        WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

        adev->pm.dpm.thermal.min_temp = low_temp;
        adev->pm.dpm.thermal.max_temp = high_temp;
        return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
        PPSMC_Result result;

        if (enable) {
                thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                                 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
                        return -EINVAL;
                }
        } else {
                thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                        CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
                        return -EINVAL;
                }
        }

        return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_ctrl_is_in_default_mode) {
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
                        >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
                pi->fan_ctrl_default_mode = tmp;
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
                        >> CG_FDO_CTRL2__TMIN__SHIFT;
                pi->t_min = tmp;
                pi->fan_ctrl_is_in_default_mode = false;
        }

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
        tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

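/*
 * Build the SMC fan table from the pplib fan profile: duty cycles are scaled
 * by the FMAX_DUTY100 reference, the two PWM-vs-temperature slopes are
 * derived from the (t_min, pwm_min)/(t_med, pwm_med)/(t_high, pwm_high)
 * points, and the result is uploaded to SMC SRAM. Falls back to manual fan
 * control if no fan table location is known or duty100 reads as zero.
 */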
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
        u32 duty100;
        u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
        u16 fdo_min, slope1, slope2;
        u32 reference_clock, tmp;
        int ret;
        u64 tmp64;

        if (!pi->fan_table_start) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
        do_div(tmp64, 10000);
        fdo_min = (u16)tmp64;

        t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
        t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

        pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
        pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

        slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
        slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

        fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
        fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
        fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

        fan_table.Slope1 = cpu_to_be16(slope1);
        fan_table.Slope2 = cpu_to_be16(slope2);

        fan_table.FdoMin = cpu_to_be16(fdo_min);

        fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

        fan_table.HystUp = cpu_to_be16(1);

        fan_table.HystSlope = cpu_to_be16(1);

        fan_table.TempRespLim = cpu_to_be16(5);

        reference_clock = amdgpu_asic_get_xclk(adev);

        fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
                                               reference_clock) / 1600);

        fan_table.FdoMax = cpu_to_be16((u16)duty100);

        tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
                >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
        fan_table.TempSrc = (uint8_t)tmp;

        ret = amdgpu_ci_copy_bytes_to_smc(adev,
                                          pi->fan_table_start,
                                          (u8 *)(&fan_table),
                                          sizeof(fan_table),
                                          pi->sram_end);

        if (ret) {
                DRM_ERROR("Failed to load fan table to the SMC.");
                adev->pm.dpm.fan.ucode_fan_control = false;
        }

        return 0;
}

static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result ret;

        if (pi->caps_od_fuzzy_fan_control_support) {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_FUZZY);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_MSG_SetFanPwmMax,
                                                               adev->pm.dpm.fan.default_max_fan_pwm);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        } else {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_TABLE);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        }

        pi->fan_is_controlled_by_smc = true;
        return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
        PPSMC_Result ret;
        struct ci_power_info *pi = ci_get_pi(adev);

        ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
        if (ret == PPSMC_Result_OK) {
                pi->fan_is_controlled_by_smc = false;
                return 0;
        } else {
                return -EINVAL;
        }
}

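/* Fan speed in percent is the current PWM duty scaled by the duty100 reference. */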
static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
                                        u32 *speed)
{
        u32 duty, duty100;
        u64 tmp64;

        if (adev->pm.no_fan)
                return -ENOENT;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
        duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
                >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)duty * 100;
        do_div(tmp64, duty100);
        *speed = (u32)tmp64;

        if (*speed > 100)
                *speed = 100;

        return 0;
}

static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
                                        u32 speed)
{
        u32 tmp;
        u32 duty, duty100;
        u64 tmp64;
        struct ci_power_info *pi = ci_get_pi(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (pi->fan_is_controlled_by_smc)
                return -EINVAL;

        if (speed > 100)
                return -EINVAL;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)speed * duty100;
        do_div(tmp64, 100);
        duty = (u32)tmp64;

        tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
        tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL0, tmp);

        return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
        if (mode) {
                /* stop auto-manage */
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_fan_ctrl_stop_smc_fan_control(adev);
                ci_fan_ctrl_set_static_mode(adev, mode);
        } else {
                /* restart auto-manage */
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_thermal_start_smc_fan_control(adev);
                else
                        ci_fan_ctrl_set_default_mode(adev);
        }
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_is_controlled_by_smc)
                return 0;

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
}

#if 0
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 *speed)
{
        u32 tach_period;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
                >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
        if (tach_period == 0)
                return -ENOENT;

        *speed = 60 * xclk * 10000 / tach_period;

        return 0;
}

static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 speed)
{
        u32 tach_period, tmp;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        if ((speed < adev->pm.fan_min_rpm) ||
            (speed > adev->pm.fan_max_rpm))
                return -EINVAL;

        if (adev->pm.dpm.fan.ucode_fan_control)
                ci_fan_ctrl_stop_smc_fan_control(adev);

        tach_period = 60 * xclk * 10000 / (8 * speed);
        tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
        tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
        WREG32_SMC(ixCG_TACH_CTRL, tmp);
1327
1328         ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
1329
1330         return 0;
1331 }
1332 #endif
1333
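/*
 * Restore the FDO PWM mode and TMIN values cached in ci_power_info,
 * returning the fan controller to its default behaviour.
 */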
1334 static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
1335 {
1336         struct ci_power_info *pi = ci_get_pi(adev);
1337         u32 tmp;
1338
1339         if (!pi->fan_ctrl_is_in_default_mode) {
1340                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
1341                 tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
1342                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1343
1344                 tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
1345                 tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
1346                 WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1347                 pi->fan_ctrl_is_in_default_mode = true;
1348         }
1349 }
1350
1351 static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
1352 {
1353         if (adev->pm.dpm.fan.ucode_fan_control) {
1354                 ci_fan_ctrl_start_smc_fan_control(adev);
1355                 ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
1356         }
1357 }
1358
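/*
 * One-time fan/thermal setup: program the tach edges-per-revolution
 * (when a tach-equipped fan is present) and the FDO PWM response rate.
 */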
1359 static void ci_thermal_initialize(struct amdgpu_device *adev)
1360 {
1361         u32 tmp;
1362
1363         if (adev->pm.fan_pulses_per_revolution) {
1364                 tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
1365                 tmp |= (adev->pm.fan_pulses_per_revolution - 1)
1366                         << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
1367                 WREG32_SMC(ixCG_TACH_CTRL, tmp);
1368         }
1369
1370         tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
1371         tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
1372         WREG32_SMC(ixCG_FDO_CTRL2, tmp);
1373 }
1374
1375 static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
1376 {
1377         int ret;
1378
1379         ci_thermal_initialize(adev);
1380         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
1381         if (ret)
1382                 return ret;
1383         ret = ci_thermal_enable_alert(adev, true);
1384         if (ret)
1385                 return ret;
1386         if (adev->pm.dpm.fan.ucode_fan_control) {
1387                 ret = ci_thermal_setup_fan_table(adev);
1388                 if (ret)
1389                         return ret;
1390                 ci_thermal_start_smc_fan_control(adev);
1391         }
1392
1393         return 0;
1394 }
1395
1396 static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
1397 {
1398         if (!adev->pm.no_fan)
1399                 ci_fan_ctrl_set_default_mode(adev);
1400 }
1401
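/*
 * SMC "soft registers" are a block of driver<->firmware mailbox words in
 * SMC SRAM.  soft_regs_start comes from the firmware header; individual
 * registers are addressed by byte offset within the block.
 */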
1402 static int ci_read_smc_soft_register(struct amdgpu_device *adev,
1403                                      u16 reg_offset, u32 *value)
1404 {
1405         struct ci_power_info *pi = ci_get_pi(adev);
1406
1407         return amdgpu_ci_read_smc_sram_dword(adev,
1408                                       pi->soft_regs_start + reg_offset,
1409                                       value, pi->sram_end);
1410 }
1411
1412 static int ci_write_smc_soft_register(struct amdgpu_device *adev,
1413                                       u16 reg_offset, u32 value)
1414 {
1415         struct ci_power_info *pi = ci_get_pi(adev);
1416
1417         return amdgpu_ci_write_smc_sram_dword(adev,
1418                                        pi->soft_regs_start + reg_offset,
1419                                        value, pi->sram_end);
1420 }
1421
1422 static void ci_init_fps_limits(struct amdgpu_device *adev)
1423 {
1424         struct ci_power_info *pi = ci_get_pi(adev);
1425         SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
1426
1427         if (pi->caps_fps) {
1428                 u16 tmp;
1429
1430                 tmp = 45;
1431                 table->FpsHighT = cpu_to_be16(tmp);
1432
1433                 tmp = 30;
1434                 table->FpsLowT = cpu_to_be16(tmp);
1435         }
1436 }
1437
1438 static int ci_update_sclk_t(struct amdgpu_device *adev)
1439 {
1440         struct ci_power_info *pi = ci_get_pi(adev);
1441         int ret = 0;
1442         u32 low_sclk_interrupt_t = 0;
1443
1444         if (pi->caps_sclk_throttle_low_notification) {
1445                 low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);
1446
1447                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
1448                                            pi->dpm_table_start +
1449                                            offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
1450                                            (u8 *)&low_sclk_interrupt_t,
1451                                            sizeof(u32), pi->sram_end);
1452
1453         }
1454
1455         return ret;
1456 }
1457
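/*
 * Build lookup tables that translate "virtual" leakage voltage IDs
 * (ATOM_VIRTUAL_VOLTAGE_ID0 + n) into real VDDC/VDDCI values, using
 * either EVV lookups or the VBIOS leakage tables depending on platform
 * capabilities.
 */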
1458 static void ci_get_leakage_voltages(struct amdgpu_device *adev)
1459 {
1460         struct ci_power_info *pi = ci_get_pi(adev);
1461         u16 leakage_id, virtual_voltage_id;
1462         u16 vddc, vddci;
1463         int i;
1464
1465         pi->vddc_leakage.count = 0;
1466         pi->vddci_leakage.count = 0;
1467
1468         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
1469                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1470                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1471                         if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
1472                                 continue;
1473                         if (vddc != 0 && vddc != virtual_voltage_id) {
1474                                 pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1475                                 pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1476                                 pi->vddc_leakage.count++;
1477                         }
1478                 }
1479         } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
1480                 for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
1481                         virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
1482                         if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
1483                                                                                      virtual_voltage_id,
1484                                                                                      leakage_id) == 0) {
1485                                 if (vddc != 0 && vddc != virtual_voltage_id) {
1486                                         pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
1487                                         pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
1488                                         pi->vddc_leakage.count++;
1489                                 }
1490                                 if (vddci != 0 && vddci != virtual_voltage_id) {
1491                                         pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
1492                                         pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
1493                                         pi->vddci_leakage.count++;
1494                                 }
1495                         }
1496                 }
1497         }
1498 }
1499
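/*
 * 'sources' is a bitmask of AMDGPU_DPM_AUTO_THROTTLE_SRC_* bits.
 * Thermal protection is enabled whenever at least one throttle source
 * is active; programming the specific event source into CG_THERMAL_CTRL
 * is currently stubbed out (see the #if 0 below).
 */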
1500 static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
1501 {
1502         struct ci_power_info *pi = ci_get_pi(adev);
1503         bool want_thermal_protection;
1504         enum amdgpu_dpm_event_src dpm_event_src;
1505         u32 tmp;
1506
1507         switch (sources) {
1508         case 0:
1509         default:
1510                 want_thermal_protection = false;
1511                 break;
1512         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
1513                 want_thermal_protection = true;
1514                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
1515                 break;
1516         case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1517                 want_thermal_protection = true;
1518                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
1519                 break;
1520         case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1521               (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1522                 want_thermal_protection = true;
1523                 dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1524                 break;
1525         }
1526
1527         if (want_thermal_protection) {
1528 #if 0
1529                 /* XXX: need to figure out how to handle this properly */
1530                 tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
1531                 tmp &= DPM_EVENT_SRC_MASK;
1532                 tmp |= DPM_EVENT_SRC(dpm_event_src);
1533                 WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
1534 #endif
1535
1536                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1537                 if (pi->thermal_protection)
1538                         tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1539                 else
1540                         tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1541                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1542         } else {
1543                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1544                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
1545                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1546         }
1547 }
1548
1549 static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
1550                                            enum amdgpu_dpm_auto_throttle_src source,
1551                                            bool enable)
1552 {
1553         struct ci_power_info *pi = ci_get_pi(adev);
1554
1555         if (enable) {
1556                 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1557                         pi->active_auto_throttle_sources |= 1 << source;
1558                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1559                 }
1560         } else {
1561                 if (pi->active_auto_throttle_sources & (1 << source)) {
1562                         pi->active_auto_throttle_sources &= ~(1 << source);
1563                         ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
1564                 }
1565         }
1566 }
1567
1568 static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
1569 {
1570         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
1571                 amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
1572 }
1573
1574 static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1575 {
1576         struct ci_power_info *pi = ci_get_pi(adev);
1577         PPSMC_Result smc_result;
1578
1579         if (!pi->need_update_smu7_dpm_table)
1580                 return 0;
1581
1582         if ((!pi->sclk_dpm_key_disabled) &&
1583             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1584                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
1585                 if (smc_result != PPSMC_Result_OK)
1586                         return -EINVAL;
1587         }
1588
1589         if ((!pi->mclk_dpm_key_disabled) &&
1590             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1591                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
1592                 if (smc_result != PPSMC_Result_OK)
1593                         return -EINVAL;
1594         }
1595
1596         pi->need_update_smu7_dpm_table = 0;
1597         return 0;
1598 }
1599
1600 static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
1601 {
1602         struct ci_power_info *pi = ci_get_pi(adev);
1603         PPSMC_Result smc_result;
1604
1605         if (enable) {
1606                 if (!pi->sclk_dpm_key_disabled) {
1607                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
1608                         if (smc_result != PPSMC_Result_OK)
1609                                 return -EINVAL;
1610                 }
1611
1612                 if (!pi->mclk_dpm_key_disabled) {
1613                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
1614                         if (smc_result != PPSMC_Result_OK)
1615                                 return -EINVAL;
1616
1617                         WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
1618                                         ~MC_SEQ_CNTL_3__CAC_EN_MASK);
1619
1620                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
1621                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
1622                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);
1623
1624                         udelay(10);
1625
1626                         WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
1627                         WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
1628                         WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
1629                 }
1630         } else {
1631                 if (!pi->sclk_dpm_key_disabled) {
1632                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
1633                         if (smc_result != PPSMC_Result_OK)
1634                                 return -EINVAL;
1635                 }
1636
1637                 if (!pi->mclk_dpm_key_disabled) {
1638                         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
1639                         if (smc_result != PPSMC_Result_OK)
1640                                 return -EINVAL;
1641                 }
1642         }
1643
1644         return 0;
1645 }
1646
1647 static int ci_start_dpm(struct amdgpu_device *adev)
1648 {
1649         struct ci_power_info *pi = ci_get_pi(adev);
1650         PPSMC_Result smc_result;
1651         int ret;
1652         u32 tmp;
1653
1654         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1655         tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1656         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1657
1658         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1659         tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1660         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1661
1662         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);
1663
1664         WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);
1665
1666         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
1667         if (smc_result != PPSMC_Result_OK)
1668                 return -EINVAL;
1669
1670         ret = ci_enable_sclk_mclk_dpm(adev, true);
1671         if (ret)
1672                 return ret;
1673
1674         if (!pi->pcie_dpm_key_disabled) {
1675                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
1676                 if (smc_result != PPSMC_Result_OK)
1677                         return -EINVAL;
1678         }
1679
1680         return 0;
1681 }
1682
1683 static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
1684 {
1685         struct ci_power_info *pi = ci_get_pi(adev);
1686         PPSMC_Result smc_result;
1687
1688         if (!pi->need_update_smu7_dpm_table)
1689                 return 0;
1690
1691         if ((!pi->sclk_dpm_key_disabled) &&
1692             (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
1693                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
1694                 if (smc_result != PPSMC_Result_OK)
1695                         return -EINVAL;
1696         }
1697
1698         if ((!pi->mclk_dpm_key_disabled) &&
1699             (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
1700                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
1701                 if (smc_result != PPSMC_Result_OK)
1702                         return -EINVAL;
1703         }
1704
1705         return 0;
1706 }
1707
1708 static int ci_stop_dpm(struct amdgpu_device *adev)
1709 {
1710         struct ci_power_info *pi = ci_get_pi(adev);
1711         PPSMC_Result smc_result;
1712         int ret;
1713         u32 tmp;
1714
1715         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
1716         tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
1717         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
1718
1719         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1720         tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
1721         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1722
1723         if (!pi->pcie_dpm_key_disabled) {
1724                 smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
1725                 if (smc_result != PPSMC_Result_OK)
1726                         return -EINVAL;
1727         }
1728
1729         ret = ci_enable_sclk_mclk_dpm(adev, false);
1730         if (ret)
1731                 return ret;
1732
1733         smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
1734         if (smc_result != PPSMC_Result_OK)
1735                 return -EINVAL;
1736
1737         return 0;
1738 }
1739
1740 static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
1741 {
1742         u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
1743
1744         if (enable)
1745                 tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1746         else
1747                 tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
1748         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
1749 }
1750
1751 #if 0
1752 static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
1753                                         bool ac_power)
1754 {
1755         struct ci_power_info *pi = ci_get_pi(adev);
1756         struct amdgpu_cac_tdp_table *cac_tdp_table =
1757                 adev->pm.dpm.dyn_state.cac_tdp_table;
1758         u32 power_limit;
1759
1760         if (ac_power)
1761                 power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
1762         else
1763                 power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);
1764
1765         ci_set_power_limit(adev, power_limit);
1766
1767         if (pi->caps_automatic_dc_transition) {
1768                 if (ac_power)
1769                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
1770                 else
1771                         amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
1772         }
1773
1774         return 0;
1775 }
1776 #endif
1777
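/*
 * SMC message arguments travel through the SMC_MSG_ARG_0 mailbox
 * register: it is written before sending a message that takes a
 * parameter, and read back after a message that returns one.
 */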
1778 static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
1779                                                       PPSMC_Msg msg, u32 parameter)
1780 {
1781         WREG32(mmSMC_MSG_ARG_0, parameter);
1782         return amdgpu_ci_send_msg_to_smc(adev, msg);
1783 }
1784
1785 static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
1786                                                         PPSMC_Msg msg, u32 *parameter)
1787 {
1788         PPSMC_Result smc_result;
1789
1790         smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);
1791
1792         if ((smc_result == PPSMC_Result_OK) && parameter)
1793                 *parameter = RREG32(mmSMC_MSG_ARG_0);
1794
1795         return smc_result;
1796 }
1797
1798 static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
1799 {
1800         struct ci_power_info *pi = ci_get_pi(adev);
1801
1802         if (!pi->sclk_dpm_key_disabled) {
1803                 PPSMC_Result smc_result =
1804                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
1805                 if (smc_result != PPSMC_Result_OK)
1806                         return -EINVAL;
1807         }
1808
1809         return 0;
1810 }
1811
1812 static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
1813 {
1814         struct ci_power_info *pi = ci_get_pi(adev);
1815
1816         if (!pi->mclk_dpm_key_disabled) {
1817                 PPSMC_Result smc_result =
1818                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
1819                 if (smc_result != PPSMC_Result_OK)
1820                         return -EINVAL;
1821         }
1822
1823         return 0;
1824 }
1825
1826 static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
1827 {
1828         struct ci_power_info *pi = ci_get_pi(adev);
1829
1830         if (!pi->pcie_dpm_key_disabled) {
1831                 PPSMC_Result smc_result =
1832                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
1833                 if (smc_result != PPSMC_Result_OK)
1834                         return -EINVAL;
1835         }
1836
1837         return 0;
1838 }
1839
1840 static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
1841 {
1842         struct ci_power_info *pi = ci_get_pi(adev);
1843
1844         if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
1845                 PPSMC_Result smc_result =
1846                         amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
1847                 if (smc_result != PPSMC_Result_OK)
1848                         return -EINVAL;
1849         }
1850
1851         return 0;
1852 }
1853
1854 static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
1855                                        u32 target_tdp)
1856 {
1857         PPSMC_Result smc_result =
1858                 amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
1859         if (smc_result != PPSMC_Result_OK)
1860                 return -EINVAL;
1861         return 0;
1862 }
1863
1864 #if 0
1865 static int ci_set_boot_state(struct amdgpu_device *adev)
1866 {
1867         return ci_enable_sclk_mclk_dpm(adev, false);
1868 }
1869 #endif
1870
1871 static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
1872 {
1873         u32 sclk_freq;
1874         PPSMC_Result smc_result =
1875                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1876                                                     PPSMC_MSG_API_GetSclkFrequency,
1877                                                     &sclk_freq);
1878         if (smc_result != PPSMC_Result_OK)
1879                 sclk_freq = 0;
1880
1881         return sclk_freq;
1882 }
1883
1884 static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
1885 {
1886         u32 mclk_freq;
1887         PPSMC_Result smc_result =
1888                 amdgpu_ci_send_msg_to_smc_return_parameter(adev,
1889                                                     PPSMC_MSG_API_GetMclkFrequency,
1890                                                     &mclk_freq);
1891         if (smc_result != PPSMC_Result_OK)
1892                 mclk_freq = 0;
1893
1894         return mclk_freq;
1895 }
1896
1897 static void ci_dpm_start_smc(struct amdgpu_device *adev)
1898 {
1899         int i;
1900
1901         amdgpu_ci_program_jump_on_start(adev);
1902         amdgpu_ci_start_smc_clock(adev);
1903         amdgpu_ci_start_smc(adev);
1904         for (i = 0; i < adev->usec_timeout; i++) {
1905                 if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
1906                         break;
1907         }
1908 }
1909
1910 static void ci_dpm_stop_smc(struct amdgpu_device *adev)
1911 {
1912         amdgpu_ci_reset_smc(adev);
1913         amdgpu_ci_stop_smc_clock(adev);
1914 }
1915
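/*
 * The SMU7 firmware header sits at a fixed SRAM location and holds the
 * SRAM offsets of the DPM, soft-register, MC-register, fan and MC arb
 * tables.  Cache them so later table uploads can be addressed directly.
 */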
1916 static int ci_process_firmware_header(struct amdgpu_device *adev)
1917 {
1918         struct ci_power_info *pi = ci_get_pi(adev);
1919         u32 tmp;
1920         int ret;
1921
1922         ret = amdgpu_ci_read_smc_sram_dword(adev,
1923                                      SMU7_FIRMWARE_HEADER_LOCATION +
1924                                      offsetof(SMU7_Firmware_Header, DpmTable),
1925                                      &tmp, pi->sram_end);
1926         if (ret)
1927                 return ret;
1928
1929         pi->dpm_table_start = tmp;
1930
1931         ret = amdgpu_ci_read_smc_sram_dword(adev,
1932                                      SMU7_FIRMWARE_HEADER_LOCATION +
1933                                      offsetof(SMU7_Firmware_Header, SoftRegisters),
1934                                      &tmp, pi->sram_end);
1935         if (ret)
1936                 return ret;
1937
1938         pi->soft_regs_start = tmp;
1939
1940         ret = amdgpu_ci_read_smc_sram_dword(adev,
1941                                      SMU7_FIRMWARE_HEADER_LOCATION +
1942                                      offsetof(SMU7_Firmware_Header, mcRegisterTable),
1943                                      &tmp, pi->sram_end);
1944         if (ret)
1945                 return ret;
1946
1947         pi->mc_reg_table_start = tmp;
1948
1949         ret = amdgpu_ci_read_smc_sram_dword(adev,
1950                                      SMU7_FIRMWARE_HEADER_LOCATION +
1951                                      offsetof(SMU7_Firmware_Header, FanTable),
1952                                      &tmp, pi->sram_end);
1953         if (ret)
1954                 return ret;
1955
1956         pi->fan_table_start = tmp;
1957
1958         ret = amdgpu_ci_read_smc_sram_dword(adev,
1959                                      SMU7_FIRMWARE_HEADER_LOCATION +
1960                                      offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
1961                                      &tmp, pi->sram_end);
1962         if (ret)
1963                 return ret;
1964
1965         pi->arb_table_start = tmp;
1966
1967         return 0;
1968 }
1969
1970 static void ci_read_clock_registers(struct amdgpu_device *adev)
1971 {
1972         struct ci_power_info *pi = ci_get_pi(adev);
1973
1974         pi->clock_registers.cg_spll_func_cntl =
1975                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
1976         pi->clock_registers.cg_spll_func_cntl_2 =
1977                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
1978         pi->clock_registers.cg_spll_func_cntl_3 =
1979                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
1980         pi->clock_registers.cg_spll_func_cntl_4 =
1981                 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
1982         pi->clock_registers.cg_spll_spread_spectrum =
1983                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
1984         pi->clock_registers.cg_spll_spread_spectrum_2 =
1985                 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
1986         pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
1987         pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
1988         pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
1989         pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
1990         pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
1991         pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
1992         pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
1993         pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
1994         pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
1995 }
1996
1997 static void ci_init_sclk_t(struct amdgpu_device *adev)
1998 {
1999         struct ci_power_info *pi = ci_get_pi(adev);
2000
2001         pi->low_sclk_interrupt_t = 0;
2002 }
2003
2004 static void ci_enable_thermal_protection(struct amdgpu_device *adev,
2005                                          bool enable)
2006 {
2007         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2008
2009         if (enable)
2010                 tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2011         else
2012                 tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
2013         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2014 }
2015
2016 static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
2017 {
2018         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2019
2020         tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;
2021
2022         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2023 }
2024
2025 #if 0
2026 static int ci_enter_ulp_state(struct amdgpu_device *adev)
2027 {
2029         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
2030
2031         udelay(25000);
2032
2033         return 0;
2034 }
2035
2036 static int ci_exit_ulp_state(struct amdgpu_device *adev)
2037 {
2038         int i;
2039
2040         WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
2041
2042         udelay(7000);
2043
2044         for (i = 0; i < adev->usec_timeout; i++) {
2045                 if (RREG32(mmSMC_RESP_0) == 1)
2046                         break;
2047                 udelay(1000);
2048         }
2049
2050         return 0;
2051 }
2052 #endif
2053
2054 static int ci_notify_smc_display_change(struct amdgpu_device *adev,
2055                                         bool has_display)
2056 {
2057         PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
2058
2059         return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?  0 : -EINVAL;
2060 }
2061
2062 static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
2063                                       bool enable)
2064 {
2065         struct ci_power_info *pi = ci_get_pi(adev);
2066
2067         if (enable) {
2068                 if (pi->caps_sclk_ds) {
2069                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
2070                                 return -EINVAL;
2071                 } else {
2072                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2073                                 return -EINVAL;
2074                 }
2075         } else {
2076                 if (pi->caps_sclk_ds) {
2077                         if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
2078                                 return -EINVAL;
2079                 }
2080         }
2081
2082         return 0;
2083 }
2084
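/*
 * Program the display-gap heuristics and tell the SMC how much of each
 * frame is usable for reclocking.  The frame time is derived from the
 * refresh rate; the pre-VBI window is what remains after subtracting
 * the vblank time (in us) and a 200 us guard band.
 */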
2085 static void ci_program_display_gap(struct amdgpu_device *adev)
2086 {
2087         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2088         u32 pre_vbi_time_in_us;
2089         u32 frame_time_in_us;
2090         u32 ref_clock = adev->clock.spll.reference_freq;
2091         u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
2092         u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
2093
2094         tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
2095         if (adev->pm.dpm.new_active_crtc_count > 0)
2096                 tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2097         else
2098                 tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
2099         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2100
2101         if (refresh_rate == 0)
2102                 refresh_rate = 60;
2103         if (vblank_time == 0xffffffff)
2104                 vblank_time = 500;
2105         frame_time_in_us = 1000000 / refresh_rate;
2106         pre_vbi_time_in_us =
2107                 frame_time_in_us - 200 - vblank_time;
2108         tmp = pre_vbi_time_in_us * (ref_clock / 100);
2109
2110         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
2111         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
2112         ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));
2113 
2114         ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
2117 }
2118
2119 static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
2120 {
2121         struct ci_power_info *pi = ci_get_pi(adev);
2122         u32 tmp;
2123
2124         if (enable) {
2125                 if (pi->caps_sclk_ss_support) {
2126                         tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2127                         tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2128                         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2129                 }
2130         } else {
2131                 tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
2132                 tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
2133                 WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);
2134
2135                 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
2136                 tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
2137                 WREG32_SMC(ixGENERAL_PWRMGT, tmp);
2138         }
2139 }
2140
2141 static void ci_program_sstp(struct amdgpu_device *adev)
2142 {
2143         WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
2144                    ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
2145                     (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
2146 }
2147
2148 static void ci_enable_display_gap(struct amdgpu_device *adev)
2149 {
2150         u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
2151
2152         tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
2153                         CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
2154         tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
2155                 (AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));
2156
2157         WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
2158 }
2159
2160 static void ci_program_vc(struct amdgpu_device *adev)
2161 {
2162         u32 tmp;
2163
2164         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2165         tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2166         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2167
2168         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
2169         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
2170         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
2171         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
2172         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
2173         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
2174         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
2175         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
2176 }
2177
2178 static void ci_clear_vc(struct amdgpu_device *adev)
2179 {
2180         u32 tmp;
2181
2182         tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
2183         tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
2184         WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
2185
2186         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
2187         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
2188         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
2189         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
2190         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
2191         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
2192         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
2193         WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
2194 }
2195
2196 static int ci_upload_firmware(struct amdgpu_device *adev)
2197 {
2198         struct ci_power_info *pi = ci_get_pi(adev);
2199         int i, ret;
2200
2201         for (i = 0; i < adev->usec_timeout; i++) {
2202                 if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
2203                         break;
2204         }
2205         WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);
2206
2207         amdgpu_ci_stop_smc_clock(adev);
2208         amdgpu_ci_reset_smc(adev);
2209
2210         ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);
2211
2212         return ret;
2214 }
2215
2216 static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
2217                                      struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
2218                                      struct atom_voltage_table *voltage_table)
2219 {
2220         u32 i;
2221
2222         if (voltage_dependency_table == NULL)
2223                 return -EINVAL;
2224
2225         voltage_table->mask_low = 0;
2226         voltage_table->phase_delay = 0;
2227
2228         voltage_table->count = voltage_dependency_table->count;
2229         for (i = 0; i < voltage_table->count; i++) {
2230                 voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
2231                 voltage_table->entries[i].smio_low = 0;
2232         }
2233
2234         return 0;
2235 }
2236
2237 static int ci_construct_voltage_tables(struct amdgpu_device *adev)
2238 {
2239         struct ci_power_info *pi = ci_get_pi(adev);
2240         int ret;
2241
2242         if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2243                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
2244                                                         VOLTAGE_OBJ_GPIO_LUT,
2245                                                         &pi->vddc_voltage_table);
2246                 if (ret)
2247                         return ret;
2248         } else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2249                 ret = ci_get_svi2_voltage_table(adev,
2250                                                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2251                                                 &pi->vddc_voltage_table);
2252                 if (ret)
2253                         return ret;
2254         }
2255
2256         if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
2257                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
2258                                                          &pi->vddc_voltage_table);
2259
2260         if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2261                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
2262                                                         VOLTAGE_OBJ_GPIO_LUT,
2263                                                         &pi->vddci_voltage_table);
2264                 if (ret)
2265                         return ret;
2266         } else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2267                 ret = ci_get_svi2_voltage_table(adev,
2268                                                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2269                                                 &pi->vddci_voltage_table);
2270                 if (ret)
2271                         return ret;
2272         }
2273
2274         if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
2275                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
2276                                                          &pi->vddci_voltage_table);
2277
2278         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
2279                 ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
2280                                                         VOLTAGE_OBJ_GPIO_LUT,
2281                                                         &pi->mvdd_voltage_table);
2282                 if (ret)
2283                         return ret;
2284         } else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
2285                 ret = ci_get_svi2_voltage_table(adev,
2286                                                 &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
2287                                                 &pi->mvdd_voltage_table);
2288                 if (ret)
2289                         return ret;
2290         }
2291
2292         if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
2293                 ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
2294                                                          &pi->mvdd_voltage_table);
2295
2296         return 0;
2297 }
2298
2299 static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
2300                                           struct atom_voltage_table_entry *voltage_table,
2301                                           SMU7_Discrete_VoltageLevel *smc_voltage_table)
2302 {
2303         int ret;
2304
2305         ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
2306                                             &smc_voltage_table->StdVoltageHiSidd,
2307                                             &smc_voltage_table->StdVoltageLoSidd);
2308
2309         if (ret) {
2310                 smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
2311                 smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
2312         }
2313
2314         smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
2315         smc_voltage_table->StdVoltageHiSidd =
2316                 cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
2317         smc_voltage_table->StdVoltageLoSidd =
2318                 cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
2319 }
2320
2321 static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
2322                                       SMU7_Discrete_DpmTable *table)
2323 {
2324         struct ci_power_info *pi = ci_get_pi(adev);
2325         unsigned int count;
2326
2327         table->VddcLevelCount = pi->vddc_voltage_table.count;
2328         for (count = 0; count < table->VddcLevelCount; count++) {
2329                 ci_populate_smc_voltage_table(adev,
2330                                               &pi->vddc_voltage_table.entries[count],
2331                                               &table->VddcLevel[count]);
2332
2333                 if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2334                         table->VddcLevel[count].Smio |=
2335                                 pi->vddc_voltage_table.entries[count].smio_low;
2336                 else
2337                         table->VddcLevel[count].Smio = 0;
2338         }
2339         table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);
2340
2341         return 0;
2342 }
2343
2344 static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
2345                                        SMU7_Discrete_DpmTable *table)
2346 {
2347         unsigned int count;
2348         struct ci_power_info *pi = ci_get_pi(adev);
2349
2350         table->VddciLevelCount = pi->vddci_voltage_table.count;
2351         for (count = 0; count < table->VddciLevelCount; count++) {
2352                 ci_populate_smc_voltage_table(adev,
2353                                               &pi->vddci_voltage_table.entries[count],
2354                                               &table->VddciLevel[count]);
2355
2356                 if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2357                         table->VddciLevel[count].Smio |=
2358                                 pi->vddci_voltage_table.entries[count].smio_low;
2359                 else
2360                         table->VddciLevel[count].Smio = 0;
2361         }
2362         table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);
2363
2364         return 0;
2365 }
2366
2367 static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
2368                                       SMU7_Discrete_DpmTable *table)
2369 {
2370         struct ci_power_info *pi = ci_get_pi(adev);
2371         unsigned int count;
2372
2373         table->MvddLevelCount = pi->mvdd_voltage_table.count;
2374         for (count = 0; count < table->MvddLevelCount; count++) {
2375                 ci_populate_smc_voltage_table(adev,
2376                                               &pi->mvdd_voltage_table.entries[count],
2377                                               &table->MvddLevel[count]);
2378
2379                 if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
2380                         table->MvddLevel[count].Smio |=
2381                                 pi->mvdd_voltage_table.entries[count].smio_low;
2382                 else
2383                         table->MvddLevel[count].Smio = 0;
2384         }
2385         table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);
2386
2387         return 0;
2388 }
2389
2390 static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
2391                                           SMU7_Discrete_DpmTable *table)
2392 {
2393         int ret;
2394
2395         ret = ci_populate_smc_vddc_table(adev, table);
2396         if (ret)
2397                 return ret;
2398
2399         ret = ci_populate_smc_vddci_table(adev, table);
2400         if (ret)
2401                 return ret;
2402
2403         ret = ci_populate_smc_mvdd_table(adev, table);
2404         if (ret)
2405                 return ret;
2406
2407         return 0;
2408 }
2409
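/*
 * Pick the MVDD level for a given memory clock.  The dependency table
 * is sorted by clock, so the first entry with clk >= mclk is the lowest
 * level able to support it; fail when MVDD is not controllable or no
 * entry covers the requested clock.
 */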
2410 static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
2411                                   SMU7_Discrete_VoltageLevel *voltage)
2412 {
2413         struct ci_power_info *pi = ci_get_pi(adev);
2414         u32 i = 0;
2415
2416         if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_NONE)
2417                 return -EINVAL;
2418 
2419         /* table is sorted by clock: first entry with clk >= mclk wins */
2420         for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
2421                 if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
2422                         voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
2423                         return 0;
2424                 }
2425         }
2426 
2427         /* no dependency entry covers the requested memory clock */
2428         return -EINVAL;
2429 }
2430
2431 static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
2432                                          struct atom_voltage_table_entry *voltage_table,
2433                                          u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
2434 {
2435         u16 v_index, idx;
2436         bool voltage_found = false;
2437         *std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
2438         *std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;
2439
2440         if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
2441                 return -EINVAL;
2442
2443         if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
2444                 for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2445                         if (voltage_table->value ==
2446                             adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2447                                 voltage_found = true;
2448                                 if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2449                                         idx = v_index;
2450                                 else
2451                                         idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2452                                 *std_voltage_lo_sidd =
2453                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2454                                 *std_voltage_hi_sidd =
2455                                         adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2456                                 break;
2457                         }
2458                 }
2459
2460                 if (!voltage_found) {
2461                         for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
2462                                 if (voltage_table->value <=
2463                                     adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
2464                                         voltage_found = true;
2465                                         if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
2466                                                 idx = v_index;
2467                                         else
2468                                                 idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
2469                                         *std_voltage_lo_sidd =
2470                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
2471                                         *std_voltage_hi_sidd =
2472                                                 adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
2473                                         break;
2474                                 }
2475                         }
2476                 }
2477         }
2478
2479         return 0;
2480 }
2481
2482 static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
2483                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2484                                                   u32 sclk,
2485                                                   u32 *phase_shedding)
2486 {
2487         unsigned int i;
2488
2489         *phase_shedding = 1;
2490
2491         for (i = 0; i < limits->count; i++) {
2492                 if (sclk < limits->entries[i].sclk) {
2493                         *phase_shedding = i;
2494                         break;
2495                 }
2496         }
2497 }
2498
2499 static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
2500                                                   const struct amdgpu_phase_shedding_limits_table *limits,
2501                                                   u32 mclk,
2502                                                   u32 *phase_shedding)
2503 {
2504         unsigned int i;
2505
2506         *phase_shedding = 1;
2507
2508         for (i = 0; i < limits->count; i++) {
2509                 if (mclk < limits->entries[i].mclk) {
2510                         *phase_shedding = i;
2511                         break;
2512                 }
2513         }
2514 }
2515
2516 static int ci_init_arb_table_index(struct amdgpu_device *adev)
2517 {
2518         struct ci_power_info *pi = ci_get_pi(adev);
2519         u32 tmp;
2520         int ret;
2521
2522         ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
2523                                      &tmp, pi->sram_end);
2524         if (ret)
2525                 return ret;
2526
2527         tmp &= 0x00FFFFFF;
2528         tmp |= MC_CG_ARB_FREQ_F1 << 24;
2529
2530         return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
2531                                        tmp, pi->sram_end);
2532 }
2533
2534 static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
2535                                          struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
2536                                          u32 clock, u32 *voltage)
2537 {
2538         u32 i = 0;
2539
2540         if (allowed_clock_voltage_table->count == 0)
2541                 return -EINVAL;
2542
2543         for (i = 0; i < allowed_clock_voltage_table->count; i++) {
2544                 if (allowed_clock_voltage_table->entries[i].clk >= clock) {
2545                         *voltage = allowed_clock_voltage_table->entries[i].v;
2546                         return 0;
2547                 }
2548         }
2549
2550         *voltage = allowed_clock_voltage_table->entries[i-1].v;
2551
2552         return 0;
2553 }
2554
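/*
 * The deep-sleep divider ID is a power-of-two shift: return the largest
 * ID (up to CISLAND_MAX_DEEPSLEEP_DIVIDER_ID) for which sclk >> id
 * still meets the minimum engine clock.
 */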
2555 static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
2556 {
2557         u32 i;
2558         u32 tmp;
2559         u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);
2560
2561         if (sclk < min)
2562                 return 0;
2563
2564         for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
2565                 tmp = sclk >> i;
2566                 if (tmp >= min || i == 0)
2567                         break;
2568         }
2569
2570         return (u8)i;
2571 }
2572
2573 static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
2574 {
2575         return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
2576 }
2577
2578 static int ci_reset_to_default(struct amdgpu_device *adev)
2579 {
2580         return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
2581                 0 : -EINVAL;
2582 }
2583
2584 static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
2585 {
2586         u32 tmp;
2587
2588         tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;
2589
2590         if (tmp == MC_CG_ARB_FREQ_F0)
2591                 return 0;
2592
2593         return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
2594 }
2595
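/*
 * On boards with device IDs 0x67B0/0x67B1 (Hawaii) whose MC_SEQ_MISC0
 * matches the patch signature, DRAM_TIMING2 needs one byte re-derived
 * from the engine clock for two mid-range memory clock windows.
 */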
2596 static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
2597                                         const u32 engine_clock,
2598                                         const u32 memory_clock,
2599                                         u32 *dram_timing2)
2600 {
2601         bool patch;
2602         u32 tmp, tmp2;
2603
2604         tmp = RREG32(mmMC_SEQ_MISC0);
2605         patch = (tmp & 0x0000f00) == 0x300;
2606
2607         if (patch &&
2608             ((adev->pdev->device == 0x67B0) ||
2609              (adev->pdev->device == 0x67B1))) {
2610                 if ((memory_clock > 100000) && (memory_clock <= 125000)) {
2611                         tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
2612                         *dram_timing2 &= ~0x00ff0000;
2613                         *dram_timing2 |= tmp2 << 16;
2614                 } else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
2615                         tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
2616                         *dram_timing2 &= ~0x00ff0000;
2617                         *dram_timing2 |= tmp2 << 16;
2618                 }
2619         }
2620 }
2621
2622 static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
2623                                                 u32 sclk,
2624                                                 u32 mclk,
2625                                                 SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
2626 {
2627         u32 dram_timing;
2628         u32 dram_timing2;
2629         u32 burst_time;
2630
2631         amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);
2632
2633         dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
2634         dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
2635         burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;
2636
2637         ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);
2638
2639         arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
2640         arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
2641         arb_regs->McArbBurstTime = (u8)burst_time;
2642
2643         return 0;
2644 }
2645
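/*
 * Build the full sclk x mclk matrix of MC arbiter DRAM timing entries
 * and upload it to SMC SRAM in a single copy.
 */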
2646 static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
2647 {
2648         struct ci_power_info *pi = ci_get_pi(adev);
2649         SMU7_Discrete_MCArbDramTimingTable arb_regs;
2650         u32 i, j;
2651         int ret =  0;
2652
2653         memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));
2654
2655         for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
2656                 for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
2657                         ret = ci_populate_memory_timing_parameters(adev,
2658                                                                    pi->dpm_table.sclk_table.dpm_levels[i].value,
2659                                                                    pi->dpm_table.mclk_table.dpm_levels[j].value,
2660                                                                    &arb_regs.entries[i][j]);
2661                         if (ret)
2662                                 break;
2663                 }
2664         }
2665
2666         if (ret == 0)
2667                 ret = amdgpu_ci_copy_bytes_to_smc(adev,
2668                                            pi->arb_table_start,
2669                                            (u8 *)&arb_regs,
2670                                            sizeof(SMU7_Discrete_MCArbDramTimingTable),
2671                                            pi->sram_end);
2672
2673         return ret;
2674 }
2675
static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (pi->need_update_smu7_dpm_table == 0)
                return 0;

        return ci_do_program_memory_timing_parameters(adev);
}

static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
                                          struct amdgpu_ps *amdgpu_boot_state)
{
        struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 level = 0;

        for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
                if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
                    boot_state->performance_levels[0].sclk) {
                        pi->smc_state_table.GraphicsBootLevel = level;
                        break;
                }
        }

        for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
                if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
                    boot_state->performance_levels[0].mclk) {
                        pi->smc_state_table.MemoryBootLevel = level;
                        break;
                }
        }
}

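/*
 * Convert a dpm table's per-level enabled flags into the SMC's level
 * enable bitmask: bit n set means level n is enabled.
 */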
static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
{
        u32 i;
        u32 mask_value = 0;

        for (i = dpm_table->count; i > 0; i--) {
                mask_value <<= 1;
                if (dpm_table->dpm_levels[i-1].enabled)
                        mask_value |= 0x1;
        }

        return mask_value;
}

static void ci_populate_smc_link_level(struct amdgpu_device *adev,
                                       SMU7_Discrete_DpmTable *table)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_dpm_table *dpm_table = &pi->dpm_table;
        u32 i;

        for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
                table->LinkLevel[i].PcieGenSpeed =
                        (u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
                table->LinkLevel[i].PcieLaneCount =
                        amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
                table->LinkLevel[i].EnabledForActivity = 1;
                table->LinkLevel[i].DownT = cpu_to_be32(5);
                table->LinkLevel[i].UpT = cpu_to_be32(30);
        }

        pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
        pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
                ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}

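/*
 * The next four helpers share one pattern: copy each entry of the
 * corresponding clock/voltage dependency table into the SMC DPM table,
 * ask atombios for the post divider that realizes the clock, then swap
 * the multi-byte fields to big endian for the SMC.
 */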
static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
                                     SMU7_Discrete_DpmTable *table)
{
        u32 count;
        struct atom_clock_dividers dividers;
        int ret = -EINVAL;

        table->UvdLevelCount =
                adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

        for (count = 0; count < table->UvdLevelCount; count++) {
                table->UvdLevel[count].VclkFrequency =
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
                table->UvdLevel[count].DclkFrequency =
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
                table->UvdLevel[count].MinVddc =
                        adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
                table->UvdLevel[count].MinVddcPhases = 1;

                ret = amdgpu_atombios_get_clock_dividers(adev,
                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                                         table->UvdLevel[count].VclkFrequency, false, &dividers);
                if (ret)
                        return ret;

                table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

                ret = amdgpu_atombios_get_clock_dividers(adev,
                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                                         table->UvdLevel[count].DclkFrequency, false, &dividers);
                if (ret)
                        return ret;

                table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

                table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
                table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
                table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
        }

        return ret;
}

static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
                                     SMU7_Discrete_DpmTable *table)
{
        u32 count;
        struct atom_clock_dividers dividers;
        int ret = -EINVAL;

        table->VceLevelCount =
                adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

        for (count = 0; count < table->VceLevelCount; count++) {
                table->VceLevel[count].Frequency =
                        adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
                table->VceLevel[count].MinVoltage =
                        (u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
                table->VceLevel[count].MinPhases = 1;

                ret = amdgpu_atombios_get_clock_dividers(adev,
                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                                         table->VceLevel[count].Frequency, false, &dividers);
                if (ret)
                        return ret;

                table->VceLevel[count].Divider = (u8)dividers.post_divider;

                table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
                table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
        }

        return ret;
}

static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
                                     SMU7_Discrete_DpmTable *table)
{
        u32 count;
        struct atom_clock_dividers dividers;
        int ret = -EINVAL;

        table->AcpLevelCount = (u8)
                (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

        for (count = 0; count < table->AcpLevelCount; count++) {
                table->AcpLevel[count].Frequency =
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
                table->AcpLevel[count].MinVoltage =
                        adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
                table->AcpLevel[count].MinPhases = 1;

                ret = amdgpu_atombios_get_clock_dividers(adev,
                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                                         table->AcpLevel[count].Frequency, false, &dividers);
                if (ret)
                        return ret;

                table->AcpLevel[count].Divider = (u8)dividers.post_divider;

                table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
                table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
        }

        return ret;
}

static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
                                      SMU7_Discrete_DpmTable *table)
{
        u32 count;
        struct atom_clock_dividers dividers;
        int ret = -EINVAL;

        table->SamuLevelCount =
                adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

        for (count = 0; count < table->SamuLevelCount; count++) {
                table->SamuLevel[count].Frequency =
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
                table->SamuLevel[count].MinVoltage =
                        adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
                table->SamuLevel[count].MinPhases = 1;

                ret = amdgpu_atombios_get_clock_dividers(adev,
                                                         COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
                                                         table->SamuLevel[count].Frequency, false, &dividers);
                if (ret)
                        return ret;

                table->SamuLevel[count].Divider = (u8)dividers.post_divider;

                table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
                table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
        }

        return ret;
}

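/*
 * Compute the MPLL register set for one memory clock: dividers from
 * atombios, GDDR5-specific DQ routing, optional memory spread spectrum
 * (CLKS/CLKV), DLL speed, and the MRDCK power-down bits.
 */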
static int ci_calculate_mclk_params(struct amdgpu_device *adev,
                                    u32 memory_clock,
                                    SMU7_Discrete_MemoryLevel *mclk,
                                    bool strobe_mode,
                                    bool dll_state_on)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32  dll_cntl = pi->clock_registers.dll_cntl;
        u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
        u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
        u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
        u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
        u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
        u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
        u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
        u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
        struct atom_mpll_param mpll_param;
        int ret;

        ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
        if (ret)
                return ret;

        mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
        mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);

        mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
                        MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
        mpll_func_cntl_1 |= (mpll_param.clkf << MPLL_FUNC_CNTL_1__CLKF__SHIFT) |
                (mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
                (mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);

        mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
        mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);

        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
                mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
                                MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
                mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
                                (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
        }

        if (pi->caps_mclk_ss_support) {
                struct amdgpu_atom_ss ss;
                u32 freq_nom;
                u32 tmp;
                u32 reference_clock = adev->clock.mpll.reference_freq;

                if (mpll_param.qdr == 1)
                        freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
                else
                        freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

                tmp = (freq_nom / reference_clock);
                tmp = tmp * tmp;
                if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
                                                     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
                        u32 clks = reference_clock * 5 / ss.rate;
                        u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

                        mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
                        mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);

                        mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
                        mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
                }
        }

        mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
        mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);

        if (dll_state_on)
                mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
                        MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
        else
                mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
                        MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

        mclk->MclkFrequency = memory_clock;
        mclk->MpllFuncCntl = mpll_func_cntl;
        mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
        mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
        mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
        mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
        mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
        mclk->DllCntl = dll_cntl;
        mclk->MpllSs1 = mpll_ss1;
        mclk->MpllSs2 = mpll_ss2;

        return 0;
}

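/*
 * Fill one memory DPM level: minimum voltages from the dependency
 * tables, stutter/strobe/EDC decisions based on the driver thresholds,
 * DLL state derived from the MC_SEQ straps, then the MPLL settings via
 * ci_calculate_mclk_params(), with the result byte-swapped for the SMC.
 */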
static int ci_populate_single_memory_level(struct amdgpu_device *adev,
                                           u32 memory_clock,
                                           SMU7_Discrete_MemoryLevel *memory_level)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret;
        bool dll_state_on;

        if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
                ret = ci_get_dependency_volt_by_clk(adev,
                                                    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
                                                    memory_clock, &memory_level->MinVddc);
                if (ret)
                        return ret;
        }

        if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
                ret = ci_get_dependency_volt_by_clk(adev,
                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
                                                    memory_clock, &memory_level->MinVddci);
                if (ret)
                        return ret;
        }

        if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
                ret = ci_get_dependency_volt_by_clk(adev,
                                                    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
                                                    memory_clock, &memory_level->MinMvdd);
                if (ret)
                        return ret;
        }

        memory_level->MinVddcPhases = 1;

        if (pi->vddc_phase_shed_control)
                ci_populate_phase_value_based_on_mclk(adev,
                                                      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
                                                      memory_clock,
                                                      &memory_level->MinVddcPhases);

        memory_level->EnabledForThrottle = 1;
        memory_level->UpH = 0;
        memory_level->DownH = 100;
        memory_level->VoltageDownH = 0;
        memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

        memory_level->StutterEnable = false;
        memory_level->StrobeEnable = false;
        memory_level->EdcReadEnable = false;
        memory_level->EdcWriteEnable = false;
        memory_level->RttEnable = false;

        memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

        if (pi->mclk_stutter_mode_threshold &&
            (memory_clock <= pi->mclk_stutter_mode_threshold) &&
            (!pi->uvd_enabled) &&
            (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
            (adev->pm.dpm.new_active_crtc_count <= 2))
                memory_level->StutterEnable = true;

        if (pi->mclk_strobe_mode_threshold &&
            (memory_clock <= pi->mclk_strobe_mode_threshold))
                memory_level->StrobeEnable = true;

        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
                memory_level->StrobeRatio =
                        ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
                if (pi->mclk_edc_enable_threshold &&
                    (memory_clock > pi->mclk_edc_enable_threshold))
                        memory_level->EdcReadEnable = true;

                if (pi->mclk_edc_wr_enable_threshold &&
                    (memory_clock > pi->mclk_edc_wr_enable_threshold))
                        memory_level->EdcWriteEnable = true;

                if (memory_level->StrobeEnable) {
                        if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
                            ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
                                dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
                        else
                                dll_state_on = (RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1;
                } else {
                        dll_state_on = pi->dll_default_on;
                }
        } else {
                memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
                dll_state_on = (RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1;
        }

        ret = ci_calculate_mclk_params(adev, memory_clock, memory_level,
                                       memory_level->StrobeEnable, dll_state_on);
        if (ret)
                return ret;

        memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
        memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
        memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
        memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

        memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
        memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
        memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
        memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
        memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
        memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
        memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
        memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
        memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
        memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
        memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

        return 0;
}

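/*
 * Program the ACPI (lowest power) level: minimum voltages, the SPLL
 * held in reset and parked on its reference clock, and the memory
 * PLL/DLL left powered down.
 */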
static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
                                      SMU7_Discrete_DpmTable *table)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct atom_clock_dividers dividers;
        SMU7_Discrete_VoltageLevel voltage_level;
        u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
        u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
        u32 dll_cntl = pi->clock_registers.dll_cntl;
        u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
        int ret;

        table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

        if (pi->acpi_vddc)
                table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
        else
                table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

        table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

        table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;

        ret = amdgpu_atombios_get_clock_dividers(adev,
                                                 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
                                                 table->ACPILevel.SclkFrequency, false, &dividers);
        if (ret)
                return ret;

        table->ACPILevel.SclkDid = (u8)dividers.post_divider;
        table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
        table->ACPILevel.DeepSleepDivId = 0;

        spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
        spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;

        spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
        spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);

        table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
        table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
        table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
        table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
        table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
        table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
        table->ACPILevel.CcPwrDynRm = 0;
        table->ACPILevel.CcPwrDynRm1 = 0;

        table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
        table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
        table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
        table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
        table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
        table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
        table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
        table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
        table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
        table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
        table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

        table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
        table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

        if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
                if (pi->acpi_vddci)
                        table->MemoryACPILevel.MinVddci =
                                cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
                else
                        table->MemoryACPILevel.MinVddci =
                                cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
        }

        if (ci_populate_mvdd_value(adev, 0, &voltage_level))
                table->MemoryACPILevel.MinMvdd = 0;
        else
                table->MemoryACPILevel.MinMvdd =
                        cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

        mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
                MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
        mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
                        MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

        dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);

        table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
        table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
        table->MemoryACPILevel.MpllAdFuncCntl =
                cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
        table->MemoryACPILevel.MpllDqFuncCntl =
                cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
        table->MemoryACPILevel.MpllFuncCntl =
                cpu_to_be32(pi->clock_registers.mpll_func_cntl);
        table->MemoryACPILevel.MpllFuncCntl_1 =
                cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
        table->MemoryACPILevel.MpllFuncCntl_2 =
                cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
        table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
        table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

        table->MemoryACPILevel.EnabledForThrottle = 0;
        table->MemoryACPILevel.EnabledForActivity = 0;
        table->MemoryACPILevel.UpH = 0;
        table->MemoryACPILevel.DownH = 100;
        table->MemoryACPILevel.VoltageDownH = 0;
        table->MemoryACPILevel.ActivityLevel =
                cpu_to_be16((u16)pi->mclk_activity_target);

        table->MemoryACPILevel.StutterEnable = false;
        table->MemoryACPILevel.StrobeEnable = false;
        table->MemoryACPILevel.EdcReadEnable = false;
        table->MemoryACPILevel.EdcWriteEnable = false;
        table->MemoryACPILevel.RttEnable = false;

        return 0;
}

static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ulv_parm *ulv = &pi->ulv;

        if (ulv->supported) {
                if (enable)
                        return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
                                0 : -EINVAL;
                else
                        return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
                                0 : -EINVAL;
        }

        return 0;
}

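/*
 * Derive the ULV state. A zero value disables ULV; otherwise the
 * offset below the lowest sclk-dependent VDDC entry is stored either
 * raw or, with SVI2 voltage control, converted to a VID offset via the
 * VOLTAGE_VID_OFFSET_SCALE factors.
 */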
static int ci_populate_ulv_level(struct amdgpu_device *adev,
                                 SMU7_Discrete_Ulv *state)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u16 ulv_voltage = adev->pm.dpm.backbias_response_time;

        state->CcPwrDynRm = 0;
        state->CcPwrDynRm1 = 0;

        if (ulv_voltage == 0) {
                pi->ulv.supported = false;
                return 0;
        }

        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
                if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
                        state->VddcOffset = 0;
                else
                        state->VddcOffset =
                                adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
        } else {
                if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
                        state->VddcOffsetVid = 0;
                else
                        state->VddcOffsetVid = (u8)
                                ((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
                                 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
        }
        state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

        state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
        state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
        state->VddcOffset = cpu_to_be16(state->VddcOffset);

        return 0;
}

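/*
 * Compute the SPLL register set for one engine clock: feedback divider
 * with dithering enabled, plus optional engine spread spectrum derived
 * from the VCO frequency.
 */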
static int ci_calculate_sclk_params(struct amdgpu_device *adev,
                                    u32 engine_clock,
                                    SMU7_Discrete_GraphicsLevel *sclk)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct atom_clock_dividers dividers;
        u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
        u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
        u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
        u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
        u32 reference_clock = adev->clock.spll.reference_freq;
        u32 reference_divider;
        u32 fbdiv;
        int ret;

        ret = amdgpu_atombios_get_clock_dividers(adev,
                                                 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
                                                 engine_clock, false, &dividers);
        if (ret)
                return ret;

        reference_divider = 1 + dividers.ref_div;
        fbdiv = dividers.fb_div & 0x3FFFFFF;

        spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
        spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
        spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;

        if (pi->caps_sclk_ss_support) {
                struct amdgpu_atom_ss ss;
                u32 vco_freq = engine_clock * dividers.post_div;

                if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
                                                     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
                        u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
                        u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

                        cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
                        cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
                        cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);

                        cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
                        cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
                }
        }

        sclk->SclkFrequency = engine_clock;
        sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
        sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
        sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
        sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
        sclk->SclkDid = (u8)dividers.post_divider;

        return 0;
}

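/*
 * Fill one graphics DPM level: SPLL setup via the helper above, the
 * minimum VDDC for the clock, activity/hysteresis defaults, and a
 * deep-sleep divider when sclk deep sleep is supported; multi-byte
 * fields are converted to big endian at the end.
 */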
static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
                                            u32 engine_clock,
                                            u16 sclk_activity_level_t,
                                            SMU7_Discrete_GraphicsLevel *graphic_level)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret;

        ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
        if (ret)
                return ret;

        ret = ci_get_dependency_volt_by_clk(adev,
                                            &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
                                            engine_clock, &graphic_level->MinVddc);
        if (ret)
                return ret;

        graphic_level->SclkFrequency = engine_clock;

        graphic_level->Flags = 0;
        graphic_level->MinVddcPhases = 1;

        if (pi->vddc_phase_shed_control)
                ci_populate_phase_value_based_on_sclk(adev,
                                                      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
                                                      engine_clock,
                                                      &graphic_level->MinVddcPhases);

        graphic_level->ActivityLevel = sclk_activity_level_t;

        graphic_level->CcPwrDynRm = 0;
        graphic_level->CcPwrDynRm1 = 0;
        graphic_level->EnabledForThrottle = 1;
        graphic_level->UpH = 0;
        graphic_level->DownH = 0;
        graphic_level->VoltageDownH = 0;
        graphic_level->PowerThrottle = 0;

        if (pi->caps_sclk_ds)
                graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
                                                                                   CISLAND_MINIMUM_ENGINE_CLOCK);

        graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

        graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
        graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
        graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
        graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
        graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
        graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
        graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
        graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
        graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
        graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
        graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

        return 0;
}

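/*
 * Populate every sclk level and upload the whole graphics level array
 * to its offset in the SMC dpm table. Only the two lowest levels keep
 * their deep-sleep divider, and the last level gets the high display
 * watermark.
 */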
static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_dpm_table *dpm_table = &pi->dpm_table;
        u32 level_array_address = pi->dpm_table_start +
                offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
        u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
                SMU7_MAX_LEVELS_GRAPHICS;
        SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
        u32 i;
        int ret;

        memset(levels, 0, level_array_size);

        for (i = 0; i < dpm_table->sclk_table.count; i++) {
                ret = ci_populate_single_graphic_level(adev,
                                                       dpm_table->sclk_table.dpm_levels[i].value,
                                                       (u16)pi->activity_target[i],
                                                       &pi->smc_state_table.GraphicsLevel[i]);
                if (ret)
                        return ret;
                if (i > 1)
                        pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
                if (i == (dpm_table->sclk_table.count - 1))
                        pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
                                PPSMC_DISPLAY_WATERMARK_HIGH;
        }
        pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

        pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
        pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
                ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

        ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
                                   (u8 *)levels, level_array_size,
                                   pi->sram_end);
        if (ret)
                return ret;

        return 0;
}

static int ci_populate_ulv_state(struct amdgpu_device *adev,
                                 SMU7_Discrete_Ulv *ulv_level)
{
        return ci_populate_ulv_level(adev, ulv_level);
}

static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_dpm_table *dpm_table = &pi->dpm_table;
        u32 level_array_address = pi->dpm_table_start +
                offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
        u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
                SMU7_MAX_LEVELS_MEMORY;
        SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
        u32 i;
        int ret;

        memset(levels, 0, level_array_size);

        for (i = 0; i < dpm_table->mclk_table.count; i++) {
                if (dpm_table->mclk_table.dpm_levels[i].value == 0)
                        return -EINVAL;
                ret = ci_populate_single_memory_level(adev,
                                                      dpm_table->mclk_table.dpm_levels[i].value,
                                                      &pi->smc_state_table.MemoryLevel[i]);
                if (ret)
                        return ret;
        }

        pi->smc_state_table.MemoryLevel[0].EnabledForActivity = 1;

        if ((dpm_table->mclk_table.count >= 2) &&
            ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
                pi->smc_state_table.MemoryLevel[1].MinVddc =
                        pi->smc_state_table.MemoryLevel[0].MinVddc;
                pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
                        pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
        }

        pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);

        pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
        pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
                ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

        pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
                PPSMC_DISPLAY_WATERMARK_HIGH;

        ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
                                   (u8 *)levels, level_array_size,
                                   pi->sram_end);
        if (ret)
                return ret;

        return 0;
}

static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
                                      struct ci_single_dpm_table *dpm_table,
                                      u32 count)
{
        u32 i;

        dpm_table->count = count;
        for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
                dpm_table->dpm_levels[i].enabled = false;
}

static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
                                      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
        dpm_table->dpm_levels[index].value = pcie_gen;
        dpm_table->dpm_levels[index].param1 = pcie_lanes;
        dpm_table->dpm_levels[index].enabled = true;
}

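/*
 * Build the six-entry default PCIe table from the performance and
 * powersaving gen/lane ranges, mirroring one set into the other when
 * only one of them is in use; Bonaire uses the maximum lane count for
 * entry 0.
 */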
static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
                return -EINVAL;

        if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
                pi->pcie_gen_powersaving = pi->pcie_gen_performance;
                pi->pcie_lane_powersaving = pi->pcie_lane_performance;
        } else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
                pi->pcie_gen_performance = pi->pcie_gen_powersaving;
                pi->pcie_lane_performance = pi->pcie_lane_powersaving;
        }

        ci_reset_single_dpm_table(adev,
                                  &pi->dpm_table.pcie_speed_table,
                                  SMU7_MAX_LEVELS_LINK);

        if (adev->asic_type == CHIP_BONAIRE)
                ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
                                          pi->pcie_gen_powersaving.min,
                                          pi->pcie_lane_powersaving.max);
        else
                ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
                                          pi->pcie_gen_powersaving.min,
                                          pi->pcie_lane_powersaving.min);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
                                  pi->pcie_gen_performance.min,
                                  pi->pcie_lane_performance.min);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
                                  pi->pcie_gen_powersaving.min,
                                  pi->pcie_lane_powersaving.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
                                  pi->pcie_gen_performance.min,
                                  pi->pcie_lane_performance.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
                                  pi->pcie_gen_powersaving.max,
                                  pi->pcie_lane_powersaving.max);
        ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
                                  pi->pcie_gen_performance.max,
                                  pi->pcie_lane_performance.max);

        pi->dpm_table.pcie_speed_table.count = 6;

        return 0;
}

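/*
 * Rebuild all default DPM tables from the atombios dependency tables,
 * de-duplicating consecutive identical sclk/mclk entries, and keep a
 * golden copy so the defaults can be restored later.
 */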
static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
        struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
                &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
        struct amdgpu_cac_leakage_table *std_voltage_table =
                &adev->pm.dpm.dyn_state.cac_leakage_table;
        u32 i;

        if (allowed_sclk_vddc_table == NULL)
                return -EINVAL;
        if (allowed_sclk_vddc_table->count < 1)
                return -EINVAL;
        if (allowed_mclk_table == NULL)
                return -EINVAL;
        if (allowed_mclk_table->count < 1)
                return -EINVAL;

        memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

        ci_reset_single_dpm_table(adev,
                                  &pi->dpm_table.sclk_table,
                                  SMU7_MAX_LEVELS_GRAPHICS);
        ci_reset_single_dpm_table(adev,
                                  &pi->dpm_table.mclk_table,
                                  SMU7_MAX_LEVELS_MEMORY);
        ci_reset_single_dpm_table(adev,
                                  &pi->dpm_table.vddc_table,
                                  SMU7_MAX_LEVELS_VDDC);
        ci_reset_single_dpm_table(adev,
                                  &pi->dpm_table.vddci_table,
                                  SMU7_MAX_LEVELS_VDDCI);
        ci_reset_single_dpm_table(adev,
                                  &pi->dpm_table.mvdd_table,
                                  SMU7_MAX_LEVELS_MVDD);

        pi->dpm_table.sclk_table.count = 0;
        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
                if ((i == 0) ||
                    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
                     allowed_sclk_vddc_table->entries[i].clk)) {
                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
                                allowed_sclk_vddc_table->entries[i].clk;
                        pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
                                (i == 0);
                        pi->dpm_table.sclk_table.count++;
                }
        }

        pi->dpm_table.mclk_table.count = 0;
        for (i = 0; i < allowed_mclk_table->count; i++) {
                if ((i == 0) ||
                    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
                     allowed_mclk_table->entries[i].clk)) {
                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
                                allowed_mclk_table->entries[i].clk;
                        pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
                                (i == 0);
                        pi->dpm_table.mclk_table.count++;
                }
        }

        for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
                pi->dpm_table.vddc_table.dpm_levels[i].value =
                        allowed_sclk_vddc_table->entries[i].v;
                pi->dpm_table.vddc_table.dpm_levels[i].param1 =
                        std_voltage_table->entries[i].leakage;
                pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
        }
        pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

        allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
        if (allowed_mclk_table) {
                for (i = 0; i < allowed_mclk_table->count; i++) {
                        pi->dpm_table.vddci_table.dpm_levels[i].value =
                                allowed_mclk_table->entries[i].v;
                        pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
                }
                pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
        }

        allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
        if (allowed_mclk_table) {
                for (i = 0; i < allowed_mclk_table->count; i++) {
                        pi->dpm_table.mvdd_table.dpm_levels[i].value =
                                allowed_mclk_table->entries[i].v;
                        pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
                }
                pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
        }

        ci_setup_default_pcie_tables(adev);

        /* save a copy of the default DPM table */
        memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
                        sizeof(struct ci_dpm_table));

        return 0;
}

static int ci_find_boot_level(struct ci_single_dpm_table *table,
                              u32 value, u32 *boot_level)
{
        u32 i;
        int ret = -EINVAL;

        for (i = 0; i < table->count; i++) {
                if (value == table->dpm_levels[i].value) {
                        *boot_level = i;
                        ret = 0;
                }
        }

        return ret;
}

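/*
 * Top-level construction of the SMC dpm table: default tables, voltage
 * tables, fps limits, ULV, the graphics/memory/link/ACPI/UVD/VCE/ACP/
 * SAMU levels, boot levels matched against the VBIOS bootup clocks,
 * BAPM parameters and the global intervals/limits, followed by one
 * big-endian upload of everything from SystemFlags onward (the copy
 * size suggests the three PID controllers at the head of the struct
 * are skipped).
 */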
static int ci_init_smc_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ulv_parm *ulv = &pi->ulv;
        struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
        SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
        int ret;

        ret = ci_setup_default_dpm_tables(adev);
        if (ret)
                return ret;

        if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
                ci_populate_smc_voltage_tables(adev, table);

        ci_init_fps_limits(adev);

        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

        if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
                table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

        if (ulv->supported) {
                ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
                if (ret)
                        return ret;
                WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
        }

        ret = ci_populate_all_graphic_levels(adev);
        if (ret)
                return ret;

        ret = ci_populate_all_memory_levels(adev);
        if (ret)
                return ret;

        ci_populate_smc_link_level(adev, table);

        ret = ci_populate_smc_acpi_level(adev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_vce_level(adev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_acp_level(adev, table);
        if (ret)
                return ret;

        ret = ci_populate_smc_samu_level(adev, table);
        if (ret)
                return ret;

        ret = ci_do_program_memory_timing_parameters(adev);
        if (ret)
                return ret;

        ret = ci_populate_smc_uvd_level(adev, table);
        if (ret)
                return ret;

        table->UvdBootLevel = 0;
        table->VceBootLevel = 0;
        table->AcpBootLevel = 0;
        table->SamuBootLevel = 0;
        table->GraphicsBootLevel = 0;
        table->MemoryBootLevel = 0;

        ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
                                 pi->vbios_boot_state.sclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

        ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
                                 pi->vbios_boot_state.mclk_bootup_value,
                                 (u32 *)&pi->smc_state_table.MemoryBootLevel);

        table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
        table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
        table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

        ci_populate_smc_initial_state(adev, amdgpu_boot_state);

        ret = ci_populate_bapm_parameters_in_dpm_table(adev);
        if (ret)
                return ret;

        table->UVDInterval = 1;
        table->VCEInterval = 1;
        table->ACPInterval = 1;
        table->SAMUInterval = 1;
        table->GraphicsVoltageChangeEnable = 1;
        table->GraphicsThermThrottleEnable = 1;
        table->GraphicsInterval = 1;
        table->VoltageInterval = 1;
        table->ThermalInterval = 1;
        table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
                                             CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
                                            CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
        table->MemoryVoltageChangeEnable = 1;
        table->MemoryInterval = 1;
        table->VoltageResponseTime = 0;
        table->VddcVddciDelta = 4000;
        table->PhaseResponseTime = 0;
        table->MemoryThermThrottleEnable = 1;
        table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
        table->PCIeGenInterval = 1;
        if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
                table->SVI2Enable = 1;
        else
                table->SVI2Enable = 0;

        table->ThermGpio = 17;
        table->SclkStepSize = 0x4000;

        table->SystemFlags = cpu_to_be32(table->SystemFlags);
        table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
        table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
        table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
        table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
        table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
        table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
        table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
        table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
        table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
        table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
        table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
        table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
        table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

        ret = amdgpu_ci_copy_bytes_to_smc(adev,
                                   pi->dpm_table_start +
                                   offsetof(SMU7_Discrete_DpmTable, SystemFlags),
                                   (u8 *)&table->SystemFlags,
                                   sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
                                   pi->sram_end);
        if (ret)
                return ret;

        return 0;
}

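/*
 * The trim helpers below re-enable only those DPM levels that fall
 * inside the requested state's low/high limits; the PCIe variant also
 * disables duplicate speed/lane pairs.
 */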
3813 static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
3814                                       struct ci_single_dpm_table *dpm_table,
3815                                       u32 low_limit, u32 high_limit)
3816 {
3817         u32 i;
3818
3819         for (i = 0; i < dpm_table->count; i++) {
3820                 if ((dpm_table->dpm_levels[i].value < low_limit) ||
3821                     (dpm_table->dpm_levels[i].value > high_limit))
3822                         dpm_table->dpm_levels[i].enabled = false;
3823                 else
3824                         dpm_table->dpm_levels[i].enabled = true;
3825         }
3826 }
3827
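/* Trim PCIe DPM levels to the requested speed/lane window, then disable any
 * duplicates so each (speed, lanes) pair stays enabled at most once.
 */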
3828 static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
3829                                     u32 speed_low, u32 lanes_low,
3830                                     u32 speed_high, u32 lanes_high)
3831 {
3832         struct ci_power_info *pi = ci_get_pi(adev);
3833         struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
3834         u32 i, j;
3835
3836         for (i = 0; i < pcie_table->count; i++) {
3837                 if ((pcie_table->dpm_levels[i].value < speed_low) ||
3838                     (pcie_table->dpm_levels[i].param1 < lanes_low) ||
3839                     (pcie_table->dpm_levels[i].value > speed_high) ||
3840                     (pcie_table->dpm_levels[i].param1 > lanes_high))
3841                         pcie_table->dpm_levels[i].enabled = false;
3842                 else
3843                         pcie_table->dpm_levels[i].enabled = true;
3844         }
3845
3846         for (i = 0; i < pcie_table->count; i++) {
3847                 if (pcie_table->dpm_levels[i].enabled) {
3848                         for (j = i + 1; j < pcie_table->count; j++) {
3849                                 if (pcie_table->dpm_levels[j].enabled) {
3850                                         if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
3851                                             (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
3852                                                 pcie_table->dpm_levels[j].enabled = false;
3853                                 }
3854                         }
3855                 }
3856         }
3857 }
3858
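/* Restrict the sclk/mclk/PCIe DPM tables to the span between the lowest and
 * highest performance levels of the given power state.
 */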
3859 static int ci_trim_dpm_states(struct amdgpu_device *adev,
3860                               struct amdgpu_ps *amdgpu_state)
3861 {
3862         struct ci_ps *state = ci_get_ps(amdgpu_state);
3863         struct ci_power_info *pi = ci_get_pi(adev);
3864         u32 high_limit_count;
3865
3866         if (state->performance_level_count < 1)
3867                 return -EINVAL;
3868
3869         if (state->performance_level_count == 1)
3870                 high_limit_count = 0;
3871         else
3872                 high_limit_count = 1;
3873
3874         ci_trim_single_dpm_states(adev,
3875                                   &pi->dpm_table.sclk_table,
3876                                   state->performance_levels[0].sclk,
3877                                   state->performance_levels[high_limit_count].sclk);
3878
3879         ci_trim_single_dpm_states(adev,
3880                                   &pi->dpm_table.mclk_table,
3881                                   state->performance_levels[0].mclk,
3882                                   state->performance_levels[high_limit_count].mclk);
3883
3884         ci_trim_pcie_dpm_states(adev,
3885                                 state->performance_levels[0].pcie_gen,
3886                                 state->performance_levels[0].pcie_lane,
3887                                 state->performance_levels[high_limit_count].pcie_gen,
3888                                 state->performance_levels[high_limit_count].pcie_lane);
3889
3890         return 0;
3891 }
3892
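/* Request the lowest VDDC from the sclk dependency table that still covers
 * the voltage needed by the current display clock.
 */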
3893 static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
3894 {
3895         struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
3896                 &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
3897         struct amdgpu_clock_voltage_dependency_table *vddc_table =
3898                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
3899         u32 requested_voltage = 0;
3900         u32 i;
3901
3902         if (disp_voltage_table == NULL)
3903                 return -EINVAL;
3904         if (!disp_voltage_table->count)
3905                 return -EINVAL;
3906
3907         for (i = 0; i < disp_voltage_table->count; i++) {
3908                 if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
3909                         requested_voltage = disp_voltage_table->entries[i].v;
3910         }
3911
3912         for (i = 0; i < vddc_table->count; i++) {
3913                 if (requested_voltage <= vddc_table->entries[i].v) {
3914                         requested_voltage = vddc_table->entries[i].v;
3915                         return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3916                                                                   PPSMC_MSG_VddC_Request,
3917                                                                   requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
3918                                 0 : -EINVAL;
3919                 }
3920         }
3921
3922         return -EINVAL;
3923 }
3924
3925 static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
3926 {
3927         struct ci_power_info *pi = ci_get_pi(adev);
3928         PPSMC_Result result;
3929
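	/* Note: the return value is ignored here, so a failed display
	 * voltage request does not abort the mask upload.
	 */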
3930         ci_apply_disp_minimum_voltage_request(adev);
3931
3932         if (!pi->sclk_dpm_key_disabled) {
3933                 if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
3934                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3935                                                                    PPSMC_MSG_SCLKDPM_SetEnabledMask,
3936                                                                    pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
3937                         if (result != PPSMC_Result_OK)
3938                                 return -EINVAL;
3939                 }
3940         }
3941
3942         if (!pi->mclk_dpm_key_disabled) {
3943                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
3944                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3945                                                                    PPSMC_MSG_MCLKDPM_SetEnabledMask,
3946                                                                    pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
3947                         if (result != PPSMC_Result_OK)
3948                                 return -EINVAL;
3949                 }
3950         }
3951
3952 #if 0
3953         if (!pi->pcie_dpm_key_disabled) {
3954                 if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
3955                         result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
3956                                                                    PPSMC_MSG_PCIeDPM_SetEnabledMask,
3957                                                                    pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
3958                         if (result != PPSMC_Result_OK)
3959                                 return -EINVAL;
3960                 }
3961         }
3962 #endif
3963
3964         return 0;
3965 }
3966
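/* Work out which parts of the SMU DPM table must be rebuilt: flag an
 * over-driven sclk/mclk missing from the current table, and flag a memory
 * level update whenever the active CRTC count changes.
 */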
3967 static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
3968                                                    struct amdgpu_ps *amdgpu_state)
3969 {
3970         struct ci_power_info *pi = ci_get_pi(adev);
3971         struct ci_ps *state = ci_get_ps(amdgpu_state);
3972         struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
3973         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
3974         struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
3975         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
3976         u32 i;
3977
3978         pi->need_update_smu7_dpm_table = 0;
3979
3980         for (i = 0; i < sclk_table->count; i++) {
3981                 if (sclk == sclk_table->dpm_levels[i].value)
3982                         break;
3983         }
3984
3985         if (i >= sclk_table->count) {
3986                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
3987         } else {
3988                 /* XXX check display min clock requirements; the check here
3989                  * compared CISLAND_MINIMUM_ENGINE_CLOCK with itself and was
3990                  * always false, so DPMTABLE_UPDATE_SCLK was never set. */
3991         }
3992
3993         for (i = 0; i < mclk_table->count; i++) {
3994                 if (mclk == mclk_table->dpm_levels[i].value)
3995                         break;
3996         }
3997
3998         if (i >= mclk_table->count)
3999                 pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;
4000
4001         if (adev->pm.dpm.current_active_crtc_count !=
4002             adev->pm.dpm.new_active_crtc_count)
4003                 pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
4004 }
4005
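/* Patch the top sclk/mclk DPM level with any over-driven clocks and
 * re-upload the affected graphics/memory level tables to the SMC.
 */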
4006 static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
4007                                                        struct amdgpu_ps *amdgpu_state)
4008 {
4009         struct ci_power_info *pi = ci_get_pi(adev);
4010         struct ci_ps *state = ci_get_ps(amdgpu_state);
4011         u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
4012         u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
4013         struct ci_dpm_table *dpm_table = &pi->dpm_table;
4014         int ret;
4015
4016         if (!pi->need_update_smu7_dpm_table)
4017                 return 0;
4018
4019         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
4020                 dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;
4021
4022         if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
4023                 dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;
4024
4025         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
4026                 ret = ci_populate_all_graphic_levels(adev);
4027                 if (ret)
4028                         return ret;
4029         }
4030
4031         if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
4032                 ret = ci_populate_all_memory_levels(adev);
4033                 if (ret)
4034                         return ret;
4035         }
4036
4037         return 0;
4038 }
4039
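/* Build and send the UVD DPM enable mask, selecting the levels that fit the
 * current VDDC limit; while UVD is active, the lowest mclk level is masked
 * off to keep memory DPM above level 0.
 */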
4040 static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
4041 {
4042         struct ci_power_info *pi = ci_get_pi(adev);
4043         const struct amdgpu_clock_and_voltage_limits *max_limits;
4044         int i;
4045
4046         if (adev->pm.dpm.ac_power)
4047                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4048         else
4049                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4050
4051         if (enable) {
4052                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;
4053
4054                 for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4055                         if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4056                                 pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;
4057
4058                                 if (!pi->caps_uvd_dpm)
4059                                         break;
4060                         }
4061                 }
4062
4063                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4064                                                   PPSMC_MSG_UVDDPM_SetEnabledMask,
4065                                                   pi->dpm_level_enable_mask.uvd_dpm_enable_mask);
4066
4067                 if (pi->last_mclk_dpm_enable_mask & 0x1) {
4068                         pi->uvd_enabled = true;
4069                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4070                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4071                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4072                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4073                 }
4074         } else {
4075                 if (pi->uvd_enabled) {
4076                         pi->uvd_enabled = false;
4077                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
4078                         amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4079                                                           PPSMC_MSG_MCLKDPM_SetEnabledMask,
4080                                                           pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4081                 }
4082         }
4083
4084         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4085                                    PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
4086                 0 : -EINVAL;
4087 }
4088
4089 static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
4090 {
4091         struct ci_power_info *pi = ci_get_pi(adev);
4092         const struct amdgpu_clock_and_voltage_limits *max_limits;
4093         int i;
4094
4095         if (adev->pm.dpm.ac_power)
4096                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4097         else
4098                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4099
4100         if (enable) {
4101                 pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
4102                 for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4103                         if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4104                                 pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;
4105
4106                                 if (!pi->caps_vce_dpm)
4107                                         break;
4108                         }
4109                 }
4110
4111                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4112                                                   PPSMC_MSG_VCEDPM_SetEnabledMask,
4113                                                   pi->dpm_level_enable_mask.vce_dpm_enable_mask);
4114         }
4115
4116         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4117                                    PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
4118                 0 : -EINVAL;
4119 }
4120
4121 #if 0
4122 static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
4123 {
4124         struct ci_power_info *pi = ci_get_pi(adev);
4125         const struct amdgpu_clock_and_voltage_limits *max_limits;
4126         int i;
4127
4128         if (adev->pm.dpm.ac_power)
4129                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4130         else
4131                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4132
4133         if (enable) {
4134                 pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
4135                 for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4136                         if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4137                                 pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;
4138
4139                                 if (!pi->caps_samu_dpm)
4140                                         break;
4141                         }
4142                 }
4143
4144                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4145                                                   PPSMC_MSG_SAMUDPM_SetEnabledMask,
4146                                                   pi->dpm_level_enable_mask.samu_dpm_enable_mask);
4147         }
4148         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4149                                    PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
4150                 0 : -EINVAL;
4151 }
4152
4153 static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
4154 {
4155         struct ci_power_info *pi = ci_get_pi(adev);
4156         const struct amdgpu_clock_and_voltage_limits *max_limits;
4157         int i;
4158
4159         if (adev->pm.dpm.ac_power)
4160                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
4161         else
4162                 max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
4163
4164         if (enable) {
4165                 pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
4166                 for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
4167                         if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
4168                                 pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;
4169
4170                                 if (!pi->caps_acp_dpm)
4171                                         break;
4172                         }
4173                 }
4174
4175                 amdgpu_ci_send_msg_to_smc_with_parameter(adev,
4176                                                   PPSMC_MSG_ACPDPM_SetEnabledMask,
4177                                                   pi->dpm_level_enable_mask.acp_dpm_enable_mask);
4178         }
4179
4180         return (amdgpu_ci_send_msg_to_smc(adev, enable ?
4181                                    PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
4182                 0 : -EINVAL;
4183 }
4184 #endif
4185
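/* When ungating, program the UVD boot level in DPM_TABLE_475, then
 * enable or disable UVD DPM to match the gating state.
 */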
4186 static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
4187 {
4188         struct ci_power_info *pi = ci_get_pi(adev);
4189         u32 tmp;
4190
4191         if (!gate) {
4192                 if (pi->caps_uvd_dpm ||
4193                     (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
4194                         pi->smc_state_table.UvdBootLevel = 0;
4195                 else
4196                         pi->smc_state_table.UvdBootLevel =
4197                                 adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;
4198
4199                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4200                 tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
4201                 tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
4202                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4203         }
4204
4205         return ci_enable_uvd_dpm(adev, !gate);
4206 }
4207
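/* Return the lowest VCE level whose evclk meets the minimum, falling back
 * to the top level when none qualifies.
 */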
4208 static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
4209 {
4210         u8 i;
4211         u32 min_evclk = 30000; /* ??? */
4212         struct amdgpu_vce_clock_voltage_dependency_table *table =
4213                 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
4214
4215         for (i = 0; i < table->count; i++) {
4216                 if (table->entries[i].evclk >= min_evclk)
4217                         return i;
4218         }
4219
4220         return table->count - 1;
4221 }
4222
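/* Ungate VCE clocks and program the VCE boot level when an encode session
 * starts; gate the clocks and disable VCE DPM when it ends.
 */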
4223 static int ci_update_vce_dpm(struct amdgpu_device *adev,
4224                              struct amdgpu_ps *amdgpu_new_state,
4225                              struct amdgpu_ps *amdgpu_current_state)
4226 {
4227         struct ci_power_info *pi = ci_get_pi(adev);
4228         int ret = 0;
4229         u32 tmp;
4230
4231         if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
4232                 if (amdgpu_new_state->evclk) {
4233                         /* turn the clocks on when encoding */
4234                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4235                                                             AMD_CG_STATE_UNGATE);
4236                         if (ret)
4237                                 return ret;
4238
4239                         pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
4240                         tmp = RREG32_SMC(ixDPM_TABLE_475);
4241                         tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
4242                         tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
4243                         WREG32_SMC(ixDPM_TABLE_475, tmp);
4244
4245                         ret = ci_enable_vce_dpm(adev, true);
4246                 } else {
4247                         /* turn the clocks off when not encoding */
4248                         ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
4249                                                             AMD_CG_STATE_GATE);
4250                         if (ret)
4251                                 return ret;
4252
4253                         ret = ci_enable_vce_dpm(adev, false);
4254                 }
4255         }
4256         return ret;
4257 }
4258
4259 #if 0
4260 static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
4261 {
4262         return ci_enable_samu_dpm(adev, gate);
4263 }
4264
4265 static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
4266 {
4267         struct ci_power_info *pi = ci_get_pi(adev);
4268         u32 tmp;
4269
4270         if (!gate) {
4271                 pi->smc_state_table.AcpBootLevel = 0;
4272
4273                 tmp = RREG32_SMC(ixDPM_TABLE_475);
4274                 tmp &= ~DPM_TABLE_475__AcpBootLevel_MASK; /* assumed field macros, cf. UvdBootLevel above; the radeon-style AcpBootLevel macros do not exist in amdgpu */
4275                 tmp |= (pi->smc_state_table.AcpBootLevel << DPM_TABLE_475__AcpBootLevel__SHIFT);
4276                 WREG32_SMC(ixDPM_TABLE_475, tmp);
4277         }
4278
4279         return ci_enable_acp_dpm(adev, !gate);
4280 }
4281 #endif
4282
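/* Rebuild the sclk/mclk/PCIe enable masks from the trimmed DPM tables,
 * keeping mclk level 0 masked while UVD is active.
 */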
4283 static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
4284                                              struct amdgpu_ps *amdgpu_state)
4285 {
4286         struct ci_power_info *pi = ci_get_pi(adev);
4287         int ret;
4288
4289         ret = ci_trim_dpm_states(adev, amdgpu_state);
4290         if (ret)
4291                 return ret;
4292
4293         pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
4294                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
4295         pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
4296                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
4297         pi->last_mclk_dpm_enable_mask =
4298                 pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4299         if (pi->uvd_enabled) {
4300                 if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
4301                         pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
4302         }
4303         pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
4304                 ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);
4305
4306         return 0;
4307 }
4308
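/* Return the index of the lowest set bit; callers must guarantee a non-zero
 * mask, otherwise this loops forever.
 */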
4309 static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
4310                                        u32 level_mask)
4311 {
4312         u32 level = 0;
4313
4314         while ((level_mask & (1 << level)) == 0)
4315                 level++;
4316
4317         return level;
4318 }
4319
4320
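/* Force sclk/mclk/PCIe DPM to the highest or lowest enabled level, or return
 * to automatic operation, polling TARGET_AND_CURRENT_PROFILE_INDEX until the
 * hardware reports the requested level or the usec timeout expires.
 */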
4321 static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
4322                                           enum amdgpu_dpm_forced_level level)
4323 {
4324         struct ci_power_info *pi = ci_get_pi(adev);
4325         u32 tmp, levels, i;
4326         int ret;
4327
4328         if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
4329                 if ((!pi->pcie_dpm_key_disabled) &&
4330                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4331                         levels = 0;
4332                         tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
4333                         while (tmp >>= 1)
4334                                 levels++;
4335                         if (levels) {
4336                                 ret = ci_dpm_force_state_pcie(adev, level);
4337                                 if (ret)
4338                                         return ret;
4339                                 for (i = 0; i < adev->usec_timeout; i++) {
4340                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4341                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4342                                         TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4343                                         if (tmp == levels)
4344                                                 break;
4345                                         udelay(1);
4346                                 }
4347                         }
4348                 }
4349                 if ((!pi->sclk_dpm_key_disabled) &&
4350                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4351                         levels = 0;
4352                         tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
4353                         while (tmp >>= 1)
4354                                 levels++;
4355                         if (levels) {
4356                                 ret = ci_dpm_force_state_sclk(adev, levels);
4357                                 if (ret)
4358                                         return ret;
4359                                 for (i = 0; i < adev->usec_timeout; i++) {
4360                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4361                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4362                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4363                                         if (tmp == levels)
4364                                                 break;
4365                                         udelay(1);
4366                                 }
4367                         }
4368                 }
4369                 if ((!pi->mclk_dpm_key_disabled) &&
4370                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4371                         levels = 0;
4372                         tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
4373                         while (tmp >>= 1)
4374                                 levels++;
4375                         if (levels) {
4376                                 ret = ci_dpm_force_state_mclk(adev, levels);
4377                                 if (ret)
4378                                         return ret;
4379                                 for (i = 0; i < adev->usec_timeout; i++) {
4380                                         tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4381                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4382                                         TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4383                                         if (tmp == levels)
4384                                                 break;
4385                                         udelay(1);
4386                                 }
4387                         }
4388                 }
4389         } else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
4390                 if ((!pi->sclk_dpm_key_disabled) &&
4391                     pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
4392                         levels = ci_get_lowest_enabled_level(adev,
4393                                                              pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
4394                         ret = ci_dpm_force_state_sclk(adev, levels);
4395                         if (ret)
4396                                 return ret;
4397                         for (i = 0; i < adev->usec_timeout; i++) {
4398                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4399                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
4400                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
4401                                 if (tmp == levels)
4402                                         break;
4403                                 udelay(1);
4404                         }
4405                 }
4406                 if ((!pi->mclk_dpm_key_disabled) &&
4407                     pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
4408                         levels = ci_get_lowest_enabled_level(adev,
4409                                                              pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
4410                         ret = ci_dpm_force_state_mclk(adev, levels);
4411                         if (ret)
4412                                 return ret;
4413                         for (i = 0; i < adev->usec_timeout; i++) {
4414                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
4415                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
4416                                 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
4417                                 if (tmp == levels)
4418                                         break;
4419                                 udelay(1);
4420                         }
4421                 }
4422                 if ((!pi->pcie_dpm_key_disabled) &&
4423                     pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
4424                         levels = ci_get_lowest_enabled_level(adev,
4425                                                              pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
4426                         ret = ci_dpm_force_state_pcie(adev, levels);
4427                         if (ret)
4428                                 return ret;
4429                         for (i = 0; i < adev->usec_timeout; i++) {
4430                                 tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
4431                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
4432                                 TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
4433                                 if (tmp == levels)
4434                                         break;
4435                                 udelay(1);
4436                         }
4437                 }
4438         } else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
4439                 if (!pi->pcie_dpm_key_disabled) {
4440                         PPSMC_Result smc_result;
4441
4442                         smc_result = amdgpu_ci_send_msg_to_smc(adev,
4443                                                                PPSMC_MSG_PCIeDPM_UnForceLevel);
4444                         if (smc_result != PPSMC_Result_OK)
4445                                 return -EINVAL;
4446                 }
4447                 ret = ci_upload_dpm_level_enable_mask(adev);
4448                 if (ret)
4449                         return ret;
4450         }
4451
4452         adev->pm.dpm.forced_level = level;
4453
4454         return 0;
4455 }
4456
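/* Append derived MC register entries (EMRS/MRS/MRS1 command values, plus
 * MC_PMG_AUTO_CMD on non-GDDR5 boards) synthesized from the MISC1 and
 * RESERVE_M columns of each timing entry.
 */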
4457 static int ci_set_mc_special_registers(struct amdgpu_device *adev,
4458                                        struct ci_mc_reg_table *table)
4459 {
4460         u8 i, j, k;
4461         u32 temp_reg;
4462
4463         for (i = 0, j = table->last; i < table->last; i++) {
4464                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4465                         return -EINVAL;
4466                 switch (table->mc_reg_address[i].s1) {
4467                 case mmMC_SEQ_MISC1:
4468                         temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
4469                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
4470                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
4471                         for (k = 0; k < table->num_entries; k++) {
4472                                 table->mc_reg_table_entry[k].mc_data[j] =
4473                                         ((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
4474                         }
4475                         j++;
4476                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4477                                 return -EINVAL;
4478
4479                         temp_reg = RREG32(mmMC_PMG_CMD_MRS);
4480                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
4481                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
4482                         for (k = 0; k < table->num_entries; k++) {
4483                                 table->mc_reg_table_entry[k].mc_data[j] =
4484                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4485                                 if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
4486                                         table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
4487                         }
4488                         j++;
4489                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4490                                 return -EINVAL;
4491
4492                         if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
4493                                 table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
4494                                 table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
4495                                 for (k = 0; k < table->num_entries; k++) {
4496                                         table->mc_reg_table_entry[k].mc_data[j] =
4497                                                 (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
4498                                 }
4499                                 j++;
4500                                 if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4501                                         return -EINVAL;
4502                         }
4503                         break;
4504                 case mmMC_SEQ_RESERVE_M:
4505                         temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
4506                         table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
4507                         table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
4508                         for (k = 0; k < table->num_entries; k++) {
4509                                 table->mc_reg_table_entry[k].mc_data[j] =
4510                                         (temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
4511                         }
4512                         j++;
4513                         if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4514                                 return -EINVAL;
4515                         break;
4516                 default:
4517                         break;
4518                 }
4519
4520         }
4521
4522         table->last = j;
4523
4524         return 0;
4525 }
4526
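/* Map an MC sequencer register to its _LP (low-power shadow) variant;
 * returns false when no shadow register exists.
 */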
4527 static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
4528 {
4529         bool result = true;
4530
4531         switch (in_reg) {
4532         case mmMC_SEQ_RAS_TIMING:
4533                 *out_reg = mmMC_SEQ_RAS_TIMING_LP;
4534                 break;
4535         case mmMC_SEQ_DLL_STBY:
4536                 *out_reg = mmMC_SEQ_DLL_STBY_LP;
4537                 break;
4538         case mmMC_SEQ_G5PDX_CMD0:
4539                 *out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
4540                 break;
4541         case mmMC_SEQ_G5PDX_CMD1:
4542                 *out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
4543                 break;
4544         case mmMC_SEQ_G5PDX_CTRL:
4545                 *out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
4546                 break;
4547         case mmMC_SEQ_CAS_TIMING:
4548                 *out_reg = mmMC_SEQ_CAS_TIMING_LP;
4549                 break;
4550         case mmMC_SEQ_MISC_TIMING:
4551                 *out_reg = mmMC_SEQ_MISC_TIMING_LP;
4552                 break;
4553         case mmMC_SEQ_MISC_TIMING2:
4554                 *out_reg = mmMC_SEQ_MISC_TIMING2_LP;
4555                 break;
4556         case mmMC_SEQ_PMG_DVS_CMD:
4557                 *out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
4558                 break;
4559         case mmMC_SEQ_PMG_DVS_CTL:
4560                 *out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
4561                 break;
4562         case mmMC_SEQ_RD_CTL_D0:
4563                 *out_reg = mmMC_SEQ_RD_CTL_D0_LP;
4564                 break;
4565         case mmMC_SEQ_RD_CTL_D1:
4566                 *out_reg = mmMC_SEQ_RD_CTL_D1_LP;
4567                 break;
4568         case mmMC_SEQ_WR_CTL_D0:
4569                 *out_reg = mmMC_SEQ_WR_CTL_D0_LP;
4570                 break;
4571         case mmMC_SEQ_WR_CTL_D1:
4572                 *out_reg = mmMC_SEQ_WR_CTL_D1_LP;
4573                 break;
4574         case mmMC_PMG_CMD_EMRS:
4575                 *out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
4576                 break;
4577         case mmMC_PMG_CMD_MRS:
4578                 *out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
4579                 break;
4580         case mmMC_PMG_CMD_MRS1:
4581                 *out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
4582                 break;
4583         case mmMC_SEQ_PMG_TIMING:
4584                 *out_reg = mmMC_SEQ_PMG_TIMING_LP;
4585                 break;
4586         case mmMC_PMG_CMD_MRS2:
4587                 *out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
4588                 break;
4589         case mmMC_SEQ_WR_CTL_2:
4590                 *out_reg = mmMC_SEQ_WR_CTL_2_LP;
4591                 break;
4592         default:
4593                 result = false;
4594                 break;
4595         }
4596
4597         return result;
4598 }
4599
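/* Mark a register column valid only if its value differs between at least
 * two timing entries; constant columns are skipped when uploading to the
 * SMC.
 */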
4600 static void ci_set_valid_flag(struct ci_mc_reg_table *table)
4601 {
4602         u8 i, j;
4603
4604         for (i = 0; i < table->last; i++) {
4605                 for (j = 1; j < table->num_entries; j++) {
4606                         if (table->mc_reg_table_entry[j-1].mc_data[i] !=
4607                             table->mc_reg_table_entry[j].mc_data[i]) {
4608                                 table->valid_flag |= 1 << i;
4609                                 break;
4610                         }
4611                 }
4612         }
4613 }
4614
4615 static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
4616 {
4617         u32 i;
4618         u16 address;
4619
4620         for (i = 0; i < table->last; i++) {
4621                 table->mc_reg_address[i].s0 =
4622                         ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
4623                         address : table->mc_reg_address[i].s1;
4624         }
4625 }
4626
4627 static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
4628                                       struct ci_mc_reg_table *ci_table)
4629 {
4630         u8 i, j;
4631
4632         if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4633                 return -EINVAL;
4634         if (table->num_entries > MAX_AC_TIMING_ENTRIES)
4635                 return -EINVAL;
4636
4637         for (i = 0; i < table->last; i++)
4638                 ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
4639
4640         ci_table->last = table->last;
4641
4642         for (i = 0; i < table->num_entries; i++) {
4643                 ci_table->mc_reg_table_entry[i].mclk_max =
4644                         table->mc_reg_table_entry[i].mclk_max;
4645                 for (j = 0; j < table->last; j++)
4646                         ci_table->mc_reg_table_entry[i].mc_data[j] =
4647                                 table->mc_reg_table_entry[i].mc_data[j];
4648         }
4649         ci_table->num_entries = table->num_entries;
4650
4651         return 0;
4652 }
4653
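/* Board-specific MC sequencer fixups: on Hawaii boards 0x67B0/0x67B1 with a
 * matching MC_SEQ_MISC0 revision, patch the write-control and timing values
 * at the 1250 MHz and 1375 MHz memory clock entries.
 */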
4654 static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
4655                                        struct ci_mc_reg_table *table)
4656 {
4657         u8 i, k;
4658         u32 tmp;
4659         bool patch;
4660
4661         tmp = RREG32(mmMC_SEQ_MISC0);
4662         patch = (tmp & 0x0000f00) == 0x300;
4663
4664         if (patch &&
4665             ((adev->pdev->device == 0x67B0) ||
4666              (adev->pdev->device == 0x67B1))) {
4667                 for (i = 0; i < table->last; i++) {
4668                         if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4669                                 return -EINVAL;
4670                         switch (table->mc_reg_address[i].s1) {
4671                         case mmMC_SEQ_MISC1:
4672                                 for (k = 0; k < table->num_entries; k++) {
4673                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4674                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4675                                                 table->mc_reg_table_entry[k].mc_data[i] =
4676                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
4677                                                         0x00000007;
4678                                 }
4679                                 break;
4680                         case mmMC_SEQ_WR_CTL_D0:
4681                                 for (k = 0; k < table->num_entries; k++) {
4682                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4683                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4684                                                 table->mc_reg_table_entry[k].mc_data[i] =
4685                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4686                                                         0x0000D0DD;
4687                                 }
4688                                 break;
4689                         case mmMC_SEQ_WR_CTL_D1:
4690                                 for (k = 0; k < table->num_entries; k++) {
4691                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4692                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4693                                                 table->mc_reg_table_entry[k].mc_data[i] =
4694                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
4695                                                         0x0000D0DD;
4696                                 }
4697                                 break;
4698                         case mmMC_SEQ_WR_CTL_2:
4699                                 for (k = 0; k < table->num_entries; k++) {
4700                                         if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
4701                                             (table->mc_reg_table_entry[k].mclk_max == 137500))
4702                                                 table->mc_reg_table_entry[k].mc_data[i] = 0;
4703                                 }
4704                                 break;
4705                         case mmMC_SEQ_CAS_TIMING:
4706                                 for (k = 0; k < table->num_entries; k++) {
4707                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4708                                                 table->mc_reg_table_entry[k].mc_data[i] =
4709                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4710                                                         0x000C0140;
4711                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4712                                                 table->mc_reg_table_entry[k].mc_data[i] =
4713                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
4714                                                         0x000C0150;
4715                                 }
4716                                 break;
4717                         case mmMC_SEQ_MISC_TIMING:
4718                                 for (k = 0; k < table->num_entries; k++) {
4719                                         if (table->mc_reg_table_entry[k].mclk_max == 125000)
4720                                                 table->mc_reg_table_entry[k].mc_data[i] =
4721                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4722                                                         0x00000030;
4723                                         else if (table->mc_reg_table_entry[k].mclk_max == 137500)
4724                                                 table->mc_reg_table_entry[k].mc_data[i] =
4725                                                         (table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
4726                                                         0x00000035;
4727                                 }
4728                                 break;
4729                         default:
4730                                 break;
4731                         }
4732                 }
4733
4734                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4735                 tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
4736                 tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
4737                 WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
4738                 WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
4739         }
4740
4741         return 0;
4742 }
4743
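/* Snapshot the live MC sequencer registers into their _LP shadows, read the
 * per-module register table from the VBIOS, and post-process it (patching,
 * special registers, valid-column flags) for SMC consumption.
 */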
4744 static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
4745 {
4746         struct ci_power_info *pi = ci_get_pi(adev);
4747         struct atom_mc_reg_table *table;
4748         struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
4749         u8 module_index = ci_get_memory_module_index(adev);
4750         int ret;
4751
4752         table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
4753         if (!table)
4754                 return -ENOMEM;
4755
4756         WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
4757         WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
4758         WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
4759         WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
4760         WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
4761         WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
4762         WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
4763         WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
4764         WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
4765         WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
4766         WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
4767         WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
4768         WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
4769         WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
4770         WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
4771         WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
4772         WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
4773         WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
4774         WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
4775         WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));
4776
4777         ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
4778         if (ret)
4779                 goto init_mc_done;
4780
4781         ret = ci_copy_vbios_mc_reg_table(table, ci_table);
4782         if (ret)
4783                 goto init_mc_done;
4784
4785         ci_set_s0_mc_reg_index(ci_table);
4786
4787         ret = ci_register_patching_mc_seq(adev, ci_table);
4788         if (ret)
4789                 goto init_mc_done;
4790
4791         ret = ci_set_mc_special_registers(adev, ci_table);
4792         if (ret)
4793                 goto init_mc_done;
4794
4795         ci_set_valid_flag(ci_table);
4796
4797 init_mc_done:
4798         kfree(table);
4799
4800         return ret;
4801 }
4802
4803 static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
4804                                         SMU7_Discrete_MCRegisters *mc_reg_table)
4805 {
4806         struct ci_power_info *pi = ci_get_pi(adev);
4807         u32 i, j;
4808
4809         for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
4810                 if (pi->mc_reg_table.valid_flag & (1 << j)) {
4811                         if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
4812                                 return -EINVAL;
4813                         mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
4814                         mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
4815                         i++;
4816                 }
4817         }
4818
4819         mc_reg_table->last = (u8)i;
4820
4821         return 0;
4822 }
4823
4824 static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
4825                                     SMU7_Discrete_MCRegisterSet *data,
4826                                     u32 num_entries, u32 valid_flag)
4827 {
4828         u32 i, j;
4829
4830         for (i = 0, j = 0; j < num_entries; j++) {
4831                 if (valid_flag & (1 << j)) {
4832                         data->value[i] = cpu_to_be32(entry->mc_data[j]);
4833                         i++;
4834                 }
4835         }
4836 }
4837
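/* Select the first MC register entry whose mclk_max covers the given memory
 * clock (falling back to the last entry) and convert it for the SMC.
 */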
4838 static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
4839                                                  const u32 memory_clock,
4840                                                  SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
4841 {
4842         struct ci_power_info *pi = ci_get_pi(adev);
4843         u32 i = 0;
4844
4845         for (i = 0; i < pi->mc_reg_table.num_entries; i++) {
4846                 if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
4847                         break;
4848         }
4849
4850         if ((i == pi->mc_reg_table.num_entries) && (i > 0))
4851                 --i;
4852
4853         ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
4854                                 mc_reg_table_data, pi->mc_reg_table.last,
4855                                 pi->mc_reg_table.valid_flag);
4856 }
4857
4858 static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
4859                                            SMU7_Discrete_MCRegisters *mc_reg_table)
4860 {
4861         struct ci_power_info *pi = ci_get_pi(adev);
4862         u32 i;
4863
4864         for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
4865                 ci_convert_mc_reg_table_entry_to_smc(adev,
4866                                                      pi->dpm_table.mclk_table.dpm_levels[i].value,
4867                                                      &mc_reg_table->data[i]);
4868 }
4869
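/* Build the complete SMC MC register table (address list plus one register
 * set per mclk DPM level) and upload it to SMC RAM.
 */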
4870 static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
4871 {
4872         struct ci_power_info *pi = ci_get_pi(adev);
4873         int ret;
4874
4875         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4876
4877         ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
4878         if (ret)
4879                 return ret;
4880         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4881
4882         return amdgpu_ci_copy_bytes_to_smc(adev,
4883                                     pi->mc_reg_table_start,
4884                                     (u8 *)&pi->smc_mc_reg_table,
4885                                     sizeof(SMU7_Discrete_MCRegisters),
4886                                     pi->sram_end);
4887 }
4888
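/* On an over-driven mclk change, regenerate only the per-level register
 * sets and upload them in place, leaving the address list untouched.
 */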
4889 static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
4890 {
4891         struct ci_power_info *pi = ci_get_pi(adev);
4892
4893         if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
4894                 return 0;
4895
4896         memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));
4897
4898         ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);
4899
4900         return amdgpu_ci_copy_bytes_to_smc(adev,
4901                                     pi->mc_reg_table_start +
4902                                     offsetof(SMU7_Discrete_MCRegisters, data[0]),
4903                                     (u8 *)&pi->smc_mc_reg_table.data[0],
4904                                     sizeof(SMU7_Discrete_MCRegisterSet) *
4905                                     pi->dpm_table.mclk_table.count,
4906                                     pi->sram_end);
4907 }
4908
4909 static void ci_enable_voltage_control(struct amdgpu_device *adev)
4910 {
4911         u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);
4912
4913         tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
4914         WREG32_SMC(ixGENERAL_PWRMGT, tmp);
4915 }
4916
4917 static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
4918                                                       struct amdgpu_ps *amdgpu_state)
4919 {
4920         struct ci_ps *state = ci_get_ps(amdgpu_state);
4921         int i;
4922         u16 pcie_speed, max_speed = 0;
4923
4924         for (i = 0; i < state->performance_level_count; i++) {
4925                 pcie_speed = state->performance_levels[i].pcie_gen;
4926                 if (max_speed < pcie_speed)
4927                         max_speed = pcie_speed;
4928         }
4929
4930         return max_speed;
4931 }
4932
4933 static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
4934 {
4935         u32 speed_cntl = 0;
4936
4937         speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
4938                 PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
4939         speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
4940
4941         return (u16)speed_cntl;
4942 }
4943
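/* Decode the LC_LINK_WIDTH_RD field into a lane count; unhandled encodings
 * fall back to x16.
 */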
4944 static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
4945 {
4946         u32 link_width = 0;
4947
4948         link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
4949                 PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
4950         link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
4951
4952         switch (link_width) {
4953         case 1:
4954                 return 1;
4955         case 2:
4956                 return 2;
4957         case 3:
4958                 return 4;
4959         case 4:
4960                 return 8;
4961         case 0:
4962         case 6:
4963         default:
4964                 return 16;
4965         }
4966 }
4967
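/* If the new state needs a faster PCIe link, request it via ACPI before the
 * state switch, falling back to forcing the current speed on failure; a
 * downshift is only noted here and applied after the switch.
 */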
4968 static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
4969                                                              struct amdgpu_ps *amdgpu_new_state,
4970                                                              struct amdgpu_ps *amdgpu_current_state)
4971 {
4972         struct ci_power_info *pi = ci_get_pi(adev);
4973         enum amdgpu_pcie_gen target_link_speed =
4974                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
4975         enum amdgpu_pcie_gen current_link_speed;
4976
4977         if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
4978                 current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
4979         else
4980                 current_link_speed = pi->force_pcie_gen;
4981
4982         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
4983         pi->pspp_notify_required = false;
4984         if (target_link_speed > current_link_speed) {
4985                 switch (target_link_speed) {
4986 #ifdef CONFIG_ACPI
4987                 case AMDGPU_PCIE_GEN3:
4988                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
4989                                 break;
4990                         pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
4991                         if (current_link_speed == AMDGPU_PCIE_GEN2)
4992                                 break;
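			/* fall through, try a gen2 request */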
4993                 case AMDGPU_PCIE_GEN2:
4994                         if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
4995                                 break;
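			/* fall through to the default fallback */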
4996 #endif
4997                 default:
4998                         pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
4999                         break;
5000                 }
5001         } else {
5002                 if (target_link_speed < current_link_speed)
5003                         pi->pspp_notify_required = true;
5004         }
5005 }
5006
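/**
 * ci_notify_link_speed_change_after_state_change - post-switch PCIe request
 *
 * @adev: amdgpu_device pointer
 * @amdgpu_new_state: power state that was just programmed
 * @amdgpu_current_state: previous power state
 *
 * Counterpart to the pre-switch request: once the slower state is
 * active, tell the platform the link may be trained down.  A Gen1
 * request is skipped while the hardware still reports a faster rate.
 */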
5007 static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
5008                                                            struct amdgpu_ps *amdgpu_new_state,
5009                                                            struct amdgpu_ps *amdgpu_current_state)
5010 {
5011         struct ci_power_info *pi = ci_get_pi(adev);
5012         enum amdgpu_pcie_gen target_link_speed =
5013                 ci_get_maximum_link_speed(adev, amdgpu_new_state);
5014         u8 request;
5015
5016         if (pi->pspp_notify_required) {
5017                 if (target_link_speed == AMDGPU_PCIE_GEN3)
5018                         request = PCIE_PERF_REQ_PECI_GEN3;
5019                 else if (target_link_speed == AMDGPU_PCIE_GEN2)
5020                         request = PCIE_PERF_REQ_PECI_GEN2;
5021                 else
5022                         request = PCIE_PERF_REQ_PECI_GEN1;
5023
5024                 if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
5025                     (ci_get_current_pcie_speed(adev) > 0))
5026                         return;
5027
5028 #ifdef CONFIG_ACPI
5029                 amdgpu_acpi_pcie_performance_request(adev, request, false);
5030 #endif
5031         }
5032 }
5033
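/**
 * ci_set_private_data_variables_based_on_pptable - cache pptable limits
 *
 * @adev: amdgpu_device pointer
 *
 * Validate the sclk/mclk voltage dependency tables from the powerplay
 * table and cache their min/max VDDC/VDDCI values along with the
 * maximum AC clock/voltage limits.  Returns -EINVAL if any required
 * table is missing or empty.
 */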
5034 static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
5035 {
5036         struct ci_power_info *pi = ci_get_pi(adev);
5037         struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
5038                 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
5039         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
5040                 &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
5041         struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
5042                 &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
5043
5044         if (allowed_sclk_vddc_table == NULL)
5045                 return -EINVAL;
5046         if (allowed_sclk_vddc_table->count < 1)
5047                 return -EINVAL;
5048         if (allowed_mclk_vddc_table == NULL)
5049                 return -EINVAL;
5050         if (allowed_mclk_vddc_table->count < 1)
5051                 return -EINVAL;
5052         if (allowed_mclk_vddci_table == NULL)
5053                 return -EINVAL;
5054         if (allowed_mclk_vddci_table->count < 1)
5055                 return -EINVAL;
5056
5057         pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
5058         pi->max_vddc_in_pp_table =
5059                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5060
5061         pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
5062         pi->max_vddci_in_pp_table =
5063                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5064
5065         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
5066                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
5067         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
5068                 allowed_mclk_vddc_table->entries[allowed_mclk_vddc_table->count - 1].clk;
5069         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
5070                 allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
5071         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
5072                 allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;
5073
5074         return 0;
5075 }
5076
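/* Leakage patching: the vbios power tables may store virtual "leakage"
 * voltage indices rather than real voltages.  The helpers below swap
 * any such index found in a dependency or limit table for the actual
 * voltage discovered by ci_get_leakage_voltages().
 */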
5077 static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
5078 {
5079         struct ci_power_info *pi = ci_get_pi(adev);
5080         struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
5081         u32 leakage_index;
5082
5083         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5084                 if (leakage_table->leakage_id[leakage_index] == *vddc) {
5085                         *vddc = leakage_table->actual_voltage[leakage_index];
5086                         break;
5087                 }
5088         }
5089 }
5090
5091 static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
5092 {
5093         struct ci_power_info *pi = ci_get_pi(adev);
5094         struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
5095         u32 leakage_index;
5096
5097         for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
5098                 if (leakage_table->leakage_id[leakage_index] == *vddci) {
5099                         *vddci = leakage_table->actual_voltage[leakage_index];
5100                         break;
5101                 }
5102         }
5103 }
5104
5105 static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5106                                                                       struct amdgpu_clock_voltage_dependency_table *table)
5107 {
5108         u32 i;
5109
5110         if (table) {
5111                 for (i = 0; i < table->count; i++)
5112                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5113         }
5114 }
5115
5116 static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
5117                                                                        struct amdgpu_clock_voltage_dependency_table *table)
5118 {
5119         u32 i;
5120
5121         if (table) {
5122                 for (i = 0; i < table->count; i++)
5123                         ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
5124         }
5125 }
5126
5127 static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5128                                                                           struct amdgpu_vce_clock_voltage_dependency_table *table)
5129 {
5130         u32 i;
5131
5132         if (table) {
5133                 for (i = 0; i < table->count; i++)
5134                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5135         }
5136 }
5137
5138 static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
5139                                                                           struct amdgpu_uvd_clock_voltage_dependency_table *table)
5140 {
5141         u32 i;
5142
5143         if (table) {
5144                 for (i = 0; i < table->count; i++)
5145                         ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
5146         }
5147 }
5148
5149 static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
5150                                                                    struct amdgpu_phase_shedding_limits_table *table)
5151 {
5152         u32 i;
5153
5154         if (table) {
5155                 for (i = 0; i < table->count; i++)
5156                         ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
5157         }
5158 }
5159
5160 static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
5161                                                             struct amdgpu_clock_and_voltage_limits *table)
5162 {
5163         if (table) {
5164                 ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
5165                 ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
5166         }
5167 }
5168
5169 static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
5170                                                          struct amdgpu_cac_leakage_table *table)
5171 {
5172         u32 i;
5173
5174         if (table) {
5175                 for (i = 0; i < table->count; i++)
5176                         ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
5177         }
5178 }
5179
5180 static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
5181 {
5183         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5184                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
5185         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5186                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
5187         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5188                                                                   &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
5189         ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
5190                                                                    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
5191         ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
5192                                                                       &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
5193         ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
5194                                                                       &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
5195         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5196                                                                   &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
5197         ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
5198                                                                   &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
5199         ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
5200                                                                &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
5201         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5202                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
5203         ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
5204                                                         &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
5205         ci_patch_cac_leakage_table_with_vddc_leakage(adev,
5206                                                      &adev->pm.dpm.dyn_state.cac_leakage_table);
5208 }
5209
5210 static void ci_update_current_ps(struct amdgpu_device *adev,
5211                                  struct amdgpu_ps *rps)
5212 {
5213         struct ci_ps *new_ps = ci_get_ps(rps);
5214         struct ci_power_info *pi = ci_get_pi(adev);
5215
5216         pi->current_rps = *rps;
5217         pi->current_ps = *new_ps;
5218         pi->current_rps.ps_priv = &pi->current_ps;
5219 }
5220
5221 static void ci_update_requested_ps(struct amdgpu_device *adev,
5222                                    struct amdgpu_ps *rps)
5223 {
5224         struct ci_ps *new_ps = ci_get_ps(rps);
5225         struct ci_power_info *pi = ci_get_pi(adev);
5226
5227         pi->requested_rps = *rps;
5228         pi->requested_ps = *new_ps;
5229         pi->requested_rps.ps_priv = &pi->requested_ps;
5230 }
5231
5232 static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
5233 {
5234         struct ci_power_info *pi = ci_get_pi(adev);
5235         struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
5236         struct amdgpu_ps *new_ps = &requested_ps;
5237
5238         ci_update_requested_ps(adev, new_ps);
5239
5240         ci_apply_state_adjust_rules(adev, &pi->requested_rps);
5241
5242         return 0;
5243 }
5244
5245 static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
5246 {
5247         struct ci_power_info *pi = ci_get_pi(adev);
5248         struct amdgpu_ps *new_ps = &pi->requested_rps;
5249
5250         ci_update_current_ps(adev, new_ps);
5251 }
5252
5253
5254 static void ci_dpm_setup_asic(struct amdgpu_device *adev)
5255 {
5256         ci_read_clock_registers(adev);
5257         ci_enable_acpi_power_management(adev);
5258         ci_init_sclk_t(adev);
5259 }
5260
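/**
 * ci_dpm_enable - bring up dynamic power management
 *
 * @adev: amdgpu_device pointer
 *
 * Full DPM bring-up: voltage tables, spread spectrum and thermal
 * protection, SMC firmware upload and table initialization, then the
 * individual features (ULV, deep sleep, DIDT, CAC, power containment,
 * thermal-based sclk DPM) before starting the thermal controller.
 * Any failing step aborts the sequence and its error code is returned.
 */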
5261 static int ci_dpm_enable(struct amdgpu_device *adev)
5262 {
5263         struct ci_power_info *pi = ci_get_pi(adev);
5264         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5265         int ret;
5266
5267         if (amdgpu_ci_is_smc_running(adev))
5268                 return -EINVAL;
5269         if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
5270                 ci_enable_voltage_control(adev);
5271                 ret = ci_construct_voltage_tables(adev);
5272                 if (ret) {
5273                         DRM_ERROR("ci_construct_voltage_tables failed\n");
5274                         return ret;
5275                 }
5276         }
5277         if (pi->caps_dynamic_ac_timing) {
5278                 ret = ci_initialize_mc_reg_table(adev);
5279                 if (ret)
5280                         pi->caps_dynamic_ac_timing = false;
5281         }
5282         if (pi->dynamic_ss)
5283                 ci_enable_spread_spectrum(adev, true);
5284         if (pi->thermal_protection)
5285                 ci_enable_thermal_protection(adev, true);
5286         ci_program_sstp(adev);
5287         ci_enable_display_gap(adev);
5288         ci_program_vc(adev);
5289         ret = ci_upload_firmware(adev);
5290         if (ret) {
5291                 DRM_ERROR("ci_upload_firmware failed\n");
5292                 return ret;
5293         }
5294         ret = ci_process_firmware_header(adev);
5295         if (ret) {
5296                 DRM_ERROR("ci_process_firmware_header failed\n");
5297                 return ret;
5298         }
5299         ret = ci_initial_switch_from_arb_f0_to_f1(adev);
5300         if (ret) {
5301                 DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
5302                 return ret;
5303         }
5304         ret = ci_init_smc_table(adev);
5305         if (ret) {
5306                 DRM_ERROR("ci_init_smc_table failed\n");
5307                 return ret;
5308         }
5309         ret = ci_init_arb_table_index(adev);
5310         if (ret) {
5311                 DRM_ERROR("ci_init_arb_table_index failed\n");
5312                 return ret;
5313         }
5314         if (pi->caps_dynamic_ac_timing) {
5315                 ret = ci_populate_initial_mc_reg_table(adev);
5316                 if (ret) {
5317                         DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
5318                         return ret;
5319                 }
5320         }
5321         ret = ci_populate_pm_base(adev);
5322         if (ret) {
5323                 DRM_ERROR("ci_populate_pm_base failed\n");
5324                 return ret;
5325         }
5326         ci_dpm_start_smc(adev);
5327         ci_enable_vr_hot_gpio_interrupt(adev);
5328         ret = ci_notify_smc_display_change(adev, false);
5329         if (ret) {
5330                 DRM_ERROR("ci_notify_smc_display_change failed\n");
5331                 return ret;
5332         }
5333         ci_enable_sclk_control(adev, true);
5334         ret = ci_enable_ulv(adev, true);
5335         if (ret) {
5336                 DRM_ERROR("ci_enable_ulv failed\n");
5337                 return ret;
5338         }
5339         ret = ci_enable_ds_master_switch(adev, true);
5340         if (ret) {
5341                 DRM_ERROR("ci_enable_ds_master_switch failed\n");
5342                 return ret;
5343         }
5344         ret = ci_start_dpm(adev);
5345         if (ret) {
5346                 DRM_ERROR("ci_start_dpm failed\n");
5347                 return ret;
5348         }
5349         ret = ci_enable_didt(adev, true);
5350         if (ret) {
5351                 DRM_ERROR("ci_enable_didt failed\n");
5352                 return ret;
5353         }
5354         ret = ci_enable_smc_cac(adev, true);
5355         if (ret) {
5356                 DRM_ERROR("ci_enable_smc_cac failed\n");
5357                 return ret;
5358         }
5359         ret = ci_enable_power_containment(adev, true);
5360         if (ret) {
5361                 DRM_ERROR("ci_enable_power_containment failed\n");
5362                 return ret;
5363         }
5364
5365         ret = ci_power_control_set_level(adev);
5366         if (ret) {
5367                 DRM_ERROR("ci_power_control_set_level failed\n");
5368                 return ret;
5369         }
5370
5371         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
5372
5373         ret = ci_enable_thermal_based_sclk_dpm(adev, true);
5374         if (ret) {
5375                 DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
5376                 return ret;
5377         }
5378
5379         ci_thermal_start_thermal_controller(adev);
5380
5381         ci_update_current_ps(adev, boot_ps);
5382
5383         return 0;
5384 }
5385
5386 static void ci_dpm_disable(struct amdgpu_device *adev)
5387 {
5388         struct ci_power_info *pi = ci_get_pi(adev);
5389         struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
5390
5391         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5392                        AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
5393         amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
5394                        AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
5395
5396         ci_dpm_powergate_uvd(adev, true);
5397
5398         if (!amdgpu_ci_is_smc_running(adev))
5399                 return;
5400
5401         ci_thermal_stop_thermal_controller(adev);
5402
5403         if (pi->thermal_protection)
5404                 ci_enable_thermal_protection(adev, false);
5405         ci_enable_power_containment(adev, false);
5406         ci_enable_smc_cac(adev, false);
5407         ci_enable_didt(adev, false);
5408         ci_enable_spread_spectrum(adev, false);
5409         ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
5410         ci_stop_dpm(adev);
5411         ci_enable_ds_master_switch(adev, false);
5412         ci_enable_ulv(adev, false);
5413         ci_clear_vc(adev);
5414         ci_reset_to_default(adev);
5415         ci_dpm_stop_smc(adev);
5416         ci_force_switch_to_arb_f0(adev);
5417         ci_enable_thermal_based_sclk_dpm(adev, false);
5418
5419         ci_update_current_ps(adev, boot_ps);
5420 }
5421
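/**
 * ci_dpm_set_power_state - program the requested power state
 *
 * @adev: amdgpu_device pointer
 *
 * Freeze sclk/mclk DPM, upload the new DPM levels, enable mask and
 * MC/arb timing parameters to the SMC, then unfreeze DPM.  PCIe link
 * speed changes are requested before the switch (speed up) or notified
 * after it (slow down) when the platform supports performance requests.
 */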
5422 static int ci_dpm_set_power_state(struct amdgpu_device *adev)
5423 {
5424         struct ci_power_info *pi = ci_get_pi(adev);
5425         struct amdgpu_ps *new_ps = &pi->requested_rps;
5426         struct amdgpu_ps *old_ps = &pi->current_rps;
5427         int ret;
5428
5429         ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
5430         if (pi->pcie_performance_request)
5431                 ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
5432         ret = ci_freeze_sclk_mclk_dpm(adev);
5433         if (ret) {
5434                 DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
5435                 return ret;
5436         }
5437         ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
5438         if (ret) {
5439                 DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
5440                 return ret;
5441         }
5442         ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
5443         if (ret) {
5444                 DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
5445                 return ret;
5446         }
5447
5448         ret = ci_update_vce_dpm(adev, new_ps, old_ps);
5449         if (ret) {
5450                 DRM_ERROR("ci_update_vce_dpm failed\n");
5451                 return ret;
5452         }
5453
5454         ret = ci_update_sclk_t(adev);
5455         if (ret) {
5456                 DRM_ERROR("ci_update_sclk_t failed\n");
5457                 return ret;
5458         }
5459         if (pi->caps_dynamic_ac_timing) {
5460                 ret = ci_update_and_upload_mc_reg_table(adev);
5461                 if (ret) {
5462                         DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
5463                         return ret;
5464                 }
5465         }
5466         ret = ci_program_memory_timing_parameters(adev);
5467         if (ret) {
5468                 DRM_ERROR("ci_program_memory_timing_parameters failed\n");
5469                 return ret;
5470         }
5471         ret = ci_unfreeze_sclk_mclk_dpm(adev);
5472         if (ret) {
5473                 DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
5474                 return ret;
5475         }
5476         ret = ci_upload_dpm_level_enable_mask(adev);
5477         if (ret) {
5478                 DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
5479                 return ret;
5480         }
5481         if (pi->pcie_performance_request)
5482                 ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
5483
5484         return 0;
5485 }
5486
5487 #if 0
5488 static void ci_dpm_reset_asic(struct amdgpu_device *adev)
5489 {
5490         ci_set_boot_state(adev);
5491 }
5492 #endif
5493
5494 static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
5495 {
5496         ci_program_display_gap(adev);
5497 }
5498
5499 union power_info {
5500         struct _ATOM_POWERPLAY_INFO info;
5501         struct _ATOM_POWERPLAY_INFO_V2 info_2;
5502         struct _ATOM_POWERPLAY_INFO_V3 info_3;
5503         struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
5504         struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
5505         struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
5506 };
5507
5508 union pplib_clock_info {
5509         struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
5510         struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
5511         struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
5512         struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
5513         struct _ATOM_PPLIB_SI_CLOCK_INFO si;
5514         struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
5515 };
5516
5517 union pplib_power_state {
5518         struct _ATOM_PPLIB_STATE v1;
5519         struct _ATOM_PPLIB_STATE_V2 v2;
5520 };
5521
5522 static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
5523                                           struct amdgpu_ps *rps,
5524                                           struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
5525                                           u8 table_rev)
5526 {
5527         rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
5528         rps->class = le16_to_cpu(non_clock_info->usClassification);
5529         rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
5530
5531         if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
5532                 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
5533                 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
5534         } else {
5535                 rps->vclk = 0;
5536                 rps->dclk = 0;
5537         }
5538
5539         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
5540                 adev->pm.dpm.boot_ps = rps;
5541         if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
5542                 adev->pm.dpm.uvd_ps = rps;
5543 }
5544
5545 static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
5546                                       struct amdgpu_ps *rps, int index,
5547                                       union pplib_clock_info *clock_info)
5548 {
5549         struct ci_power_info *pi = ci_get_pi(adev);
5550         struct ci_ps *ps = ci_get_ps(rps);
5551         struct ci_pl *pl = &ps->performance_levels[index];
5552
5553         ps->performance_level_count = index + 1;
5554
5555         pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5556         pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
5557         pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5558         pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5559
5560         pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
5561                                                    pi->sys_pcie_mask,
5562                                                    pi->vbios_boot_state.pcie_gen_bootup_value,
5563                                                    clock_info->ci.ucPCIEGen);
5564         pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
5565                                                      pi->vbios_boot_state.pcie_lane_bootup_value,
5566                                                      le16_to_cpu(clock_info->ci.usPCIELane));
5567
5568         if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
5569                 pi->acpi_pcie_gen = pl->pcie_gen;
5570         }
5571
5572         if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
5573                 pi->ulv.supported = true;
5574                 pi->ulv.pl = *pl;
5575                 pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
5576         }
5577
5578         /* patch up boot state */
5579         if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
5580                 pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
5581                 pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
5582                 pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
5583                 pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
5584         }
5585
5586         switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
5587         case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
5588                 pi->use_pcie_powersaving_levels = true;
5589                 if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
5590                         pi->pcie_gen_powersaving.max = pl->pcie_gen;
5591                 if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
5592                         pi->pcie_gen_powersaving.min = pl->pcie_gen;
5593                 if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
5594                         pi->pcie_lane_powersaving.max = pl->pcie_lane;
5595                 if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
5596                         pi->pcie_lane_powersaving.min = pl->pcie_lane;
5597                 break;
5598         case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
5599                 pi->use_pcie_performance_levels = true;
5600                 if (pi->pcie_gen_performance.max < pl->pcie_gen)
5601                         pi->pcie_gen_performance.max = pl->pcie_gen;
5602                 if (pi->pcie_gen_performance.min > pl->pcie_gen)
5603                         pi->pcie_gen_performance.min = pl->pcie_gen;
5604                 if (pi->pcie_lane_performance.max < pl->pcie_lane)
5605                         pi->pcie_lane_performance.max = pl->pcie_lane;
5606                 if (pi->pcie_lane_performance.min > pl->pcie_lane)
5607                         pi->pcie_lane_performance.min = pl->pcie_lane;
5608                 break;
5609         default:
5610                 break;
5611         }
5612 }
5613
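/**
 * ci_parse_power_table - build driver power states from the vbios
 *
 * @adev: amdgpu_device pointer
 *
 * Walk the ATOM PowerPlay state, clock-info and non-clock-info arrays,
 * allocating a ci_ps per state and one performance level per DPM level
 * (capped at CISLANDS_MAX_HARDWARE_POWERLEVELS), then fill in the VCE
 * state clocks from the same clock-info array.
 */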
5614 static int ci_parse_power_table(struct amdgpu_device *adev)
5615 {
5616         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5617         struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
5618         union pplib_power_state *power_state;
5619         int i, j, k, non_clock_array_index, clock_array_index;
5620         union pplib_clock_info *clock_info;
5621         struct _StateArray *state_array;
5622         struct _ClockInfoArray *clock_info_array;
5623         struct _NonClockInfoArray *non_clock_info_array;
5624         union power_info *power_info;
5625         int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
5626         u16 data_offset;
5627         u8 frev, crev;
5628         u8 *power_state_offset;
5629         struct ci_ps *ps;
5630
5631         if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5632                                    &frev, &crev, &data_offset))
5633                 return -EINVAL;
5634         power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
5635
5636         amdgpu_add_thermal_controller(adev);
5637
5638         state_array = (struct _StateArray *)
5639                 (mode_info->atom_context->bios + data_offset +
5640                  le16_to_cpu(power_info->pplib.usStateArrayOffset));
5641         clock_info_array = (struct _ClockInfoArray *)
5642                 (mode_info->atom_context->bios + data_offset +
5643                  le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
5644         non_clock_info_array = (struct _NonClockInfoArray *)
5645                 (mode_info->atom_context->bios + data_offset +
5646                  le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
5647
5648         adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
5649                                   sizeof(struct amdgpu_ps), GFP_KERNEL);
5650         if (!adev->pm.dpm.ps)
5651                 return -ENOMEM;
5652         power_state_offset = (u8 *)state_array->states;
5653         for (i = 0; i < state_array->ucNumEntries; i++) {
5654                 u8 *idx;
5655                 power_state = (union pplib_power_state *)power_state_offset;
5656                 non_clock_array_index = power_state->v2.nonClockInfoIndex;
5657                 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
5658                         &non_clock_info_array->nonClockInfo[non_clock_array_index];
5659                 ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
5660                 if (ps == NULL) {
                        while (i--)
                                kfree(adev->pm.dpm.ps[i].ps_priv);
5661                         kfree(adev->pm.dpm.ps);
                        adev->pm.dpm.ps = NULL;
5662                         return -ENOMEM;
5663                 }
5664                 adev->pm.dpm.ps[i].ps_priv = ps;
5665                 ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
5666                                               non_clock_info,
5667                                               non_clock_info_array->ucEntrySize);
5668                 k = 0;
5669                 idx = (u8 *)&power_state->v2.clockInfoIndex[0];
5670                 for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
5671                         clock_array_index = idx[j];
5672                         if (clock_array_index >= clock_info_array->ucNumEntries)
5673                                 continue;
5674                         if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
5675                                 break;
5676                         clock_info = (union pplib_clock_info *)
5677                                 ((u8 *)&clock_info_array->clockInfo[0] +
5678                                  (clock_array_index * clock_info_array->ucEntrySize));
5679                         ci_parse_pplib_clock_info(adev,
5680                                                   &adev->pm.dpm.ps[i], k,
5681                                                   clock_info);
5682                         k++;
5683                 }
5684                 power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
5685         }
5686         adev->pm.dpm.num_ps = state_array->ucNumEntries;
5687
5688         /* fill in the vce power states */
5689         for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
5690                 u32 sclk, mclk;
5691                 clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
5692                 clock_info = (union pplib_clock_info *)
5693                         &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
5694                 sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
5695                 sclk |= clock_info->ci.ucEngineClockHigh << 16;
5696                 mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
5697                 mclk |= clock_info->ci.ucMemoryClockHigh << 16;
5698                 adev->pm.dpm.vce_states[i].sclk = sclk;
5699                 adev->pm.dpm.vce_states[i].mclk = mclk;
5700         }
5701
5702         return 0;
5703 }
5704
5705 static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
5706                                     struct ci_vbios_boot_state *boot_state)
5707 {
5708         struct amdgpu_mode_info *mode_info = &adev->mode_info;
5709         int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
5710         ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
5711         u8 frev, crev;
5712         u16 data_offset;
5713
5714         if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
5715                                    &frev, &crev, &data_offset)) {
5716                 firmware_info =
5717                         (ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
5718                                                     data_offset);
5719                 boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
5720                 boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
5721                 boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
5722                 boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
5723                 boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
5724                 boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
5725                 boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
5726
5727                 return 0;
5728         }
5729         return -EINVAL;
5730 }
5731
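/* Undo the allocations made by ci_dpm_init()/ci_parse_power_table(). */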
5732 static void ci_dpm_fini(struct amdgpu_device *adev)
5733 {
5734         int i;
5735
5736         for (i = 0; i < adev->pm.dpm.num_ps; i++) {
5737                 kfree(adev->pm.dpm.ps[i].ps_priv);
5738         }
5739         kfree(adev->pm.dpm.ps);
5740         kfree(adev->pm.dpm.priv);
5741         kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
5742         amdgpu_free_extended_power_table(adev);
5743 }
5744
5745 /**
5746  * ci_dpm_init_microcode - load ucode images from disk
5747  *
5748  * @adev: amdgpu_device pointer
5749  *
5750  * Use the firmware interface to load the ucode images into
5751  * the driver (not loaded into hw).
5752  * Returns 0 on success, error on failure.
5753  */
5754 static int ci_dpm_init_microcode(struct amdgpu_device *adev)
5755 {
5756         const char *chip_name;
5757         char fw_name[30];
5758         int err;
5759
5760         DRM_DEBUG("\n");
5761
5762         switch (adev->asic_type) {
5763         case CHIP_BONAIRE:
5764                 if ((adev->pdev->revision == 0x80) ||
5765                     (adev->pdev->revision == 0x81) ||
5766                     (adev->pdev->device == 0x665f))
5767                         chip_name = "bonaire_k";
5768                 else
5769                         chip_name = "bonaire";
5770                 break;
5771         case CHIP_HAWAII:
5772                 if (adev->pdev->revision == 0x80)
5773                         chip_name = "hawaii_k";
5774                 else
5775                         chip_name = "hawaii";
5776                 break;
5777         case CHIP_KAVERI:
5778         case CHIP_KABINI:
5779         case CHIP_MULLINS:
5780         default:
                BUG();
5781         }
5782
5783         snprintf(fw_name, sizeof(fw_name), "/*(DEBLOBBED)*/", chip_name);
5784         err = reject_firmware(&adev->pm.fw, fw_name, adev->dev);
5785         if (err)
5786                 goto out;
5787         err = amdgpu_ucode_validate(adev->pm.fw);
5788
5789 out:
5790         if (err) {
5791                 pr_err("cik_smc: Failed to load firmware \"%s\"\n",
5792                        fw_name);
5794                 release_firmware(adev->pm.fw);
5795                 adev->pm.fw = NULL;
5796         }
5797         return err;
5798 }
5799
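/**
 * ci_dpm_init - one-time DPM software setup
 *
 * @adev: amdgpu_device pointer
 *
 * Allocate the ci_power_info, read the vbios boot values and power
 * tables, patch leakage voltages, choose thermal trip points, probe
 * the VRHot/AC-DC/PCC GPIOs and select the voltage control method
 * (GPIO vs. SVID2) for VDDC, VDDCI and MVDD.
 */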
5800 static int ci_dpm_init(struct amdgpu_device *adev)
5801 {
5802         int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
5803         SMU7_Discrete_DpmTable *dpm_table;
5804         struct amdgpu_gpio_rec gpio;
5805         u16 data_offset, size;
5806         u8 frev, crev;
5807         struct ci_power_info *pi;
5808         int ret;
5809
5810         pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
5811         if (pi == NULL)
5812                 return -ENOMEM;
5813         adev->pm.dpm.priv = pi;
5814
5815         pi->sys_pcie_mask =
5816                 (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
5817                 CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
5818
5819         pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
5820
5821         pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
5822         pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
5823         pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
5824         pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;
5825
5826         pi->pcie_lane_performance.max = 0;
5827         pi->pcie_lane_performance.min = 16;
5828         pi->pcie_lane_powersaving.max = 0;
5829         pi->pcie_lane_powersaving.min = 16;
5830
5831         ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
5832         if (ret) {
5833                 ci_dpm_fini(adev);
5834                 return ret;
5835         }
5836
5837         ret = amdgpu_get_platform_caps(adev);
5838         if (ret) {
5839                 ci_dpm_fini(adev);
5840                 return ret;
5841         }
5842
5843         ret = amdgpu_parse_extended_power_table(adev);
5844         if (ret) {
5845                 ci_dpm_fini(adev);
5846                 return ret;
5847         }
5848
5849         ret = ci_parse_power_table(adev);
5850         if (ret) {
5851                 ci_dpm_fini(adev);
5852                 return ret;
5853         }
5854
5855         pi->dll_default_on = false;
5856         pi->sram_end = SMC_RAM_END;
5857
5858         pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
5859         pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
5860         pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
5861         pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
5862         pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
5863         pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
5864         pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
5865         pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;
5866
5867         pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;
5868
5869         pi->sclk_dpm_key_disabled = 0;
5870         pi->mclk_dpm_key_disabled = 0;
5871         pi->pcie_dpm_key_disabled = 0;
5872         pi->thermal_sclk_dpm_enabled = 0;
5873
5874         if (amdgpu_sclk_deep_sleep_en)
5875                 pi->caps_sclk_ds = true;
5876         else
5877                 pi->caps_sclk_ds = false;
5878
5879         pi->mclk_strobe_mode_threshold = 40000;
5880         pi->mclk_stutter_mode_threshold = 40000;
5881         pi->mclk_edc_enable_threshold = 40000;
5882         pi->mclk_edc_wr_enable_threshold = 40000;
5883
5884         ci_initialize_powertune_defaults(adev);
5885
5886         pi->caps_fps = false;
5887
5888         pi->caps_sclk_throttle_low_notification = false;
5889
5890         pi->caps_uvd_dpm = true;
5891         pi->caps_vce_dpm = true;
5892
5893         ci_get_leakage_voltages(adev);
5894         ci_patch_dependency_tables_with_leakage(adev);
5895         ci_set_private_data_variables_based_on_pptable(adev);
5896
5897         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
5898                 kcalloc(4, sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
5899         if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
5900                 ci_dpm_fini(adev);
5901                 return -ENOMEM;
5902         }
5903         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
5904         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
5905         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
5906         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
5907         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
5908         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
5909         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
5910         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
5911         adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
5912
5913         adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
5914         adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
5915         adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
5916
5917         adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
5918         adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
5919         adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
5920         adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
5921
5922         if (adev->asic_type == CHIP_HAWAII) {
5923                 pi->thermal_temp_setting.temperature_low = 94500;
5924                 pi->thermal_temp_setting.temperature_high = 95000;
5925                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5926         } else {
5927                 pi->thermal_temp_setting.temperature_low = 99500;
5928                 pi->thermal_temp_setting.temperature_high = 100000;
5929                 pi->thermal_temp_setting.temperature_shutdown = 104000;
5930         }
5931
5932         pi->uvd_enabled = false;
5933
5934         dpm_table = &pi->smc_state_table;
5935
5936         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
5937         if (gpio.valid) {
5938                 dpm_table->VRHotGpio = gpio.shift;
5939                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5940         } else {
5941                 dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
5942                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
5943         }
5944
5945         gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
5946         if (gpio.valid) {
5947                 dpm_table->AcDcGpio = gpio.shift;
5948                 adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5949         } else {
5950                 dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
5951                 adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
5952         }
5953
5954         gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
5955         if (gpio.valid) {
5956                 u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);
5957
5958                 switch (gpio.shift) {
5959                 case 0:
5960                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5961                         tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5962                         break;
5963                 case 1:
5964                         tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
5965                         tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
5966                         break;
5967                 case 2:
5968                         tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
5969                         break;
5970                 case 3:
5971                         tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
5972                         break;
5973                 case 4:
5974                         tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
5975                         break;
5976                 default:
5977                         DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
5978                         break;
5979                 }
5980                 WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
5981         }
5982
5983         pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5984         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5985         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
5986         if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
5987                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5988         else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
5989                 pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5990
5991         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
5992                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
5993                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
5994                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
5995                         pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
5996                 else
5997                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
5998         }
5999
6000         if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
6001                 if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
6002                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
6003                 else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
6004                         pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
6005                 else
6006                         adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
6007         }
6008
6009         pi->vddc_phase_shed_control = true;
6010
6011 #if defined(CONFIG_ACPI)
6012         pi->pcie_performance_request =
6013                 amdgpu_acpi_is_pcie_performance_request_supported(adev);
6014 #else
6015         pi->pcie_performance_request = false;
6016 #endif
6017
6018         if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
6019                                    &frev, &crev, &data_offset)) {
6020                 pi->caps_sclk_ss_support = true;
6021                 pi->caps_mclk_ss_support = true;
6022                 pi->dynamic_ss = true;
6023         } else {
6024                 pi->caps_sclk_ss_support = false;
6025                 pi->caps_mclk_ss_support = false;
6026                 pi->dynamic_ss = true;
6027         }
6028
6029         if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
6030                 pi->thermal_protection = true;
6031         else
6032                 pi->thermal_protection = false;
6033
6034         pi->caps_dynamic_ac_timing = true;
6035
6036         pi->uvd_power_gated = true;
6037
6038         /* make sure dc limits are valid */
6039         if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
6040             (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
6041                 adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
6042                         adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
6043
6044         pi->fan_ctrl_is_in_default_mode = true;
6045
6046         return 0;
6047 }
6048
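/* debugfs: report averaged clocks and GPU load.  The SMC returns the
 * activity counter in 8.8 fixed point, hence the +0x80 rounding before
 * the shift down to an integer percentage.
 */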
6049 static void
6050 ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
6051                                                struct seq_file *m)
6052 {
6053         struct ci_power_info *pi = ci_get_pi(adev);
6054         struct amdgpu_ps *rps = &pi->current_rps;
6055         u32 sclk = ci_get_average_sclk_freq(adev);
6056         u32 mclk = ci_get_average_mclk_freq(adev);
6057         u32 activity_percent = 50;
6058         int ret;
6059
6060         ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
6061                                         &activity_percent);
6062
6063         if (ret == 0) {
6064                 activity_percent += 0x80;
6065                 activity_percent >>= 8;
6066                 activity_percent = activity_percent > 100 ? 100 : activity_percent;
6067         }
6068
6069         seq_printf(m, "uvd %sabled\n", pi->uvd_enabled ? "en" : "dis");
6070         seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
6071         seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
6072                    sclk, mclk);
6073         seq_printf(m, "GPU load: %u %%\n", activity_percent);
6074 }
6075
6076 static void ci_dpm_print_power_state(struct amdgpu_device *adev,
6077                                      struct amdgpu_ps *rps)
6078 {
6079         struct ci_ps *ps = ci_get_ps(rps);
6080         struct ci_pl *pl;
6081         int i;
6082
6083         amdgpu_dpm_print_class_info(rps->class, rps->class2);
6084         amdgpu_dpm_print_cap_info(rps->caps);
6085         printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
6086         for (i = 0; i < ps->performance_level_count; i++) {
6087                 pl = &ps->performance_levels[i];
6088                 printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
6089                        i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
6090         }
6091         amdgpu_dpm_print_ps_status(adev, rps);
6092 }
6093
6094 static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
6095 {
6096         struct ci_power_info *pi = ci_get_pi(adev);
6097         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6098
6099         if (low)
6100                 return requested_state->performance_levels[0].sclk;
6101         else
6102                 return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
6103 }
6104
6105 static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
6106 {
6107         struct ci_power_info *pi = ci_get_pi(adev);
6108         struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);
6109
6110         if (low)
6111                 return requested_state->performance_levels[0].mclk;
6112         else
6113                 return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
6114 }
6115
6116 /* get temperature in millidegrees */
6117 static int ci_dpm_get_temp(struct amdgpu_device *adev)
6118 {
6119         u32 temp;
6120         int actual_temp = 0;
6121
6122         temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
6123                 CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;
6124
6125         if (temp & 0x200)
6126                 actual_temp = 255;
6127         else
6128                 actual_temp = temp & 0x1ff;
6129
6130         actual_temp = actual_temp * 1000;
6131
6132         return actual_temp;
6133 }
6134
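/* Re-arm the thermal alert: the interrupt is masked while the low/high
 * trip points are reprogrammed, then unmasked again.
 */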
6135 static int ci_set_temperature_range(struct amdgpu_device *adev)
6136 {
6137         int ret;
6138
6139         ret = ci_thermal_enable_alert(adev, false);
6140         if (ret)
6141                 return ret;
6142         ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
6143                                                CISLANDS_TEMP_RANGE_MAX);
6144         if (ret)
6145                 return ret;
6146         ret = ci_thermal_enable_alert(adev, true);
6147
6148         return ret;
6150 }
6151
6152 static int ci_dpm_early_init(void *handle)
6153 {
6154         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6155
6156         ci_dpm_set_dpm_funcs(adev);
6157         ci_dpm_set_irq_funcs(adev);
6158
6159         return 0;
6160 }
6161
6162 static int ci_dpm_late_init(void *handle)
6163 {
6164         int ret;
6165         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6166
6167         if (!amdgpu_dpm)
6168                 return 0;
6169
6170         /* init the sysfs and debugfs files late */
6171         ret = amdgpu_pm_sysfs_init(adev);
6172         if (ret)
6173                 return ret;
6174
6175         ret = ci_set_temperature_range(adev);
6176         if (ret)
6177                 return ret;
6178
6179         return 0;
6180 }
6181
6182 static int ci_dpm_sw_init(void *handle)
6183 {
6184         int ret;
6185         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6186
6187         ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
6188         if (ret)
6189                 return ret;
6190
6191         ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
6192         if (ret)
6193                 return ret;
6194
6195         /* default to balanced state */
6196         adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
6197         adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
6198         adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
6199         adev->pm.default_sclk = adev->clock.default_sclk;
6200         adev->pm.default_mclk = adev->clock.default_mclk;
6201         adev->pm.current_sclk = adev->clock.default_sclk;
6202         adev->pm.current_mclk = adev->clock.default_mclk;
6203         adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
6204
6205         if (amdgpu_dpm == 0)
6206                 return 0;
6207
6208         ret = ci_dpm_init_microcode(adev);
6209         if (ret)
6210                 return ret;
6211
6212         INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
6213         mutex_lock(&adev->pm.mutex);
6214         ret = ci_dpm_init(adev);
6215         if (ret)
6216                 goto dpm_failed;
6217         adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6218         if (amdgpu_dpm == 1)
6219                 amdgpu_pm_print_power_states(adev);
6220         mutex_unlock(&adev->pm.mutex);
6221         DRM_INFO("amdgpu: dpm initialized\n");
6222
6223         return 0;
6224
6225 dpm_failed:
6226         ci_dpm_fini(adev);
6227         mutex_unlock(&adev->pm.mutex);
6228         DRM_ERROR("amdgpu: dpm initialization failed\n");
6229         return ret;
6230 }
6231
6232 static int ci_dpm_sw_fini(void *handle)
6233 {
6234         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6235
6236         flush_work(&adev->pm.dpm.thermal.work);
6237
6238         mutex_lock(&adev->pm.mutex);
6239         amdgpu_pm_sysfs_fini(adev);
6240         ci_dpm_fini(adev);
6241         mutex_unlock(&adev->pm.mutex);
6242
6243         release_firmware(adev->pm.fw);
6244         adev->pm.fw = NULL;
6245
6246         return 0;
6247 }
6248
6249 static int ci_dpm_hw_init(void *handle)
6250 {
6251         int ret;
6252
6253         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6254
6255         if (!amdgpu_dpm)
6256                 return 0;
6257
6258         mutex_lock(&adev->pm.mutex);
6259         ci_dpm_setup_asic(adev);
6260         ret = ci_dpm_enable(adev);
6261         if (ret)
6262                 adev->pm.dpm_enabled = false;
6263         else
6264                 adev->pm.dpm_enabled = true;
6265         mutex_unlock(&adev->pm.mutex);
6266
6267         return ret;
6268 }
6269
6270 static int ci_dpm_hw_fini(void *handle)
6271 {
6272         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6273
6274         if (adev->pm.dpm_enabled) {
6275                 mutex_lock(&adev->pm.mutex);
6276                 ci_dpm_disable(adev);
6277                 mutex_unlock(&adev->pm.mutex);
6278         }
6279
6280         return 0;
6281 }
6282
6283 static int ci_dpm_suspend(void *handle)
6284 {
6285         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6286
6287         if (adev->pm.dpm_enabled) {
6288                 mutex_lock(&adev->pm.mutex);
6289                 /* disable dpm */
6290                 ci_dpm_disable(adev);
6291                 /* reset the power state */
6292                 adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
6293                 mutex_unlock(&adev->pm.mutex);
6294         }
6295         return 0;
6296 }
6297
6298 static int ci_dpm_resume(void *handle)
6299 {
6300         int ret;
6301         struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6302
6303         if (adev->pm.dpm_enabled) {
6304                 /* asic init will reset to the boot state */
6305                 mutex_lock(&adev->pm.mutex);
6306                 ci_dpm_setup_asic(adev);
6307                 ret = ci_dpm_enable(adev);
6308                 if (ret)
6309                         adev->pm.dpm_enabled = false;
6310                 else
6311                         adev->pm.dpm_enabled = true;
6312                 mutex_unlock(&adev->pm.mutex);
6313                 if (adev->pm.dpm_enabled)
6314                         amdgpu_pm_compute_clocks(adev);
6315         }
6316         return 0;
6317 }
6318
static bool ci_dpm_is_idle(void *handle)
{
        /* XXX */
        return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
        /* XXX */
        return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
        return 0;
}

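/*
 * Thermal interrupt enable/disable: setting THERM_INTH_MASK or
 * THERM_INTL_MASK in CG_THERMAL_INT masks (disables) the corresponding
 * high/low temperature trip interrupt; clearing the bit re-enables it.
 */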
static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      unsigned type,
                                      enum amdgpu_interrupt_state state)
{
        u32 cg_thermal_int;

        switch (type) {
        case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
                switch (state) {
                case AMDGPU_IRQ_STATE_DISABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                case AMDGPU_IRQ_STATE_ENABLE:
                        cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
                        cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                        WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
                        break;
                default:
                        break;
                }
                break;

        default:
                break;
        }
        return 0;
}

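/*
 * Thermal interrupt handler: source IDs 230 and 231 are the low-to-high
 * and high-to-low temperature trips; the actual response is deferred to
 * the thermal work handler scheduled below.
 */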
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
                                    struct amdgpu_irq_src *source,
                                    struct amdgpu_iv_entry *entry)
{
        bool queue_thermal = false;

        if (entry == NULL)
                return -EINVAL;

        switch (entry->src_id) {
        case 230: /* thermal low to high */
                DRM_DEBUG("IH: thermal low to high\n");
                adev->pm.dpm.thermal.high_to_low = false;
                queue_thermal = true;
                break;
        case 231: /* thermal high to low */
                DRM_DEBUG("IH: thermal high to low\n");
                adev->pm.dpm.thermal.high_to_low = true;
                queue_thermal = true;
                break;
        default:
                break;
        }

        if (queue_thermal)
                schedule_work(&adev->pm.dpm.thermal.work);

        return 0;
}

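/* Clockgating and powergating transitions are no-ops for this block. */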
static int ci_dpm_set_clockgating_state(void *handle,
                                        enum amd_clockgating_state state)
{
        return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
                                        enum amd_powergating_state state)
{
        return 0;
}

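/*
 * Print the SCLK/MCLK/PCIe DPM level tables for sysfs.  The current
 * engine/memory clock is read back from the SMC via a message-register
 * handshake; dpm_levels[].value appears to be in 10 kHz units, hence
 * the division by 100 to get MHz.
 */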
static int ci_dpm_print_clock_levels(struct amdgpu_device *adev,
                enum pp_clock_type type, char *buf)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
        struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
        struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;

        int i, now, size = 0;
        uint32_t clock, pcie_speed;

        switch (type) {
        case PP_SCLK:
                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
                clock = RREG32(mmSMC_MSG_ARG_0);

                for (i = 0; i < sclk_table->count; i++) {
                        if (clock > sclk_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < sclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
                                        i, sclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
        case PP_MCLK:
                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
                clock = RREG32(mmSMC_MSG_ARG_0);

                for (i = 0; i < mclk_table->count; i++) {
                        if (clock > mclk_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < mclk_table->count; i++)
                        size += sprintf(buf + size, "%d: %uMHz %s\n",
                                        i, mclk_table->dpm_levels[i].value / 100,
                                        (i == now) ? "*" : "");
                break;
        case PP_PCIE:
                pcie_speed = ci_get_current_pcie_speed(adev);
                for (i = 0; i < pcie_table->count; i++) {
                        if (pcie_speed != pcie_table->dpm_levels[i].value)
                                continue;
                        break;
                }
                now = i;

                for (i = 0; i < pcie_table->count; i++)
                        size += sprintf(buf + size, "%d: %s %s\n", i,
                                        (pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
                                        (pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
                                        (pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
                                        (i == now) ? "*" : "");
                break;
        default:
                break;
        }

        return size;
}

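/*
 * Restrict DPM to the levels selected in @mask.  Only legal while the
 * forced performance level is "manual"; the mask is clamped to the
 * levels the driver actually enabled.
 */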
static int ci_dpm_force_clock_level(struct amdgpu_device *adev,
                enum pp_clock_type type, uint32_t mask)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (adev->pm.dpm.forced_level != AMDGPU_DPM_FORCED_LEVEL_MANUAL)
                return -EINVAL;

        switch (type) {
        case PP_SCLK:
                if (!pi->sclk_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_SCLKDPM_SetEnabledMask,
                                        pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
                break;

        case PP_MCLK:
                if (!pi->mclk_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_MCLKDPM_SetEnabledMask,
                                        pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
                break;

        case PP_PCIE:
        {
                uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
                uint32_t level = 0;

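                /*
                 * PCIe DPM forces a single level rather than a mask:
                 * find the index of the highest set bit, e.g. a mask of
                 * 0b110 yields level 2.
                 */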
                while (tmp >>= 1)
                        level++;

                if (!pi->pcie_dpm_key_disabled)
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                        PPSMC_MSG_PCIeDPM_ForceLevel,
                                        level);
                break;
        }
        default:
                break;
        }

        return 0;
}

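/*
 * Report the current engine-clock overdrive as a percentage above the
 * top level of the golden (stock) table, e.g. a top level of 104000
 * against a golden 100000 reads back as 4.
 */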
static int ci_dpm_get_sclk_od(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
        struct ci_single_dpm_table *golden_sclk_table =
                        &(pi->golden_dpm_table.sclk_table);
        int value;

        value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
                        100 /
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

        return value;
}

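/*
 * Apply an engine-clock overdrive percentage, capped at 20%: the top
 * performance level is rescaled to golden * (100 + value) / 100.
 */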
static int ci_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
        struct ci_single_dpm_table *golden_sclk_table =
                        &(pi->golden_dpm_table.sclk_table);

        if (value > 20)
                value = 20;

        ps->performance_levels[ps->performance_level_count - 1].sclk =
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
                        value / 100 +
                        golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

        return 0;
}

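/* Memory-clock analogue of ci_dpm_get_sclk_od(). */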
static int ci_dpm_get_mclk_od(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
        struct ci_single_dpm_table *golden_mclk_table =
                        &(pi->golden_dpm_table.mclk_table);
        int value;

        value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
                        100 /
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

        return value;
}

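/* Memory-clock analogue of ci_dpm_set_sclk_od(), with the same 20% cap. */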
static int ci_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
        struct ci_single_dpm_table *golden_mclk_table =
                        &(pi->golden_dpm_table.mclk_table);

        if (value > 20)
                value = 20;

        ps->performance_levels[ps->performance_level_count - 1].mclk =
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
                        value / 100 +
                        golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

        return 0;
}

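/*
 * IP-block callbacks hooking CI DPM into the common amdgpu init,
 * teardown and suspend/resume sequencing.
 */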
const struct amd_ip_funcs ci_dpm_ip_funcs = {
        .name = "ci_dpm",
        .early_init = ci_dpm_early_init,
        .late_init = ci_dpm_late_init,
        .sw_init = ci_dpm_sw_init,
        .sw_fini = ci_dpm_sw_fini,
        .hw_init = ci_dpm_hw_init,
        .hw_fini = ci_dpm_hw_fini,
        .suspend = ci_dpm_suspend,
        .resume = ci_dpm_resume,
        .is_idle = ci_dpm_is_idle,
        .wait_for_idle = ci_dpm_wait_for_idle,
        .soft_reset = ci_dpm_soft_reset,
        .set_clockgating_state = ci_dpm_set_clockgating_state,
        .set_powergating_state = ci_dpm_set_powergating_state,
};

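/*
 * DPM callbacks consumed by the generic amdgpu power-management layer
 * (and, through it, the sysfs/debugfs interfaces).
 */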
static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
        .get_temperature = ci_dpm_get_temp,
        .pre_set_power_state = ci_dpm_pre_set_power_state,
        .set_power_state = ci_dpm_set_power_state,
        .post_set_power_state = ci_dpm_post_set_power_state,
        .display_configuration_changed = ci_dpm_display_configuration_changed,
        .get_sclk = ci_dpm_get_sclk,
        .get_mclk = ci_dpm_get_mclk,
        .print_power_state = ci_dpm_print_power_state,
        .debugfs_print_current_performance_level = ci_dpm_debugfs_print_current_performance_level,
        .force_performance_level = ci_dpm_force_performance_level,
        .vblank_too_short = ci_dpm_vblank_too_short,
        .powergate_uvd = ci_dpm_powergate_uvd,
        .set_fan_control_mode = ci_dpm_set_fan_control_mode,
        .get_fan_control_mode = ci_dpm_get_fan_control_mode,
        .set_fan_speed_percent = ci_dpm_set_fan_speed_percent,
        .get_fan_speed_percent = ci_dpm_get_fan_speed_percent,
        .print_clock_levels = ci_dpm_print_clock_levels,
        .force_clock_level = ci_dpm_force_clock_level,
        .get_sclk_od = ci_dpm_get_sclk_od,
        .set_sclk_od = ci_dpm_set_sclk_od,
        .get_mclk_od = ci_dpm_get_mclk_od,
        .set_mclk_od = ci_dpm_set_mclk_od,
};

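/*
 * Install the CI DPM callbacks unless another implementation has
 * already claimed adev->pm.funcs.
 */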
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
        if (adev->pm.funcs == NULL)
                adev->pm.funcs = &ci_dpm_funcs;
}

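/*
 * Wire the thermal interrupt enable/handler pair above into the
 * dedicated thermal interrupt source.
 */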
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
        .set = ci_dpm_set_interrupt_state,
        .process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
        adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}