2 * Copyright 2014 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include <linux/firmware.h>
27 #include "smu8_fusion.h"
29 #include "cz_smumgr.h"
30 #include "smu_ucode_xfer_cz.h"
31 #include "amdgpu_ucode.h"
35 #include "smu/smu_8_0_d.h"
36 #include "smu/smu_8_0_sh_mask.h"
37 #include "gca/gfx_8_0_d.h"
38 #include "gca/gfx_8_0_sh_mask.h"
/* Read the SMU's reply argument from the MP1 SMU->driver mailbox register. */
40 uint32_t cz_get_argument(struct amdgpu_device *adev)
42 return RREG32(mmSMU_MP1_SRBM2P_ARG_0);
/* Fetch the CZ-specific SMU state stashed in adev->smu.priv by cz_smu_init(). */
45 static struct cz_smu_private_data *cz_smu_get_priv(struct amdgpu_device *adev)
47 struct cz_smu_private_data *priv =
48 (struct cz_smu_private_data *)(adev->smu.priv);
/*
 * Post a message to the SMU without waiting for its result.
 * First polls the RESP register's CONTENT field until any previous
 * exchange has drained, then clears RESP and writes the new message.
 * Returns nonzero on timeout (see adev->usec_timeout).
 */
53 static int cz_send_msg_to_smc_async(struct amdgpu_device *adev, u16 msg)
/* wait for the previous message slot to free up */
58 for (i = 0; i < adev->usec_timeout; i++) {
59 tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
60 SMU_MP1_SRBM2P_RESP_0, CONTENT);
66 /* timeout means wrong logic*/
67 if (i == adev->usec_timeout)
/* clear the response register, then hand the message to the SMU */
70 WREG32(mmSMU_MP1_SRBM2P_RESP_0, 0);
71 WREG32(mmSMU_MP1_SRBM2P_MSG_0, msg);
/*
 * Synchronously send a message to the SMU: post it, then poll the RESP
 * register until the SMU answers. Any answer other than PPSMC_Result_OK
 * (including a poll timeout) is treated as failure.
 */
76 int cz_send_msg_to_smc(struct amdgpu_device *adev, u16 msg)
79 u32 content = 0, tmp = 0;
81 if (cz_send_msg_to_smc_async(adev, msg))
/* wait for the SMU's response to appear in RESP.CONTENT */
84 for (i = 0; i < adev->usec_timeout; i++) {
85 tmp = REG_GET_FIELD(RREG32(mmSMU_MP1_SRBM2P_RESP_0),
86 SMU_MP1_SRBM2P_RESP_0, CONTENT);
92 /* timeout means wrong logic*/
93 if (i == adev->usec_timeout)
96 if (PPSMC_Result_OK != tmp) {
97 dev_err(adev->dev, "SMC Failed to send Message.\n");
/* Stage @parameter in the driver->SMU argument register, then send @msg. */
104 int cz_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
105 u16 msg, u32 parameter)
107 WREG32(mmSMU_MP1_SRBM2P_ARG_0, parameter);
108 return cz_send_msg_to_smc(adev, msg);
/*
 * Program the indirect-access index register for a dword access at
 * @smc_address inside SMU SRAM. Fails if the 4-byte access would cross
 * @limit. Callers then read/write mmMP0PUB_IND_DATA_0.
 */
111 static int cz_set_smc_sram_address(struct amdgpu_device *adev,
112 u32 smc_address, u32 limit)
/* bounds check: the full dword must fit below the limit */
116 if ((smc_address + 3) > limit)
119 WREG32(mmMP0PUB_IND_INDEX_0, SMN_MP1_SRAM_START_ADDR + smc_address);
/* Read one dword from SMU SRAM at @smc_address into *@value (bounded by @limit). */
124 int cz_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
125 u32 *value, u32 limit)
129 ret = cz_set_smc_sram_address(adev, smc_address, limit);
133 *value = RREG32(mmMP0PUB_IND_DATA_0);
/* Write one dword @value into SMU SRAM at @smc_address (bounded by @limit). */
138 static int cz_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address,
139 u32 value, u32 limit)
143 ret = cz_set_smc_sram_address(adev, smc_address, limit);
147 WREG32(mmMP0PUB_IND_DATA_0, value);
/*
 * Kick off SMU-driven firmware loading: clear the UcodeLoadStatus word in
 * the SMU firmware header, hand the TOC buffer's GPU address to the SMU,
 * then run the init jobs plus the ARAM-save, power-profiling and
 * initialize TOC entries built by the construct_toc_* helpers.
 */
152 static int cz_smu_request_load_fw(struct amdgpu_device *adev)
154 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
156 uint32_t smc_addr = SMU8_FIRMWARE_HEADER_LOCATION +
157 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
/* reset the load-status word so completion can be polled from scratch */
159 cz_write_smc_sram_dword(adev, smc_addr, 0, smc_addr + 4);
161 /*prepare toc buffers*/
162 cz_send_msg_to_smc_with_parameter(adev,
163 PPSMC_MSG_DriverDramAddrHi,
164 priv->toc_buffer.mc_addr_high);
165 cz_send_msg_to_smc_with_parameter(adev,
166 PPSMC_MSG_DriverDramAddrLo,
167 priv->toc_buffer.mc_addr_low);
168 cz_send_msg_to_smc(adev, PPSMC_MSG_InitJobs);
/* execute the pre-built TOC job entries in order */
171 cz_send_msg_to_smc_with_parameter(adev,
172 PPSMC_MSG_ExecuteJob,
173 priv->toc_entry_aram);
175 cz_send_msg_to_smc_with_parameter(adev,
176 PPSMC_MSG_ExecuteJob,
177 priv->toc_entry_power_profiling_index);
179 cz_send_msg_to_smc_with_parameter(adev,
180 PPSMC_MSG_ExecuteJob,
181 priv->toc_entry_initialize_index);
187 *Check if the FW has been loaded, SMU will not return if loading
190 static int cz_smu_check_fw_load_finish(struct amdgpu_device *adev,
/* indirect-MMIO address of UcodeLoadStatus inside the SMU firmware header */
194 uint32_t index = SMN_MP1_SRAM_START_ADDR +
195 SMU8_FIRMWARE_HEADER_LOCATION +
196 offsetof(struct SMU8_Firmware_Header, UcodeLoadStatus);
198 WREG32(mmMP0PUB_IND_INDEX, index);
/* poll until every firmware bit requested in fw_mask is reported loaded */
200 for (i = 0; i < adev->usec_timeout; i++) {
201 if (fw_mask == (RREG32(mmMP0PUB_IND_DATA) & fw_mask))
206 if (i >= adev->usec_timeout) {
208 "SMU check loaded firmware failed, expecting 0x%x, getting 0x%x",
209 fw_mask, RREG32(mmMP0PUB_IND_DATA));
217 * interfaces for different ip blocks to check firmware loading status
218 * 0 for success otherwise failed
/* Maps each ucode id to its AMDGPU_*_UCODE_LOADED bit in adev->smu.fw_flags. */
220 static int cz_smu_check_finished(struct amdgpu_device *adev,
221 enum AMDGPU_UCODE_ID id)
224 case AMDGPU_UCODE_ID_SDMA0:
225 if (adev->smu.fw_flags & AMDGPU_SDMA0_UCODE_LOADED)
228 case AMDGPU_UCODE_ID_SDMA1:
229 if (adev->smu.fw_flags & AMDGPU_SDMA1_UCODE_LOADED)
232 case AMDGPU_UCODE_ID_CP_CE:
233 if (adev->smu.fw_flags & AMDGPU_CPCE_UCODE_LOADED)
236 case AMDGPU_UCODE_ID_CP_PFP:
237 if (adev->smu.fw_flags & AMDGPU_CPPFP_UCODE_LOADED)
239 case AMDGPU_UCODE_ID_CP_ME:
240 if (adev->smu.fw_flags & AMDGPU_CPME_UCODE_LOADED)
243 case AMDGPU_UCODE_ID_CP_MEC1:
244 if (adev->smu.fw_flags & AMDGPU_CPMEC1_UCODE_LOADED)
247 case AMDGPU_UCODE_ID_CP_MEC2:
248 if (adev->smu.fw_flags & AMDGPU_CPMEC2_UCODE_LOADED)
251 case AMDGPU_UCODE_ID_RLC_G:
252 if (adev->smu.fw_flags & AMDGPU_CPRLC_UCODE_LOADED)
255 case AMDGPU_UCODE_ID_MAXIMUM:
/*
 * Manually load the MEC (compute micro engine) firmware: halt both MEC
 * pipes, configure the CPC instruction-cache base control, then point the
 * instruction cache at the ucode's GPU address (split into lo/hi halves).
 */
263 static int cz_load_mec_firmware(struct amdgpu_device *adev)
265 struct amdgpu_firmware_info *ucode =
266 &adev->firmware.ucode[AMDGPU_UCODE_ID_CP_MEC1];
/* nothing to do if the MEC1 firmware was never fetched */
270 if (ucode->fw == NULL)
273 /* Disable MEC parsing/prefetching */
274 tmp = RREG32(mmCP_MEC_CNTL);
275 tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME1_HALT, 1);
276 tmp = REG_SET_FIELD(tmp, CP_MEC_CNTL, MEC_ME2_HALT, 1);
277 WREG32(mmCP_MEC_CNTL, tmp);
/* instruction cache: VMID 0, no ATC, default cache policy, MTYPE 1 */
279 tmp = RREG32(mmCP_CPC_IC_BASE_CNTL);
280 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, VMID, 0);
281 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, ATC, 0);
282 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, CACHE_POLICY, 0);
283 tmp = REG_SET_FIELD(tmp, CP_CPC_IC_BASE_CNTL, MTYPE, 1);
284 WREG32(mmCP_CPC_IC_BASE_CNTL, tmp);
286 reg_data = lower_32_bits(ucode->mc_addr) &
287 REG_FIELD_MASK(CP_CPC_IC_BASE_LO, IC_BASE_LO);
288 WREG32(mmCP_CPC_IC_BASE_LO, reg_data);
290 reg_data = upper_32_bits(ucode->mc_addr) &
291 REG_FIELD_MASK(CP_CPC_IC_BASE_HI, IC_BASE_HI);
292 WREG32(mmCP_CPC_IC_BASE_HI, reg_data);
/*
 * Bring up the SMU firmware-loading path: build the mask of firmwares the
 * SMU must load (Stoney has no SDMA1/MEC2, so those bits are dropped),
 * request the load, wait for completion, hand-load MEC on Carrizo/Stoney,
 * and finally record which firmwares are loaded in adev->smu.fw_flags.
 */
297 int cz_smu_start(struct amdgpu_device *adev)
301 uint32_t fw_to_check = UCODE_ID_RLC_G_MASK |
302 UCODE_ID_SDMA0_MASK |
303 UCODE_ID_SDMA1_MASK |
304 UCODE_ID_CP_CE_MASK |
305 UCODE_ID_CP_ME_MASK |
306 UCODE_ID_CP_PFP_MASK |
307 UCODE_ID_CP_MEC_JT1_MASK |
308 UCODE_ID_CP_MEC_JT2_MASK;
/* Stoney lacks a second SDMA engine and a second MEC jump table */
310 if (adev->asic_type == CHIP_STONEY)
311 fw_to_check &= ~(UCODE_ID_SDMA1_MASK | UCODE_ID_CP_MEC_JT2_MASK);
313 cz_smu_request_load_fw(adev);
314 ret = cz_smu_check_fw_load_finish(adev, fw_to_check);
318 /* manually load MEC firmware for CZ */
319 if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY) {
320 ret = cz_load_mec_firmware(adev);
322 dev_err(adev->dev, "(%d) Mec Firmware load failed\n", ret);
327 /* setup fw load flag */
328 adev->smu.fw_flags = AMDGPU_SDMA0_UCODE_LOADED |
329 AMDGPU_SDMA1_UCODE_LOADED |
330 AMDGPU_CPCE_UCODE_LOADED |
331 AMDGPU_CPPFP_UCODE_LOADED |
332 AMDGPU_CPME_UCODE_LOADED |
333 AMDGPU_CPMEC1_UCODE_LOADED |
334 AMDGPU_CPMEC2_UCODE_LOADED |
335 AMDGPU_CPRLC_UCODE_LOADED;
/* keep fw_flags consistent with the trimmed fw_to_check mask above */
337 if (adev->asic_type == CHIP_STONEY)
338 adev->smu.fw_flags &= ~(AMDGPU_SDMA1_UCODE_LOADED | AMDGPU_CPMEC2_UCODE_LOADED);
/*
 * Translate an SMU UCODE_ID_* value into the driver's AMDGPU_UCODE_ID
 * enum. Both MEC jump tables map onto AMDGPU_UCODE_ID_CP_MEC1; an
 * unrecognized type logs an error and yields AMDGPU_UCODE_ID_MAXIMUM.
 */
343 static uint32_t cz_convert_fw_type(uint32_t fw_type)
345 enum AMDGPU_UCODE_ID result = AMDGPU_UCODE_ID_MAXIMUM;
349 result = AMDGPU_UCODE_ID_SDMA0;
352 result = AMDGPU_UCODE_ID_SDMA1;
355 result = AMDGPU_UCODE_ID_CP_CE;
357 case UCODE_ID_CP_PFP:
358 result = AMDGPU_UCODE_ID_CP_PFP;
361 result = AMDGPU_UCODE_ID_CP_ME;
/* both MEC jump tables live in the MEC1 firmware image */
363 case UCODE_ID_CP_MEC_JT1:
364 case UCODE_ID_CP_MEC_JT2:
365 result = AMDGPU_UCODE_ID_CP_MEC1;
368 result = AMDGPU_UCODE_ID_RLC_G;
371 DRM_ERROR("UCode type is out of range!");
/*
 * Translate a CZ scratch-entry enum into the 8-bit task argument the SMU
 * expects in a SMU_Task: ucode entries map 1:1 onto UCODE_ID_* values,
 * while data/register-restore entries collapse onto the TASK_ARG_* codes.
 */
377 static uint8_t cz_smu_translate_firmware_enum_to_arg(
378 enum cz_scratch_entry firmware_enum)
382 switch (firmware_enum) {
383 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0:
384 ret = UCODE_ID_SDMA0;
386 case CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1:
387 ret = UCODE_ID_SDMA1;
389 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE:
390 ret = UCODE_ID_CP_CE;
392 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP:
393 ret = UCODE_ID_CP_PFP;
395 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME:
396 ret = UCODE_ID_CP_ME;
398 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1:
399 ret = UCODE_ID_CP_MEC_JT1;
401 case CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2:
402 ret = UCODE_ID_CP_MEC_JT2;
404 case CZ_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG:
405 ret = UCODE_ID_GMCON_RENG;
407 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G:
408 ret = UCODE_ID_RLC_G;
410 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH:
411 ret = UCODE_ID_RLC_SCRATCH;
413 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM:
414 ret = UCODE_ID_RLC_SRM_ARAM;
416 case CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM:
417 ret = UCODE_ID_RLC_SRM_DRAM;
419 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM:
420 ret = UCODE_ID_DMCU_ERAM;
422 case CZ_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM:
423 ret = UCODE_ID_DMCU_IRAM;
425 case CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING:
426 ret = TASK_ARG_INIT_MM_PWR_LOG;
/* all register save/restore data entries share the MMIO task argument */
428 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_HALT:
429 case CZ_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING:
430 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS:
431 case CZ_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT:
432 case CZ_SCRATCH_ENTRY_DATA_ID_SDMA_START:
433 case CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS:
434 ret = TASK_ARG_REG_MMIO;
436 case CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE:
437 ret = TASK_ARG_INIT_CLK_TABLE;
/*
 * Describe one loaded firmware image in @entry: resolve the scratch enum
 * to the driver's ucode slot, record the image's GPU address (split lo/hi)
 * and size. For the MEC jump-table entries, point at the jump-table
 * region inside the MEC image instead of the whole ucode payload.
 */
444 static int cz_smu_populate_single_firmware_entry(struct amdgpu_device *adev,
445 enum cz_scratch_entry firmware_enum,
446 struct cz_buffer_entry *entry)
450 uint8_t ucode_id = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
451 enum AMDGPU_UCODE_ID id = cz_convert_fw_type(ucode_id);
452 struct amdgpu_firmware_info *ucode = &adev->firmware.ucode[id];
453 const struct gfx_firmware_header_v1_0 *header;
/* firmware not fetched for this slot — nothing to describe */
455 if (ucode->fw == NULL)
458 gpu_addr = ucode->mc_addr;
459 header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
460 data_size = le32_to_cpu(header->header.ucode_size_bytes);
/* JT offsets/sizes are stored in dwords in the header; convert to bytes */
462 if ((firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1) ||
463 (firmware_enum == CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2)) {
464 gpu_addr += le32_to_cpu(header->jt_offset) << 2;
465 data_size = le32_to_cpu(header->jt_size) << 2;
468 entry->mc_addr_low = lower_32_bits(gpu_addr);
469 entry->mc_addr_high = upper_32_bits(gpu_addr);
470 entry->data_size = data_size;
471 entry->firmware_ID = firmware_enum;
476 static int cz_smu_populate_single_scratch_entry(struct amdgpu_device *adev,
477 enum cz_scratch_entry scratch_type,
478 uint32_t size_in_byte,
479 struct cz_buffer_entry *entry)
481 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
482 uint64_t mc_addr = (((uint64_t) priv->smu_buffer.mc_addr_high) << 32) |
483 priv->smu_buffer.mc_addr_low;
484 mc_addr += size_in_byte;
486 priv->smu_buffer_used_bytes += size_in_byte;
487 entry->data_size = size_in_byte;
488 entry->kaddr = priv->smu_buffer.kaddr + priv->smu_buffer_used_bytes;
489 entry->mc_addr_low = lower_32_bits(mc_addr);
490 entry->mc_addr_high = upper_32_bits(mc_addr);
491 entry->firmware_ID = scratch_type;
/*
 * Append a UCODE_LOAD task to the TOC for @firmware_enum. The task's
 * address/size come from the driver_buffer entry previously populated by
 * cz_smu_populate_single_firmware_entry(); @is_last terminates the chain
 * with END_OF_TASK_LIST, otherwise the task links to the next TOC slot.
 */
496 static int cz_smu_populate_single_ucode_load_task(struct amdgpu_device *adev,
497 enum cz_scratch_entry firmware_enum,
501 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
502 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
503 struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
505 task->type = TASK_TYPE_UCODE_LOAD;
506 task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
507 task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
/* find the firmware descriptor matching this enum */
509 for (i = 0; i < priv->driver_buffer_length; i++)
510 if (priv->driver_buffer[i].firmware_ID == firmware_enum)
513 if (i >= priv->driver_buffer_length) {
514 dev_err(adev->dev, "Invalid Firmware Type\n");
518 task->addr.low = priv->driver_buffer[i].mc_addr_low;
519 task->addr.high = priv->driver_buffer[i].mc_addr_high;
520 task->size_bytes = priv->driver_buffer[i].data_size;
/*
 * Append a scratch-buffer task of the given @type (save/load/initialize)
 * to the TOC for @firmware_enum, resolving its address/size from the
 * scratch_buffer table. The IH-registers entry additionally seeds its
 * scratch area with a restore-on-load metadata command.
 */
525 static int cz_smu_populate_single_scratch_task(struct amdgpu_device *adev,
526 enum cz_scratch_entry firmware_enum,
527 uint8_t type, bool is_last)
530 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
531 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
532 struct SMU_Task *task = &toc->tasks[priv->toc_entry_used_count++];
535 task->arg = cz_smu_translate_firmware_enum_to_arg(firmware_enum);
536 task->next = is_last ? END_OF_TASK_LIST : priv->toc_entry_used_count;
/* find the scratch descriptor matching this enum */
538 for (i = 0; i < priv->scratch_buffer_length; i++)
539 if (priv->scratch_buffer[i].firmware_ID == firmware_enum)
542 if (i >= priv->scratch_buffer_length) {
543 dev_err(adev->dev, "Invalid Firmware Type\n");
547 task->addr.low = priv->scratch_buffer[i].mc_addr_low;
548 task->addr.high = priv->scratch_buffer[i].mc_addr_high;
549 task->size_bytes = priv->scratch_buffer[i].data_size;
/* IH register restore: tell the SMU to replay the saved registers on load */
551 if (CZ_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS == firmware_enum) {
552 struct cz_ih_meta_data *pIHReg_restore =
553 (struct cz_ih_meta_data *)priv->scratch_buffer[i].kaddr;
554 pIHReg_restore->command =
555 METADATA_CMD_MODE0 | METADATA_PERFORM_ON_LOAD;
/* TOC job: save the RLC SRM ARAM contents into its scratch buffer. */
561 static int cz_smu_construct_toc_for_rlc_aram_save(struct amdgpu_device *adev)
563 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
/* remember where this single-task job starts so it can be executed later */
564 priv->toc_entry_aram = priv->toc_entry_used_count;
565 cz_smu_populate_single_scratch_task(adev,
566 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
567 TASK_TYPE_UCODE_SAVE, true);
/*
 * TOC job list for entering VDDGFX power-off: save the RLC scratch and
 * RLC SRM DRAM regions before graphics voltage is removed.
 */
572 static int cz_smu_construct_toc_for_vddgfx_enter(struct amdgpu_device *adev)
574 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
575 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
577 toc->JobList[JOB_GFX_SAVE] = (uint8_t)priv->toc_entry_used_count;
578 cz_smu_populate_single_scratch_task(adev,
579 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
580 TASK_TYPE_UCODE_SAVE, false);
581 cz_smu_populate_single_scratch_task(adev,
582 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
583 TASK_TYPE_UCODE_SAVE, true);
/*
 * TOC job list for leaving VDDGFX power-off: reload the CP/RLC firmwares
 * (when SMU-managed loading is enabled) and restore the saved RLC scratch
 * regions.
 */
588 static int cz_smu_construct_toc_for_vddgfx_exit(struct amdgpu_device *adev)
590 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
591 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
593 toc->JobList[JOB_GFX_RESTORE] = (uint8_t)priv->toc_entry_used_count;
596 if (adev->firmware.smu_load) {
597 cz_smu_populate_single_ucode_load_task(adev,
598 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
599 cz_smu_populate_single_ucode_load_task(adev,
600 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
601 cz_smu_populate_single_ucode_load_task(adev,
602 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
603 cz_smu_populate_single_ucode_load_task(adev,
604 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
/* NOTE(review): Stoney loads JT1 again where other ASICs load JT2 —
 * the listing elides the else branch here; confirm against the full file. */
605 if (adev->asic_type == CHIP_STONEY) {
606 cz_smu_populate_single_ucode_load_task(adev,
607 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
609 cz_smu_populate_single_ucode_load_task(adev,
610 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
612 cz_smu_populate_single_ucode_load_task(adev,
613 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, false);
616 /* populate scratch */
617 cz_smu_populate_single_scratch_task(adev,
618 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
619 TASK_TYPE_UCODE_LOAD, false);
620 cz_smu_populate_single_scratch_task(adev,
621 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
622 TASK_TYPE_UCODE_LOAD, false);
623 cz_smu_populate_single_scratch_task(adev,
624 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
625 TASK_TYPE_UCODE_LOAD, true);
/* TOC job: initialize the multimedia power-profiling log scratch area. */
630 static int cz_smu_construct_toc_for_power_profiling(struct amdgpu_device *adev)
632 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
634 priv->toc_entry_power_profiling_index = priv->toc_entry_used_count;
636 cz_smu_populate_single_scratch_task(adev,
637 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
638 TASK_TYPE_INITIALIZE, true);
/*
 * TOC job list for initial boot firmware loading: chain SDMA, CP and RLC
 * ucode-load tasks (when SMU-managed loading is enabled). On Stoney the
 * single SDMA engine / single MEC jump table is loaded twice instead of
 * the second instance.
 */
642 static int cz_smu_construct_toc_for_bootup(struct amdgpu_device *adev)
644 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
646 priv->toc_entry_initialize_index = priv->toc_entry_used_count;
648 if (adev->firmware.smu_load) {
649 cz_smu_populate_single_ucode_load_task(adev,
650 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
/* NOTE(review): Stoney repeats SDMA0 where other ASICs load SDMA1 —
 * the listing elides the else branch here; confirm against the full file. */
651 if (adev->asic_type == CHIP_STONEY) {
652 cz_smu_populate_single_ucode_load_task(adev,
653 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0, false);
655 cz_smu_populate_single_ucode_load_task(adev,
656 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1, false);
658 cz_smu_populate_single_ucode_load_task(adev,
659 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE, false);
660 cz_smu_populate_single_ucode_load_task(adev,
661 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP, false);
662 cz_smu_populate_single_ucode_load_task(adev,
663 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME, false);
664 cz_smu_populate_single_ucode_load_task(adev,
665 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
666 if (adev->asic_type == CHIP_STONEY) {
667 cz_smu_populate_single_ucode_load_task(adev,
668 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, false);
670 cz_smu_populate_single_ucode_load_task(adev,
671 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, false);
/* RLC_G is the last task in the bootup chain */
673 cz_smu_populate_single_ucode_load_task(adev,
674 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G, true);
/* TOC job: initialize the SMU8 fusion clock-table scratch area. */
680 static int cz_smu_construct_toc_for_clock_table(struct amdgpu_device *adev)
682 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
684 priv->toc_entry_clock_table = priv->toc_entry_used_count;
686 cz_smu_populate_single_scratch_task(adev,
687 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
688 TASK_TYPE_INITIALIZE, true);
/* Mark every TOC job-list slot as IGNORE_JOB before real jobs are filled in. */
693 static int cz_smu_initialize_toc_empty_job_list(struct amdgpu_device *adev)
696 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
697 struct TOC *toc = (struct TOC *)priv->toc_buffer.kaddr;
699 for (i = 0; i < NUM_JOBLIST_ENTRIES; i++)
700 toc->JobList[i] = (uint8_t)IGNORE_JOB;
706 * cz smu uninitialization
/* Tear down in reverse of cz_smu_init: release the TOC and SMU buffer
 * objects, free the private state, and drop the ucode BO if SMU-managed
 * loading allocated one. */
708 int cz_smu_fini(struct amdgpu_device *adev)
710 amdgpu_bo_unref(&adev->smu.toc_buf);
711 amdgpu_bo_unref(&adev->smu.smu_buf);
712 kfree(adev->smu.priv);
713 adev->smu.priv = NULL;
714 if (adev->firmware.smu_load)
715 amdgpu_ucode_fini_bo(adev);
/*
 * Ask the SMU to dump its clock table into the fusion-clktable scratch
 * buffer and hand the caller a CPU pointer to it via *@table. Fails if no
 * clktable scratch entry was registered at init time.
 */
720 int cz_smu_download_pptable(struct amdgpu_device *adev, void **table)
723 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
/* locate the clock-table scratch entry set up by cz_smu_init */
725 for (i = 0; i < priv->scratch_buffer_length; i++)
726 if (priv->scratch_buffer[i].firmware_ID ==
727 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
730 if (i >= priv->scratch_buffer_length) {
731 dev_err(adev->dev, "Invalid Scratch Type\n");
735 *table = (struct SMU8_Fusion_ClkTable *)priv->scratch_buffer[i].kaddr;
737 /* prepare buffer for pptable */
738 cz_send_msg_to_smc_with_parameter(adev,
739 PPSMC_MSG_SetClkTableAddrHi,
740 priv->scratch_buffer[i].mc_addr_high);
741 cz_send_msg_to_smc_with_parameter(adev,
742 PPSMC_MSG_SetClkTableAddrLo,
743 priv->scratch_buffer[i].mc_addr_low);
744 cz_send_msg_to_smc_with_parameter(adev,
745 PPSMC_MSG_ExecuteJob,
746 priv->toc_entry_clock_table);
748 /* actual downloading */
749 cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToDram);
/*
 * Push the (caller-modified) clock table from the fusion-clktable scratch
 * buffer back into the SMU. Mirrors cz_smu_download_pptable but transfers
 * in the opposite direction (DRAM -> SMU).
 */
754 int cz_smu_upload_pptable(struct amdgpu_device *adev)
757 struct cz_smu_private_data *priv = cz_smu_get_priv(adev);
/* locate the clock-table scratch entry set up by cz_smu_init */
759 for (i = 0; i < priv->scratch_buffer_length; i++)
760 if (priv->scratch_buffer[i].firmware_ID ==
761 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE)
764 if (i >= priv->scratch_buffer_length) {
765 dev_err(adev->dev, "Invalid Scratch Type\n");
770 cz_send_msg_to_smc_with_parameter(adev,
771 PPSMC_MSG_SetClkTableAddrHi,
772 priv->scratch_buffer[i].mc_addr_high);
773 cz_send_msg_to_smc_with_parameter(adev,
774 PPSMC_MSG_SetClkTableAddrLo,
775 priv->scratch_buffer[i].mc_addr_low);
776 cz_send_msg_to_smc_with_parameter(adev,
777 PPSMC_MSG_ExecuteJob,
778 priv->toc_entry_clock_table);
780 /* actual uploading */
781 cz_send_msg_to_smc(adev, PPSMC_MSG_ClkTableXferToSmu);
787 * cz smumgr functions initialization
/* SMU manager vtable: only the loaded-firmware check is wired up; the SMU
 * drives firmware loading itself, so the request hooks stay NULL. */
789 static const struct amdgpu_smumgr_funcs cz_smumgr_funcs = {
790 .check_fw_load_finish = cz_smu_check_finished,
791 .request_smu_load_fw = NULL,
792 .request_smu_specific_fw = NULL,
796 * cz smu initialization
798 int cz_smu_init(struct amdgpu_device *adev)
801 uint64_t mc_addr = 0;
802 struct amdgpu_bo **toc_buf = &adev->smu.toc_buf;
803 struct amdgpu_bo **smu_buf = &adev->smu.smu_buf;
804 void *toc_buf_ptr = NULL;
805 void *smu_buf_ptr = NULL;
807 struct cz_smu_private_data *priv =
808 kzalloc(sizeof(struct cz_smu_private_data), GFP_KERNEL);
812 /* allocate firmware buffers */
813 if (adev->firmware.smu_load)
814 amdgpu_ucode_init_bo(adev);
816 adev->smu.priv = priv;
817 adev->smu.fw_flags = 0;
818 priv->toc_buffer.data_size = 4096;
820 priv->smu_buffer.data_size =
821 ALIGN(UCODE_ID_RLC_SCRATCH_SIZE_BYTE, 32) +
822 ALIGN(UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE, 32) +
823 ALIGN(UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE, 32) +
824 ALIGN(sizeof(struct SMU8_MultimediaPowerLogData), 32) +
825 ALIGN(sizeof(struct SMU8_Fusion_ClkTable), 32);
827 /* prepare toc buffer and smu buffer:
828 * 1. create amdgpu_bo for toc buffer and smu buffer
830 * 3. map kernel virtual address
832 ret = amdgpu_bo_create(adev, priv->toc_buffer.data_size, PAGE_SIZE,
833 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
837 dev_err(adev->dev, "(%d) SMC TOC buffer allocation failed\n", ret);
841 ret = amdgpu_bo_create(adev, priv->smu_buffer.data_size, PAGE_SIZE,
842 true, AMDGPU_GEM_DOMAIN_GTT, 0, NULL, NULL,
846 dev_err(adev->dev, "(%d) SMC Internal buffer allocation failed\n", ret);
850 /* toc buffer reserve/pin/map */
851 ret = amdgpu_bo_reserve(adev->smu.toc_buf, false);
853 amdgpu_bo_unref(&adev->smu.toc_buf);
854 dev_err(adev->dev, "(%d) SMC TOC buffer reserve failed\n", ret);
858 ret = amdgpu_bo_pin(adev->smu.toc_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
860 amdgpu_bo_unreserve(adev->smu.toc_buf);
861 amdgpu_bo_unref(&adev->smu.toc_buf);
862 dev_err(adev->dev, "(%d) SMC TOC buffer pin failed\n", ret);
866 ret = amdgpu_bo_kmap(*toc_buf, &toc_buf_ptr);
868 goto smu_init_failed;
870 amdgpu_bo_unreserve(adev->smu.toc_buf);
872 priv->toc_buffer.mc_addr_low = lower_32_bits(mc_addr);
873 priv->toc_buffer.mc_addr_high = upper_32_bits(mc_addr);
874 priv->toc_buffer.kaddr = toc_buf_ptr;
876 /* smu buffer reserve/pin/map */
877 ret = amdgpu_bo_reserve(adev->smu.smu_buf, false);
879 amdgpu_bo_unref(&adev->smu.smu_buf);
880 dev_err(adev->dev, "(%d) SMC Internal buffer reserve failed\n", ret);
884 ret = amdgpu_bo_pin(adev->smu.smu_buf, AMDGPU_GEM_DOMAIN_GTT, &mc_addr);
886 amdgpu_bo_unreserve(adev->smu.smu_buf);
887 amdgpu_bo_unref(&adev->smu.smu_buf);
888 dev_err(adev->dev, "(%d) SMC Internal buffer pin failed\n", ret);
892 ret = amdgpu_bo_kmap(*smu_buf, &smu_buf_ptr);
894 goto smu_init_failed;
896 amdgpu_bo_unreserve(adev->smu.smu_buf);
898 priv->smu_buffer.mc_addr_low = lower_32_bits(mc_addr);
899 priv->smu_buffer.mc_addr_high = upper_32_bits(mc_addr);
900 priv->smu_buffer.kaddr = smu_buf_ptr;
902 if (adev->firmware.smu_load) {
903 if (cz_smu_populate_single_firmware_entry(adev,
904 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
905 &priv->driver_buffer[priv->driver_buffer_length++]))
906 goto smu_init_failed;
908 if (adev->asic_type == CHIP_STONEY) {
909 if (cz_smu_populate_single_firmware_entry(adev,
910 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA0,
911 &priv->driver_buffer[priv->driver_buffer_length++]))
912 goto smu_init_failed;
914 if (cz_smu_populate_single_firmware_entry(adev,
915 CZ_SCRATCH_ENTRY_UCODE_ID_SDMA1,
916 &priv->driver_buffer[priv->driver_buffer_length++]))
917 goto smu_init_failed;
919 if (cz_smu_populate_single_firmware_entry(adev,
920 CZ_SCRATCH_ENTRY_UCODE_ID_CP_CE,
921 &priv->driver_buffer[priv->driver_buffer_length++]))
922 goto smu_init_failed;
923 if (cz_smu_populate_single_firmware_entry(adev,
924 CZ_SCRATCH_ENTRY_UCODE_ID_CP_PFP,
925 &priv->driver_buffer[priv->driver_buffer_length++]))
926 goto smu_init_failed;
927 if (cz_smu_populate_single_firmware_entry(adev,
928 CZ_SCRATCH_ENTRY_UCODE_ID_CP_ME,
929 &priv->driver_buffer[priv->driver_buffer_length++]))
930 goto smu_init_failed;
931 if (cz_smu_populate_single_firmware_entry(adev,
932 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
933 &priv->driver_buffer[priv->driver_buffer_length++]))
934 goto smu_init_failed;
935 if (adev->asic_type == CHIP_STONEY) {
936 if (cz_smu_populate_single_firmware_entry(adev,
937 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1,
938 &priv->driver_buffer[priv->driver_buffer_length++]))
939 goto smu_init_failed;
941 if (cz_smu_populate_single_firmware_entry(adev,
942 CZ_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2,
943 &priv->driver_buffer[priv->driver_buffer_length++]))
944 goto smu_init_failed;
946 if (cz_smu_populate_single_firmware_entry(adev,
947 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_G,
948 &priv->driver_buffer[priv->driver_buffer_length++]))
949 goto smu_init_failed;
952 if (cz_smu_populate_single_scratch_entry(adev,
953 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH,
954 UCODE_ID_RLC_SCRATCH_SIZE_BYTE,
955 &priv->scratch_buffer[priv->scratch_buffer_length++]))
956 goto smu_init_failed;
957 if (cz_smu_populate_single_scratch_entry(adev,
958 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM,
959 UCODE_ID_RLC_SRM_ARAM_SIZE_BYTE,
960 &priv->scratch_buffer[priv->scratch_buffer_length++]))
961 goto smu_init_failed;
962 if (cz_smu_populate_single_scratch_entry(adev,
963 CZ_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM,
964 UCODE_ID_RLC_SRM_DRAM_SIZE_BYTE,
965 &priv->scratch_buffer[priv->scratch_buffer_length++]))
966 goto smu_init_failed;
967 if (cz_smu_populate_single_scratch_entry(adev,
968 CZ_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING,
969 sizeof(struct SMU8_MultimediaPowerLogData),
970 &priv->scratch_buffer[priv->scratch_buffer_length++]))
971 goto smu_init_failed;
972 if (cz_smu_populate_single_scratch_entry(adev,
973 CZ_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE,
974 sizeof(struct SMU8_Fusion_ClkTable),
975 &priv->scratch_buffer[priv->scratch_buffer_length++]))
976 goto smu_init_failed;
978 cz_smu_initialize_toc_empty_job_list(adev);
979 cz_smu_construct_toc_for_rlc_aram_save(adev);
980 cz_smu_construct_toc_for_vddgfx_enter(adev);
981 cz_smu_construct_toc_for_vddgfx_exit(adev);
982 cz_smu_construct_toc_for_power_profiling(adev);
983 cz_smu_construct_toc_for_bootup(adev);
984 cz_smu_construct_toc_for_clock_table(adev);
985 /* init the smumgr functions */
986 adev->smu.smumgr_funcs = &cz_smumgr_funcs;
991 amdgpu_bo_unref(toc_buf);
992 amdgpu_bo_unref(smu_buf);