1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (c) 2010,2015,2019 The Linux Foundation. All rights reserved.
3 * Copyright (C) 2015 Linaro Ltd.
5 #include <linux/platform_device.h>
6 #include <linux/init.h>
7 #include <linux/cpumask.h>
8 #include <linux/export.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/module.h>
11 #include <linux/types.h>
12 #include <linux/qcom_scm.h>
14 #include <linux/of_address.h>
15 #include <linux/of_platform.h>
16 #include <linux/clk.h>
17 #include <linux/reset-controller.h>
18 #include <linux/arm-smccc.h>
22 static bool download_mode = IS_ENABLED(CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT);
23 module_param(download_mode, bool, 0);
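/*
 * Note (editorial, assumed parameter path): the default comes from
 * CONFIG_QCOM_SCM_DOWNLOAD_MODE_DEFAULT at build time; it can also be
 * overridden at boot, e.g. with "qcom_scm.download_mode=1" on the kernel
 * command line.
 */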
25 #define SCM_HAS_CORE_CLK BIT(0)
26 #define SCM_HAS_IFACE_CLK BIT(1)
27 #define SCM_HAS_BUS_CLK BIT(2)
32 struct clk *iface_clk;
34 struct reset_controller_dev reset;
39 struct qcom_scm_current_perm_info {
47 struct qcom_scm_mem_map_info {
52 #define QCOM_SCM_FLAG_COLDBOOT_CPU0 0x00
53 #define QCOM_SCM_FLAG_COLDBOOT_CPU1 0x01
54 #define QCOM_SCM_FLAG_COLDBOOT_CPU2 0x08
55 #define QCOM_SCM_FLAG_COLDBOOT_CPU3 0x20
57 #define QCOM_SCM_FLAG_WARMBOOT_CPU0 0x04
58 #define QCOM_SCM_FLAG_WARMBOOT_CPU1 0x02
59 #define QCOM_SCM_FLAG_WARMBOOT_CPU2 0x10
60 #define QCOM_SCM_FLAG_WARMBOOT_CPU3 0x40
62 struct qcom_scm_wb_entry {
67 static struct qcom_scm_wb_entry qcom_scm_wb[] = {
68 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU0 },
69 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU1 },
70 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU2 },
71 { .flag = QCOM_SCM_FLAG_WARMBOOT_CPU3 },
74 static const char *qcom_scm_convention_names[] = {
75 [SMC_CONVENTION_UNKNOWN] = "unknown",
76 [SMC_CONVENTION_ARM_32] = "smc arm 32",
77 [SMC_CONVENTION_ARM_64] = "smc arm 64",
78 [SMC_CONVENTION_LEGACY] = "smc legacy",
81 static struct qcom_scm *__scm;
83 static int qcom_scm_clk_enable(void)
87 ret = clk_prepare_enable(__scm->core_clk);
91 ret = clk_prepare_enable(__scm->iface_clk);
95 ret = clk_prepare_enable(__scm->bus_clk);
102 clk_disable_unprepare(__scm->iface_clk);
104 clk_disable_unprepare(__scm->core_clk);
109 static void qcom_scm_clk_disable(void)
111 clk_disable_unprepare(__scm->core_clk);
112 clk_disable_unprepare(__scm->iface_clk);
113 clk_disable_unprepare(__scm->bus_clk);
116 enum qcom_scm_convention qcom_scm_convention = SMC_CONVENTION_UNKNOWN;
117 static DEFINE_SPINLOCK(scm_query_lock);
119 static enum qcom_scm_convention __get_convention(void)
122 struct qcom_scm_desc desc = {
123 .svc = QCOM_SCM_SVC_INFO,
124 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
125 .args[0] = SCM_SMC_FNID(QCOM_SCM_SVC_INFO,
126 QCOM_SCM_INFO_IS_CALL_AVAIL) |
127 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT),
128 .arginfo = QCOM_SCM_ARGS(1),
129 .owner = ARM_SMCCC_OWNER_SIP,
131 struct qcom_scm_res res;
132 enum qcom_scm_convention probed_convention;
136 if (likely(qcom_scm_convention != SMC_CONVENTION_UNKNOWN))
137 return qcom_scm_convention;
140 * Device isn't required here: with a single value argument there is
141 * nothing to dma_map_single() for the secure world.
143 probed_convention = SMC_CONVENTION_ARM_64;
144 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
145 if (!ret && res.result[0] == 1)
149 * Some SC7180 firmware versions didn't implement the
150 * QCOM_SCM_INFO_IS_CALL_AVAIL call, so we fall back to forcing the ARM_64
151 * calling convention on them. Luckily we don't make any early calls into
152 * the firmware on these SoCs, so the device pointer will be valid here to
153 * check whether the compatible matches.
155 if (of_device_is_compatible(__scm ? __scm->dev->of_node : NULL, "qcom,scm-sc7180")) {
160 probed_convention = SMC_CONVENTION_ARM_32;
161 ret = __scm_smc_call(NULL, &desc, probed_convention, &res, true);
162 if (!ret && res.result[0] == 1)
165 probed_convention = SMC_CONVENTION_LEGACY;
167 spin_lock_irqsave(&scm_query_lock, flags);
168 if (probed_convention != qcom_scm_convention) {
169 qcom_scm_convention = probed_convention;
170 pr_info("qcom_scm: convention: %s%s\n",
171 qcom_scm_convention_names[qcom_scm_convention],
172 forced ? " (forced)" : "");
174 spin_unlock_irqrestore(&scm_query_lock, flags);
176 return qcom_scm_convention;
180 * qcom_scm_call() - Invoke a syscall in the secure world
182 * @svc_id: service identifier
183 * @cmd_id: command identifier
184 * @desc: Descriptor structure containing arguments and return values
186 * Sends a command to the SCM and waits for the command to finish processing.
187 * This should *only* be called in pre-emptible context.
189 static int qcom_scm_call(struct device *dev, const struct qcom_scm_desc *desc,
190 struct qcom_scm_res *res)
193 switch (__get_convention()) {
194 case SMC_CONVENTION_ARM_32:
195 case SMC_CONVENTION_ARM_64:
196 return scm_smc_call(dev, desc, res, false);
197 case SMC_CONVENTION_LEGACY:
198 return scm_legacy_call(dev, desc, res);
200 pr_err("Unknown current SCM calling convention.\n");
206 * qcom_scm_call_atomic() - atomic variation of qcom_scm_call()
208 * @svc_id: service identifier
209 * @cmd_id: command identifier
210 * @desc: Descriptor structure containing arguments and return values
211 * @res: Structure containing results from SMC/HVC call
213 * Sends a command to the SCM and waits for the command to finish processing.
214 * This can be called in atomic context.
216 static int qcom_scm_call_atomic(struct device *dev,
217 const struct qcom_scm_desc *desc,
218 struct qcom_scm_res *res)
220 switch (__get_convention()) {
221 case SMC_CONVENTION_ARM_32:
222 case SMC_CONVENTION_ARM_64:
223 return scm_smc_call(dev, desc, res, true);
224 case SMC_CONVENTION_LEGACY:
225 return scm_legacy_call_atomic(dev, desc, res);
227 pr_err("Unknown current SCM calling convention.\n");
232 static bool __qcom_scm_is_call_available(struct device *dev, u32 svc_id,
236 struct qcom_scm_desc desc = {
237 .svc = QCOM_SCM_SVC_INFO,
238 .cmd = QCOM_SCM_INFO_IS_CALL_AVAIL,
239 .owner = ARM_SMCCC_OWNER_SIP,
241 struct qcom_scm_res res;
243 desc.arginfo = QCOM_SCM_ARGS(1);
244 switch (__get_convention()) {
245 case SMC_CONVENTION_ARM_32:
246 case SMC_CONVENTION_ARM_64:
247 desc.args[0] = SCM_SMC_FNID(svc_id, cmd_id) |
248 (ARM_SMCCC_OWNER_SIP << ARM_SMCCC_OWNER_SHIFT);
250 case SMC_CONVENTION_LEGACY:
251 desc.args[0] = SCM_LEGACY_FNID(svc_id, cmd_id);
254 pr_err("Unknown SMC convention being used\n");
258 ret = qcom_scm_call(dev, &desc, &res);
260 return ret ? false : !!res.result[0];
264 * qcom_scm_set_warm_boot_addr() - Set the warm boot address for cpus
265 * @entry: Entry point function for the cpus
266 * @cpus: The cpumask of cpus that will use the entry point
268 * Set the Linux entry point for the SCM to transfer control to when coming
269 * out of a power down. CPU power down may be executed on cpuidle or hotplug.
271 int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
276 struct qcom_scm_desc desc = {
277 .svc = QCOM_SCM_SVC_BOOT,
278 .cmd = QCOM_SCM_BOOT_SET_ADDR,
279 .arginfo = QCOM_SCM_ARGS(2),
283 * Reassign only if we are switching from hotplug entry point
284 * to cpuidle entry point or vice versa.
286 for_each_cpu(cpu, cpus) {
287 if (entry == qcom_scm_wb[cpu].entry)
289 flags |= qcom_scm_wb[cpu].flag;
292 /* No change in entry function */
296 desc.args[0] = flags;
297 desc.args[1] = virt_to_phys(entry);
299 ret = qcom_scm_call(__scm->dev, &desc, NULL);
301 for_each_cpu(cpu, cpus)
302 qcom_scm_wb[cpu].entry = entry;
307 EXPORT_SYMBOL(qcom_scm_set_warm_boot_addr);
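/*
 * Usage sketch (hypothetical caller, not part of this driver): a cpuidle
 * driver could route warm-boot resumption through its own resume stub
 * before entering a power-collapse state. "qcom_cpu_resume" is an assumed
 * symbol used only for illustration:
 *
 *	ret = qcom_scm_set_warm_boot_addr(qcom_cpu_resume, cpu_online_mask);
 *	if (ret)
 *		pr_err("failed to set warm boot address: %d\n", ret);
 */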
310 * qcom_scm_set_cold_boot_addr() - Set the cold boot address for cpus
311 * @entry: Entry point function for the cpus
312 * @cpus: The cpumask of cpus that will use the entry point
314 * Set the cold boot address of the cpus. Any cpu outside the supported
315 * range will be removed from the cpu present mask.
317 int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
321 int scm_cb_flags[] = {
322 QCOM_SCM_FLAG_COLDBOOT_CPU0,
323 QCOM_SCM_FLAG_COLDBOOT_CPU1,
324 QCOM_SCM_FLAG_COLDBOOT_CPU2,
325 QCOM_SCM_FLAG_COLDBOOT_CPU3,
327 struct qcom_scm_desc desc = {
328 .svc = QCOM_SCM_SVC_BOOT,
329 .cmd = QCOM_SCM_BOOT_SET_ADDR,
330 .arginfo = QCOM_SCM_ARGS(2),
331 .owner = ARM_SMCCC_OWNER_SIP,
334 if (!cpus || cpumask_empty(cpus))
337 for_each_cpu(cpu, cpus) {
338 if (cpu < ARRAY_SIZE(scm_cb_flags))
339 flags |= scm_cb_flags[cpu];
341 set_cpu_present(cpu, false);
344 desc.args[0] = flags;
345 desc.args[1] = virt_to_phys(entry);
347 return qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
349 EXPORT_SYMBOL(qcom_scm_set_cold_boot_addr);
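/*
 * Usage sketch (hypothetical, for illustration only): SMP boot code could
 * point all possible CPUs at the kernel's secondary entry before bringing
 * them online. "secondary_startup_addr" is an assumed symbol here:
 *
 *	ret = qcom_scm_set_cold_boot_addr(secondary_startup_addr,
 *					  cpu_possible_mask);
 *	if (ret)
 *		pr_warn("failed to set cold boot address: %d\n", ret);
 *
 * The call is safe in atomic context since it goes through
 * qcom_scm_call_atomic().
 */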
352 * qcom_scm_cpu_power_down() - Power down the cpu
353 * @flags: Flags to flush cache
355 * This is an end point to power down the cpu. If there was a pending interrupt,
356 * control returns from this function; otherwise the cpu jumps to the
357 * warm boot entry point set for this cpu upon reset.
359 void qcom_scm_cpu_power_down(u32 flags)
361 struct qcom_scm_desc desc = {
362 .svc = QCOM_SCM_SVC_BOOT,
363 .cmd = QCOM_SCM_BOOT_TERMINATE_PC,
364 .args[0] = flags & QCOM_SCM_FLUSH_FLAG_MASK,
365 .arginfo = QCOM_SCM_ARGS(1),
366 .owner = ARM_SMCCC_OWNER_SIP,
369 qcom_scm_call_atomic(__scm ? __scm->dev : NULL, &desc, NULL);
371 EXPORT_SYMBOL(qcom_scm_cpu_power_down);
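/*
 * Usage sketch (hypothetical cpuidle enter callback, assuming the
 * QCOM_SCM_CPU_PWR_DOWN_L2_ON flag from <linux/qcom_scm.h>):
 *
 *	static int qcom_idle_enter(struct cpuidle_device *dev,
 *				   struct cpuidle_driver *drv, int index)
 *	{
 *		qcom_scm_cpu_power_down(QCOM_SCM_CPU_PWR_DOWN_L2_ON);
 *		return index;
 *	}
 *
 * Control only returns to the caller if a pending interrupt aborted the
 * power down.
 */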
373 int qcom_scm_set_remote_state(u32 state, u32 id)
375 struct qcom_scm_desc desc = {
376 .svc = QCOM_SCM_SVC_BOOT,
377 .cmd = QCOM_SCM_BOOT_SET_REMOTE_STATE,
378 .arginfo = QCOM_SCM_ARGS(2),
381 .owner = ARM_SMCCC_OWNER_SIP,
383 struct qcom_scm_res res;
386 ret = qcom_scm_call(__scm->dev, &desc, &res);
388 return ret ? : res.result[0];
390 EXPORT_SYMBOL(qcom_scm_set_remote_state);
392 static int __qcom_scm_set_dload_mode(struct device *dev, bool enable)
394 struct qcom_scm_desc desc = {
395 .svc = QCOM_SCM_SVC_BOOT,
396 .cmd = QCOM_SCM_BOOT_SET_DLOAD_MODE,
397 .arginfo = QCOM_SCM_ARGS(2),
398 .args[0] = QCOM_SCM_BOOT_SET_DLOAD_MODE,
399 .owner = ARM_SMCCC_OWNER_SIP,
402 desc.args[1] = enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0;
404 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
407 static void qcom_scm_set_download_mode(bool enable)
412 avail = __qcom_scm_is_call_available(__scm->dev,
414 QCOM_SCM_BOOT_SET_DLOAD_MODE);
416 ret = __qcom_scm_set_dload_mode(__scm->dev, enable);
417 } else if (__scm->dload_mode_addr) {
418 ret = qcom_scm_io_writel(__scm->dload_mode_addr,
419 enable ? QCOM_SCM_BOOT_SET_DLOAD_MODE : 0);
422 "No available mechanism for setting download mode\n");
426 dev_err(__scm->dev, "failed to set download mode: %d\n", ret);
430 * qcom_scm_pas_init_image() - Initialize peripheral authentication service
431 * state machine for a given peripheral, using the supplied metadata
433 * @peripheral: peripheral id
434 * @metadata: pointer to memory containing ELF header, program header table
435 * and optional blob of data used for authenticating the metadata
436 * and the rest of the firmware
437 * @size: size of the metadata
439 * Returns 0 on success.
441 int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size)
443 dma_addr_t mdata_phys;
446 struct qcom_scm_desc desc = {
447 .svc = QCOM_SCM_SVC_PIL,
448 .cmd = QCOM_SCM_PIL_PAS_INIT_IMAGE,
449 .arginfo = QCOM_SCM_ARGS(2, QCOM_SCM_VAL, QCOM_SCM_RW),
450 .args[0] = peripheral,
451 .owner = ARM_SMCCC_OWNER_SIP,
453 struct qcom_scm_res res;
456 * During the scm call memory protection will be enabled for the metadata
457 * blob, so make sure it's physically contiguous, 4K aligned and
458 * non-cacheable to avoid XPU violations.
460 mdata_buf = dma_alloc_coherent(__scm->dev, size, &mdata_phys,
463 dev_err(__scm->dev, "Allocation of metadata buffer failed.\n");
466 memcpy(mdata_buf, metadata, size);
468 ret = qcom_scm_clk_enable();
472 desc.args[1] = mdata_phys;
474 ret = qcom_scm_call(__scm->dev, &desc, &res);
476 qcom_scm_clk_disable();
479 dma_free_coherent(__scm->dev, size, mdata_buf, mdata_phys);
481 return ret ? : res.result[0];
483 EXPORT_SYMBOL(qcom_scm_pas_init_image);
486 * qcom_scm_pas_mem_setup() - Prepare the memory related to a given peripheral
487 * for firmware loading
488 * @peripheral: peripheral id
489 * @addr: start address of memory area to prepare
490 * @size: size of the memory area to prepare
492 * Returns 0 on success.
494 int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size)
497 struct qcom_scm_desc desc = {
498 .svc = QCOM_SCM_SVC_PIL,
499 .cmd = QCOM_SCM_PIL_PAS_MEM_SETUP,
500 .arginfo = QCOM_SCM_ARGS(3),
501 .args[0] = peripheral,
504 .owner = ARM_SMCCC_OWNER_SIP,
506 struct qcom_scm_res res;
508 ret = qcom_scm_clk_enable();
512 ret = qcom_scm_call(__scm->dev, &desc, &res);
513 qcom_scm_clk_disable();
515 return ret ? : res.result[0];
517 EXPORT_SYMBOL(qcom_scm_pas_mem_setup);
520 * qcom_scm_pas_auth_and_reset() - Authenticate the given peripheral firmware
521 * and reset the remote processor
522 * @peripheral: peripheral id
524 * Returns 0 on success.
526 int qcom_scm_pas_auth_and_reset(u32 peripheral)
529 struct qcom_scm_desc desc = {
530 .svc = QCOM_SCM_SVC_PIL,
531 .cmd = QCOM_SCM_PIL_PAS_AUTH_AND_RESET,
532 .arginfo = QCOM_SCM_ARGS(1),
533 .args[0] = peripheral,
534 .owner = ARM_SMCCC_OWNER_SIP,
536 struct qcom_scm_res res;
538 ret = qcom_scm_clk_enable();
542 ret = qcom_scm_call(__scm->dev, &desc, &res);
543 qcom_scm_clk_disable();
545 return ret ? : res.result[0];
547 EXPORT_SYMBOL(qcom_scm_pas_auth_and_reset);
550 * qcom_scm_pas_shutdown() - Shut down the remote processor
551 * @peripheral: peripheral id
553 * Returns 0 on success.
555 int qcom_scm_pas_shutdown(u32 peripheral)
558 struct qcom_scm_desc desc = {
559 .svc = QCOM_SCM_SVC_PIL,
560 .cmd = QCOM_SCM_PIL_PAS_SHUTDOWN,
561 .arginfo = QCOM_SCM_ARGS(1),
562 .args[0] = peripheral,
563 .owner = ARM_SMCCC_OWNER_SIP,
565 struct qcom_scm_res res;
567 ret = qcom_scm_clk_enable();
571 ret = qcom_scm_call(__scm->dev, &desc, &res);
573 qcom_scm_clk_disable();
575 return ret ? : res.result[0];
577 EXPORT_SYMBOL(qcom_scm_pas_shutdown);
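/*
 * Usage sketch (hypothetical remoteproc flow, error handling trimmed): the
 * PAS calls above are normally used together to boot and later stop an
 * authenticated peripheral. "MSS_PAS_ID", "fw_metadata", "metadata_len",
 * "mem_phys" and "mem_size" are illustrative placeholders:
 *
 *	if (!qcom_scm_pas_supported(MSS_PAS_ID))
 *		return -ENXIO;
 *
 *	ret = qcom_scm_pas_init_image(MSS_PAS_ID, fw_metadata, metadata_len);
 *	ret = qcom_scm_pas_mem_setup(MSS_PAS_ID, mem_phys, mem_size);
 *	ret = qcom_scm_pas_auth_and_reset(MSS_PAS_ID);
 *	...
 *	ret = qcom_scm_pas_shutdown(MSS_PAS_ID);
 */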
580 * qcom_scm_pas_supported() - Check if the peripheral authentication service is
581 * available for the given peripheral
582 * @peripheral: peripheral id
584 * Returns true if PAS is supported for this peripheral, otherwise false.
586 bool qcom_scm_pas_supported(u32 peripheral)
589 struct qcom_scm_desc desc = {
590 .svc = QCOM_SCM_SVC_PIL,
591 .cmd = QCOM_SCM_PIL_PAS_IS_SUPPORTED,
592 .arginfo = QCOM_SCM_ARGS(1),
593 .args[0] = peripheral,
594 .owner = ARM_SMCCC_OWNER_SIP,
596 struct qcom_scm_res res;
598 if (!__qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_PIL,
599 QCOM_SCM_PIL_PAS_IS_SUPPORTED))
602 ret = qcom_scm_call(__scm->dev, &desc, &res);
604 return ret ? false : !!res.result[0];
606 EXPORT_SYMBOL(qcom_scm_pas_supported);
608 static int __qcom_scm_pas_mss_reset(struct device *dev, bool reset)
610 struct qcom_scm_desc desc = {
611 .svc = QCOM_SCM_SVC_PIL,
612 .cmd = QCOM_SCM_PIL_PAS_MSS_RESET,
613 .arginfo = QCOM_SCM_ARGS(2),
616 .owner = ARM_SMCCC_OWNER_SIP,
618 struct qcom_scm_res res;
621 ret = qcom_scm_call(__scm->dev, &desc, &res);
623 return ret ? : res.result[0];
626 static int qcom_scm_pas_reset_assert(struct reset_controller_dev *rcdev,
632 return __qcom_scm_pas_mss_reset(__scm->dev, 1);
635 static int qcom_scm_pas_reset_deassert(struct reset_controller_dev *rcdev,
641 return __qcom_scm_pas_mss_reset(__scm->dev, 0);
644 static const struct reset_control_ops qcom_scm_pas_reset_ops = {
645 .assert = qcom_scm_pas_reset_assert,
646 .deassert = qcom_scm_pas_reset_deassert,
649 int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
651 struct qcom_scm_desc desc = {
652 .svc = QCOM_SCM_SVC_IO,
653 .cmd = QCOM_SCM_IO_READ,
654 .arginfo = QCOM_SCM_ARGS(1),
656 .owner = ARM_SMCCC_OWNER_SIP,
658 struct qcom_scm_res res;
662 ret = qcom_scm_call_atomic(__scm->dev, &desc, &res);
664 *val = res.result[0];
666 return ret < 0 ? ret : 0;
668 EXPORT_SYMBOL(qcom_scm_io_readl);
670 int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
672 struct qcom_scm_desc desc = {
673 .svc = QCOM_SCM_SVC_IO,
674 .cmd = QCOM_SCM_IO_WRITE,
675 .arginfo = QCOM_SCM_ARGS(2),
678 .owner = ARM_SMCCC_OWNER_SIP,
681 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
683 EXPORT_SYMBOL(qcom_scm_io_writel);
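/*
 * Usage sketch (hypothetical): a read-modify-write of a secure-world
 * protected register, where "tcsr_addr" is an illustrative physical
 * address:
 *
 *	unsigned int val;
 *
 *	ret = qcom_scm_io_readl(tcsr_addr, &val);
 *	if (!ret)
 *		ret = qcom_scm_io_writel(tcsr_addr, val | BIT(0));
 *
 * Both helpers are safe in atomic context; this driver itself uses
 * qcom_scm_io_writel() to poke the TCSR download-mode register.
 */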
686 * qcom_scm_restore_sec_cfg_available() - Check if secure environment
687 * supports the restore security config interface.
689 * Return true if the restore-cfg interface is supported, false if not.
691 bool qcom_scm_restore_sec_cfg_available(void)
693 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_MP,
694 QCOM_SCM_MP_RESTORE_SEC_CFG);
696 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg_available);
698 int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
700 struct qcom_scm_desc desc = {
701 .svc = QCOM_SCM_SVC_MP,
702 .cmd = QCOM_SCM_MP_RESTORE_SEC_CFG,
703 .arginfo = QCOM_SCM_ARGS(2),
704 .args[0] = device_id,
706 .owner = ARM_SMCCC_OWNER_SIP,
708 struct qcom_scm_res res;
711 ret = qcom_scm_call(__scm->dev, &desc, &res);
713 return ret ? : res.result[0];
715 EXPORT_SYMBOL(qcom_scm_restore_sec_cfg);
717 int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
719 struct qcom_scm_desc desc = {
720 .svc = QCOM_SCM_SVC_MP,
721 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_SIZE,
722 .arginfo = QCOM_SCM_ARGS(1),
724 .owner = ARM_SMCCC_OWNER_SIP,
726 struct qcom_scm_res res;
729 ret = qcom_scm_call(__scm->dev, &desc, &res);
732 *size = res.result[0];
734 return ret ? : res.result[1];
736 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_size);
738 int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
740 struct qcom_scm_desc desc = {
741 .svc = QCOM_SCM_SVC_MP,
742 .cmd = QCOM_SCM_MP_IOMMU_SECURE_PTBL_INIT,
743 .arginfo = QCOM_SCM_ARGS(3, QCOM_SCM_RW, QCOM_SCM_VAL,
748 .owner = ARM_SMCCC_OWNER_SIP,
752 ret = qcom_scm_call(__scm->dev, &desc, NULL);
754 /* the page table has already been initialized, ignore the error */
760 EXPORT_SYMBOL(qcom_scm_iommu_secure_ptbl_init);
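/*
 * Usage sketch (hypothetical IOMMU driver init, assuming a DMA-coherent
 * allocation is an acceptable way to obtain the table memory): the
 * required size is queried first, then the table is handed over:
 *
 *	size_t psize;
 *	void *cpu_addr;
 *	dma_addr_t paddr;
 *
 *	ret = qcom_scm_iommu_secure_ptbl_size(0, &psize);
 *	cpu_addr = dma_alloc_coherent(dev, psize, &paddr, GFP_KERNEL);
 *	ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize, 0);
 */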
762 int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
763 u32 cp_nonpixel_start,
764 u32 cp_nonpixel_size)
767 struct qcom_scm_desc desc = {
768 .svc = QCOM_SCM_SVC_MP,
769 .cmd = QCOM_SCM_MP_VIDEO_VAR,
770 .arginfo = QCOM_SCM_ARGS(4, QCOM_SCM_VAL, QCOM_SCM_VAL,
771 QCOM_SCM_VAL, QCOM_SCM_VAL),
774 .args[2] = cp_nonpixel_start,
775 .args[3] = cp_nonpixel_size,
776 .owner = ARM_SMCCC_OWNER_SIP,
778 struct qcom_scm_res res;
780 ret = qcom_scm_call(__scm->dev, &desc, &res);
782 return ret ? : res.result[0];
784 EXPORT_SYMBOL(qcom_scm_mem_protect_video_var);
786 static int __qcom_scm_assign_mem(struct device *dev, phys_addr_t mem_region,
787 size_t mem_sz, phys_addr_t src, size_t src_sz,
788 phys_addr_t dest, size_t dest_sz)
791 struct qcom_scm_desc desc = {
792 .svc = QCOM_SCM_SVC_MP,
793 .cmd = QCOM_SCM_MP_ASSIGN,
794 .arginfo = QCOM_SCM_ARGS(7, QCOM_SCM_RO, QCOM_SCM_VAL,
795 QCOM_SCM_RO, QCOM_SCM_VAL, QCOM_SCM_RO,
796 QCOM_SCM_VAL, QCOM_SCM_VAL),
797 .args[0] = mem_region,
804 .owner = ARM_SMCCC_OWNER_SIP,
806 struct qcom_scm_res res;
808 ret = qcom_scm_call(dev, &desc, &res);
810 return ret ? : res.result[0];
814 * qcom_scm_assign_mem() - Make a secure call to reassign memory ownership
815 * @mem_addr: mem region whose ownership needs to be reassigned
816 * @mem_sz: size of the region.
817 * @srcvm: vmid bitmap for the current set of owners, each set bit
818 * indicating a unique owner
819 * @newvm: array describing the new owners and corresponding permission flags
821 * @dest_cnt: number of owners in next set.
823 * Return negative errno on failure or 0 on success with @srcvm updated.
825 int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
827 const struct qcom_scm_vmperm *newvm,
828 unsigned int dest_cnt)
830 struct qcom_scm_current_perm_info *destvm;
831 struct qcom_scm_mem_map_info *mem_to_map;
832 phys_addr_t mem_to_map_phys;
833 phys_addr_t dest_phys;
835 size_t mem_to_map_sz;
843 unsigned long srcvm_bits = *srcvm;
845 src_sz = hweight_long(srcvm_bits) * sizeof(*src);
846 mem_to_map_sz = sizeof(*mem_to_map);
847 dest_sz = dest_cnt * sizeof(*destvm);
848 ptr_sz = ALIGN(src_sz, SZ_64) + ALIGN(mem_to_map_sz, SZ_64) +
849 ALIGN(dest_sz, SZ_64);
851 ptr = dma_alloc_coherent(__scm->dev, ptr_sz, &ptr_phys, GFP_KERNEL);
855 /* Fill source vmid detail */
858 for_each_set_bit(b, &srcvm_bits, BITS_PER_LONG)
859 src[i++] = cpu_to_le32(b);
861 /* Fill details of mem buff to map */
862 mem_to_map = ptr + ALIGN(src_sz, SZ_64);
863 mem_to_map_phys = ptr_phys + ALIGN(src_sz, SZ_64);
864 mem_to_map->mem_addr = cpu_to_le64(mem_addr);
865 mem_to_map->mem_size = cpu_to_le64(mem_sz);
868 /* Fill in the details of the next vmid set */
869 destvm = ptr + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
870 dest_phys = ptr_phys + ALIGN(mem_to_map_sz, SZ_64) + ALIGN(src_sz, SZ_64);
871 for (i = 0; i < dest_cnt; i++, destvm++, newvm++) {
872 destvm->vmid = cpu_to_le32(newvm->vmid);
873 destvm->perm = cpu_to_le32(newvm->perm);
875 destvm->ctx_size = 0;
876 next_vm |= BIT(newvm->vmid);
879 ret = __qcom_scm_assign_mem(__scm->dev, mem_to_map_phys, mem_to_map_sz,
880 ptr_phys, src_sz, dest_phys, dest_sz);
881 dma_free_coherent(__scm->dev, ptr_sz, ptr, ptr_phys);
884 "Assign memory protection call failed %d\n", ret);
891 EXPORT_SYMBOL(qcom_scm_assign_mem);
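/*
 * Usage sketch (hypothetical, assuming the QCOM_SCM_VMID_* and
 * QCOM_SCM_PERM_* definitions from <linux/qcom_scm.h>): hand a carveout
 * from HLOS to a remote VM. "carveout_phys" and "carveout_size" are
 * placeholders:
 *
 *	struct qcom_scm_vmperm newvm = {
 *		.vmid = QCOM_SCM_VMID_MSS_MSA,
 *		.perm = QCOM_SCM_PERM_RW,
 *	};
 *	unsigned int srcvm = BIT(QCOM_SCM_VMID_HLOS);
 *
 *	ret = qcom_scm_assign_mem(carveout_phys, carveout_size,
 *				  &srcvm, &newvm, 1);
 *
 * On success srcvm is updated to the new owner set, so it can be passed
 * straight back in when returning the memory to HLOS.
 */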
894 * qcom_scm_ocmem_lock_available() - is OCMEM lock/unlock interface available
896 bool qcom_scm_ocmem_lock_available(void)
898 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_OCMEM,
899 QCOM_SCM_OCMEM_LOCK_CMD);
901 EXPORT_SYMBOL(qcom_scm_ocmem_lock_available);
904 * qcom_scm_ocmem_lock() - call OCMEM lock interface to assign an OCMEM
905 * region to the specified initiator
907 * @id: tz initiator id
908 * @offset: OCMEM offset
910 * @mode: access mode (WIDE/NARROW)
912 int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
915 struct qcom_scm_desc desc = {
916 .svc = QCOM_SCM_SVC_OCMEM,
917 .cmd = QCOM_SCM_OCMEM_LOCK_CMD,
922 .arginfo = QCOM_SCM_ARGS(4),
925 return qcom_scm_call(__scm->dev, &desc, NULL);
927 EXPORT_SYMBOL(qcom_scm_ocmem_lock);
930 * qcom_scm_ocmem_unlock() - call OCMEM unlock interface to release an OCMEM
931 * region from the specified initiator
933 * @id: tz initiator id
934 * @offset: OCMEM offset
937 int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size)
939 struct qcom_scm_desc desc = {
940 .svc = QCOM_SCM_SVC_OCMEM,
941 .cmd = QCOM_SCM_OCMEM_UNLOCK_CMD,
945 .arginfo = QCOM_SCM_ARGS(3),
948 return qcom_scm_call(__scm->dev, &desc, NULL);
950 EXPORT_SYMBOL(qcom_scm_ocmem_unlock);
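/*
 * Usage sketch (hypothetical OCMEM driver path, assuming the
 * QCOM_SCM_OCMEM_GRAPHICS_ID initiator from <linux/qcom_scm.h>; offset,
 * size and mode are placeholders):
 *
 *	ret = qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset,
 *				  size, mode);
 *	...
 *	ret = qcom_scm_ocmem_unlock(QCOM_SCM_OCMEM_GRAPHICS_ID, offset, size);
 */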
953 * qcom_scm_ice_available() - Is the ICE key programming interface available?
955 * Return: true iff the SCM calls wrapped by qcom_scm_ice_invalidate_key() and
956 * qcom_scm_ice_set_key() are available.
958 bool qcom_scm_ice_available(void)
960 return __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
961 QCOM_SCM_ES_INVALIDATE_ICE_KEY) &&
962 __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_ES,
963 QCOM_SCM_ES_CONFIG_SET_ICE_KEY);
965 EXPORT_SYMBOL(qcom_scm_ice_available);
968 * qcom_scm_ice_invalidate_key() - Invalidate an inline encryption key
969 * @index: the keyslot to invalidate
971 * The UFSHCI standard defines a standard way to do this, but it doesn't work on
972 * these SoCs; only this SCM call does.
974 * Return: 0 on success; -errno on failure.
976 int qcom_scm_ice_invalidate_key(u32 index)
978 struct qcom_scm_desc desc = {
979 .svc = QCOM_SCM_SVC_ES,
980 .cmd = QCOM_SCM_ES_INVALIDATE_ICE_KEY,
981 .arginfo = QCOM_SCM_ARGS(1),
983 .owner = ARM_SMCCC_OWNER_SIP,
986 return qcom_scm_call(__scm->dev, &desc, NULL);
988 EXPORT_SYMBOL(qcom_scm_ice_invalidate_key);
991 * qcom_scm_ice_set_key() - Set an inline encryption key
992 * @index: the keyslot into which to set the key
993 * @key: the key to program
994 * @key_size: the size of the key in bytes
995 * @cipher: the encryption algorithm the key is for
996 * @data_unit_size: the encryption data unit size, i.e. the size of each
997 * individual plaintext and ciphertext. Given in 512-byte
998 * units, e.g. 1 = 512 bytes, 8 = 4096 bytes, etc.
1000 * Program a key into a keyslot of Qualcomm ICE (Inline Crypto Engine), where it
1001 * can then be used to encrypt/decrypt UFS I/O requests inline.
1003 * The UFSHCI standard defines a standard way to do this, but it doesn't work on
1004 * these SoCs; only this SCM call does.
1006 * Return: 0 on success; -errno on failure.
1008 int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
1009 enum qcom_scm_ice_cipher cipher, u32 data_unit_size)
1011 struct qcom_scm_desc desc = {
1012 .svc = QCOM_SCM_SVC_ES,
1013 .cmd = QCOM_SCM_ES_CONFIG_SET_ICE_KEY,
1014 .arginfo = QCOM_SCM_ARGS(5, QCOM_SCM_VAL, QCOM_SCM_RW,
1015 QCOM_SCM_VAL, QCOM_SCM_VAL,
1018 .args[2] = key_size,
1020 .args[4] = data_unit_size,
1021 .owner = ARM_SMCCC_OWNER_SIP,
1024 dma_addr_t key_phys;
1028 * 'key' may point to vmalloc()'ed memory, but we need to pass a
1029 * physical address that's been properly flushed. The sanctioned way to
1030 * do this is by using the DMA API. But as is best practice for crypto
1031 * keys, we also must wipe the key after use. This makes kmemdup() +
1032 * dma_map_single() not clearly correct, since the DMA API can use
1033 * bounce buffers. Instead, just use dma_alloc_coherent(). Programming
1034 * keys is normally rare and thus not performance-critical.
1037 keybuf = dma_alloc_coherent(__scm->dev, key_size, &key_phys,
1041 memcpy(keybuf, key, key_size);
1042 desc.args[1] = key_phys;
1044 ret = qcom_scm_call(__scm->dev, &desc, NULL);
1046 memzero_explicit(keybuf, key_size);
1048 dma_free_coherent(__scm->dev, key_size, keybuf, key_phys);
1051 EXPORT_SYMBOL(qcom_scm_ice_set_key);
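/*
 * Usage sketch (hypothetical UFS inline-crypto keyslot handler, assuming
 * the QCOM_SCM_ICE_CIPHER_AES_256_XTS value from <linux/qcom_scm.h>, a
 * 64-byte raw key and a 4096-byte data unit, i.e. data_unit_size = 8):
 *
 *	err = qcom_scm_ice_set_key(slot, raw_key, 64,
 *				   QCOM_SCM_ICE_CIPHER_AES_256_XTS, 8);
 *	...
 *	err = qcom_scm_ice_invalidate_key(slot);
 */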
1054 * qcom_scm_hdcp_available() - Check if secure environment supports HDCP.
1056 * Return true if HDCP is supported, false if not.
1058 bool qcom_scm_hdcp_available(void)
1061 int ret = qcom_scm_clk_enable();
1066 avail = __qcom_scm_is_call_available(__scm->dev, QCOM_SCM_SVC_HDCP,
1067 QCOM_SCM_HDCP_INVOKE);
1069 qcom_scm_clk_disable();
1073 EXPORT_SYMBOL(qcom_scm_hdcp_available);
1076 * qcom_scm_hdcp_req() - Send HDCP request.
1077 * @req: HDCP request array
1078 * @req_cnt: HDCP request array count
1079 * @resp: response buffer passed to SCM
1081 * Write HDCP register(s) through SCM.
1083 int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp)
1086 struct qcom_scm_desc desc = {
1087 .svc = QCOM_SCM_SVC_HDCP,
1088 .cmd = QCOM_SCM_HDCP_INVOKE,
1089 .arginfo = QCOM_SCM_ARGS(10),
1102 .owner = ARM_SMCCC_OWNER_SIP,
1104 struct qcom_scm_res res;
1106 if (req_cnt > QCOM_SCM_HDCP_MAX_REQ_CNT)
1109 ret = qcom_scm_clk_enable();
1113 ret = qcom_scm_call(__scm->dev, &desc, &res);
1114 *resp = res.result[0];
1116 qcom_scm_clk_disable();
1120 EXPORT_SYMBOL(qcom_scm_hdcp_req);
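/*
 * Usage sketch (hypothetical HDCP client, assuming struct
 * qcom_scm_hdcp_req carries an addr/val pair as defined in
 * <linux/qcom_scm.h>; reg0/val0 etc. are placeholders):
 *
 *	struct qcom_scm_hdcp_req req[2] = {
 *		{ .addr = reg0, .val = val0 },
 *		{ .addr = reg1, .val = val1 },
 *	};
 *	u32 resp;
 *
 *	ret = qcom_scm_hdcp_req(req, ARRAY_SIZE(req), &resp);
 */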
1122 int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
1124 struct qcom_scm_desc desc = {
1125 .svc = QCOM_SCM_SVC_SMMU_PROGRAM,
1126 .cmd = QCOM_SCM_SMMU_CONFIG_ERRATA1,
1127 .arginfo = QCOM_SCM_ARGS(2),
1128 .args[0] = QCOM_SCM_SMMU_CONFIG_ERRATA1_CLIENT_ALL,
1130 .owner = ARM_SMCCC_OWNER_SIP,
1134 return qcom_scm_call_atomic(__scm->dev, &desc, NULL);
1136 EXPORT_SYMBOL(qcom_scm_qsmmu500_wait_safe_toggle);
1138 static int qcom_scm_find_dload_address(struct device *dev, u64 *addr)
1140 struct device_node *tcsr;
1141 struct device_node *np = dev->of_node;
1142 struct resource res;
1146 tcsr = of_parse_phandle(np, "qcom,dload-mode", 0);
1150 ret = of_address_to_resource(tcsr, 0, &res);
1155 ret = of_property_read_u32_index(np, "qcom,dload-mode", 1, &offset);
1159 *addr = res.start + offset;
1165 * qcom_scm_is_available() - Checks if SCM is available
1167 bool qcom_scm_is_available(void)
1171 EXPORT_SYMBOL(qcom_scm_is_available);
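/*
 * Usage sketch (hypothetical consumer probe): drivers that depend on SCM
 * typically defer probing until the SCM device has bound:
 *
 *	if (!qcom_scm_is_available())
 *		return -EPROBE_DEFER;
 */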
1173 static int qcom_scm_probe(struct platform_device *pdev)
1175 struct qcom_scm *scm;
1179 scm = devm_kzalloc(&pdev->dev, sizeof(*scm), GFP_KERNEL);
1183 ret = qcom_scm_find_dload_address(&pdev->dev, &scm->dload_mode_addr);
1187 clks = (unsigned long)of_device_get_match_data(&pdev->dev);
1189 scm->core_clk = devm_clk_get(&pdev->dev, "core");
1190 if (IS_ERR(scm->core_clk)) {
1191 if (PTR_ERR(scm->core_clk) == -EPROBE_DEFER)
1192 return PTR_ERR(scm->core_clk);
1194 if (clks & SCM_HAS_CORE_CLK) {
1195 dev_err(&pdev->dev, "failed to acquire core clk\n");
1196 return PTR_ERR(scm->core_clk);
1199 scm->core_clk = NULL;
1202 scm->iface_clk = devm_clk_get(&pdev->dev, "iface");
1203 if (IS_ERR(scm->iface_clk)) {
1204 if (PTR_ERR(scm->iface_clk) == -EPROBE_DEFER)
1205 return PTR_ERR(scm->iface_clk);
1207 if (clks & SCM_HAS_IFACE_CLK) {
1208 dev_err(&pdev->dev, "failed to acquire iface clk\n");
1209 return PTR_ERR(scm->iface_clk);
1212 scm->iface_clk = NULL;
1215 scm->bus_clk = devm_clk_get(&pdev->dev, "bus");
1216 if (IS_ERR(scm->bus_clk)) {
1217 if (PTR_ERR(scm->bus_clk) == -EPROBE_DEFER)
1218 return PTR_ERR(scm->bus_clk);
1220 if (clks & SCM_HAS_BUS_CLK) {
1221 dev_err(&pdev->dev, "failed to acquire bus clk\n");
1222 return PTR_ERR(scm->bus_clk);
1225 scm->bus_clk = NULL;
1228 scm->reset.ops = &qcom_scm_pas_reset_ops;
1229 scm->reset.nr_resets = 1;
1230 scm->reset.of_node = pdev->dev.of_node;
1231 ret = devm_reset_controller_register(&pdev->dev, &scm->reset);
1235 /* vote for max clk rate for highest performance */
1236 ret = clk_set_rate(scm->core_clk, INT_MAX);
1241 __scm->dev = &pdev->dev;
1246 * If "download mode" was requested, enable it here; from this point on a
1247 * warm boot will cause the boot stages to enter download mode, unless
1248 * disabled below by a clean shutdown/reboot.
1251 qcom_scm_set_download_mode(true);
1256 static void qcom_scm_shutdown(struct platform_device *pdev)
1258 /* Clean shutdown, disable download mode to allow normal restart */
1260 qcom_scm_set_download_mode(false);
1263 static const struct of_device_id qcom_scm_dt_match[] = {
1264 { .compatible = "qcom,scm-apq8064",
1265 /* FIXME: This should have .data = (void *) SCM_HAS_CORE_CLK */
1267 { .compatible = "qcom,scm-apq8084", .data = (void *)(SCM_HAS_CORE_CLK |
1271 { .compatible = "qcom,scm-ipq4019" },
1272 { .compatible = "qcom,scm-msm8660", .data = (void *) SCM_HAS_CORE_CLK },
1273 { .compatible = "qcom,scm-msm8960", .data = (void *) SCM_HAS_CORE_CLK },
1274 { .compatible = "qcom,scm-msm8916", .data = (void *)(SCM_HAS_CORE_CLK |
1278 { .compatible = "qcom,scm-msm8974", .data = (void *)(SCM_HAS_CORE_CLK |
1282 { .compatible = "qcom,scm-msm8994" },
1283 { .compatible = "qcom,scm-msm8996" },
1284 { .compatible = "qcom,scm" },
1288 static struct platform_driver qcom_scm_driver = {
1291 .of_match_table = qcom_scm_dt_match,
1293 .probe = qcom_scm_probe,
1294 .shutdown = qcom_scm_shutdown,
1297 static int __init qcom_scm_init(void)
1299 return platform_driver_register(&qcom_scm_driver);
1301 subsys_initcall(qcom_scm_init);