1 // SPDX-License-Identifier: GPL-2.0-only
3 * TI K3 R5F (MCU) Remote Processor driver
5 * Copyright (C) 2017-2020 Texas Instruments Incorporated - https://www.ti.com/
6 * Suman Anna <s-anna@ti.com>
9 #include <linux/dma-mapping.h>
10 #include <linux/err.h>
11 #include <linux/interrupt.h>
12 #include <linux/kernel.h>
13 #include <linux/mailbox_client.h>
14 #include <linux/module.h>
15 #include <linux/of_address.h>
16 #include <linux/of_device.h>
17 #include <linux/of_reserved_mem.h>
18 #include <linux/omap-mailbox.h>
19 #include <linux/platform_device.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/remoteproc.h>
22 #include <linux/reset.h>
23 #include <linux/slab.h>
25 #include "omap_remoteproc.h"
26 #include "remoteproc_internal.h"
27 #include "ti_sci_proc.h"
/* This address can map to either the ATCM or the BTCM, with the other TCM at address 0x0 */
30 #define K3_R5_TCM_DEV_ADDR 0x41010000
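/*
 * Whether the ATCM or the BTCM shows up at the above device address is
 * decided by the per-core "loczrama" setting: loczrama = 1 places ATCM at
 * device address 0x0 and BTCM at K3_R5_TCM_DEV_ADDR, while loczrama = 0
 * swaps the two (see k3_r5_core_of_get_internal_memories()).
 */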
32 /* R5 TI-SCI Processor Configuration Flags */
33 #define PROC_BOOT_CFG_FLAG_R5_DBG_EN 0x00000001
34 #define PROC_BOOT_CFG_FLAG_R5_DBG_NIDEN 0x00000002
35 #define PROC_BOOT_CFG_FLAG_R5_LOCKSTEP 0x00000100
36 #define PROC_BOOT_CFG_FLAG_R5_TEINIT 0x00000200
37 #define PROC_BOOT_CFG_FLAG_R5_NMFI_EN 0x00000400
38 #define PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE 0x00000800
39 #define PROC_BOOT_CFG_FLAG_R5_BTCM_EN 0x00001000
40 #define PROC_BOOT_CFG_FLAG_R5_ATCM_EN 0x00002000
42 /* R5 TI-SCI Processor Control Flags */
43 #define PROC_BOOT_CTRL_FLAG_R5_CORE_HALT 0x00000001
45 /* R5 TI-SCI Processor Status Flags */
46 #define PROC_BOOT_STATUS_FLAG_R5_WFE 0x00000001
47 #define PROC_BOOT_STATUS_FLAG_R5_WFI 0x00000002
48 #define PROC_BOOT_STATUS_FLAG_R5_CLK_GATED 0x00000004
49 #define PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED 0x00000100
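/* the LOCKSTEP_PERMITTED status flag above is checked by k3_r5_rproc_configure() */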
52 * struct k3_r5_mem - internal memory structure
53 * @cpu_addr: MPU virtual address of the memory region
54 * @bus_addr: Bus address used to access the memory region
55 * @dev_addr: Device address from remoteproc view
56 * @size: Size of the memory region
59 void __iomem *cpu_addr;
66 CLUSTER_MODE_SPLIT = 0,
67 CLUSTER_MODE_LOCKSTEP,
71 * struct k3_r5_cluster - K3 R5F Cluster structure
72 * @dev: cached device pointer
73 * @mode: Mode to configure the Cluster - Split or LockStep
74 * @cores: list of R5 cores within the cluster
76 struct k3_r5_cluster {
78 enum cluster_mode mode;
79 struct list_head cores;
83 * struct k3_r5_core - K3 R5 core structure
84 * @elem: linked list item
85 * @dev: cached device pointer
86 * @rproc: rproc handle representing this core
87 * @mem: internal memory regions data
88 * @sram: on-chip SRAM memory regions data
89 * @num_mems: number of internal memory regions
90 * @num_sram: number of on-chip SRAM memory regions
91 * @reset: reset control handle
92 * @tsp: TI-SCI processor control handle
93 * @ti_sci: TI-SCI handle
94 * @ti_sci_id: TI-SCI device identifier
95 * @atcm_enable: flag to control ATCM enablement
96 * @btcm_enable: flag to control BTCM enablement
97 * @loczrama: flag to dictate which TCM is at device address 0x0
100 struct list_head elem;
103 struct k3_r5_mem *mem;
104 struct k3_r5_mem *sram;
107 struct reset_control *reset;
108 struct ti_sci_proc *tsp;
109 const struct ti_sci_handle *ti_sci;
117 * struct k3_r5_rproc - K3 remote processor state
118 * @dev: cached device pointer
119 * @cluster: cached pointer to parent cluster structure
120 * @mbox: mailbox channel handle
121 * @client: mailbox client to request the mailbox channel
122 * @rproc: rproc handle
123 * @core: cached pointer to r5 core structure being used
124 * @rmem: reserved memory regions data
125 * @num_rmems: number of reserved memory regions
129 struct k3_r5_cluster *cluster;
130 struct mbox_chan *mbox;
131 struct mbox_client client;
133 struct k3_r5_core *core;
134 struct k3_r5_mem *rmem;
139 * k3_r5_rproc_mbox_callback() - inbound mailbox message handler
140 * @client: mailbox client pointer used for requesting the mailbox channel
141 * @data: mailbox payload
143 * This handler is invoked by the OMAP mailbox driver whenever a mailbox
144 * message is received. Usually, the mailbox payload simply contains
145 * the index of the virtqueue that is kicked by the remote processor,
146 * and we let remoteproc core handle it.
148 * In addition to virtqueue indices, we also have some out-of-band values
149 * that indicate different events. Those values are deliberately very
150 * large so they don't coincide with virtqueue indices.
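 * The out-of-band values handled here are RP_MBOX_CRASH and
 * RP_MBOX_ECHO_REPLY; any other value in the RP_MBOX_READY to
 * RP_MBOX_END_MSG range is silently ignored.
 */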
152 static void k3_r5_rproc_mbox_callback(struct mbox_client *client, void *data)
154 struct k3_r5_rproc *kproc = container_of(client, struct k3_r5_rproc,
156 struct device *dev = kproc->rproc->dev.parent;
157 const char *name = kproc->rproc->name;
158 u32 msg = omap_mbox_message(data);
160 dev_dbg(dev, "mbox msg: 0x%x\n", msg);
165 * remoteproc detected an exception, but error recovery is not
166 * supported. So, just log this for now
168 dev_err(dev, "K3 R5F rproc %s crashed\n", name);
170 case RP_MBOX_ECHO_REPLY:
171 dev_info(dev, "received echo reply from %s\n", name);
174 /* silently handle all other valid messages */
175 if (msg >= RP_MBOX_READY && msg < RP_MBOX_END_MSG)
177 if (msg > kproc->rproc->max_notifyid) {
178 dev_dbg(dev, "dropping unknown message 0x%x", msg);
181 /* msg contains the index of the triggered vring */
182 if (rproc_vq_interrupt(kproc->rproc, msg) == IRQ_NONE)
183 dev_dbg(dev, "no message was found in vqid %d\n", msg);
187 /* kick a virtqueue */
188 static void k3_r5_rproc_kick(struct rproc *rproc, int vqid)
190 struct k3_r5_rproc *kproc = rproc->priv;
191 struct device *dev = rproc->dev.parent;
192 mbox_msg_t msg = (mbox_msg_t)vqid;
195 /* send the index of the triggered virtqueue in the mailbox payload */
196 ret = mbox_send_message(kproc->mbox, (void *)msg);
198 dev_err(dev, "failed to send mailbox message, status = %d\n",
202 static int k3_r5_split_reset(struct k3_r5_core *core)
206 ret = reset_control_assert(core->reset);
208 dev_err(core->dev, "local-reset assert failed, ret = %d\n",
213 ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
216 dev_err(core->dev, "module-reset assert failed, ret = %d\n",
218 if (reset_control_deassert(core->reset))
219 dev_warn(core->dev, "local-reset deassert back failed\n");
225 static int k3_r5_split_release(struct k3_r5_core *core)
229 ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
232 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
237 ret = reset_control_deassert(core->reset);
239 dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
241 if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
243 dev_warn(core->dev, "module-reset assert back failed\n");
249 static int k3_r5_lockstep_reset(struct k3_r5_cluster *cluster)
251 struct k3_r5_core *core;
254 /* assert local reset on all applicable cores */
255 list_for_each_entry(core, &cluster->cores, elem) {
256 ret = reset_control_assert(core->reset);
258 dev_err(core->dev, "local-reset assert failed, ret = %d\n",
260 core = list_prev_entry(core, elem);
261 goto unroll_local_reset;
265 /* disable PSC modules on all applicable cores */
266 list_for_each_entry(core, &cluster->cores, elem) {
267 ret = core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
270 dev_err(core->dev, "module-reset assert failed, ret = %d\n",
272 goto unroll_module_reset;
279 list_for_each_entry_continue_reverse(core, &cluster->cores, elem) {
280 if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
282 dev_warn(core->dev, "module-reset assert back failed\n");
284 core = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
286 list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
287 if (reset_control_deassert(core->reset))
288 dev_warn(core->dev, "local-reset deassert back failed\n");
294 static int k3_r5_lockstep_release(struct k3_r5_cluster *cluster)
296 struct k3_r5_core *core;
299 /* enable PSC modules on all applicable cores */
300 list_for_each_entry_reverse(core, &cluster->cores, elem) {
301 ret = core->ti_sci->ops.dev_ops.get_device(core->ti_sci,
304 dev_err(core->dev, "module-reset deassert failed, ret = %d\n",
306 core = list_next_entry(core, elem);
307 goto unroll_module_reset;
311 /* deassert local reset on all applicable cores */
312 list_for_each_entry_reverse(core, &cluster->cores, elem) {
313 ret = reset_control_deassert(core->reset);
			dev_err(core->dev, "local-reset deassert failed, ret = %d\n",
317 goto unroll_local_reset;
324 list_for_each_entry_continue(core, &cluster->cores, elem) {
325 if (reset_control_assert(core->reset))
326 dev_warn(core->dev, "local-reset assert back failed\n");
328 core = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
330 list_for_each_entry_from(core, &cluster->cores, elem) {
331 if (core->ti_sci->ops.dev_ops.put_device(core->ti_sci,
333 dev_warn(core->dev, "module-reset assert back failed\n");
339 static inline int k3_r5_core_halt(struct k3_r5_core *core)
341 return ti_sci_proc_set_control(core->tsp,
342 PROC_BOOT_CTRL_FLAG_R5_CORE_HALT, 0);
345 static inline int k3_r5_core_run(struct k3_r5_core *core)
347 return ti_sci_proc_set_control(core->tsp,
348 0, PROC_BOOT_CTRL_FLAG_R5_CORE_HALT);
352 * The R5F cores have controls for both a reset and a halt/run. The code
353 * execution from DDR requires the initial boot-strapping code to be run
354 * from the internal TCMs. This function is used to release the resets on
355 * applicable cores to allow loading into the TCMs. The .prepare() ops is
356 * invoked by remoteproc core before any firmware loading, and is followed
357 * by the .start() ops after loading to actually let the R5 cores run.
359 static int k3_r5_rproc_prepare(struct rproc *rproc)
361 struct k3_r5_rproc *kproc = rproc->priv;
362 struct k3_r5_cluster *cluster = kproc->cluster;
363 struct k3_r5_core *core = kproc->core;
364 struct device *dev = kproc->dev;
367 ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
368 k3_r5_lockstep_release(cluster) : k3_r5_split_release(core);
370 dev_err(dev, "unable to enable cores for TCM loading, ret = %d\n",
376 * Zero out both TCMs unconditionally (access from v8 Arm core is not
377 * affected by ATCM & BTCM enable configuration values) so that ECC
378 * can be effective on all TCM addresses.
380 dev_dbg(dev, "zeroing out ATCM memory\n");
381 memset(core->mem[0].cpu_addr, 0x00, core->mem[0].size);
383 dev_dbg(dev, "zeroing out BTCM memory\n");
384 memset(core->mem[1].cpu_addr, 0x00, core->mem[1].size);
 * This function implements the .unprepare() ops and performs the complementary
391 * operations to that of the .prepare() ops. The function is used to assert the
392 * resets on all applicable cores for the rproc device (depending on LockStep
393 * or Split mode). This completes the second portion of powering down the R5F
394 * cores. The cores themselves are only halted in the .stop() ops, and the
 * .unprepare() ops is invoked by the remoteproc core after the remoteproc is
 * stopped.
 */
398 static int k3_r5_rproc_unprepare(struct rproc *rproc)
400 struct k3_r5_rproc *kproc = rproc->priv;
401 struct k3_r5_cluster *cluster = kproc->cluster;
402 struct k3_r5_core *core = kproc->core;
403 struct device *dev = kproc->dev;
406 ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
407 k3_r5_lockstep_reset(cluster) : k3_r5_split_reset(core);
409 dev_err(dev, "unable to disable cores, ret = %d\n", ret);
415 * The R5F start sequence includes two different operations
416 * 1. Configure the boot vector for R5F core(s)
417 * 2. Unhalt/Run the R5F core(s)
419 * The sequence is different between LockStep and Split modes. The LockStep
420 * mode requires the boot vector to be configured only for Core0, and then
421 * unhalt both the cores to start the execution - Core1 needs to be unhalted
 * first, followed by Core0. The Split-mode requires that Core0 be maintained
 * always in a higher power state than Core1 (implying Core1 needs to be
 * started only after Core0 is started).
 */
426 static int k3_r5_rproc_start(struct rproc *rproc)
428 struct k3_r5_rproc *kproc = rproc->priv;
429 struct k3_r5_cluster *cluster = kproc->cluster;
430 struct mbox_client *client = &kproc->client;
431 struct device *dev = kproc->dev;
432 struct k3_r5_core *core;
437 client->tx_done = NULL;
438 client->rx_callback = k3_r5_rproc_mbox_callback;
439 client->tx_block = false;
440 client->knows_txdone = false;
442 kproc->mbox = mbox_request_channel(client, 0);
443 if (IS_ERR(kproc->mbox)) {
445 dev_err(dev, "mbox_request_channel failed: %ld\n",
446 PTR_ERR(kproc->mbox));
451 * Ping the remote processor, this is only for sanity-sake for now;
452 * there is no functional effect whatsoever.
454 * Note that the reply will _not_ arrive immediately: this message
455 * will wait in the mailbox fifo until the remote processor is booted.
457 ret = mbox_send_message(kproc->mbox, (void *)RP_MBOX_ECHO_REQUEST);
459 dev_err(dev, "mbox_send_message failed: %d\n", ret);
463 boot_addr = rproc->bootaddr;
464 /* TODO: add boot_addr sanity checking */
465 dev_dbg(dev, "booting R5F core using boot addr = 0x%x\n", boot_addr);
467 /* boot vector need not be programmed for Core1 in LockStep mode */
469 ret = ti_sci_proc_set_config(core->tsp, boot_addr, 0, 0);
473 /* unhalt/run all applicable cores */
474 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
475 list_for_each_entry_reverse(core, &cluster->cores, elem) {
476 ret = k3_r5_core_run(core);
478 goto unroll_core_run;
481 ret = k3_r5_core_run(core);
489 list_for_each_entry_continue(core, &cluster->cores, elem) {
490 if (k3_r5_core_halt(core))
491 dev_warn(core->dev, "core halt back failed\n");
494 mbox_free_channel(kproc->mbox);
499 * The R5F stop function includes the following operations
500 * 1. Halt R5F core(s)
 * The sequence is different between LockStep and Split modes, and the order
 * in which the operations are performed on the cores is in general the
 * reverse of the start function. The LockStep mode requires each operation to
 * be performed first on Core0 followed by Core1. The Split-mode requires that
 * Core0 be maintained always in a higher power state than Core1 (implying
 * Core1 needs to be stopped before Core0).
509 * Note that the R5F halt operation in general is not effective when the R5F
510 * core is running, but is needed to make sure the core won't run after
 * deasserting the reset the subsequent time. Asserting the reset could be
 * done here, but it is preferred to do so in the .unprepare() ops - this
513 * maintains the symmetric behavior between the .start(), .stop(), .prepare()
514 * and .unprepare() ops, and also balances them well between sysfs 'state'
515 * flow and device bind/unbind or module removal.
517 static int k3_r5_rproc_stop(struct rproc *rproc)
519 struct k3_r5_rproc *kproc = rproc->priv;
520 struct k3_r5_cluster *cluster = kproc->cluster;
521 struct k3_r5_core *core = kproc->core;
524 /* halt all applicable cores */
525 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
526 list_for_each_entry(core, &cluster->cores, elem) {
527 ret = k3_r5_core_halt(core);
529 core = list_prev_entry(core, elem);
530 goto unroll_core_halt;
534 ret = k3_r5_core_halt(core);
539 mbox_free_channel(kproc->mbox);
544 list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
545 if (k3_r5_core_run(core))
546 dev_warn(core->dev, "core run back failed\n");
553 * Internal Memory translation helper
555 * Custom function implementing the rproc .da_to_va ops to provide address
556 * translation (device address to kernel virtual address) for internal RAMs
 * present in the remote processor device. The translated addresses can be used
558 * either by the remoteproc core for loading, or by any rpmsg bus drivers.
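 *
 * As an illustrative example: with loczrama = 1 the BTCM appears at device
 * address 0x41010000, so a da of 0x41010100 within the BTCM resolves to
 * core->mem[1].cpu_addr + 0x100 in the TCM loop below.
 */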
560 static void *k3_r5_rproc_da_to_va(struct rproc *rproc, u64 da, size_t len)
562 struct k3_r5_rproc *kproc = rproc->priv;
563 struct k3_r5_core *core = kproc->core;
564 void __iomem *va = NULL;
565 phys_addr_t bus_addr;
566 u32 dev_addr, offset;
573 /* handle both R5 and SoC views of ATCM and BTCM */
574 for (i = 0; i < core->num_mems; i++) {
575 bus_addr = core->mem[i].bus_addr;
576 dev_addr = core->mem[i].dev_addr;
577 size = core->mem[i].size;
579 /* handle R5-view addresses of TCMs */
580 if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
581 offset = da - dev_addr;
582 va = core->mem[i].cpu_addr + offset;
583 return (__force void *)va;
586 /* handle SoC-view addresses of TCMs */
587 if (da >= bus_addr && ((da + len) <= (bus_addr + size))) {
588 offset = da - bus_addr;
589 va = core->mem[i].cpu_addr + offset;
590 return (__force void *)va;
594 /* handle any SRAM regions using SoC-view addresses */
595 for (i = 0; i < core->num_sram; i++) {
596 dev_addr = core->sram[i].dev_addr;
597 size = core->sram[i].size;
599 if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
600 offset = da - dev_addr;
601 va = core->sram[i].cpu_addr + offset;
602 return (__force void *)va;
606 /* handle static DDR reserved memory regions */
607 for (i = 0; i < kproc->num_rmems; i++) {
608 dev_addr = kproc->rmem[i].dev_addr;
609 size = kproc->rmem[i].size;
611 if (da >= dev_addr && ((da + len) <= (dev_addr + size))) {
612 offset = da - dev_addr;
613 va = kproc->rmem[i].cpu_addr + offset;
614 return (__force void *)va;
621 static const struct rproc_ops k3_r5_rproc_ops = {
622 .prepare = k3_r5_rproc_prepare,
623 .unprepare = k3_r5_rproc_unprepare,
624 .start = k3_r5_rproc_start,
625 .stop = k3_r5_rproc_stop,
626 .kick = k3_r5_rproc_kick,
627 .da_to_va = k3_r5_rproc_da_to_va,
631 * Internal R5F Core configuration
633 * Each R5FSS has a cluster-level setting for configuring the processor
634 * subsystem either in a safety/fault-tolerant LockStep mode or a performance
 * oriented Split mode. Each R5F core has a number of settings to enable or
 * disable each of the TCMs, and to control which TCM appears at the R5F core's
637 * address 0x0. These settings need to be configured before the resets for the
638 * corresponding core are released. These settings are all protected and managed
639 * by the System Processor.
641 * This function is used to pre-configure these settings for each R5F core, and
642 * the configuration is all done through various ti_sci_proc functions that
643 * communicate with the System Processor. The function also ensures that both
644 * the cores are halted before the .prepare() step.
646 * The function is called from k3_r5_cluster_rproc_init() and is invoked either
647 * once (in LockStep mode) or twice (in Split mode). Support for LockStep-mode
648 * is dictated by an eFUSE register bit, and the config settings retrieved from
649 * DT are adjusted accordingly as per the permitted cluster mode. All cluster
650 * level settings like Cluster mode and TEINIT (exception handling state
651 * dictating ARM or Thumb mode) can only be set and retrieved using Core0.
653 * The function behavior is different based on the cluster mode. The R5F cores
654 * are configured independently as per their individual settings in Split mode.
655 * They are identically configured in LockStep mode using the primary Core0
656 * settings. However, some individual settings cannot be set in LockStep mode.
657 * This is overcome by switching to Split-mode initially and then programming
 * both the cores with the same settings, before reconfiguring again for
 * LockStep mode.
 */
661 static int k3_r5_rproc_configure(struct k3_r5_rproc *kproc)
663 struct k3_r5_cluster *cluster = kproc->cluster;
664 struct device *dev = kproc->dev;
665 struct k3_r5_core *core0, *core, *temp;
666 u32 ctrl = 0, cfg = 0, stat = 0;
667 u32 set_cfg = 0, clr_cfg = 0;
672 core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem);
673 core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ? core0 : kproc->core;
675 ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl,
680 dev_dbg(dev, "boot_vector = 0x%llx, cfg = 0x%x ctrl = 0x%x stat = 0x%x\n",
681 boot_vec, cfg, ctrl, stat);
683 lockstep_en = !!(stat & PROC_BOOT_STATUS_FLAG_R5_LOCKSTEP_PERMITTED);
684 if (!lockstep_en && cluster->mode == CLUSTER_MODE_LOCKSTEP) {
685 dev_err(cluster->dev, "lockstep mode not permitted, force configuring for split-mode\n");
686 cluster->mode = CLUSTER_MODE_SPLIT;
689 /* always enable ARM mode and set boot vector to 0 */
692 clr_cfg = PROC_BOOT_CFG_FLAG_R5_TEINIT;
694 * LockStep configuration bit is Read-only on Split-mode _only_
695 * devices and system firmware will NACK any requests with the
696 * bit configured, so program it only on permitted devices
699 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
702 if (core->atcm_enable)
703 set_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
705 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_ATCM_EN;
707 if (core->btcm_enable)
708 set_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
710 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_BTCM_EN;
713 set_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
715 clr_cfg |= PROC_BOOT_CFG_FLAG_R5_TCM_RSTBASE;
717 if (cluster->mode == CLUSTER_MODE_LOCKSTEP) {
719 * work around system firmware limitations to make sure both
720 * cores are programmed symmetrically in LockStep. LockStep
721 * and TEINIT config is only allowed with Core0.
723 list_for_each_entry(temp, &cluster->cores, elem) {
724 ret = k3_r5_core_halt(temp);
729 clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
730 clr_cfg &= ~PROC_BOOT_CFG_FLAG_R5_TEINIT;
732 ret = ti_sci_proc_set_config(temp->tsp, boot_vec,
738 set_cfg = PROC_BOOT_CFG_FLAG_R5_LOCKSTEP;
740 ret = ti_sci_proc_set_config(core->tsp, boot_vec,
743 ret = k3_r5_core_halt(core);
747 ret = ti_sci_proc_set_config(core->tsp, boot_vec,
755 static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc)
757 struct device *dev = kproc->dev;
758 struct device_node *np = dev_of_node(dev);
759 struct device_node *rmem_np;
760 struct reserved_mem *rmem;
764 num_rmems = of_property_count_elems_of_size(np, "memory-region",
766 if (num_rmems <= 0) {
767 dev_err(dev, "device does not have reserved memory regions, ret = %d\n",
		dev_err(dev, "device needs at least two memory regions to be defined, num = %d\n",
777 /* use reserved memory region 0 for vring DMA allocations */
778 ret = of_reserved_mem_device_init_by_idx(dev, np, 0);
780 dev_err(dev, "device cannot initialize DMA pool, ret = %d\n",
786 kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL);
792 /* use remaining reserved memory regions for static carveouts */
793 for (i = 0; i < num_rmems; i++) {
794 rmem_np = of_parse_phandle(np, "memory-region", i + 1);
800 rmem = of_reserved_mem_lookup(rmem_np);
802 of_node_put(rmem_np);
806 of_node_put(rmem_np);
808 kproc->rmem[i].bus_addr = rmem->base;
810 * R5Fs do not have an MMU, but have a Region Address Translator
811 * (RAT) module that provides a fixed entry translation between
		 * the 32-bit processor addresses and the 64-bit bus addresses.
		 * The RAT is programmable only by the R5F cores, and is not
		 * currently supported by this driver, so 64-bit address
		 * regions are not supported. The absence of MMUs implies that
		 * the R5F device addresses/supported memory regions are
		 * restricted to 32-bit bus addresses, and are identical to
		 * the bus addresses.
		 */
819 kproc->rmem[i].dev_addr = (u32)rmem->base;
820 kproc->rmem[i].size = rmem->size;
821 kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size);
822 if (!kproc->rmem[i].cpu_addr) {
823 dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n",
824 i + 1, &rmem->base, &rmem->size);
829 dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
830 i + 1, &kproc->rmem[i].bus_addr,
831 kproc->rmem[i].size, kproc->rmem[i].cpu_addr,
832 kproc->rmem[i].dev_addr);
834 kproc->num_rmems = num_rmems;
839 for (i--; i >= 0; i--)
840 iounmap(kproc->rmem[i].cpu_addr);
843 of_reserved_mem_device_release(dev);
847 static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc)
851 for (i = 0; i < kproc->num_rmems; i++)
852 iounmap(kproc->rmem[i].cpu_addr);
855 of_reserved_mem_device_release(kproc->dev);
858 static int k3_r5_cluster_rproc_init(struct platform_device *pdev)
860 struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
861 struct device *dev = &pdev->dev;
862 struct k3_r5_rproc *kproc;
863 struct k3_r5_core *core, *core1;
869 core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem);
870 list_for_each_entry(core, &cluster->cores, elem) {
872 ret = rproc_of_parse_firmware(cdev, 0, &fw_name);
874 dev_err(dev, "failed to parse firmware-name property, ret = %d\n",
879 rproc = rproc_alloc(cdev, dev_name(cdev), &k3_r5_rproc_ops,
880 fw_name, sizeof(*kproc));
886 /* K3 R5s have a Region Address Translator (RAT) but no MMU */
887 rproc->has_iommu = false;
888 /* error recovery is not supported at present */
889 rproc->recovery_disabled = true;
892 kproc->cluster = cluster;
895 kproc->rproc = rproc;
898 ret = k3_r5_rproc_configure(kproc);
900 dev_err(dev, "initial configure failed, ret = %d\n",
905 ret = k3_r5_reserved_mem_init(kproc);
907 dev_err(dev, "reserved memory init failed, ret = %d\n",
912 ret = rproc_add(rproc);
914 dev_err(dev, "rproc_add failed, ret = %d\n", ret);
918 /* create only one rproc in lockstep mode */
919 if (cluster->mode == CLUSTER_MODE_LOCKSTEP)
928 k3_r5_reserved_mem_exit(kproc);
933 /* undo core0 upon any failures on core1 in split-mode */
934 if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) {
935 core = list_prev_entry(core, elem);
943 static int k3_r5_cluster_rproc_exit(struct platform_device *pdev)
945 struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
946 struct k3_r5_rproc *kproc;
947 struct k3_r5_core *core;
	 * lockstep mode has only one rproc associated with the first core,
	 * whereas split-mode has one rproc associated with each of the two
	 * cores, and requires that core1 be powered down first
	 */
955 core = (cluster->mode == CLUSTER_MODE_LOCKSTEP) ?
956 list_first_entry(&cluster->cores, struct k3_r5_core, elem) :
957 list_last_entry(&cluster->cores, struct k3_r5_core, elem);
959 list_for_each_entry_from_reverse(core, &cluster->cores, elem) {
965 k3_r5_reserved_mem_exit(kproc);
974 static int k3_r5_core_of_get_internal_memories(struct platform_device *pdev,
975 struct k3_r5_core *core)
977 static const char * const mem_names[] = {"atcm", "btcm"};
978 struct device *dev = &pdev->dev;
979 struct resource *res;
983 num_mems = ARRAY_SIZE(mem_names);
984 core->mem = devm_kcalloc(dev, num_mems, sizeof(*core->mem), GFP_KERNEL);
988 for (i = 0; i < num_mems; i++) {
989 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
992 dev_err(dev, "found no memory resource for %s\n",
996 if (!devm_request_mem_region(dev, res->start,
999 dev_err(dev, "could not request %s region for resource\n",
1005 * TCMs are designed in general to support RAM-like backing
1006 * memories. So, map these as Normal Non-Cached memories. This
1007 * also avoids/fixes any potential alignment faults due to
1008 * unaligned data accesses when using memcpy() or memset()
1009 * functions (normally seen with device type memory).
1011 core->mem[i].cpu_addr = devm_ioremap_wc(dev, res->start,
1012 resource_size(res));
1013 if (!core->mem[i].cpu_addr) {
1014 dev_err(dev, "failed to map %s memory\n", mem_names[i]);
1017 core->mem[i].bus_addr = res->start;
		 * The R5F cores can place ATCM & BTCM anywhere in their
		 * address map based on the corresponding Region Registers in
		 * the System Control coprocessor. For now, place ATCM and BTCM
		 * at addresses 0 and 0x41010000 (same as the bus address on
		 * AM65x SoCs) based on the loczrama setting.
		 */
1027 if (!strcmp(mem_names[i], "atcm")) {
1028 core->mem[i].dev_addr = core->loczrama ?
1029 0 : K3_R5_TCM_DEV_ADDR;
1031 core->mem[i].dev_addr = core->loczrama ?
1032 K3_R5_TCM_DEV_ADDR : 0;
1034 core->mem[i].size = resource_size(res);
1036 dev_dbg(dev, "memory %5s: bus addr %pa size 0x%zx va %pK da 0x%x\n",
1037 mem_names[i], &core->mem[i].bus_addr,
1038 core->mem[i].size, core->mem[i].cpu_addr,
1039 core->mem[i].dev_addr);
1041 core->num_mems = num_mems;
1046 static int k3_r5_core_of_get_sram_memories(struct platform_device *pdev,
1047 struct k3_r5_core *core)
1049 struct device_node *np = pdev->dev.of_node;
1050 struct device *dev = &pdev->dev;
1051 struct device_node *sram_np;
1052 struct resource res;
1056 num_sram = of_property_count_elems_of_size(np, "sram", sizeof(phandle));
1057 if (num_sram <= 0) {
1058 dev_dbg(dev, "device does not use reserved on-chip memories, num_sram = %d\n",
1063 core->sram = devm_kcalloc(dev, num_sram, sizeof(*core->sram), GFP_KERNEL);
1067 for (i = 0; i < num_sram; i++) {
1068 sram_np = of_parse_phandle(np, "sram", i);
1072 if (!of_device_is_available(sram_np)) {
1073 of_node_put(sram_np);
1077 ret = of_address_to_resource(sram_np, 0, &res);
1078 of_node_put(sram_np);
1082 core->sram[i].bus_addr = res.start;
1083 core->sram[i].dev_addr = res.start;
1084 core->sram[i].size = resource_size(&res);
1085 core->sram[i].cpu_addr = devm_ioremap_wc(dev, res.start,
1086 resource_size(&res));
1087 if (!core->sram[i].cpu_addr) {
1088 dev_err(dev, "failed to parse and map sram%d memory at %pad\n",
1093 dev_dbg(dev, "memory sram%d: bus addr %pa size 0x%zx va %pK da 0x%x\n",
1094 i, &core->sram[i].bus_addr,
1095 core->sram[i].size, core->sram[i].cpu_addr,
1096 core->sram[i].dev_addr);
1098 core->num_sram = num_sram;
1104 struct ti_sci_proc *k3_r5_core_of_get_tsp(struct device *dev,
1105 const struct ti_sci_handle *sci)
1107 struct ti_sci_proc *tsp;
1111 ret = of_property_read_u32_array(dev_of_node(dev), "ti,sci-proc-ids",
1114 return ERR_PTR(ret);
1116 tsp = devm_kzalloc(dev, sizeof(*tsp), GFP_KERNEL);
1118 return ERR_PTR(-ENOMEM);
1122 tsp->ops = &sci->ops.proc_ops;
1123 tsp->proc_id = temp[0];
1124 tsp->host_id = temp[1];
1129 static int k3_r5_core_of_init(struct platform_device *pdev)
1131 struct device *dev = &pdev->dev;
1132 struct device_node *np = dev_of_node(dev);
1133 struct k3_r5_core *core;
1136 if (!devres_open_group(dev, k3_r5_core_of_init, GFP_KERNEL))
1139 core = devm_kzalloc(dev, sizeof(*core), GFP_KERNEL);
1147 * Use SoC Power-on-Reset values as default if no DT properties are
1148 * used to dictate the TCM configurations
1150 core->atcm_enable = 0;
1151 core->btcm_enable = 1;
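	core->loczrama = 1;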
1154 ret = of_property_read_u32(np, "ti,atcm-enable", &core->atcm_enable);
1155 if (ret < 0 && ret != -EINVAL) {
1156 dev_err(dev, "invalid format for ti,atcm-enable, ret = %d\n",
1161 ret = of_property_read_u32(np, "ti,btcm-enable", &core->btcm_enable);
1162 if (ret < 0 && ret != -EINVAL) {
1163 dev_err(dev, "invalid format for ti,btcm-enable, ret = %d\n",
1168 ret = of_property_read_u32(np, "ti,loczrama", &core->loczrama);
1169 if (ret < 0 && ret != -EINVAL) {
1170 dev_err(dev, "invalid format for ti,loczrama, ret = %d\n", ret);
1174 core->ti_sci = devm_ti_sci_get_by_phandle(dev, "ti,sci");
1175 if (IS_ERR(core->ti_sci)) {
1176 ret = PTR_ERR(core->ti_sci);
1177 if (ret != -EPROBE_DEFER) {
1178 dev_err(dev, "failed to get ti-sci handle, ret = %d\n",
1181 core->ti_sci = NULL;
1185 ret = of_property_read_u32(np, "ti,sci-dev-id", &core->ti_sci_id);
1187 dev_err(dev, "missing 'ti,sci-dev-id' property\n");
1191 core->reset = devm_reset_control_get_exclusive(dev, NULL);
1192 if (IS_ERR_OR_NULL(core->reset)) {
1193 ret = PTR_ERR_OR_ZERO(core->reset);
1196 if (ret != -EPROBE_DEFER) {
1197 dev_err(dev, "failed to get reset handle, ret = %d\n",
1203 core->tsp = k3_r5_core_of_get_tsp(dev, core->ti_sci);
1204 if (IS_ERR(core->tsp)) {
1205 ret = PTR_ERR(core->tsp);
1206 dev_err(dev, "failed to construct ti-sci proc control, ret = %d\n",
1211 ret = k3_r5_core_of_get_internal_memories(pdev, core);
1213 dev_err(dev, "failed to get internal memories, ret = %d\n",
1218 ret = k3_r5_core_of_get_sram_memories(pdev, core);
1220 dev_err(dev, "failed to get sram memories, ret = %d\n", ret);
1224 ret = ti_sci_proc_request(core->tsp);
1226 dev_err(dev, "ti_sci_proc_request failed, ret = %d\n", ret);
1230 platform_set_drvdata(pdev, core);
1231 devres_close_group(dev, k3_r5_core_of_init);
1236 devres_release_group(dev, k3_r5_core_of_init);
1241 * free the resources explicitly since driver model is not being used
1242 * for the child R5F devices
1244 static void k3_r5_core_of_exit(struct platform_device *pdev)
1246 struct k3_r5_core *core = platform_get_drvdata(pdev);
1247 struct device *dev = &pdev->dev;
1250 ret = ti_sci_proc_release(core->tsp);
1252 dev_err(dev, "failed to release proc, ret = %d\n", ret);
1254 platform_set_drvdata(pdev, NULL);
1255 devres_release_group(dev, k3_r5_core_of_init);
1258 static void k3_r5_cluster_of_exit(struct platform_device *pdev)
1260 struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
1261 struct platform_device *cpdev;
1262 struct k3_r5_core *core, *temp;
1264 list_for_each_entry_safe_reverse(core, temp, &cluster->cores, elem) {
1265 list_del(&core->elem);
1266 cpdev = to_platform_device(core->dev);
1267 k3_r5_core_of_exit(cpdev);
1271 static int k3_r5_cluster_of_init(struct platform_device *pdev)
1273 struct k3_r5_cluster *cluster = platform_get_drvdata(pdev);
1274 struct device *dev = &pdev->dev;
1275 struct device_node *np = dev_of_node(dev);
1276 struct platform_device *cpdev;
1277 struct device_node *child;
1278 struct k3_r5_core *core;
1281 for_each_available_child_of_node(np, child) {
1282 cpdev = of_find_device_by_node(child);
1285 dev_err(dev, "could not get R5 core platform device\n");
1290 ret = k3_r5_core_of_init(cpdev);
1292 dev_err(dev, "k3_r5_core_of_init failed, ret = %d\n",
1294 put_device(&cpdev->dev);
1299 core = platform_get_drvdata(cpdev);
1300 put_device(&cpdev->dev);
1301 list_add_tail(&core->elem, &cluster->cores);
1307 k3_r5_cluster_of_exit(pdev);
1311 static int k3_r5_probe(struct platform_device *pdev)
1313 struct device *dev = &pdev->dev;
1314 struct device_node *np = dev_of_node(dev);
1315 struct k3_r5_cluster *cluster;
1319 cluster = devm_kzalloc(dev, sizeof(*cluster), GFP_KERNEL);
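	/*
	 * default to the safety-oriented LockStep mode; an optional
	 * "ti,cluster-mode" DT property can switch the cluster to Split mode
	 */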
1324 cluster->mode = CLUSTER_MODE_LOCKSTEP;
1325 INIT_LIST_HEAD(&cluster->cores);
1327 ret = of_property_read_u32(np, "ti,cluster-mode", &cluster->mode);
1328 if (ret < 0 && ret != -EINVAL) {
1329 dev_err(dev, "invalid format for ti,cluster-mode, ret = %d\n",
1334 num_cores = of_get_available_child_count(np);
1335 if (num_cores != 2) {
1336 dev_err(dev, "MCU cluster requires both R5F cores to be enabled, num_cores = %d\n",
1341 platform_set_drvdata(pdev, cluster);
1343 ret = devm_of_platform_populate(dev);
1345 dev_err(dev, "devm_of_platform_populate failed, ret = %d\n",
1350 ret = k3_r5_cluster_of_init(pdev);
1352 dev_err(dev, "k3_r5_cluster_of_init failed, ret = %d\n", ret);
1356 ret = devm_add_action_or_reset(dev,
1357 (void(*)(void *))k3_r5_cluster_of_exit,
1362 ret = k3_r5_cluster_rproc_init(pdev);
1364 dev_err(dev, "k3_r5_cluster_rproc_init failed, ret = %d\n",
1369 ret = devm_add_action_or_reset(dev,
1370 (void(*)(void *))k3_r5_cluster_rproc_exit,
1378 static const struct of_device_id k3_r5_of_match[] = {
1379 { .compatible = "ti,am654-r5fss", },
1380 { .compatible = "ti,j721e-r5fss", },
1383 MODULE_DEVICE_TABLE(of, k3_r5_of_match);
1385 static struct platform_driver k3_r5_rproc_driver = {
1386 .probe = k3_r5_probe,
1388 .name = "k3_r5_rproc",
1389 .of_match_table = k3_r5_of_match,
1393 module_platform_driver(k3_r5_rproc_driver);
1395 MODULE_LICENSE("GPL v2");
1396 MODULE_DESCRIPTION("TI K3 R5F remote processor driver");
1397 MODULE_AUTHOR("Suman Anna <s-anna@ti.com>");