1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * PowerNV OPAL high level interfaces
5 * Copyright 2011 IBM Corp.
8 #define pr_fmt(fmt) "opal: " fmt
10 #include <linux/printk.h>
11 #include <linux/types.h>
13 #include <linux/of_fdt.h>
14 #include <linux/of_platform.h>
15 #include <linux/of_address.h>
16 #include <linux/interrupt.h>
17 #include <linux/notifier.h>
18 #include <linux/slab.h>
19 #include <linux/sched.h>
20 #include <linux/kobject.h>
21 #include <linux/delay.h>
22 #include <linux/memblock.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/kmsg_dump.h>
26 #include <linux/console.h>
27 #include <linux/sched/debug.h>
29 #include <asm/machdep.h>
31 #include <asm/firmware.h>
33 #include <asm/imc-pmu.h>
/* /sys/firmware/opal */
/* Root kobject for all OPAL sysfs attributes (symbol_map, exports, ...). */
struct kobject *opal_kobj;
47 struct mcheck_recoverable_range {
53 static struct mcheck_recoverable_range *mc_recoverable_range;
54 static int mc_recoverable_range_len;
/* /ibm,opal device-tree node, looked up once in opal_init(). */
struct device_node *opal_node;
/* Serialises atomic console writes in __opal_put_chars(). */
static DEFINE_SPINLOCK(opal_write_lock);
/* One notifier chain per OPAL message type. */
static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
/* Heartbeat interval in ms from "ibm,heartbeat-ms" (0 = no kopald thread). */
static uint32_t opal_heartbeat;
static struct task_struct *kopald_tsk;
/* Buffer used to pull pending messages out of firmware via opal_get_msg(). */
static struct opal_msg *opal_msg;
static u32 opal_msg_size __ro_after_init;
64 void opal_configure_cores(void)
68 /* Do the actual re-init, This will clobber all FPRs, VRs, etc...
70 * It will preserve non volatile GPRs and HSPRG0/1. It will
71 * also restore HIDs and other SPRs to their original value
72 * but it might clobber a bunch.
75 reinit_flags |= OPAL_REINIT_CPUS_HILE_BE;
77 reinit_flags |= OPAL_REINIT_CPUS_HILE_LE;
81 * POWER9 always support running hash:
82 * ie. Host hash supports hash guests
83 * Host radix supports hash/radix guests
85 if (early_cpu_has_feature(CPU_FTR_ARCH_300)) {
86 reinit_flags |= OPAL_REINIT_CPUS_MMU_HASH;
87 if (early_radix_enabled())
88 reinit_flags |= OPAL_REINIT_CPUS_MMU_RADIX;
91 opal_reinit_cpus(reinit_flags);
93 /* Restore some bits */
94 if (cur_cpu_spec->cpu_restore)
95 cur_cpu_spec->cpu_restore();
98 int __init early_init_dt_scan_opal(unsigned long node,
99 const char *uname, int depth, void *data)
101 const void *basep, *entryp, *sizep;
102 int basesz, entrysz, runtimesz;
104 if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
107 basep = of_get_flat_dt_prop(node, "opal-base-address", &basesz);
108 entryp = of_get_flat_dt_prop(node, "opal-entry-address", &entrysz);
109 sizep = of_get_flat_dt_prop(node, "opal-runtime-size", &runtimesz);
111 if (!basep || !entryp || !sizep)
114 opal.base = of_read_number(basep, basesz/4);
115 opal.entry = of_read_number(entryp, entrysz/4);
116 opal.size = of_read_number(sizep, runtimesz/4);
118 pr_debug("OPAL Base = 0x%llx (basep=%p basesz=%d)\n",
119 opal.base, basep, basesz);
120 pr_debug("OPAL Entry = 0x%llx (entryp=%p basesz=%d)\n",
121 opal.entry, entryp, entrysz);
122 pr_debug("OPAL Entry = 0x%llx (sizep=%p runtimesz=%d)\n",
123 opal.size, sizep, runtimesz);
125 if (of_flat_dt_is_compatible(node, "ibm,opal-v3")) {
126 powerpc_firmware_features |= FW_FEATURE_OPAL;
127 pr_debug("OPAL detected !\n");
129 panic("OPAL != V3 detected, no longer supported.\n");
135 int __init early_init_dt_scan_recoverable_ranges(unsigned long node,
136 const char *uname, int depth, void *data)
141 if (depth != 1 || strcmp(uname, "ibm,opal") != 0)
144 prop = of_get_flat_dt_prop(node, "mcheck-recoverable-ranges", &psize);
149 pr_debug("Found machine check recoverable ranges.\n");
152 * Calculate number of available entries.
154 * Each recoverable address range entry is (start address, len,
155 * recovery address), 2 cells each for start and recovery address,
156 * 1 cell for len, totalling 5 cells per entry.
158 mc_recoverable_range_len = psize / (sizeof(*prop) * 5);
161 if (!mc_recoverable_range_len)
164 /* Size required to hold all the entries. */
165 size = mc_recoverable_range_len *
166 sizeof(struct mcheck_recoverable_range);
169 * Allocate a buffer to hold the MC recoverable ranges.
171 mc_recoverable_range = memblock_alloc(size, __alignof__(u64));
172 if (!mc_recoverable_range)
173 panic("%s: Failed to allocate %u bytes align=0x%lx\n",
174 __func__, size, __alignof__(u64));
176 for (i = 0; i < mc_recoverable_range_len; i++) {
177 mc_recoverable_range[i].start_addr =
178 of_read_number(prop + (i * 5) + 0, 2);
179 mc_recoverable_range[i].end_addr =
180 mc_recoverable_range[i].start_addr +
181 of_read_number(prop + (i * 5) + 2, 1);
182 mc_recoverable_range[i].recover_addr =
183 of_read_number(prop + (i * 5) + 3, 2);
185 pr_debug("Machine check recoverable range: %llx..%llx: %llx\n",
186 mc_recoverable_range[i].start_addr,
187 mc_recoverable_range[i].end_addr,
188 mc_recoverable_range[i].recover_addr);
193 static int __init opal_register_exception_handlers(void)
195 #ifdef __BIG_ENDIAN__
198 if (!(powerpc_firmware_features & FW_FEATURE_OPAL))
201 /* Hookup some exception handlers except machine check. We use the
202 * fwnmi area at 0x7000 to provide the glue space to OPAL
207 * Only ancient OPAL firmware requires this.
208 * Specifically, firmware from FW810.00 (released June 2014)
209 * through FW810.20 (Released October 2014).
211 * Check if we are running on newer (post Oct 2014) firmware that
212 * exports the OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to
213 * patch the HMI interrupt and we catch it directly in Linux.
215 * For older firmware (i.e < FW810.20), we fallback to old behavior and
216 * let OPAL patch the HMI vector and handle it inside OPAL firmware.
218 * For newer firmware we catch/handle the HMI directly in Linux.
220 if (!opal_check_token(OPAL_HANDLE_HMI)) {
221 pr_info("Old firmware detected, OPAL handles HMIs.\n");
222 opal_register_exception_handler(
223 OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
229 * Only applicable to ancient firmware, all modern
230 * (post March 2015/skiboot 5.0) firmware will just return
233 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
238 machine_early_initcall(powernv, opal_register_exception_handlers);
241 * Opal message notifier based on message type. Allow subscribers to get
242 * notified for specific messgae type.
244 int opal_message_notifier_register(enum opal_msg_type msg_type,
245 struct notifier_block *nb)
247 if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
248 pr_warn("%s: Invalid arguments, msg_type:%d\n",
253 return atomic_notifier_chain_register(
254 &opal_msg_notifier_head[msg_type], nb);
256 EXPORT_SYMBOL_GPL(opal_message_notifier_register);
258 int opal_message_notifier_unregister(enum opal_msg_type msg_type,
259 struct notifier_block *nb)
261 return atomic_notifier_chain_unregister(
262 &opal_msg_notifier_head[msg_type], nb);
264 EXPORT_SYMBOL_GPL(opal_message_notifier_unregister);
266 static void opal_message_do_notify(uint32_t msg_type, void *msg)
268 /* notify subscribers */
269 atomic_notifier_call_chain(&opal_msg_notifier_head[msg_type],
273 static void opal_handle_message(void)
278 ret = opal_get_msg(__pa(opal_msg), opal_msg_size);
279 /* No opal message pending. */
280 if (ret == OPAL_RESOURCE)
283 /* check for errors. */
285 pr_warn("%s: Failed to retrieve opal message, err=%lld\n",
290 type = be32_to_cpu(opal_msg->msg_type);
293 if (type >= OPAL_MSG_TYPE_MAX) {
294 pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
297 opal_message_do_notify(type, (void *)opal_msg);
300 static irqreturn_t opal_message_notify(int irq, void *data)
302 opal_handle_message();
306 static int __init opal_message_init(struct device_node *opal_node)
310 ret = of_property_read_u32(opal_node, "opal-msg-size", &opal_msg_size);
312 pr_notice("Failed to read opal-msg-size property\n");
313 opal_msg_size = sizeof(struct opal_msg);
316 opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
318 opal_msg_size = sizeof(struct opal_msg);
319 /* Try to allocate fixed message size */
320 opal_msg = kmalloc(opal_msg_size, GFP_KERNEL);
321 BUG_ON(opal_msg == NULL);
324 for (i = 0; i < OPAL_MSG_TYPE_MAX; i++)
325 ATOMIC_INIT_NOTIFIER_HEAD(&opal_msg_notifier_head[i]);
327 irq = opal_event_request(ilog2(OPAL_EVENT_MSG_PENDING));
329 pr_err("%s: Can't register OPAL event irq (%d)\n",
334 ret = request_irq(irq, opal_message_notify,
335 IRQ_TYPE_LEVEL_HIGH, "opal-msg", NULL);
337 pr_err("%s: Can't request OPAL event irq (%d)\n",
345 int opal_get_chars(uint32_t vtermno, char *buf, int count)
352 opal_poll_events(&evt);
353 if ((be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_INPUT) == 0)
355 len = cpu_to_be64(count);
356 rc = opal_console_read(vtermno, &len, buf);
357 if (rc == OPAL_SUCCESS)
358 return be64_to_cpu(len);
362 static int __opal_put_chars(uint32_t vtermno, const char *data, int total_len, bool atomic)
364 unsigned long flags = 0 /* shut up gcc */;
373 spin_lock_irqsave(&opal_write_lock, flags);
374 rc = opal_console_write_buffer_space(vtermno, &olen);
375 if (rc || be64_to_cpu(olen) < total_len) {
376 /* Closed -> drop characters */
384 /* Should not get a partial write here because space is available. */
385 olen = cpu_to_be64(total_len);
386 rc = opal_console_write(vtermno, &olen, data);
387 if (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
388 if (rc == OPAL_BUSY_EVENT)
389 opal_poll_events(NULL);
394 /* Closed or other error drop */
395 if (rc != OPAL_SUCCESS) {
396 written = opal_error_code(rc);
400 written = be64_to_cpu(olen);
401 if (written < total_len) {
403 /* Should not happen */
404 pr_warn("atomic console write returned partial "
405 "len=%d written=%d\n", total_len, written);
413 spin_unlock_irqrestore(&opal_write_lock, flags);
418 int opal_put_chars(uint32_t vtermno, const char *data, int total_len)
420 return __opal_put_chars(vtermno, data, total_len, false);
424 * opal_put_chars_atomic will not perform partial-writes. Data will be
425 * atomically written to the terminal or not at all. This is not strictly
426 * true at the moment because console space can race with OPAL's console
429 int opal_put_chars_atomic(uint32_t vtermno, const char *data, int total_len)
431 return __opal_put_chars(vtermno, data, total_len, true);
434 static s64 __opal_flush_console(uint32_t vtermno)
438 if (!opal_check_token(OPAL_CONSOLE_FLUSH)) {
442 * If OPAL_CONSOLE_FLUSH is not implemented in the firmware,
443 * the console can still be flushed by calling the polling
444 * function while it has OPAL_EVENT_CONSOLE_OUTPUT events.
446 WARN_ONCE(1, "opal: OPAL_CONSOLE_FLUSH missing.\n");
448 opal_poll_events(&evt);
449 if (!(be64_to_cpu(evt) & OPAL_EVENT_CONSOLE_OUTPUT))
454 rc = opal_console_flush(vtermno);
455 if (rc == OPAL_BUSY_EVENT) {
456 opal_poll_events(NULL);
465 * opal_flush_console spins until the console is flushed
467 int opal_flush_console(uint32_t vtermno)
470 s64 rc = __opal_flush_console(vtermno);
472 if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
477 return opal_error_code(rc);
482 * opal_flush_chars is an hvc interface that sleeps until the console is
483 * flushed if wait, otherwise it will return -EBUSY if the console has data,
484 * -EAGAIN if it has data and some of it was flushed.
486 int opal_flush_chars(uint32_t vtermno, bool wait)
489 s64 rc = __opal_flush_console(vtermno);
491 if (rc == OPAL_BUSY || rc == OPAL_PARTIAL) {
493 msleep(OPAL_BUSY_DELAY_MS);
496 if (rc == OPAL_PARTIAL)
500 return opal_error_code(rc);
504 static int opal_recover_mce(struct pt_regs *regs,
505 struct machine_check_event *evt)
509 if (!(regs->msr & MSR_RI)) {
510 /* If MSR_RI isn't set, we cannot recover */
511 pr_err("Machine check interrupt unrecoverable: MSR(RI=0)\n");
513 } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
514 /* Platform corrected itself */
516 } else if (evt->severity == MCE_SEV_FATAL) {
517 /* Fatal machine check */
518 pr_err("Machine check interrupt is fatal\n");
522 if (!recovered && evt->sync_error) {
524 * Try to kill processes if we get a synchronous machine check
525 * (e.g., one caused by execution of this instruction). This
526 * will devolve into a panic if we try to kill init or are in
529 * TODO: Queue up this address for hwpoisioning later.
530 * TODO: This is not quite right for d-side machine
531 * checks ->nip is not necessarily the important
534 if ((user_mode(regs))) {
535 _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
537 } else if (die_will_crash()) {
539 * die() would kill the kernel, so better to go via
540 * the platform reboot code that will log the
545 die("Machine check", regs, SIGBUS);
553 void __noreturn pnv_platform_error_reboot(struct pt_regs *regs, const char *msg)
555 panic_flush_kmsg_start();
557 pr_emerg("Hardware platform error: %s\n", msg);
562 panic_flush_kmsg_end();
565 * Don't bother to shut things down because this will
568 if (opal_cec_reboot2(OPAL_REBOOT_PLATFORM_ERROR, msg)
569 == OPAL_UNSUPPORTED) {
570 pr_emerg("Reboot type %d not supported for %s\n",
571 OPAL_REBOOT_PLATFORM_ERROR, msg);
575 * We reached here. There can be three possibilities:
576 * 1. We are running on a firmware level that do not support
578 * 2. We are running on a firmware level that do not support
579 * OPAL_REBOOT_PLATFORM_ERROR reboot type.
580 * 3. We are running on FSP based system that does not need
581 * opal to trigger checkstop explicitly for error analysis.
582 * The FSP PRD component would have already got notified
583 * about this error through other channels.
584 * 4. We are running on a newer skiboot that by default does
585 * not cause a checkstop, drops us back to the kernel to
586 * extract context and state at the time of the error.
592 int opal_machine_check(struct pt_regs *regs)
594 struct machine_check_event evt;
596 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
599 /* Print things out */
600 if (evt.version != MCE_V1) {
601 pr_err("Machine Check Exception, Unknown event version %d !\n",
605 machine_check_print_event_info(&evt, user_mode(regs), false);
607 if (opal_recover_mce(regs, &evt))
610 pnv_platform_error_reboot(regs, "Unrecoverable Machine Check exception");
613 /* Early hmi handler called in real mode. */
614 int opal_hmi_exception_early(struct pt_regs *regs)
619 * call opal hmi handler. Pass paca address as token.
620 * The return value OPAL_SUCCESS is an indication that there is
621 * an HMI event generated waiting to pull by Linux.
623 rc = opal_handle_hmi();
624 if (rc == OPAL_SUCCESS) {
625 local_paca->hmi_event_available = 1;
631 int opal_hmi_exception_early2(struct pt_regs *regs)
637 * call opal hmi handler.
638 * Check 64-bit flag mask to find out if an event was generated,
639 * and whether TB is still valid or not etc.
641 rc = opal_handle_hmi2(&out_flags);
642 if (rc != OPAL_SUCCESS)
645 if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_NEW_EVENT)
646 local_paca->hmi_event_available = 1;
647 if (be64_to_cpu(out_flags) & OPAL_HMI_FLAGS_TOD_TB_FAIL)
652 /* HMI exception handler called in virtual mode during check_irq_replay. */
653 int opal_handle_hmi_exception(struct pt_regs *regs)
656 * Check if HMI event is available.
657 * if Yes, then wake kopald to process them.
659 if (!local_paca->hmi_event_available)
662 local_paca->hmi_event_available = 0;
668 static uint64_t find_recovery_address(uint64_t nip)
672 for (i = 0; i < mc_recoverable_range_len; i++)
673 if ((nip >= mc_recoverable_range[i].start_addr) &&
674 (nip < mc_recoverable_range[i].end_addr))
675 return mc_recoverable_range[i].recover_addr;
679 bool opal_mce_check_early_recovery(struct pt_regs *regs)
681 uint64_t recover_addr = 0;
683 if (!opal.base || !opal.size)
686 if ((regs->nip >= opal.base) &&
687 (regs->nip < (opal.base + opal.size)))
688 recover_addr = find_recovery_address(regs->nip);
691 * Setup regs->nip to rfi into fixup address.
694 regs->nip = recover_addr;
697 return !!recover_addr;
700 static int opal_sysfs_init(void)
702 opal_kobj = kobject_create_and_add("opal", firmware_kobj);
704 pr_warn("kobject_create_and_add opal failed\n");
711 static ssize_t symbol_map_read(struct file *fp, struct kobject *kobj,
712 struct bin_attribute *bin_attr,
713 char *buf, loff_t off, size_t count)
715 return memory_read_from_buffer(buf, count, &off, bin_attr->private,
719 static struct bin_attribute symbol_map_attr = {
720 .attr = {.name = "symbol_map", .mode = 0400},
721 .read = symbol_map_read
724 static void opal_export_symmap(void)
728 struct device_node *fw;
731 fw = of_find_node_by_path("/ibm,opal/firmware");
734 syms = of_get_property(fw, "symbol-map", &size);
735 if (!syms || size != 2 * sizeof(__be64))
738 /* Setup attributes */
739 symbol_map_attr.private = __va(be64_to_cpu(syms[0]));
740 symbol_map_attr.size = be64_to_cpu(syms[1]);
742 rc = sysfs_create_bin_file(opal_kobj, &symbol_map_attr);
744 pr_warn("Error %d creating OPAL symbols file\n", rc);
747 static ssize_t export_attr_read(struct file *fp, struct kobject *kobj,
748 struct bin_attribute *bin_attr, char *buf,
749 loff_t off, size_t count)
751 return memory_read_from_buffer(buf, count, &off, bin_attr->private,
756 * opal_export_attrs: creates a sysfs node for each property listed in
757 * the device-tree under /ibm,opal/firmware/exports/
758 * All new sysfs nodes are created under /opal/exports/.
759 * This allows for reserved memory regions (e.g. HDAT) to be read.
760 * The new sysfs nodes are only readable by root.
762 static void opal_export_attrs(void)
764 struct bin_attribute *attr;
765 struct device_node *np;
766 struct property *prop;
767 struct kobject *kobj;
771 np = of_find_node_by_path("/ibm,opal/firmware/exports");
775 /* Create new 'exports' directory - /sys/firmware/opal/exports */
776 kobj = kobject_create_and_add("exports", opal_kobj);
778 pr_warn("kobject_create_and_add() of exports failed\n");
783 for_each_property_of_node(np, prop) {
784 if (!strcmp(prop->name, "name") || !strcmp(prop->name, "phandle"))
787 if (of_property_read_u64_array(np, prop->name, &vals[0], 2))
790 attr = kzalloc(sizeof(*attr), GFP_KERNEL);
793 pr_warn("Failed kmalloc for bin_attribute!");
797 sysfs_bin_attr_init(attr);
798 attr->attr.name = kstrdup(prop->name, GFP_KERNEL);
799 attr->attr.mode = 0400;
800 attr->read = export_attr_read;
801 attr->private = __va(vals[0]);
802 attr->size = vals[1];
804 if (attr->attr.name == NULL) {
805 pr_warn("Failed kstrdup for bin_attribute attr.name");
810 rc = sysfs_create_bin_file(kobj, attr);
812 pr_warn("Error %d creating OPAL sysfs exports/%s file\n",
814 kfree(attr->attr.name);
822 static void __init opal_dump_region_init(void)
828 if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
831 /* Register kernel log buffer */
832 addr = log_buf_addr_get();
836 size = log_buf_len_get();
840 rc = opal_register_dump_region(OPAL_DUMP_REGION_LOG_BUF,
842 /* Don't warn if this is just an older OPAL that doesn't
843 * know about that call
845 if (rc && rc != OPAL_UNSUPPORTED)
846 pr_warn("DUMP: Failed to register kernel log buffer. "
850 static void opal_pdev_init(const char *compatible)
852 struct device_node *np;
854 for_each_compatible_node(np, NULL, compatible)
855 of_platform_device_create(np, NULL, NULL);
858 static void __init opal_imc_init_dev(void)
860 struct device_node *np;
862 np = of_find_compatible_node(NULL, NULL, IMC_DTB_COMPAT);
864 of_platform_device_create(np, NULL, NULL);
867 static int kopald(void *unused)
869 unsigned long timeout = msecs_to_jiffies(opal_heartbeat) + 1;
875 opal_handle_events();
877 set_current_state(TASK_INTERRUPTIBLE);
878 if (opal_have_pending_events())
879 __set_current_state(TASK_RUNNING);
881 schedule_timeout(timeout);
883 } while (!kthread_should_stop());
888 void opal_wake_poller(void)
891 wake_up_process(kopald_tsk);
894 static void opal_init_heartbeat(void)
896 /* Old firwmware, we assume the HVC heartbeat is sufficient */
897 if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
898 &opal_heartbeat) != 0)
902 kopald_tsk = kthread_run(kopald, NULL, "kopald");
905 static int __init opal_init(void)
907 struct device_node *np, *consoles, *leds;
910 opal_node = of_find_node_by_path("/ibm,opal");
912 pr_warn("Device node not found\n");
916 /* Register OPAL consoles if any ports */
917 consoles = of_find_node_by_path("/ibm,opal/consoles");
919 for_each_child_of_node(consoles, np) {
920 if (!of_node_name_eq(np, "serial"))
922 of_platform_device_create(np, NULL, NULL);
924 of_node_put(consoles);
927 /* Initialise OPAL messaging system */
928 opal_message_init(opal_node);
930 /* Initialise OPAL asynchronous completion interface */
931 opal_async_comp_init();
933 /* Initialise OPAL sensor interface */
936 /* Initialise OPAL hypervisor maintainence interrupt handling */
937 opal_hmi_handler_init();
939 /* Create i2c platform devices */
940 opal_pdev_init("ibm,opal-i2c");
942 /* Handle non-volatile memory devices */
943 opal_pdev_init("pmem-region");
945 /* Setup a heatbeat thread if requested by OPAL */
946 opal_init_heartbeat();
948 /* Detect In-Memory Collection counters and create devices*/
951 /* Create leds platform devices */
952 leds = of_find_node_by_path("/ibm,opal/leds");
954 of_platform_device_create(leds, "opal_leds", NULL);
958 /* Initialise OPAL message log interface */
961 /* Create "opal" kobject under /sys/firmware */
962 rc = opal_sysfs_init();
964 /* Export symbol map to userspace */
965 opal_export_symmap();
966 /* Setup dump region interface */
967 opal_dump_region_init();
968 /* Setup error log interface */
969 rc = opal_elog_init();
970 /* Setup code update interface */
971 opal_flash_update_init();
972 /* Setup platform dump extract interface */
973 opal_platform_dump_init();
974 /* Setup system parameters interface */
975 opal_sys_param_init();
976 /* Setup message log sysfs interface. */
977 opal_msglog_sysfs_init();
980 /* Export all properties */
983 /* Initialize platform devices: IPMI backend, PRD & flash interface */
984 opal_pdev_init("ibm,opal-ipmi");
985 opal_pdev_init("ibm,opal-flash");
986 opal_pdev_init("ibm,opal-prd");
988 /* Initialise platform device: oppanel interface */
989 opal_pdev_init("ibm,opal-oppanel");
991 /* Initialise OPAL kmsg dumper for flushing console on panic */
994 /* Initialise OPAL powercap interface */
995 opal_powercap_init();
997 /* Initialise OPAL Power-Shifting-Ratio interface */
1000 /* Initialise OPAL sensor groups */
1001 opal_sensor_groups_init();
1003 /* Initialise OPAL Power control interface */
1004 opal_power_control_init();
1008 machine_subsys_initcall(powernv, opal_init);
1010 void opal_shutdown(void)
1012 long rc = OPAL_BUSY;
1014 opal_event_shutdown();
1017 * Then sync with OPAL which ensure anything that can
1018 * potentially write to our memory has completed such
1019 * as an ongoing dump retrieval
1021 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
1022 rc = opal_sync_host_reboot();
1023 if (rc == OPAL_BUSY)
1024 opal_poll_events(NULL);
1029 /* Unregister memory dump region */
1030 if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
1031 opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
1034 /* Export this so that test modules can use it */
1035 EXPORT_SYMBOL_GPL(opal_invalid_call);
1036 EXPORT_SYMBOL_GPL(opal_xscom_read);
1037 EXPORT_SYMBOL_GPL(opal_xscom_write);
1038 EXPORT_SYMBOL_GPL(opal_ipmi_send);
1039 EXPORT_SYMBOL_GPL(opal_ipmi_recv);
1040 EXPORT_SYMBOL_GPL(opal_flash_read);
1041 EXPORT_SYMBOL_GPL(opal_flash_write);
1042 EXPORT_SYMBOL_GPL(opal_flash_erase);
1043 EXPORT_SYMBOL_GPL(opal_prd_msg);
1044 EXPORT_SYMBOL_GPL(opal_check_token);
1046 /* Convert a region of vmalloc memory to an opal sg list */
1047 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
1048 unsigned long vmalloc_size)
1050 struct opal_sg_list *sg, *first = NULL;
1051 unsigned long i = 0;
1053 sg = kzalloc(PAGE_SIZE, GFP_KERNEL);
1059 while (vmalloc_size > 0) {
1060 uint64_t data = vmalloc_to_pfn(vmalloc_addr) << PAGE_SHIFT;
1061 uint64_t length = min(vmalloc_size, PAGE_SIZE);
1063 sg->entry[i].data = cpu_to_be64(data);
1064 sg->entry[i].length = cpu_to_be64(length);
1067 if (i >= SG_ENTRIES_PER_NODE) {
1068 struct opal_sg_list *next;
1070 next = kzalloc(PAGE_SIZE, GFP_KERNEL);
1074 sg->length = cpu_to_be64(
1075 i * sizeof(struct opal_sg_entry) + 16);
1077 sg->next = cpu_to_be64(__pa(next));
1081 vmalloc_addr += length;
1082 vmalloc_size -= length;
1085 sg->length = cpu_to_be64(i * sizeof(struct opal_sg_entry) + 16);
1090 pr_err("%s : Failed to allocate memory\n", __func__);
1091 opal_free_sg_list(first);
1095 void opal_free_sg_list(struct opal_sg_list *sg)
1098 uint64_t next = be64_to_cpu(sg->next);
1109 int opal_error_code(int rc)
1112 case OPAL_SUCCESS: return 0;
1114 case OPAL_PARAMETER: return -EINVAL;
1115 case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
1117 case OPAL_BUSY_EVENT: return -EBUSY;
1118 case OPAL_NO_MEM: return -ENOMEM;
1119 case OPAL_PERMISSION: return -EPERM;
1121 case OPAL_UNSUPPORTED: return -EIO;
1122 case OPAL_HARDWARE: return -EIO;
1123 case OPAL_INTERNAL_ERROR: return -EIO;
1124 case OPAL_TIMEOUT: return -ETIMEDOUT;
1126 pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
1131 void powernv_set_nmmu_ptcr(unsigned long ptcr)
1135 if (firmware_has_feature(FW_FEATURE_OPAL)) {
1136 rc = opal_nmmu_set_ptcr(-1UL, ptcr);
1137 if (rc != OPAL_SUCCESS && rc != OPAL_UNSUPPORTED)
1138 pr_warn("%s: Unable to set nest mmu ptcr\n", __func__);
1142 EXPORT_SYMBOL_GPL(opal_poll_events);
1143 EXPORT_SYMBOL_GPL(opal_rtc_read);
1144 EXPORT_SYMBOL_GPL(opal_rtc_write);
1145 EXPORT_SYMBOL_GPL(opal_tpo_read);
1146 EXPORT_SYMBOL_GPL(opal_tpo_write);
1147 EXPORT_SYMBOL_GPL(opal_i2c_request);
1148 /* Export these symbols for PowerNV LED class driver */
1149 EXPORT_SYMBOL_GPL(opal_leds_get_ind);
1150 EXPORT_SYMBOL_GPL(opal_leds_set_ind);
1151 /* Export this symbol for PowerNV Operator Panel class driver */
1152 EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
1153 /* Export this for KVM */
1154 EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
1155 EXPORT_SYMBOL_GPL(opal_int_eoi);
1156 EXPORT_SYMBOL_GPL(opal_error_code);
1157 /* Export the below symbol for NX compression */
1158 EXPORT_SYMBOL(opal_nx_coproc_init);