// SPDX-License-Identifier: GPL-2.0
/*
 * sysfs.c - ACPI sysfs interface to userspace.
 */
6 #define pr_fmt(fmt) "ACPI: " fmt
8 #include <linux/init.h>
9 #include <linux/kernel.h>
10 #include <linux/moduleparam.h>
11 #include <linux/acpi.h>
15 #define _COMPONENT ACPI_SYSTEM_COMPONENT
16 ACPI_MODULE_NAME("sysfs");
18 #ifdef CONFIG_ACPI_DEBUG
20 * ACPI debug sysfs I/F, including:
21 * /sys/modules/acpi/parameters/debug_layer
22 * /sys/modules/acpi/parameters/debug_level
23 * /sys/modules/acpi/parameters/trace_method_name
24 * /sys/modules/acpi/parameters/trace_state
25 * /sys/modules/acpi/parameters/trace_debug_layer
26 * /sys/modules/acpi/parameters/trace_debug_level
37 #define ACPI_DEBUG_INIT(v) { .name = #v, .value = v }
39 static const struct acpi_dlayer acpi_debug_layers[] = {
40 ACPI_DEBUG_INIT(ACPI_UTILITIES),
41 ACPI_DEBUG_INIT(ACPI_HARDWARE),
42 ACPI_DEBUG_INIT(ACPI_EVENTS),
43 ACPI_DEBUG_INIT(ACPI_TABLES),
44 ACPI_DEBUG_INIT(ACPI_NAMESPACE),
45 ACPI_DEBUG_INIT(ACPI_PARSER),
46 ACPI_DEBUG_INIT(ACPI_DISPATCHER),
47 ACPI_DEBUG_INIT(ACPI_EXECUTER),
48 ACPI_DEBUG_INIT(ACPI_RESOURCES),
49 ACPI_DEBUG_INIT(ACPI_CA_DEBUGGER),
50 ACPI_DEBUG_INIT(ACPI_OS_SERVICES),
51 ACPI_DEBUG_INIT(ACPI_CA_DISASSEMBLER),
52 ACPI_DEBUG_INIT(ACPI_COMPILER),
53 ACPI_DEBUG_INIT(ACPI_TOOLS),
55 ACPI_DEBUG_INIT(ACPI_BUS_COMPONENT),
56 ACPI_DEBUG_INIT(ACPI_AC_COMPONENT),
57 ACPI_DEBUG_INIT(ACPI_BATTERY_COMPONENT),
58 ACPI_DEBUG_INIT(ACPI_BUTTON_COMPONENT),
59 ACPI_DEBUG_INIT(ACPI_SBS_COMPONENT),
60 ACPI_DEBUG_INIT(ACPI_FAN_COMPONENT),
61 ACPI_DEBUG_INIT(ACPI_PCI_COMPONENT),
62 ACPI_DEBUG_INIT(ACPI_POWER_COMPONENT),
63 ACPI_DEBUG_INIT(ACPI_CONTAINER_COMPONENT),
64 ACPI_DEBUG_INIT(ACPI_SYSTEM_COMPONENT),
65 ACPI_DEBUG_INIT(ACPI_THERMAL_COMPONENT),
66 ACPI_DEBUG_INIT(ACPI_MEMORY_DEVICE_COMPONENT),
67 ACPI_DEBUG_INIT(ACPI_VIDEO_COMPONENT),
68 ACPI_DEBUG_INIT(ACPI_PROCESSOR_COMPONENT),
71 static const struct acpi_dlevel acpi_debug_levels[] = {
72 ACPI_DEBUG_INIT(ACPI_LV_INIT),
73 ACPI_DEBUG_INIT(ACPI_LV_DEBUG_OBJECT),
74 ACPI_DEBUG_INIT(ACPI_LV_INFO),
75 ACPI_DEBUG_INIT(ACPI_LV_REPAIR),
76 ACPI_DEBUG_INIT(ACPI_LV_TRACE_POINT),
78 ACPI_DEBUG_INIT(ACPI_LV_INIT_NAMES),
79 ACPI_DEBUG_INIT(ACPI_LV_PARSE),
80 ACPI_DEBUG_INIT(ACPI_LV_LOAD),
81 ACPI_DEBUG_INIT(ACPI_LV_DISPATCH),
82 ACPI_DEBUG_INIT(ACPI_LV_EXEC),
83 ACPI_DEBUG_INIT(ACPI_LV_NAMES),
84 ACPI_DEBUG_INIT(ACPI_LV_OPREGION),
85 ACPI_DEBUG_INIT(ACPI_LV_BFIELD),
86 ACPI_DEBUG_INIT(ACPI_LV_TABLES),
87 ACPI_DEBUG_INIT(ACPI_LV_VALUES),
88 ACPI_DEBUG_INIT(ACPI_LV_OBJECTS),
89 ACPI_DEBUG_INIT(ACPI_LV_RESOURCES),
90 ACPI_DEBUG_INIT(ACPI_LV_USER_REQUESTS),
91 ACPI_DEBUG_INIT(ACPI_LV_PACKAGE),
93 ACPI_DEBUG_INIT(ACPI_LV_ALLOCATIONS),
94 ACPI_DEBUG_INIT(ACPI_LV_FUNCTIONS),
95 ACPI_DEBUG_INIT(ACPI_LV_OPTIMIZATIONS),
97 ACPI_DEBUG_INIT(ACPI_LV_MUTEX),
98 ACPI_DEBUG_INIT(ACPI_LV_THREADS),
99 ACPI_DEBUG_INIT(ACPI_LV_IO),
100 ACPI_DEBUG_INIT(ACPI_LV_INTERRUPTS),
102 ACPI_DEBUG_INIT(ACPI_LV_AML_DISASSEMBLE),
103 ACPI_DEBUG_INIT(ACPI_LV_VERBOSE_INFO),
104 ACPI_DEBUG_INIT(ACPI_LV_FULL_TABLES),
105 ACPI_DEBUG_INIT(ACPI_LV_EVENTS),
108 static int param_get_debug_layer(char *buffer, const struct kernel_param *kp)
113 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
115 for (i = 0; i < ARRAY_SIZE(acpi_debug_layers); i++) {
116 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
117 acpi_debug_layers[i].name,
118 acpi_debug_layers[i].value,
119 (acpi_dbg_layer & acpi_debug_layers[i].value)
123 sprintf(buffer + result, "%-25s\t0x%08X [%c]\n", "ACPI_ALL_DRIVERS",
125 (acpi_dbg_layer & ACPI_ALL_DRIVERS) ==
126 ACPI_ALL_DRIVERS ? '*' : (acpi_dbg_layer & ACPI_ALL_DRIVERS)
129 sprintf(buffer + result,
130 "--\ndebug_layer = 0x%08X ( * = enabled)\n",
136 static int param_get_debug_level(char *buffer, const struct kernel_param *kp)
141 result = sprintf(buffer, "%-25s\tHex SET\n", "Description");
143 for (i = 0; i < ARRAY_SIZE(acpi_debug_levels); i++) {
144 result += sprintf(buffer + result, "%-25s\t0x%08lX [%c]\n",
145 acpi_debug_levels[i].name,
146 acpi_debug_levels[i].value,
147 (acpi_dbg_level & acpi_debug_levels[i].value)
151 sprintf(buffer + result, "--\ndebug_level = 0x%08X (* = enabled)\n",
157 static const struct kernel_param_ops param_ops_debug_layer = {
158 .set = param_set_uint,
159 .get = param_get_debug_layer,
162 static const struct kernel_param_ops param_ops_debug_level = {
163 .set = param_set_uint,
164 .get = param_get_debug_level,
167 module_param_cb(debug_layer, ¶m_ops_debug_layer, &acpi_dbg_layer, 0644);
168 module_param_cb(debug_level, ¶m_ops_debug_level, &acpi_dbg_level, 0644);
170 static char trace_method_name[1024];
172 int param_set_trace_method_name(const char *val, const struct kernel_param *kp)
175 bool is_abs_path = true;
180 if ((is_abs_path && strlen(val) > 1023) ||
181 (!is_abs_path && strlen(val) > 1022)) {
182 pr_err("%s: string parameter too long\n", kp->name);
187 * It's not safe to update acpi_gbl_trace_method_name without
188 * having the tracer stopped, so we save the original tracer
189 * state and disable it.
191 saved_flags = acpi_gbl_trace_flags;
192 (void)acpi_debug_trace(NULL,
193 acpi_gbl_trace_dbg_level,
194 acpi_gbl_trace_dbg_layer,
197 /* This is a hack. We can't kmalloc in early boot. */
199 strcpy(trace_method_name, val);
201 trace_method_name[0] = '\\';
202 strcpy(trace_method_name+1, val);
205 /* Restore the original tracer state */
206 (void)acpi_debug_trace(trace_method_name,
207 acpi_gbl_trace_dbg_level,
208 acpi_gbl_trace_dbg_layer,
214 static int param_get_trace_method_name(char *buffer, const struct kernel_param *kp)
216 return scnprintf(buffer, PAGE_SIZE, "%s", acpi_gbl_trace_method_name);
219 static const struct kernel_param_ops param_ops_trace_method = {
220 .set = param_set_trace_method_name,
221 .get = param_get_trace_method_name,
224 static const struct kernel_param_ops param_ops_trace_attrib = {
225 .set = param_set_uint,
226 .get = param_get_uint,
229 module_param_cb(trace_method_name, ¶m_ops_trace_method, &trace_method_name, 0644);
230 module_param_cb(trace_debug_layer, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_layer, 0644);
231 module_param_cb(trace_debug_level, ¶m_ops_trace_attrib, &acpi_gbl_trace_dbg_level, 0644);
233 static int param_set_trace_state(const char *val, struct kernel_param *kp)
236 const char *method = trace_method_name;
239 /* So "xxx-once" comparison should go prior than "xxx" comparison */
240 #define acpi_compare_param(val, key) \
241 strncmp((val), (key), sizeof(key) - 1)
243 if (!acpi_compare_param(val, "enable")) {
245 flags = ACPI_TRACE_ENABLED;
246 } else if (!acpi_compare_param(val, "disable"))
248 else if (!acpi_compare_param(val, "method-once"))
249 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT;
250 else if (!acpi_compare_param(val, "method"))
251 flags = ACPI_TRACE_ENABLED;
252 else if (!acpi_compare_param(val, "opcode-once"))
253 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_ONESHOT | ACPI_TRACE_OPCODE;
254 else if (!acpi_compare_param(val, "opcode"))
255 flags = ACPI_TRACE_ENABLED | ACPI_TRACE_OPCODE;
259 status = acpi_debug_trace(method,
260 acpi_gbl_trace_dbg_level,
261 acpi_gbl_trace_dbg_layer,
263 if (ACPI_FAILURE(status))
269 static int param_get_trace_state(char *buffer, struct kernel_param *kp)
271 if (!(acpi_gbl_trace_flags & ACPI_TRACE_ENABLED))
272 return sprintf(buffer, "disable");
274 if (acpi_gbl_trace_method_name) {
275 if (acpi_gbl_trace_flags & ACPI_TRACE_ONESHOT)
276 return sprintf(buffer, "method-once");
278 return sprintf(buffer, "method");
280 return sprintf(buffer, "enable");
285 module_param_call(trace_state, param_set_trace_state, param_get_trace_state,
287 #endif /* CONFIG_ACPI_DEBUG */
290 /* /sys/modules/acpi/parameters/aml_debug_output */
292 module_param_named(aml_debug_output, acpi_gbl_enable_aml_debug_object,
294 MODULE_PARM_DESC(aml_debug_output,
295 "To enable/disable the ACPI Debug Object output.");
297 /* /sys/module/acpi/parameters/acpica_version */
298 static int param_get_acpica_version(char *buffer, struct kernel_param *kp)
302 result = sprintf(buffer, "%x", ACPI_CA_VERSION);
307 module_param_call(acpica_version, NULL, param_get_acpica_version, NULL, 0444);
/*
 * ACPI table sysfs I/F:
 * /sys/firmware/acpi/tables/
 * /sys/firmware/acpi/tables/data/
 * /sys/firmware/acpi/tables/dynamic/
 */

static LIST_HEAD(acpi_table_attr_list);
static struct kobject *tables_kobj;
static struct kobject *tables_data_kobj;
static struct kobject *dynamic_tables_kobj;
static struct kobject *hotplug_kobj;

#define ACPI_MAX_TABLE_INSTANCES	999
#define ACPI_INST_SIZE			4	/* including trailing 0 */
325 struct acpi_table_attr {
326 struct bin_attribute attr;
327 char name[ACPI_NAME_SIZE];
329 char filename[ACPI_NAME_SIZE+ACPI_INST_SIZE];
330 struct list_head node;
333 struct acpi_data_attr {
334 struct bin_attribute attr;
338 static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
339 struct bin_attribute *bin_attr, char *buf,
340 loff_t offset, size_t count)
342 struct acpi_table_attr *table_attr =
343 container_of(bin_attr, struct acpi_table_attr, attr);
344 struct acpi_table_header *table_header = NULL;
348 status = acpi_get_table(table_attr->name, table_attr->instance,
350 if (ACPI_FAILURE(status))
353 rc = memory_read_from_buffer(buf, count, &offset, table_header,
354 table_header->length);
355 acpi_put_table(table_header);
359 static int acpi_table_attr_init(struct kobject *tables_obj,
360 struct acpi_table_attr *table_attr,
361 struct acpi_table_header *table_header)
363 struct acpi_table_header *header = NULL;
364 struct acpi_table_attr *attr = NULL;
365 char instance_str[ACPI_INST_SIZE];
367 sysfs_attr_init(&table_attr->attr.attr);
368 ACPI_MOVE_NAME(table_attr->name, table_header->signature);
370 list_for_each_entry(attr, &acpi_table_attr_list, node) {
371 if (ACPI_COMPARE_NAME(table_attr->name, attr->name))
372 if (table_attr->instance < attr->instance)
373 table_attr->instance = attr->instance;
375 table_attr->instance++;
376 if (table_attr->instance > ACPI_MAX_TABLE_INSTANCES) {
377 pr_warn("%4.4s: too many table instances\n",
382 ACPI_MOVE_NAME(table_attr->filename, table_header->signature);
383 table_attr->filename[ACPI_NAME_SIZE] = '\0';
384 if (table_attr->instance > 1 || (table_attr->instance == 1 &&
386 (table_header->signature, 2, &header))) {
387 snprintf(instance_str, sizeof(instance_str), "%u",
388 table_attr->instance);
389 strcat(table_attr->filename, instance_str);
392 table_attr->attr.size = table_header->length;
393 table_attr->attr.read = acpi_table_show;
394 table_attr->attr.attr.name = table_attr->filename;
395 table_attr->attr.attr.mode = 0400;
397 return sysfs_create_bin_file(tables_obj, &table_attr->attr);
400 acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
402 struct acpi_table_attr *table_attr;
405 case ACPI_TABLE_EVENT_INSTALL:
407 kzalloc(sizeof(struct acpi_table_attr), GFP_KERNEL);
411 if (acpi_table_attr_init(dynamic_tables_kobj,
412 table_attr, table)) {
416 list_add_tail(&table_attr->node, &acpi_table_attr_list);
418 case ACPI_TABLE_EVENT_LOAD:
419 case ACPI_TABLE_EVENT_UNLOAD:
420 case ACPI_TABLE_EVENT_UNINSTALL:
422 * we do not need to do anything right now
423 * because the table is not deleted from the
424 * global table list when unloading it.
428 return AE_BAD_PARAMETER;
433 static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
434 struct bin_attribute *bin_attr, char *buf,
435 loff_t offset, size_t count)
437 struct acpi_data_attr *data_attr;
441 data_attr = container_of(bin_attr, struct acpi_data_attr, attr);
442 size = data_attr->attr.size;
450 if (count > size - offset)
451 count = size - offset;
453 base = acpi_os_map_iomem(data_attr->addr, size);
457 memcpy_fromio(buf, base + offset, count);
459 acpi_os_unmap_iomem(base, size);
464 static int acpi_bert_data_init(void *th, struct acpi_data_attr *data_attr)
466 struct acpi_table_bert *bert = th;
468 if (bert->header.length < sizeof(struct acpi_table_bert) ||
469 bert->region_length < sizeof(struct acpi_hest_generic_status)) {
473 data_attr->addr = bert->address;
474 data_attr->attr.size = bert->region_length;
475 data_attr->attr.attr.name = "BERT";
477 return sysfs_create_bin_file(tables_data_kobj, &data_attr->attr);
480 static struct acpi_data_obj {
482 int (*fn)(void *, struct acpi_data_attr *);
483 } acpi_data_objs[] = {
484 { ACPI_SIG_BERT, acpi_bert_data_init },
487 #define NUM_ACPI_DATA_OBJS ARRAY_SIZE(acpi_data_objs)
489 static int acpi_table_data_init(struct acpi_table_header *th)
491 struct acpi_data_attr *data_attr;
494 for (i = 0; i < NUM_ACPI_DATA_OBJS; i++) {
495 if (ACPI_COMPARE_NAME(th->signature, acpi_data_objs[i].name)) {
496 data_attr = kzalloc(sizeof(*data_attr), GFP_KERNEL);
499 sysfs_attr_init(&data_attr->attr.attr);
500 data_attr->attr.read = acpi_data_show;
501 data_attr->attr.attr.mode = 0400;
502 return acpi_data_objs[i].fn(th, data_attr);
508 static int acpi_tables_sysfs_init(void)
510 struct acpi_table_attr *table_attr;
511 struct acpi_table_header *table_header = NULL;
516 tables_kobj = kobject_create_and_add("tables", acpi_kobj);
520 tables_data_kobj = kobject_create_and_add("data", tables_kobj);
521 if (!tables_data_kobj)
522 goto err_tables_data;
524 dynamic_tables_kobj = kobject_create_and_add("dynamic", tables_kobj);
525 if (!dynamic_tables_kobj)
526 goto err_dynamic_tables;
528 for (table_index = 0;; table_index++) {
529 status = acpi_get_table_by_index(table_index, &table_header);
531 if (status == AE_BAD_PARAMETER)
534 if (ACPI_FAILURE(status))
537 table_attr = kzalloc(sizeof(*table_attr), GFP_KERNEL);
541 ret = acpi_table_attr_init(tables_kobj,
542 table_attr, table_header);
547 list_add_tail(&table_attr->node, &acpi_table_attr_list);
548 acpi_table_data_init(table_header);
551 kobject_uevent(tables_kobj, KOBJ_ADD);
552 kobject_uevent(tables_data_kobj, KOBJ_ADD);
553 kobject_uevent(dynamic_tables_kobj, KOBJ_ADD);
557 kobject_put(tables_data_kobj);
559 kobject_put(tables_kobj);
565 * Detailed ACPI IRQ counters:
566 * /sys/firmware/acpi/interrupts/
569 u32 acpi_irq_handled;
570 u32 acpi_irq_not_handled;
573 #define COUNT_SCI 1 /* acpi_irq_handled */
574 #define COUNT_SCI_NOT 2 /* acpi_irq_not_handled */
575 #define COUNT_ERROR 3 /* other */
576 #define NUM_COUNTERS_EXTRA 4
578 struct event_counter {
583 static struct event_counter *all_counters;
585 static u32 num_counters;
586 static struct attribute **all_attrs;
587 static u32 acpi_gpe_count;
589 static struct attribute_group interrupt_stats_attr_group = {
590 .name = "interrupts",
593 static struct kobj_attribute *counter_attrs;
595 static void delete_gpe_attr_array(void)
597 struct event_counter *tmp = all_counters;
605 for (i = 0; i < num_gpes; i++)
606 kfree(counter_attrs[i].attr.name);
608 kfree(counter_attrs);
615 static void gpe_count(u32 gpe_number)
622 if (gpe_number < num_gpes)
623 all_counters[gpe_number].count++;
625 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
626 COUNT_ERROR].count++;
631 static void fixed_event_count(u32 event_number)
636 if (event_number < ACPI_NUM_FIXED_EVENTS)
637 all_counters[num_gpes + event_number].count++;
639 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS +
640 COUNT_ERROR].count++;
645 static void acpi_global_event_handler(u32 event_type, acpi_handle device,
646 u32 event_number, void *context)
648 if (event_type == ACPI_EVENT_TYPE_GPE) {
649 gpe_count(event_number);
650 pr_debug("GPE event 0x%02x\n", event_number);
651 } else if (event_type == ACPI_EVENT_TYPE_FIXED) {
652 fixed_event_count(event_number);
653 pr_debug("Fixed event 0x%02x\n", event_number);
655 pr_debug("Other event 0x%02x\n", event_number);
659 static int get_status(u32 index, acpi_event_status *status,
664 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
667 if (index < num_gpes) {
668 result = acpi_get_gpe_device(index, handle);
670 ACPI_EXCEPTION((AE_INFO, AE_NOT_FOUND,
671 "Invalid GPE 0x%x", index));
674 result = acpi_get_gpe_status(*handle, index, status);
675 } else if (index < (num_gpes + ACPI_NUM_FIXED_EVENTS))
676 result = acpi_get_event_status(index - num_gpes, status);
681 static ssize_t counter_show(struct kobject *kobj,
682 struct kobj_attribute *attr, char *buf)
684 int index = attr - counter_attrs;
687 acpi_event_status status;
690 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI].count =
692 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT].count =
693 acpi_irq_not_handled;
694 all_counters[num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE].count =
696 size = sprintf(buf, "%8u", all_counters[index].count);
698 /* "gpe_all" or "sci" */
699 if (index >= num_gpes + ACPI_NUM_FIXED_EVENTS)
702 result = get_status(index, &status, &handle);
706 if (status & ACPI_EVENT_FLAG_ENABLE_SET)
707 size += sprintf(buf + size, " EN");
709 size += sprintf(buf + size, " ");
710 if (status & ACPI_EVENT_FLAG_STATUS_SET)
711 size += sprintf(buf + size, " STS");
713 size += sprintf(buf + size, " ");
715 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
716 size += sprintf(buf + size, " invalid ");
717 else if (status & ACPI_EVENT_FLAG_ENABLED)
718 size += sprintf(buf + size, " enabled ");
719 else if (status & ACPI_EVENT_FLAG_WAKE_ENABLED)
720 size += sprintf(buf + size, " wake_enabled");
722 size += sprintf(buf + size, " disabled ");
723 if (status & ACPI_EVENT_FLAG_MASKED)
724 size += sprintf(buf + size, " masked ");
726 size += sprintf(buf + size, " unmasked");
729 size += sprintf(buf + size, "\n");
730 return result ? result : size;
734 * counter_set() sets the specified counter.
735 * setting the total "sci" file to any value clears all counters.
736 * enable/disable/clear a gpe/fixed event in user space.
738 static ssize_t counter_set(struct kobject *kobj,
739 struct kobj_attribute *attr, const char *buf,
742 int index = attr - counter_attrs;
743 acpi_event_status status;
748 if (index == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI) {
750 for (i = 0; i < num_counters; ++i)
751 all_counters[i].count = 0;
753 acpi_irq_handled = 0;
754 acpi_irq_not_handled = 0;
758 /* show the event status for both GPEs and Fixed Events */
759 result = get_status(index, &status, &handle);
763 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
764 printk(KERN_WARNING PREFIX
765 "Can not change Invalid GPE/Fixed Event status\n");
769 if (index < num_gpes) {
770 if (!strcmp(buf, "disable\n") &&
771 (status & ACPI_EVENT_FLAG_ENABLED))
772 result = acpi_disable_gpe(handle, index);
773 else if (!strcmp(buf, "enable\n") &&
774 !(status & ACPI_EVENT_FLAG_ENABLED))
775 result = acpi_enable_gpe(handle, index);
776 else if (!strcmp(buf, "clear\n") &&
777 (status & ACPI_EVENT_FLAG_STATUS_SET))
778 result = acpi_clear_gpe(handle, index);
779 else if (!strcmp(buf, "mask\n"))
780 result = acpi_mask_gpe(handle, index, TRUE);
781 else if (!strcmp(buf, "unmask\n"))
782 result = acpi_mask_gpe(handle, index, FALSE);
783 else if (!kstrtoul(buf, 0, &tmp))
784 all_counters[index].count = tmp;
787 } else if (index < num_gpes + ACPI_NUM_FIXED_EVENTS) {
788 int event = index - num_gpes;
789 if (!strcmp(buf, "disable\n") &&
790 (status & ACPI_EVENT_FLAG_ENABLE_SET))
791 result = acpi_disable_event(event, ACPI_NOT_ISR);
792 else if (!strcmp(buf, "enable\n") &&
793 !(status & ACPI_EVENT_FLAG_ENABLE_SET))
794 result = acpi_enable_event(event, ACPI_NOT_ISR);
795 else if (!strcmp(buf, "clear\n") &&
796 (status & ACPI_EVENT_FLAG_STATUS_SET))
797 result = acpi_clear_event(event);
798 else if (!kstrtoul(buf, 0, &tmp))
799 all_counters[index].count = tmp;
803 all_counters[index].count = strtoul(buf, NULL, 0);
805 if (ACPI_FAILURE(result))
808 return result ? result : size;
812 * A Quirk Mechanism for GPE Flooding Prevention:
814 * Quirks may be needed to prevent GPE flooding on a specific GPE. The
815 * flooding typically cannot be detected and automatically prevented by
816 * ACPI_GPE_DISPATCH_NONE check because there is a _Lxx/_Exx prepared in
817 * the AML tables. This normally indicates a feature gap in Linux, thus
818 * instead of providing endless quirk tables, we provide a boot parameter
819 * for those who want this quirk. For example, if the users want to prevent
820 * the GPE flooding for GPE 00, they need to specify the following boot
823 * The masking status can be modified by the following runtime controlling
825 * echo unmask > /sys/firmware/acpi/interrupts/gpe00
829 * Currently, the GPE flooding prevention only supports to mask the GPEs
830 * numbered from 00 to 7f.
832 #define ACPI_MASKABLE_GPE_MAX 0x80
834 static u64 __initdata acpi_masked_gpes;
836 static int __init acpi_gpe_set_masked_gpes(char *val)
840 if (kstrtou8(val, 0, &gpe) || gpe > ACPI_MASKABLE_GPE_MAX)
842 acpi_masked_gpes |= ((u64)1<<gpe);
846 __setup("acpi_mask_gpe=", acpi_gpe_set_masked_gpes);
848 void __init acpi_gpe_apply_masked_gpes(void)
855 gpe < min_t(u8, ACPI_MASKABLE_GPE_MAX, acpi_current_gpe_count);
857 if (acpi_masked_gpes & ((u64)1<<gpe)) {
858 status = acpi_get_gpe_device(gpe, &handle);
859 if (ACPI_SUCCESS(status)) {
860 pr_info("Masking GPE 0x%x.\n", gpe);
861 (void)acpi_mask_gpe(handle, gpe, TRUE);
867 void acpi_irq_stats_init(void)
875 num_gpes = acpi_current_gpe_count;
876 num_counters = num_gpes + ACPI_NUM_FIXED_EVENTS + NUM_COUNTERS_EXTRA;
878 all_attrs = kzalloc(sizeof(struct attribute *) * (num_counters + 1),
880 if (all_attrs == NULL)
883 all_counters = kzalloc(sizeof(struct event_counter) * (num_counters),
885 if (all_counters == NULL)
888 status = acpi_install_global_event_handler(acpi_global_event_handler, NULL);
889 if (ACPI_FAILURE(status))
892 counter_attrs = kzalloc(sizeof(struct kobj_attribute) * (num_counters),
894 if (counter_attrs == NULL)
897 for (i = 0; i < num_counters; ++i) {
902 sprintf(buffer, "gpe%02X", i);
903 else if (i == num_gpes + ACPI_EVENT_PMTIMER)
904 sprintf(buffer, "ff_pmtimer");
905 else if (i == num_gpes + ACPI_EVENT_GLOBAL)
906 sprintf(buffer, "ff_gbl_lock");
907 else if (i == num_gpes + ACPI_EVENT_POWER_BUTTON)
908 sprintf(buffer, "ff_pwr_btn");
909 else if (i == num_gpes + ACPI_EVENT_SLEEP_BUTTON)
910 sprintf(buffer, "ff_slp_btn");
911 else if (i == num_gpes + ACPI_EVENT_RTC)
912 sprintf(buffer, "ff_rt_clk");
913 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_GPE)
914 sprintf(buffer, "gpe_all");
915 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI)
916 sprintf(buffer, "sci");
917 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_SCI_NOT)
918 sprintf(buffer, "sci_not");
919 else if (i == num_gpes + ACPI_NUM_FIXED_EVENTS + COUNT_ERROR)
920 sprintf(buffer, "error");
922 sprintf(buffer, "bug%02X", i);
924 name = kstrdup(buffer, GFP_KERNEL);
928 sysfs_attr_init(&counter_attrs[i].attr);
929 counter_attrs[i].attr.name = name;
930 counter_attrs[i].attr.mode = 0644;
931 counter_attrs[i].show = counter_show;
932 counter_attrs[i].store = counter_set;
934 all_attrs[i] = &counter_attrs[i].attr;
937 interrupt_stats_attr_group.attrs = all_attrs;
938 if (!sysfs_create_group(acpi_kobj, &interrupt_stats_attr_group))
942 delete_gpe_attr_array();
946 static void __exit interrupt_stats_exit(void)
948 sysfs_remove_group(acpi_kobj, &interrupt_stats_attr_group);
950 delete_gpe_attr_array();
956 acpi_show_profile(struct kobject *kobj, struct kobj_attribute *attr,
959 return sprintf(buf, "%d\n", acpi_gbl_FADT.preferred_profile);
962 static const struct kobj_attribute pm_profile_attr =
963 __ATTR(pm_profile, S_IRUGO, acpi_show_profile, NULL);
965 static ssize_t hotplug_enabled_show(struct kobject *kobj,
966 struct kobj_attribute *attr, char *buf)
968 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
970 return sprintf(buf, "%d\n", hotplug->enabled);
973 static ssize_t hotplug_enabled_store(struct kobject *kobj,
974 struct kobj_attribute *attr,
975 const char *buf, size_t size)
977 struct acpi_hotplug_profile *hotplug = to_acpi_hotplug_profile(kobj);
980 if (kstrtouint(buf, 10, &val) || val > 1)
983 acpi_scan_hotplug_enabled(hotplug, val);
987 static struct kobj_attribute hotplug_enabled_attr =
988 __ATTR(enabled, S_IRUGO | S_IWUSR, hotplug_enabled_show,
989 hotplug_enabled_store);
991 static struct attribute *hotplug_profile_attrs[] = {
992 &hotplug_enabled_attr.attr,
996 static struct kobj_type acpi_hotplug_profile_ktype = {
997 .sysfs_ops = &kobj_sysfs_ops,
998 .default_attrs = hotplug_profile_attrs,
1001 void acpi_sysfs_add_hotplug_profile(struct acpi_hotplug_profile *hotplug,
1009 error = kobject_init_and_add(&hotplug->kobj,
1010 &acpi_hotplug_profile_ktype, hotplug_kobj, "%s", name);
1012 kobject_put(&hotplug->kobj);
1016 kobject_uevent(&hotplug->kobj, KOBJ_ADD);
1020 pr_err(PREFIX "Unable to add hotplug profile '%s'\n", name);
1023 static ssize_t force_remove_show(struct kobject *kobj,
1024 struct kobj_attribute *attr, char *buf)
1026 return sprintf(buf, "%d\n", 0);
1029 static ssize_t force_remove_store(struct kobject *kobj,
1030 struct kobj_attribute *attr,
1031 const char *buf, size_t size)
1036 ret = strtobool(buf, &val);
1041 pr_err("Enabling force_remove is not supported anymore. Please report to linux-acpi@vger.kernel.org if you depend on this functionality\n");
1047 static const struct kobj_attribute force_remove_attr =
1048 __ATTR(force_remove, S_IRUGO | S_IWUSR, force_remove_show,
1049 force_remove_store);
1051 int __init acpi_sysfs_init(void)
1055 result = acpi_tables_sysfs_init();
1059 hotplug_kobj = kobject_create_and_add("hotplug", acpi_kobj);
1063 result = sysfs_create_file(hotplug_kobj, &force_remove_attr.attr);
1067 result = sysfs_create_file(acpi_kobj, &pm_profile_attr.attr);