1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 Red Hat, Inc.
 * All Rights Reserved.
 */
8 #include "xfs_shared.h"
9 #include "xfs_format.h"
10 #include "xfs_log_format.h"
11 #include "xfs_trans_resv.h"
12 #include "xfs_sysfs.h"
14 #include "xfs_log_priv.h"
15 #include "xfs_mount.h"
17 struct xfs_sysfs_attr {
18 struct attribute attr;
19 ssize_t (*show)(struct kobject *kobject, char *buf);
20 ssize_t (*store)(struct kobject *kobject, const char *buf,
24 static inline struct xfs_sysfs_attr *
25 to_attr(struct attribute *attr)
27 return container_of(attr, struct xfs_sysfs_attr, attr);
/*
 * Declare a named xfs_sysfs_attr with standard read-write, read-only or
 * write-only sysfs permissions via the kernel __ATTR_* helpers.
 */
#define XFS_SYSFS_ATTR_RW(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RW(name)
#define XFS_SYSFS_ATTR_RO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_RO(name)
#define XFS_SYSFS_ATTR_WO(name) \
	static struct xfs_sysfs_attr xfs_sysfs_attr_##name = __ATTR_WO(name)

/* Shorthand for listing an attribute in a struct attribute array. */
#define ATTR_LIST(name) &xfs_sysfs_attr_##name.attr
40 xfs_sysfs_object_show(
41 struct kobject *kobject,
42 struct attribute *attr,
45 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
47 return xfs_attr->show ? xfs_attr->show(kobject, buf) : 0;
51 xfs_sysfs_object_store(
52 struct kobject *kobject,
53 struct attribute *attr,
57 struct xfs_sysfs_attr *xfs_attr = to_attr(attr);
59 return xfs_attr->store ? xfs_attr->store(kobject, buf, count) : 0;
62 static const struct sysfs_ops xfs_sysfs_ops = {
63 .show = xfs_sysfs_object_show,
64 .store = xfs_sysfs_object_store,
67 static struct attribute *xfs_mp_attrs[] = {
70 ATTRIBUTE_GROUPS(xfs_mp);
72 const struct kobj_type xfs_mp_ktype = {
73 .release = xfs_sysfs_release,
74 .sysfs_ops = &xfs_sysfs_ops,
75 .default_groups = xfs_mp_groups,
83 struct kobject *kobject,
90 ret = kstrtoint(buf, 0, &val);
95 xfs_globals.bug_on_assert = true;
97 xfs_globals.bug_on_assert = false;
106 struct kobject *kobject,
109 return sysfs_emit(buf, "%d\n", xfs_globals.bug_on_assert);
111 XFS_SYSFS_ATTR_RW(bug_on_assert);
114 log_recovery_delay_store(
115 struct kobject *kobject,
122 ret = kstrtoint(buf, 0, &val);
126 if (val < 0 || val > 60)
129 xfs_globals.log_recovery_delay = val;
135 log_recovery_delay_show(
136 struct kobject *kobject,
139 return sysfs_emit(buf, "%d\n", xfs_globals.log_recovery_delay);
141 XFS_SYSFS_ATTR_RW(log_recovery_delay);
145 struct kobject *kobject,
152 ret = kstrtoint(buf, 0, &val);
156 if (val < 0 || val > 60)
159 xfs_globals.mount_delay = val;
166 struct kobject *kobject,
169 return sysfs_emit(buf, "%d\n", xfs_globals.mount_delay);
171 XFS_SYSFS_ATTR_RW(mount_delay);
175 struct kobject *kobject,
181 ret = kstrtobool(buf, &xfs_globals.always_cow);
189 struct kobject *kobject,
192 return sysfs_emit(buf, "%d\n", xfs_globals.always_cow);
194 XFS_SYSFS_ATTR_RW(always_cow);
197 * Override how many threads the parallel work queue is allowed to create.
198 * This has to be a debug-only global (instead of an errortag) because one of
199 * the main users of parallel workqueues is mount time quotacheck.
203 struct kobject *kobject,
210 ret = kstrtoint(buf, 0, &val);
214 if (val < -1 || val > num_possible_cpus())
217 xfs_globals.pwork_threads = val;
224 struct kobject *kobject,
227 return sysfs_emit(buf, "%d\n", xfs_globals.pwork_threads);
229 XFS_SYSFS_ATTR_RW(pwork_threads);
232 * The "LARP" (Logged extended Attribute Recovery Persistence) debugging knob
233 * sets the XFS_DA_OP_LOGGED flag on all xfs_attr_set operations performed on
234 * V5 filesystems. As a result, the intermediate progress of all setxattr and
235 * removexattr operations are tracked via the log and can be restarted during
236 * recovery. This is useful for testing xattr recovery prior to merging of the
237 * parent pointer feature which requires it to maintain consistency, and may be
238 * enabled for userspace xattrs in the future.
242 struct kobject *kobject,
248 ret = kstrtobool(buf, &xfs_globals.larp);
256 struct kobject *kobject,
259 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.larp);
261 XFS_SYSFS_ATTR_RW(larp);
264 bload_leaf_slack_store(
265 struct kobject *kobject,
272 ret = kstrtoint(buf, 0, &val);
276 xfs_globals.bload_leaf_slack = val;
281 bload_leaf_slack_show(
282 struct kobject *kobject,
285 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_leaf_slack);
287 XFS_SYSFS_ATTR_RW(bload_leaf_slack);
290 bload_node_slack_store(
291 struct kobject *kobject,
298 ret = kstrtoint(buf, 0, &val);
302 xfs_globals.bload_node_slack = val;
307 bload_node_slack_show(
308 struct kobject *kobject,
311 return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_node_slack);
313 XFS_SYSFS_ATTR_RW(bload_node_slack);
315 static struct attribute *xfs_dbg_attrs[] = {
316 ATTR_LIST(bug_on_assert),
317 ATTR_LIST(log_recovery_delay),
318 ATTR_LIST(mount_delay),
319 ATTR_LIST(always_cow),
320 ATTR_LIST(pwork_threads),
322 ATTR_LIST(bload_leaf_slack),
323 ATTR_LIST(bload_node_slack),
326 ATTRIBUTE_GROUPS(xfs_dbg);
328 const struct kobj_type xfs_dbg_ktype = {
329 .release = xfs_sysfs_release,
330 .sysfs_ops = &xfs_sysfs_ops,
331 .default_groups = xfs_dbg_groups,
338 static inline struct xstats *
339 to_xstats(struct kobject *kobject)
341 struct xfs_kobj *kobj = to_kobj(kobject);
343 return container_of(kobj, struct xstats, xs_kobj);
348 struct kobject *kobject,
351 struct xstats *stats = to_xstats(kobject);
353 return xfs_stats_format(stats->xs_stats, buf);
355 XFS_SYSFS_ATTR_RO(stats);
359 struct kobject *kobject,
365 struct xstats *stats = to_xstats(kobject);
367 ret = kstrtoint(buf, 0, &val);
374 xfs_stats_clearall(stats->xs_stats);
377 XFS_SYSFS_ATTR_WO(stats_clear);
379 static struct attribute *xfs_stats_attrs[] = {
381 ATTR_LIST(stats_clear),
384 ATTRIBUTE_GROUPS(xfs_stats);
386 const struct kobj_type xfs_stats_ktype = {
387 .release = xfs_sysfs_release,
388 .sysfs_ops = &xfs_sysfs_ops,
389 .default_groups = xfs_stats_groups,
394 static inline struct xlog *
395 to_xlog(struct kobject *kobject)
397 struct xfs_kobj *kobj = to_kobj(kobject);
399 return container_of(kobj, struct xlog, l_kobj);
404 struct kobject *kobject,
409 struct xlog *log = to_xlog(kobject);
411 spin_lock(&log->l_icloglock);
412 cycle = log->l_curr_cycle;
413 block = log->l_curr_block;
414 spin_unlock(&log->l_icloglock);
416 return sysfs_emit(buf, "%d:%d\n", cycle, block);
418 XFS_SYSFS_ATTR_RO(log_head_lsn);
422 struct kobject *kobject,
427 struct xlog *log = to_xlog(kobject);
429 xlog_crack_atomic_lsn(&log->l_tail_lsn, &cycle, &block);
430 return sysfs_emit(buf, "%d:%d\n", cycle, block);
432 XFS_SYSFS_ATTR_RO(log_tail_lsn);
435 reserve_grant_head_show(
436 struct kobject *kobject,
442 struct xlog *log = to_xlog(kobject);
444 xlog_crack_grant_head(&log->l_reserve_head.grant, &cycle, &bytes);
445 return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
447 XFS_SYSFS_ATTR_RO(reserve_grant_head);
450 write_grant_head_show(
451 struct kobject *kobject,
456 struct xlog *log = to_xlog(kobject);
458 xlog_crack_grant_head(&log->l_write_head.grant, &cycle, &bytes);
459 return sysfs_emit(buf, "%d:%d\n", cycle, bytes);
461 XFS_SYSFS_ATTR_RO(write_grant_head);
463 static struct attribute *xfs_log_attrs[] = {
464 ATTR_LIST(log_head_lsn),
465 ATTR_LIST(log_tail_lsn),
466 ATTR_LIST(reserve_grant_head),
467 ATTR_LIST(write_grant_head),
470 ATTRIBUTE_GROUPS(xfs_log);
472 const struct kobj_type xfs_log_ktype = {
473 .release = xfs_sysfs_release,
474 .sysfs_ops = &xfs_sysfs_ops,
475 .default_groups = xfs_log_groups,
/*
 * Metadata IO error configuration
 *
 * The sysfs structure here is:
 *	...xfs/<dev>/error/<class>/<errno>/<error_attrs>
 *
 * where <class> allows us to discriminate between data IO and metadata IO,
 * and any other future type of IO (e.g. special inode or directory error
 * handling) we care to support.
 */
488 static inline struct xfs_error_cfg *
489 to_error_cfg(struct kobject *kobject)
491 struct xfs_kobj *kobj = to_kobj(kobject);
492 return container_of(kobj, struct xfs_error_cfg, kobj);
495 static inline struct xfs_mount *
496 err_to_mp(struct kobject *kobject)
498 struct xfs_kobj *kobj = to_kobj(kobject);
499 return container_of(kobj, struct xfs_mount, m_error_kobj);
504 struct kobject *kobject,
508 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
510 if (cfg->max_retries == XFS_ERR_RETRY_FOREVER)
513 retries = cfg->max_retries;
515 return sysfs_emit(buf, "%d\n", retries);
520 struct kobject *kobject,
524 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
528 ret = kstrtoint(buf, 0, &val);
536 cfg->max_retries = XFS_ERR_RETRY_FOREVER;
538 cfg->max_retries = val;
541 XFS_SYSFS_ATTR_RW(max_retries);
544 retry_timeout_seconds_show(
545 struct kobject *kobject,
549 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
551 if (cfg->retry_timeout == XFS_ERR_RETRY_FOREVER)
554 timeout = jiffies_to_msecs(cfg->retry_timeout) / MSEC_PER_SEC;
556 return sysfs_emit(buf, "%d\n", timeout);
560 retry_timeout_seconds_store(
561 struct kobject *kobject,
565 struct xfs_error_cfg *cfg = to_error_cfg(kobject);
569 ret = kstrtoint(buf, 0, &val);
573 /* 1 day timeout maximum, -1 means infinite */
574 if (val < -1 || val > 86400)
578 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
580 cfg->retry_timeout = msecs_to_jiffies(val * MSEC_PER_SEC);
581 ASSERT(msecs_to_jiffies(val * MSEC_PER_SEC) < LONG_MAX);
585 XFS_SYSFS_ATTR_RW(retry_timeout_seconds);
588 fail_at_unmount_show(
589 struct kobject *kobject,
592 struct xfs_mount *mp = err_to_mp(kobject);
594 return sysfs_emit(buf, "%d\n", mp->m_fail_unmount);
598 fail_at_unmount_store(
599 struct kobject *kobject,
603 struct xfs_mount *mp = err_to_mp(kobject);
607 ret = kstrtoint(buf, 0, &val);
611 if (val < 0 || val > 1)
614 mp->m_fail_unmount = val;
617 XFS_SYSFS_ATTR_RW(fail_at_unmount);
619 static struct attribute *xfs_error_attrs[] = {
620 ATTR_LIST(max_retries),
621 ATTR_LIST(retry_timeout_seconds),
624 ATTRIBUTE_GROUPS(xfs_error);
626 static const struct kobj_type xfs_error_cfg_ktype = {
627 .release = xfs_sysfs_release,
628 .sysfs_ops = &xfs_sysfs_ops,
629 .default_groups = xfs_error_groups,
632 static const struct kobj_type xfs_error_ktype = {
633 .release = xfs_sysfs_release,
634 .sysfs_ops = &xfs_sysfs_ops,
638 * Error initialization tables. These need to be ordered in the same
639 * order as the enums used to index the array. All class init tables need to
640 * define a "default" behaviour as the first entry, all other entries can be
643 struct xfs_error_init {
646 int retry_timeout; /* in seconds */
649 static const struct xfs_error_init xfs_error_meta_init[XFS_ERR_ERRNO_MAX] = {
651 .max_retries = XFS_ERR_RETRY_FOREVER,
652 .retry_timeout = XFS_ERR_RETRY_FOREVER,
655 .max_retries = XFS_ERR_RETRY_FOREVER,
656 .retry_timeout = XFS_ERR_RETRY_FOREVER,
659 .max_retries = XFS_ERR_RETRY_FOREVER,
660 .retry_timeout = XFS_ERR_RETRY_FOREVER,
663 .max_retries = 0, /* We can't recover from devices disappearing */
669 xfs_error_sysfs_init_class(
670 struct xfs_mount *mp,
672 const char *parent_name,
673 struct xfs_kobj *parent_kobj,
674 const struct xfs_error_init init[])
676 struct xfs_error_cfg *cfg;
680 ASSERT(class < XFS_ERR_CLASS_MAX);
682 error = xfs_sysfs_init(parent_kobj, &xfs_error_ktype,
683 &mp->m_error_kobj, parent_name);
687 for (i = 0; i < XFS_ERR_ERRNO_MAX; i++) {
688 cfg = &mp->m_error_cfg[class][i];
689 error = xfs_sysfs_init(&cfg->kobj, &xfs_error_cfg_ktype,
690 parent_kobj, init[i].name);
694 cfg->max_retries = init[i].max_retries;
695 if (init[i].retry_timeout == XFS_ERR_RETRY_FOREVER)
696 cfg->retry_timeout = XFS_ERR_RETRY_FOREVER;
698 cfg->retry_timeout = msecs_to_jiffies(
699 init[i].retry_timeout * MSEC_PER_SEC);
704 /* unwind the entries that succeeded */
705 for (i--; i >= 0; i--) {
706 cfg = &mp->m_error_cfg[class][i];
707 xfs_sysfs_del(&cfg->kobj);
709 xfs_sysfs_del(parent_kobj);
714 xfs_error_sysfs_init(
715 struct xfs_mount *mp)
719 /* .../xfs/<dev>/error/ */
720 error = xfs_sysfs_init(&mp->m_error_kobj, &xfs_error_ktype,
721 &mp->m_kobj, "error");
725 error = sysfs_create_file(&mp->m_error_kobj.kobject,
726 ATTR_LIST(fail_at_unmount));
731 /* .../xfs/<dev>/error/metadata/ */
732 error = xfs_error_sysfs_init_class(mp, XFS_ERR_METADATA,
733 "metadata", &mp->m_error_meta_kobj,
734 xfs_error_meta_init);
741 xfs_sysfs_del(&mp->m_error_kobj);
747 struct xfs_mount *mp)
749 struct xfs_error_cfg *cfg;
752 for (i = 0; i < XFS_ERR_CLASS_MAX; i++) {
753 for (j = 0; j < XFS_ERR_ERRNO_MAX; j++) {
754 cfg = &mp->m_error_cfg[i][j];
756 xfs_sysfs_del(&cfg->kobj);
759 xfs_sysfs_del(&mp->m_error_meta_kobj);
760 xfs_sysfs_del(&mp->m_error_kobj);
763 struct xfs_error_cfg *
765 struct xfs_mount *mp,
769 struct xfs_error_cfg *cfg;
776 cfg = &mp->m_error_cfg[error_class][XFS_ERR_EIO];
779 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENOSPC];
782 cfg = &mp->m_error_cfg[error_class][XFS_ERR_ENODEV];
785 cfg = &mp->m_error_cfg[error_class][XFS_ERR_DEFAULT];