4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 #define DEBUG_SUBSYSTEM S_LLITE
34 #include "../include/lprocfs_status.h"
35 #include <linux/seq_file.h>
36 #include "../include/obd_support.h"
38 #include "llite_internal.h"
39 #include "vvp_internal.h"
41 /* debugfs llite mount point registration */
/* Forward declarations: these seq_file ops are defined near the end of this
 * file but are wired into the debugfs tree in ldebugfs_register_mountpoint().
 */
42 static struct file_operations ll_rw_extents_stats_fops;
43 static struct file_operations ll_rw_extents_stats_pp_fops;
44 static struct file_operations ll_rw_offset_stats_fops;
/* sysfs: print the backend filesystem block size (os_bsize) obtained from a
 * (possibly cached) statfs; the negative cache-seconds argument requests data
 * no older than OBD_STATFS_CACHE_SECONDS. NOTE(review): intermediate lines of
 * this function are not visible here — rc handling is presumed but unverified.
 */
46 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
49 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
51 	struct obd_statfs osfs;
54 	rc = ll_statfs_internal(sbi->ll_sb, &osfs,
55 				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
58 	return sprintf(buf, "%u\n", osfs.os_bsize);
62 LUSTRE_RO_ATTR(blocksize);
/* sysfs: total capacity in kilobytes. Multiplies os_blocks by the block size
 * expressed in kB units (blk_size = os_bsize >> 10); the shift loop scales
 * result accordingly. NOTE(review): the loop body is elided in this view —
 * presumably result <<= 1 per iteration; confirm against full source.
 */
64 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
67 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
69 	struct obd_statfs osfs;
72 	rc = ll_statfs_internal(sbi->ll_sb, &osfs,
73 				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
76 		__u32 blk_size = osfs.os_bsize >> 10;
77 		__u64 result = osfs.os_blocks;
79 		while (blk_size >>= 1)
82 		rc = sprintf(buf, "%llu\n", result);
87 LUSTRE_RO_ATTR(kbytestotal);
/* sysfs: free capacity in kilobytes, from os_bfree; same scaling scheme as
 * kbytestotal_show() above (loop body elided in this view).
 */
89 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
92 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
94 	struct obd_statfs osfs;
97 	rc = ll_statfs_internal(sbi->ll_sb, &osfs,
98 				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
101 		__u32 blk_size = osfs.os_bsize >> 10;
102 		__u64 result = osfs.os_bfree;
104 		while (blk_size >>= 1)
107 		rc = sprintf(buf, "%llu\n", result);
112 LUSTRE_RO_ATTR(kbytesfree);
/* sysfs: capacity available to unprivileged users in kilobytes (os_bavail);
 * same scaling scheme as kbytestotal_show() (loop body elided in this view).
 */
114 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
117 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
119 	struct obd_statfs osfs;
122 	rc = ll_statfs_internal(sbi->ll_sb, &osfs,
123 				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
126 		__u32 blk_size = osfs.os_bsize >> 10;
127 		__u64 result = osfs.os_bavail;
129 		while (blk_size >>= 1)
132 		rc = sprintf(buf, "%llu\n", result);
137 LUSTRE_RO_ATTR(kbytesavail);
/* sysfs: total inode count (os_files) via cached statfs. */
139 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
142 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
144 	struct obd_statfs osfs;
147 	rc = ll_statfs_internal(sbi->ll_sb, &osfs,
148 				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
151 	return sprintf(buf, "%llu\n", osfs.os_files);
155 LUSTRE_RO_ATTR(filestotal);
/* sysfs: free inode count (os_ffree) via cached statfs. */
157 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
160 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
162 	struct obd_statfs osfs;
165 	rc = ll_statfs_internal(sbi->ll_sb, &osfs,
166 				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
169 	return sprintf(buf, "%llu\n", osfs.os_ffree);
173 LUSTRE_RO_ATTR(filesfree);
/* sysfs: client type is always reported as "local client" on this platform. */
175 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
178 	return sprintf(buf, "local client\n");
180 LUSTRE_RO_ATTR(client_type);
/* sysfs: filesystem type name taken from the superblock's file_system_type. */
182 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
185 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
188 	return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name);
190 LUSTRE_RO_ATTR(fstype);
/* sysfs: this client mount's UUID string (ll_sb_uuid). */
192 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
195 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
198 	return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
200 LUSTRE_RO_ATTR(uuid);
/* debugfs "site": delegate to cl_site_stats_print() for the per-site
 * statistical counters (documented in struct cl_site).
 */
202 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
204 	struct super_block *sb = m->private;
207 	 * See description of statistical counters in struct cl_site, and
210 	return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
213 LPROC_SEQ_FOPS_RO(ll_site_stats);
/* sysfs max_read_ahead_mb: global readahead budget for the whole client.
 * show: read ra_max_pages under ll_lock and convert pages -> MB (mult is
 * pages-per-MB, i.e. 1 << (20 - PAGE_SHIFT)).
 * store: parse MB, convert to pages, reject anything above half of total RAM,
 * then publish under ll_lock. NOTE(review): error-return lines are elided in
 * this view — presumed -ERANGE/-EINVAL paths; confirm against full source.
 */
215 static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
216 				      struct attribute *attr, char *buf)
218 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
223 	spin_lock(&sbi->ll_lock);
224 	pages_number = sbi->ll_ra_info.ra_max_pages;
225 	spin_unlock(&sbi->ll_lock);
227 	mult = 1 << (20 - PAGE_SHIFT);
228 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
231 static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
232 				       struct attribute *attr,
236 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
239 	unsigned long pages_number;
241 	rc = kstrtoul(buffer, 10, &pages_number);
245 	pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
247 	if (pages_number > totalram_pages / 2) {
248 		CERROR("can't set file readahead more than %lu MB\n",
249 		       totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
253 	spin_lock(&sbi->ll_lock);
254 	sbi->ll_ra_info.ra_max_pages = pages_number;
255 	spin_unlock(&sbi->ll_lock);
259 LUSTRE_RW_ATTR(max_read_ahead_mb);
/* sysfs max_read_ahead_per_file_mb: per-file readahead cap.
 * store rejects values exceeding the global ra_max_pages budget set via
 * max_read_ahead_mb. Reads and writes of the field are serialized by ll_lock.
 * NOTE(review): the store path's unit conversion lines are elided in this
 * view — presumably MB -> pages as in max_read_ahead_mb_store; confirm.
 */
261 static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
262 					       struct attribute *attr,
265 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
270 	spin_lock(&sbi->ll_lock);
271 	pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
272 	spin_unlock(&sbi->ll_lock);
274 	mult = 1 << (20 - PAGE_SHIFT);
275 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
278 static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
279 						struct attribute *attr,
283 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
286 	unsigned long pages_number;
288 	rc = kstrtoul(buffer, 10, &pages_number);
292 	if (pages_number > sbi->ll_ra_info.ra_max_pages) {
293 		CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
294 		       sbi->ll_ra_info.ra_max_pages);
298 	spin_lock(&sbi->ll_lock);
299 	sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
300 	spin_unlock(&sbi->ll_lock);
304 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
/* sysfs max_read_ahead_whole_mb: threshold below which a whole file is read
 * ahead on first access. Capped at ra_max_pages_per_file because the
 * readahead algorithm would clamp it there anyway (see comment in store).
 */
306 static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
307 					    struct attribute *attr,
310 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
315 	spin_lock(&sbi->ll_lock);
316 	pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
317 	spin_unlock(&sbi->ll_lock);
319 	mult = 1 << (20 - PAGE_SHIFT);
320 	return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
323 static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
324 					     struct attribute *attr,
328 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
331 	unsigned long pages_number;
333 	rc = kstrtoul(buffer, 10, &pages_number);
337 	/* Cap this at the current max readahead window size, the readahead
338 	 * algorithm does this anyway so it's pointless to set it larger.
340 	if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
341 		CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
342 		       sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
346 	spin_lock(&sbi->ll_lock);
347 	sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
348 	spin_unlock(&sbi->ll_lock);
352 LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
/* debugfs max_cached_mb (read): dump client cache LRU accounting — user
 * refcount, max/used/unused MB and shrink count. Pages are converted to MB
 * with shift = 20 - PAGE_SHIFT.
 */
354 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
356 	struct super_block *sb = m->private;
357 	struct ll_sb_info *sbi = ll_s2sbi(sb);
358 	struct cl_client_cache *cache = sbi->ll_cache;
359 	int shift = 20 - PAGE_SHIFT;
363 	max_cached_mb = cache->ccc_lru_max >> shift;
364 	unused_mb = atomic_long_read(&cache->ccc_lru_left) >> shift;
367 		   "max_cached_mb: %ld\n"
370 		   "reclaim_count: %u\n",
371 		   atomic_read(&cache->ccc_users),
373 		   max_cached_mb - unused_mb,
375 		   cache->ccc_lru_shrinkers);
/* debugfs max_cached_mb (write): resize the client page cache LRU budget.
 * Accepts "max_cached_mb: N" or a bare number (lprocfs_find_named_value
 * skips the prefix if present). Growing is trivial (add slots to
 * ccc_lru_left); shrinking first reclaims from free slots via a cmpxchg
 * loop, then, if the mount is initialized, asks the OSCs to drop LRU slots
 * through obd_set_info_async(KEY_CACHE_LRU_SHRINK).
 * NOTE(review): many control-flow lines (returns, loop heads, error labels)
 * are elided in this view; the description above is inferred from the
 * visible statements and should be confirmed against the full source.
 */
379 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
380 					  const char __user *buffer,
381 					  size_t count, loff_t *off)
383 	struct super_block *sb = ((struct seq_file *)file->private_data)->private;
384 	struct ll_sb_info *sbi = ll_s2sbi(sb);
385 	struct cl_client_cache *cache = sbi->ll_cache;
396 	if (count >= sizeof(kernbuf))
399 	if (copy_from_user(kernbuf, buffer, count))
403 	mult = 1 << (20 - PAGE_SHIFT);
404 	buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
406 	rc = lprocfs_write_frac_u64_helper(buffer, count, &val, mult);
412 	pages_number = (long)val;
414 	if (pages_number < 0 || pages_number > totalram_pages) {
415 		CERROR("%s: can't set max cache more than %lu MB\n",
416 		       ll_get_fsname(sb, NULL, 0),
417 		       totalram_pages >> (20 - PAGE_SHIFT));
421 	spin_lock(&sbi->ll_lock);
422 	diff = pages_number - cache->ccc_lru_max;
423 	spin_unlock(&sbi->ll_lock);
425 	/* easy - add more LRU slots. */
427 		atomic_long_add(diff, &cache->ccc_lru_left);
432 	env = cl_env_get(&refcheck);
440 	/* reduce LRU budget from free slots. */
444 		ov = atomic_long_read(&cache->ccc_lru_left);
448 		nv = ov > diff ? ov - diff : 0;
449 		rc = atomic_long_cmpxchg(&cache->ccc_lru_left, ov, nv);
450 		if (likely(ov == rc)) {
460 		if (!sbi->ll_dt_exp) { /* being initialized */
465 		/* difficult - have to ask OSCs to drop LRU slots. */
467 		rc = obd_set_info_async(env, sbi->ll_dt_exp,
468 					sizeof(KEY_CACHE_LRU_SHRINK),
469 					KEY_CACHE_LRU_SHRINK,
470 					sizeof(tmp), &tmp, NULL);
474 	cl_env_put(env, &refcheck);
478 		spin_lock(&sbi->ll_lock);
479 		cache->ccc_lru_max = pages_number;
480 		spin_unlock(&sbi->ll_lock);
483 		atomic_long_add(nrpages, &cache->ccc_lru_left);
488 LPROC_SEQ_FOPS(ll_max_cached_mb);
/* sysfs checksum_pages: toggle the LL_SBI_CHECKSUM flag and propagate the
 * setting to the OSCs via obd_set_info_async(KEY_CHECKSUM). A failure to
 * update the OSCs is only warned about, not returned to the writer.
 */
490 static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
493 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
496 	return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
499 static ssize_t checksum_pages_store(struct kobject *kobj,
500 				    struct attribute *attr,
504 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
513 	rc = kstrtoul(buffer, 10, &val);
517 		sbi->ll_flags |= LL_SBI_CHECKSUM;
519 		sbi->ll_flags &= ~LL_SBI_CHECKSUM;
521 	rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
522 				KEY_CHECKSUM, sizeof(val), &val, NULL);
524 		CWARN("Failed to set OSC checksum flags: %d\n", rc);
528 LUSTRE_RW_ATTR(checksum_pages);
/* Common helper for the stats_track_{pid,ppid,gid} show callbacks: print the
 * tracked id when the current track type matches, "0 (all)" when everything
 * is tracked, otherwise "untracked".
 */
530 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
531 			      enum stats_track_type type)
533 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
536 	if (sbi->ll_stats_track_type == type)
537 		return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
538 	else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
539 		return sprintf(buf, "0 (all)\n");
541 	return sprintf(buf, "untracked\n");
/* Common helper for the stats_track_* store callbacks: parse a pid-like id;
 * a value of 0 selects STATS_TRACK_ALL, otherwise the given type is armed.
 * Clears the existing stats so the new filter starts from a clean slate.
 */
544 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
546 			      enum stats_track_type type)
548 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
553 	rc = kstrtoul(buffer, 10, &pid);
556 	sbi->ll_stats_track_id = pid;
558 		sbi->ll_stats_track_type = STATS_TRACK_ALL;
560 		sbi->ll_stats_track_type = type;
561 	lprocfs_clear_stats(sbi->ll_stats);
/* sysfs stats_track_pid: restrict stats collection to a single PID. */
565 static ssize_t stats_track_pid_show(struct kobject *kobj,
566 				    struct attribute *attr,
569 	return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
572 static ssize_t stats_track_pid_store(struct kobject *kobj,
573 				     struct attribute *attr,
577 	return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
579 LUSTRE_RW_ATTR(stats_track_pid);
/* sysfs stats_track_ppid: restrict stats collection to children of a PID. */
581 static ssize_t stats_track_ppid_show(struct kobject *kobj,
582 				     struct attribute *attr,
585 	return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
588 static ssize_t stats_track_ppid_store(struct kobject *kobj,
589 				      struct attribute *attr,
593 	return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
595 LUSTRE_RW_ATTR(stats_track_ppid);
/* sysfs stats_track_gid: restrict stats collection to a single GID. */
597 static ssize_t stats_track_gid_show(struct kobject *kobj,
598 				    struct attribute *attr,
601 	return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
604 static ssize_t stats_track_gid_store(struct kobject *kobj,
605 				     struct attribute *attr,
609 	return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
611 LUSTRE_RW_ATTR(stats_track_gid);
/* sysfs statahead_max: max number of in-flight statahead RPCs; values above
 * LL_SA_RPC_MAX are rejected with an error message.
 */
613 static ssize_t statahead_max_show(struct kobject *kobj,
614 				  struct attribute *attr,
617 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
620 	return sprintf(buf, "%u\n", sbi->ll_sa_max);
623 static ssize_t statahead_max_store(struct kobject *kobj,
624 				   struct attribute *attr,
628 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
633 	rc = kstrtoul(buffer, 10, &val);
637 	if (val <= LL_SA_RPC_MAX)
638 		sbi->ll_sa_max = val;
640 		CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
645 LUSTRE_RW_ATTR(statahead_max);
/* sysfs statahead_agl: enable/disable asynchronous glimpse lock (AGL) by
 * toggling LL_SBI_AGL_ENABLED.
 */
647 static ssize_t statahead_agl_show(struct kobject *kobj,
648 				  struct attribute *attr,
651 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
654 	return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
657 static ssize_t statahead_agl_store(struct kobject *kobj,
658 				   struct attribute *attr,
662 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
667 	rc = kstrtoul(buffer, 10, &val);
672 		sbi->ll_flags |= LL_SBI_AGL_ENABLED;
674 		sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
678 LUSTRE_RW_ATTR(statahead_agl);
/* debugfs statahead_stats: dump total/wrong statahead counts and AGL total. */
680 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
682 	struct super_block *sb = m->private;
683 	struct ll_sb_info *sbi = ll_s2sbi(sb);
686 		   "statahead total: %u\n"
687 		   "statahead wrong: %u\n"
689 		   atomic_read(&sbi->ll_sa_total),
690 		   atomic_read(&sbi->ll_sa_wrong),
691 		   atomic_read(&sbi->ll_agl_total));
695 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
/* sysfs lazystatfs: when set, statfs does not block waiting for unreachable
 * OSTs (toggles LL_SBI_LAZYSTATFS).
 */
697 static ssize_t lazystatfs_show(struct kobject *kobj,
698 			       struct attribute *attr,
701 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
704 	return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0);
707 static ssize_t lazystatfs_store(struct kobject *kobj,
708 				struct attribute *attr,
712 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
717 	rc = kstrtoul(buffer, 10, &val);
722 		sbi->ll_flags |= LL_SBI_LAZYSTATFS;
724 		sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
728 LUSTRE_RW_ATTR(lazystatfs);
/* sysfs max_easize (read-only): maximum MDS extended-attribute size, via
 * ll_get_max_mdsize().
 */
730 static ssize_t max_easize_show(struct kobject *kobj,
731 			       struct attribute *attr,
734 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
739 	rc = ll_get_max_mdsize(sbi, &ealen);
743 	return sprintf(buf, "%u\n", ealen);
745 LUSTRE_RO_ATTR(max_easize);
748  * Get default_easize.
750  * \see client_obd::cl_default_mds_easize
752  * \param[in] kobj	kernel object for sysfs tree
753  * \param[in] attr	attribute of this kernel object
754  * \param[in] buf	buffer to write data into
756  * \retval positive	\a count on success
757  * \retval negative	negated errno on failure
/* Reads the default MDS EA size via ll_get_default_mdsize() and prints it. */
759 static ssize_t default_easize_show(struct kobject *kobj,
760 				   struct attribute *attr,
763 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
768 	rc = ll_get_default_mdsize(sbi, &ealen);
772 	return sprintf(buf, "%u\n", ealen);
776  * Set default_easize.
778  * Range checking on the passed value is handled by
779  * ll_set_default_mdsize().
781  * \see client_obd::cl_default_mds_easize
783  * \param[in] kobj	kernel object for sysfs tree
784  * \param[in] attr	attribute of this kernel object
785  * \param[in] buffer	string passed from user space
786  * \param[in] count	\a buffer length
788  * \retval positive	\a count on success
789  * \retval negative	negated errno on failure
791 static ssize_t default_easize_store(struct kobject *kobj,
792 				    struct attribute *attr,
796 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
801 	rc = kstrtoul(buffer, 10, &val);
805 	rc = ll_set_default_mdsize(sbi, val);
811 LUSTRE_RW_ATTR(default_easize);
/* debugfs sbi_flags: print the symbolic name of each set ll_flags bit using
 * the LL_SBI_FLAGS string table; complains if the table is shorter than the
 * highest set bit (i.e. the table needs updating).
 */
813 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
815 	const char *str[] = LL_SBI_FLAGS;
816 	struct super_block *sb = m->private;
817 	int flags = ll_s2sbi(sb)->ll_flags;
821 		if (ARRAY_SIZE(str) <= i) {
822 			CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
823 			       ll_get_fsname(sb, NULL, 0));
828 			seq_printf(m, "%s ", str[i]);
836 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
/* sysfs xattr_cache: enable/disable the client xattr cache. Only 0/1 are
 * accepted, and enabling requires the mount-time LL_SBI_XATTR_CACHE support
 * flag to be present.
 */
838 static ssize_t xattr_cache_show(struct kobject *kobj,
839 				struct attribute *attr,
842 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
845 	return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
848 static ssize_t xattr_cache_store(struct kobject *kobj,
849 				 struct attribute *attr,
853 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
858 	rc = kstrtoul(buffer, 10, &val);
862 	if (val != 0 && val != 1)
865 	if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
868 	sbi->ll_xattr_cache_enabled = val;
872 LUSTRE_RW_ATTR(xattr_cache);
/* debugfs unstable_stats (read): report pages written back but not yet
 * committed on the server ("unstable"), in pages and MB, plus whether the
 * unstable check is enabled.
 */
874 static int ll_unstable_stats_seq_show(struct seq_file *m, void *v)
876 	struct super_block *sb = m->private;
877 	struct ll_sb_info *sbi = ll_s2sbi(sb);
878 	struct cl_client_cache *cache = sbi->ll_cache;
882 	pages = atomic_long_read(&cache->ccc_unstable_nr);
883 	mb = (pages * PAGE_SIZE) >> 20;
886 		   "unstable_check:     %8d\n"
887 		   "unstable_pages: %12ld\n"
888 		   "unstable_mb:        %8d\n",
889 		   cache->ccc_unstable_check, pages, mb);
/* debugfs unstable_stats (write): set the unstable_check boolean. Accepts
 * "unstable_check: N" or a bare number. The LRU spinlock is borrowed only to
 * publish the flag atomically with other cache users.
 */
894 static ssize_t ll_unstable_stats_seq_write(struct file *file,
895 					   const char __user *buffer,
896 					   size_t count, loff_t *off)
898 	struct super_block *sb = ((struct seq_file *)file->private_data)->private;
899 	struct ll_sb_info *sbi = ll_s2sbi(sb);
905 	if (count >= sizeof(kernbuf))
908 	if (copy_from_user(kernbuf, buffer, count))
912 	buffer += lprocfs_find_named_value(kernbuf, "unstable_check:", &count) -
914 	rc = lprocfs_write_helper(buffer, count, &val);
918 	/* borrow lru lock to set the value */
919 	spin_lock(&sbi->ll_cache->ccc_lru_lock);
920 	sbi->ll_cache->ccc_unstable_check = !!val;
921 	spin_unlock(&sbi->ll_cache->ccc_lru_lock);
925 LPROC_SEQ_FOPS(ll_unstable_stats);
/* debugfs root_squash: show "uid:gid" that root is mapped to, and accept the
 * same format on write via the shared lprocfs_wr_root_squash() helper.
 */
927 static int ll_root_squash_seq_show(struct seq_file *m, void *v)
929 	struct super_block *sb = m->private;
930 	struct ll_sb_info *sbi = ll_s2sbi(sb);
931 	struct root_squash_info *squash = &sbi->ll_squash;
933 	seq_printf(m, "%u:%u\n", squash->rsi_uid, squash->rsi_gid);
937 static ssize_t ll_root_squash_seq_write(struct file *file,
938 					const char __user *buffer,
939 					size_t count, loff_t *off)
941 	struct seq_file *m = file->private_data;
942 	struct super_block *sb = m->private;
943 	struct ll_sb_info *sbi = ll_s2sbi(sb);
944 	struct root_squash_info *squash = &sbi->ll_squash;
946 	return lprocfs_wr_root_squash(buffer, count, squash,
947 				      ll_get_fsname(sb, NULL, 0));
949 LPROC_SEQ_FOPS(ll_root_squash);
/* debugfs nosquash_nids: list the NIDs exempt from root squashing (or
 * "NONE"), protected by rsi_sem. The write path parses the NID list with
 * lprocfs_wr_nosquash_nids() and then recomputes the squash state so the new
 * exemptions take effect immediately.
 */
951 static int ll_nosquash_nids_seq_show(struct seq_file *m, void *v)
953 	struct super_block *sb = m->private;
954 	struct ll_sb_info *sbi = ll_s2sbi(sb);
955 	struct root_squash_info *squash = &sbi->ll_squash;
958 	down_read(&squash->rsi_sem);
959 	if (!list_empty(&squash->rsi_nosquash_nids)) {
960 		len = cfs_print_nidlist(m->buf + m->count, m->size - m->count,
961 					&squash->rsi_nosquash_nids);
965 		seq_puts(m, "NONE\n");
967 	up_read(&squash->rsi_sem);
972 static ssize_t ll_nosquash_nids_seq_write(struct file *file,
973 					  const char __user *buffer,
974 					  size_t count, loff_t *off)
976 	struct seq_file *m = file->private_data;
977 	struct super_block *sb = m->private;
978 	struct ll_sb_info *sbi = ll_s2sbi(sb);
979 	struct root_squash_info *squash = &sbi->ll_squash;
982 	rc = lprocfs_wr_nosquash_nids(buffer, count, squash,
983 				      ll_get_fsname(sb, NULL, 0));
987 	ll_compute_rootsquash_state(sbi);
992 LPROC_SEQ_FOPS(ll_nosquash_nids);
/* debugfs entries registered per-mount via ldebugfs_add_vars(); the seq_file
 * implementations live earlier in this file.
 */
994 static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
995 	/* { "mntpt_path",   ll_rd_path,             0, 0 }, */
996 	{ "site",	  &ll_site_stats_fops,    NULL, 0 },
997 	/* { "filegroups",   lprocfs_rd_filegroups,  0, 0 }, */
998 	{ "max_cached_mb",    &ll_max_cached_mb_fops, NULL },
999 	{ "statahead_stats",  &ll_statahead_stats_fops, NULL, 0 },
1000 	{ "unstable_stats",   &ll_unstable_stats_fops, NULL },
1001 	{ "sbi_flags",	      &ll_sbi_flags_fops, NULL, 0 },
1002 	{ .name =		"root_squash",
1003 	  .fops =		&ll_root_squash_fops		},
1004 	{ .name =		"nosquash_nids",
1005 	  .fops =		&ll_nosquash_nids_fops		},
/* Buffer bound for the per-mount debugfs directory name built in
 * ldebugfs_register_mountpoint(); #undef'd after that function.
 */
1009 #define MAX_STRING_SIZE 128
/* sysfs attributes exposed under the per-mount kobject (see llite_ktype). */
1011 static struct attribute *llite_attrs[] = {
1012 	&lustre_attr_blocksize.attr,
1013 	&lustre_attr_kbytestotal.attr,
1014 	&lustre_attr_kbytesfree.attr,
1015 	&lustre_attr_kbytesavail.attr,
1016 	&lustre_attr_filestotal.attr,
1017 	&lustre_attr_filesfree.attr,
1018 	&lustre_attr_client_type.attr,
1019 	&lustre_attr_fstype.attr,
1020 	&lustre_attr_uuid.attr,
1021 	&lustre_attr_max_read_ahead_mb.attr,
1022 	&lustre_attr_max_read_ahead_per_file_mb.attr,
1023 	&lustre_attr_max_read_ahead_whole_mb.attr,
1024 	&lustre_attr_checksum_pages.attr,
1025 	&lustre_attr_stats_track_pid.attr,
1026 	&lustre_attr_stats_track_ppid.attr,
1027 	&lustre_attr_stats_track_gid.attr,
1028 	&lustre_attr_statahead_max.attr,
1029 	&lustre_attr_statahead_agl.attr,
1030 	&lustre_attr_lazystatfs.attr,
1031 	&lustre_attr_max_easize.attr,
1032 	&lustre_attr_default_easize.attr,
1033 	&lustre_attr_xattr_cache.attr,
/* kobject release: signal ldebugfs_unregister_mountpoint() that the kobject
 * refcount has dropped to zero so teardown may proceed.
 */
1037 static void llite_sb_release(struct kobject *kobj)
1039 	struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
1041 	complete(&sbi->ll_kobj_unregister);
/* ktype tying the attribute list, the shared lustre sysfs ops and the
 * release callback to the per-mount kobject.
 */
1044 static struct kobj_type llite_ktype = {
1045 	.default_attrs	= llite_attrs,
1046 	.sysfs_ops	= &lustre_sysfs_ops,
1047 	.release	= llite_sb_release,
/* Table mapping each LPROC_LL_* opcode to its counter type (plain register,
 * bytes or pages, optionally with avg/min/max) and the name shown in the
 * "stats" file. Indexed during counter init in ldebugfs_register_mountpoint().
 */
1050 static const struct llite_file_opcode {
1054 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
1055 	/* file operation */
1056 	{ LPROC_LL_DIRTY_HITS,	   LPROCFS_TYPE_REGS, "dirty_pages_hits" },
1057 	{ LPROC_LL_DIRTY_MISSES,   LPROCFS_TYPE_REGS, "dirty_pages_misses" },
1058 	{ LPROC_LL_READ_BYTES,	   LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1060 	{ LPROC_LL_WRITE_BYTES,    LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1062 	{ LPROC_LL_BRW_READ,	   LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
1064 	{ LPROC_LL_BRW_WRITE,	   LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_PAGES,
1066 	{ LPROC_LL_OSC_READ,	   LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1068 	{ LPROC_LL_OSC_WRITE,	   LPROCFS_CNTR_AVGMINMAX | LPROCFS_TYPE_BYTES,
1070 	{ LPROC_LL_IOCTL,	   LPROCFS_TYPE_REGS, "ioctl" },
1071 	{ LPROC_LL_OPEN,	   LPROCFS_TYPE_REGS, "open" },
1072 	{ LPROC_LL_RELEASE,	   LPROCFS_TYPE_REGS, "close" },
1073 	{ LPROC_LL_MAP,		   LPROCFS_TYPE_REGS, "mmap" },
1074 	{ LPROC_LL_LLSEEK,	   LPROCFS_TYPE_REGS, "seek" },
1075 	{ LPROC_LL_FSYNC,	   LPROCFS_TYPE_REGS, "fsync" },
1076 	{ LPROC_LL_READDIR,	   LPROCFS_TYPE_REGS, "readdir" },
1077 	/* inode operation */
1078 	{ LPROC_LL_SETATTR,	   LPROCFS_TYPE_REGS, "setattr" },
1079 	{ LPROC_LL_TRUNC,	   LPROCFS_TYPE_REGS, "truncate" },
1080 	{ LPROC_LL_FLOCK,	   LPROCFS_TYPE_REGS, "flock" },
1081 	{ LPROC_LL_GETATTR,	   LPROCFS_TYPE_REGS, "getattr" },
1082 	/* dir inode operation */
1083 	{ LPROC_LL_CREATE,	   LPROCFS_TYPE_REGS, "create" },
1084 	{ LPROC_LL_LINK,	   LPROCFS_TYPE_REGS, "link" },
1085 	{ LPROC_LL_UNLINK,	   LPROCFS_TYPE_REGS, "unlink" },
1086 	{ LPROC_LL_SYMLINK,	   LPROCFS_TYPE_REGS, "symlink" },
1087 	{ LPROC_LL_MKDIR,	   LPROCFS_TYPE_REGS, "mkdir" },
1088 	{ LPROC_LL_RMDIR,	   LPROCFS_TYPE_REGS, "rmdir" },
1089 	{ LPROC_LL_MKNOD,	   LPROCFS_TYPE_REGS, "mknod" },
1090 	{ LPROC_LL_RENAME,	   LPROCFS_TYPE_REGS, "rename" },
1091 	/* special inode operation */
1092 	{ LPROC_LL_STAFS,	   LPROCFS_TYPE_REGS, "statfs" },
1093 	{ LPROC_LL_ALLOC_INODE,    LPROCFS_TYPE_REGS, "alloc_inode" },
1094 	{ LPROC_LL_SETXATTR,	   LPROCFS_TYPE_REGS, "setxattr" },
1095 	{ LPROC_LL_GETXATTR,	   LPROCFS_TYPE_REGS, "getxattr" },
1096 	{ LPROC_LL_GETXATTR_HITS,  LPROCFS_TYPE_REGS, "getxattr_hits" },
1097 	{ LPROC_LL_LISTXATTR,	   LPROCFS_TYPE_REGS, "listxattr" },
1098 	{ LPROC_LL_REMOVEXATTR,    LPROCFS_TYPE_REGS, "removexattr" },
1099 	{ LPROC_LL_INODE_PERM,	   LPROCFS_TYPE_REGS, "inode_permission" },
/* Record one file-operation sample into ll_stats, but only if the current
 * task matches the active tracking filter: everything (TRACK_ALL), a single
 * PID, the tracked parent PID, or the tracked GID.
 */
1102 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
1106 	if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
1107 		lprocfs_counter_add(sbi->ll_stats, op, count);
1108 	else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
1109 		 sbi->ll_stats_track_id == current->pid)
1110 		lprocfs_counter_add(sbi->ll_stats, op, count);
1111 	else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
1112 		 sbi->ll_stats_track_id == current->real_parent->pid)
1113 		lprocfs_counter_add(sbi->ll_stats, op, count);
1114 	else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
1115 		 sbi->ll_stats_track_id ==
1116 			from_kgid(&init_user_ns, current_gid()))
1117 		lprocfs_counter_add(sbi->ll_stats, op, count);
1119 EXPORT_SYMBOL(ll_stats_ops_tally);
/* Human-readable labels for the RA_STAT_* readahead counters, indexed by the
 * enum value; used when initializing ll_ra_stats counters.
 */
1121 static const char *ra_stat_string[] = {
1122 	[RA_STAT_HIT] = "hits",
1123 	[RA_STAT_MISS] = "misses",
1124 	[RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
1125 	[RA_STAT_MISS_IN_WINDOW] = "miss inside window",
1126 	[RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
1127 	[RA_STAT_FAILED_MATCH] = "failed lock match",
1128 	[RA_STAT_DISCARDED] = "read but discarded",
1129 	[RA_STAT_ZERO_LEN] = "zero length file",
1130 	[RA_STAT_ZERO_WINDOW] = "zero size window",
1131 	[RA_STAT_EOF] = "read-ahead to EOF",
1132 	[RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
1133 	[RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
1134 	[RA_STAT_FAILED_REACH_END] = "failed to reach end"
/* Create the per-mount debugfs directory ("<fsname>-<sb ptr>") and sysfs
 * kobject: registers the dump_page_cache / extents / offset stats seq files,
 * allocates and initializes the file-operation and readahead counter sets,
 * adds the lprocfs_llite_obd_vars entries, and symlinks the MDC/OSC obd
 * kobjects. Failures of individual debugfs files are only warned about;
 * hard failures unwind via the cleanup at the end (debugfs dir + both stats
 * sets freed). NOTE(review): several error-path lines (gotos, returns) are
 * elided in this view; the unwind description is inferred from the visible
 * cleanup block and should be confirmed against the full source.
 */
1137 int ldebugfs_register_mountpoint(struct dentry *parent,
1138 				 struct super_block *sb, char *osc, char *mdc)
1140 	struct lustre_sb_info *lsi = s2lsi(sb);
1141 	struct ll_sb_info *sbi = ll_s2sbi(sb);
1142 	struct obd_device *obd;
1144 	char name[MAX_STRING_SIZE + 1], *ptr;
1145 	int err, id, len, rc;
1147 	name[MAX_STRING_SIZE] = '\0';
	/* Strip a trailing "-client" from the profile name for display. */
1154 	len = strlen(lsi->lsi_lmd->lmd_profile);
1155 	ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
1156 	if (ptr && (strcmp(ptr, "-client") == 0))
1160 	snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
1161 		 lsi->lsi_lmd->lmd_profile, sb);
1163 	dir = ldebugfs_register(name, parent, NULL, NULL);
1164 	if (IS_ERR_OR_NULL(dir)) {
1165 		err = dir ? PTR_ERR(dir) : -ENOMEM;
1166 		sbi->ll_debugfs_entry = NULL;
1169 	sbi->ll_debugfs_entry = dir;
1171 	rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
1172 				 &vvp_dump_pgcache_file_ops, sbi);
1174 		CWARN("Error adding the dump_page_cache file\n");
1176 	rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1177 				 &ll_rw_extents_stats_fops, sbi);
1179 		CWARN("Error adding the extent_stats file\n");
1181 	rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1182 				 "extents_stats_per_process",
1183 				 0644, &ll_rw_extents_stats_pp_fops, sbi);
1185 		CWARN("Error adding the extents_stats_per_process file\n");
1187 	rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1188 				 &ll_rw_offset_stats_fops, sbi);
1190 		CWARN("Error adding the offset_stats file\n");
1192 	/* File operations stats */
1193 	sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1194 					    LPROCFS_STATS_FLAG_NONE);
1195 	if (!sbi->ll_stats) {
1199 	/* do counter init */
1200 	for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1201 		__u32 type = llite_opcode_table[id].type;
		/* pick the units string ("reqs"/"bytes"/"pages") by type;
		 * the assignments are elided in this view.
		 */
1204 		if (type & LPROCFS_TYPE_REGS)
1206 		else if (type & LPROCFS_TYPE_BYTES)
1208 		else if (type & LPROCFS_TYPE_PAGES)
1210 		lprocfs_counter_init(sbi->ll_stats,
1211 				     llite_opcode_table[id].opcode,
1212 				     (type & LPROCFS_CNTR_AVGMINMAX),
1213 				     llite_opcode_table[id].opname, ptr);
1215 	err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1220 	sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1221 					       LPROCFS_STATS_FLAG_NONE);
1222 	if (!sbi->ll_ra_stats) {
1227 	for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1228 		lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1229 				     ra_stat_string[id], "pages");
1231 	err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1236 	err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
1237 				lprocfs_llite_obd_vars, sb);
1241 	sbi->ll_kobj.kset = llite_kset;
1242 	init_completion(&sbi->ll_kobj_unregister);
1243 	err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL,
1249 	obd = class_name2obd(mdc);
1251 	err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1252 				obd->obd_type->typ_name);
1257 	obd = class_name2obd(osc);
1259 	err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1260 				obd->obd_type->typ_name);
	/* error unwind: tear down everything created above */
1263 		ldebugfs_remove(&sbi->ll_debugfs_entry);
1264 		lprocfs_free_stats(&sbi->ll_ra_stats);
1265 		lprocfs_free_stats(&sbi->ll_stats);
/* Tear down what ldebugfs_register_mountpoint() created: remove the debugfs
 * dir, drop the kobject and wait for its release callback to complete, then
 * free both counter sets. No-op if registration never happened.
 */
1270 void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
1272 	if (sbi->ll_debugfs_entry) {
1273 		ldebugfs_remove(&sbi->ll_debugfs_entry);
1274 		kobject_put(&sbi->ll_kobj);
1275 		wait_for_completion(&sbi->ll_kobj_unregister);
1276 		lprocfs_free_stats(&sbi->ll_ra_stats);
1277 		lprocfs_free_stats(&sbi->ll_stats);
1281 #undef MAX_STRING_SIZE
/* Integer percentage a/b, guarding against division by zero. Classic macro
 * hazard: arguments are unparenthesized and evaluated twice — safe for the
 * simple variable operands used below, but do not pass expressions with
 * side effects.
 */
1283 #define pct(a, b) (b ? a * 100 / b : 0)
/* Print one process slot ("which") of the r/w extent-size histogram: for
 * each power-of-two bucket, the call count, percentage and cumulative
 * percentage for reads and writes side by side. "unitp" walks K->M->G...
 * as bucket boundaries cross 1024 of the current unit. Stops early once
 * both cumulative counts reach their totals.
 * NOTE(review): the lines computing start/read_cum/write_cum and advancing
 * unitp are elided in this view; behavior described is inferred from the
 * visible printf and loop structure.
 */
1285 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1286 				    struct seq_file *seq, int which)
1288 	unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1289 	unsigned long start, end, r, w;
1290 	char *unitp = "KMGTPEZY";
1292 	struct per_process_info *pp_info = &io_extents->pp_extents[which];
1298 	for (i = 0; i < LL_HIST_MAX; i++) {
1299 		read_tot += pp_info->pp_r_hist.oh_buckets[i];
1300 		write_tot += pp_info->pp_w_hist.oh_buckets[i];
1303 	for (i = 0; i < LL_HIST_MAX; i++) {
1304 		r = pp_info->pp_r_hist.oh_buckets[i];
1305 		w = pp_info->pp_w_hist.oh_buckets[i];
1308 		end = 1 << (i + LL_HIST_START - units);
1309 		seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu  | %14lu %4lu %4lu\n",
1310 			   start, *unitp, end, *unitp,
1311 			   (i == LL_HIST_MAX - 1) ? '+' : ' ',
1312 			   r, pct(r, read_tot), pct(read_cum, read_tot),
1313 			   w, pct(w, write_tot), pct(write_cum, write_tot));
1315 		if (start == 1 << 10) {
1320 		if (read_cum == read_tot && write_cum == write_tot)
/* debugfs extents_stats_per_process (read): snapshot time, column headers,
 * then one histogram section per tracked PID slot (pid != 0), all under
 * ll_pp_extent_lock. Prints a "disabled" hint when rw stats are off.
 */
1327 	struct timespec64 now;
1328 	struct ll_sb_info *sbi = seq->private;
1329 	struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1332 	ktime_get_real_ts64(&now);
1334 	if (!sbi->ll_rw_stats_on) {
1335 		seq_printf(seq, "disabled\n"
1336 			   "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1339 	seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.usecs)\n",
1340 		   (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1341 	seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1342 	seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1343 		   "extents", "calls", "%", "cum%",
1344 		   "calls", "%", "cum%");
1345 	spin_lock(&sbi->ll_pp_extent_lock);
1346 	for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1347 		if (io_extents->pp_extents[k].pid != 0) {
1348 			seq_printf(seq, "\nPID: %d\n",
1349 				   io_extents->pp_extents[k].pid);
1350 			ll_display_extents_info(io_extents, seq, k);
1353 	spin_unlock(&sbi->ll_pp_extent_lock);
/* debugfs extents_stats_per_process (write): any numeric write toggles
 * collection (0 disables, non-zero enables); the strings "disabled" /
 * "Disabled" also disable it. Enabling/clearing resets every per-process
 * histogram slot under ll_pp_extent_lock.
 */
1357 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1358 						const char __user *buf,
1362 	struct seq_file *seq = file->private_data;
1363 	struct ll_sb_info *sbi = seq->private;
1364 	struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1366 	int value = 1, rc = 0;
1371 	rc = lprocfs_write_helper(buf, len, &value);
1372 	if (rc < 0 && len < 16) {
	/* not a plain number: try the textual "disabled" forms */
1375 		if (copy_from_user(kernbuf, buf, len))
1379 		if (kernbuf[len - 1] == '\n')
1380 			kernbuf[len - 1] = 0;
1382 		if (strcmp(kernbuf, "disabled") == 0 ||
1383 		    strcmp(kernbuf, "Disabled") == 0)
1388 		sbi->ll_rw_stats_on = 0;
1390 	sbi->ll_rw_stats_on = 1;
1392 	spin_lock(&sbi->ll_pp_extent_lock);
1393 	for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1394 		io_extents->pp_extents[i].pid = 0;
1395 		lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1396 		lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1398 	spin_unlock(&sbi->ll_pp_extent_lock);
1402 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
/* debugfs extents_stats (read): like the per-process variant but prints only
 * the aggregate slot (index LL_PROCESS_HIST_MAX), under ll_lock.
 */
1404 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1406 	struct timespec64 now;
1407 	struct ll_sb_info *sbi = seq->private;
1408 	struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1410 	ktime_get_real_ts64(&now);
1412 	if (!sbi->ll_rw_stats_on) {
1413 		seq_printf(seq, "disabled\n"
1414 			   "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1417 	seq_printf(seq, "snapshot_time:         %llu.%09lu (secs.usecs)\n",
1418 		   (u64)now.tv_sec, (unsigned long)now.tv_nsec);
1420 	seq_printf(seq, "%15s %19s       | %20s\n", " ", "read", "write");
1421 	seq_printf(seq, "%13s   %14s %4s %4s  | %14s %4s %4s\n",
1422 		   "extents", "calls", "%", "cum%",
1423 		   "calls", "%", "cum%");
1424 	spin_lock(&sbi->ll_lock);
1425 	ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1426 	spin_unlock(&sbi->ll_lock);
1431 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1432 const char __user *buf,
1433 size_t len, loff_t *off)
1435 struct seq_file *seq = file->private_data;
1436 struct ll_sb_info *sbi = seq->private;
1437 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1439 int value = 1, rc = 0;
1444 rc = lprocfs_write_helper(buf, len, &value);
1445 if (rc < 0 && len < 16) {
1448 if (copy_from_user(kernbuf, buf, len))
1452 if (kernbuf[len - 1] == '\n')
1453 kernbuf[len - 1] = 0;
1455 if (strcmp(kernbuf, "disabled") == 0 ||
1456 strcmp(kernbuf, "Disabled") == 0)
1461 sbi->ll_rw_stats_on = 0;
1463 sbi->ll_rw_stats_on = 1;
1465 spin_lock(&sbi->ll_pp_extent_lock);
1466 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1467 io_extents->pp_extents[i].pid = 0;
1468 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1469 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1471 spin_unlock(&sbi->ll_pp_extent_lock);
1476 LPROC_SEQ_FOPS(ll_rw_extents_stats);
1478 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1479 struct ll_file_data *file, loff_t pos,
1480 size_t count, int rw)
1483 struct ll_rw_process_info *process;
1484 struct ll_rw_process_info *offset;
1485 int *off_count = &sbi->ll_rw_offset_entry_count;
1486 int *process_count = &sbi->ll_offset_process_count;
1487 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1489 if (!sbi->ll_rw_stats_on)
1491 process = sbi->ll_rw_process_info;
1492 offset = sbi->ll_rw_offset_info;
1494 spin_lock(&sbi->ll_pp_extent_lock);
1495 /* Extent statistics */
1496 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1497 if (io_extents->pp_extents[i].pid == pid) {
1505 sbi->ll_extent_process_count =
1506 (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1507 cur = sbi->ll_extent_process_count;
1508 io_extents->pp_extents[cur].pid = pid;
1509 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1510 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
1513 for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1514 (i < (LL_HIST_MAX - 1)); i++)
1517 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1518 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1520 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1521 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1523 spin_unlock(&sbi->ll_pp_extent_lock);
1525 spin_lock(&sbi->ll_process_lock);
1526 /* Offset statistics */
1527 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1528 if (process[i].rw_pid == pid) {
1529 if (process[i].rw_last_file != file) {
1530 process[i].rw_range_start = pos;
1531 process[i].rw_last_file_pos = pos + count;
1532 process[i].rw_smallest_extent = count;
1533 process[i].rw_largest_extent = count;
1534 process[i].rw_offset = 0;
1535 process[i].rw_last_file = file;
1536 spin_unlock(&sbi->ll_process_lock);
1539 if (process[i].rw_last_file_pos != pos) {
1541 (*off_count + 1) % LL_OFFSET_HIST_MAX;
1542 offset[*off_count].rw_op = process[i].rw_op;
1543 offset[*off_count].rw_pid = pid;
1544 offset[*off_count].rw_range_start =
1545 process[i].rw_range_start;
1546 offset[*off_count].rw_range_end =
1547 process[i].rw_last_file_pos;
1548 offset[*off_count].rw_smallest_extent =
1549 process[i].rw_smallest_extent;
1550 offset[*off_count].rw_largest_extent =
1551 process[i].rw_largest_extent;
1552 offset[*off_count].rw_offset =
1553 process[i].rw_offset;
1554 process[i].rw_op = rw;
1555 process[i].rw_range_start = pos;
1556 process[i].rw_smallest_extent = count;
1557 process[i].rw_largest_extent = count;
1558 process[i].rw_offset = pos -
1559 process[i].rw_last_file_pos;
1561 if (process[i].rw_smallest_extent > count)
1562 process[i].rw_smallest_extent = count;
1563 if (process[i].rw_largest_extent < count)
1564 process[i].rw_largest_extent = count;
1565 process[i].rw_last_file_pos = pos + count;
1566 spin_unlock(&sbi->ll_process_lock);
1570 *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1571 process[*process_count].rw_pid = pid;
1572 process[*process_count].rw_op = rw;
1573 process[*process_count].rw_range_start = pos;
1574 process[*process_count].rw_last_file_pos = pos + count;
1575 process[*process_count].rw_smallest_extent = count;
1576 process[*process_count].rw_largest_extent = count;
1577 process[*process_count].rw_offset = 0;
1578 process[*process_count].rw_last_file = file;
1579 spin_unlock(&sbi->ll_process_lock);
1582 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1584 struct timespec64 now;
1585 struct ll_sb_info *sbi = seq->private;
1586 struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1587 struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1590 ktime_get_real_ts64(&now);
1592 if (!sbi->ll_rw_stats_on) {
1593 seq_printf(seq, "disabled\n"
1594 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1597 spin_lock(&sbi->ll_process_lock);
1599 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1600 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1601 seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1602 "R/W", "PID", "RANGE START", "RANGE END",
1603 "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1604 /* We stored the discontiguous offsets here; print them first */
1605 for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1606 if (offset[i].rw_pid != 0)
1608 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1609 offset[i].rw_op == READ ? 'R' : 'W',
1611 offset[i].rw_range_start,
1612 offset[i].rw_range_end,
1613 (unsigned long)offset[i].rw_smallest_extent,
1614 (unsigned long)offset[i].rw_largest_extent,
1615 offset[i].rw_offset);
1617 /* Then print the current offsets for each process */
1618 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1619 if (process[i].rw_pid != 0)
1621 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1622 process[i].rw_op == READ ? 'R' : 'W',
1624 process[i].rw_range_start,
1625 process[i].rw_last_file_pos,
1626 (unsigned long)process[i].rw_smallest_extent,
1627 (unsigned long)process[i].rw_largest_extent,
1628 process[i].rw_offset);
1630 spin_unlock(&sbi->ll_process_lock);
1635 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1636 const char __user *buf,
1637 size_t len, loff_t *off)
1639 struct seq_file *seq = file->private_data;
1640 struct ll_sb_info *sbi = seq->private;
1641 struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1642 struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1643 int value = 1, rc = 0;
1648 rc = lprocfs_write_helper(buf, len, &value);
1650 if (rc < 0 && len < 16) {
1653 if (copy_from_user(kernbuf, buf, len))
1657 if (kernbuf[len - 1] == '\n')
1658 kernbuf[len - 1] = 0;
1660 if (strcmp(kernbuf, "disabled") == 0 ||
1661 strcmp(kernbuf, "Disabled") == 0)
1666 sbi->ll_rw_stats_on = 0;
1668 sbi->ll_rw_stats_on = 1;
1670 spin_lock(&sbi->ll_process_lock);
1671 sbi->ll_offset_process_count = 0;
1672 sbi->ll_rw_offset_entry_count = 0;
1673 memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1674 LL_PROCESS_HIST_MAX);
1675 memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1676 LL_OFFSET_HIST_MAX);
1677 spin_unlock(&sbi->ll_process_lock);
1682 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1684 void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
1686 lvars->obd_vars = lprocfs_llite_obd_vars;