4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/obdclass/lprocfs_status.c
34 * Author: Hariharan Thantry <thantry@users.sourceforge.net>
37 #define DEBUG_SUBSYSTEM S_CLASS
39 #include <obd_class.h>
40 #include <lprocfs_status.h>
41 #include <uapi/linux/lustre/lustre_idl.h>
42 #include <linux/seq_file.h>
43 #include <linux/ctype.h>
/*
 * Human-readable names for the OBD_CONNECT_* flag bits, indexed by bit
 * position.  The flags2str helpers below iterate this table until a
 * NULL entry -- presumably it is NULL-terminated; confirm in the full
 * table definition (most entries are not visible in this extract).
 */
45 static const char * const obd_connect_names[] = {
59 "join_file(obsolete)",
63 "remote_client_by_force",
75 "alt_checksum_algorithm",
/*
 * Render the bits set in @flags as a @sep-separated list of names from
 * obd_connect_names[] into @page, bounded by @count bytes.
 * NOTE(review): the return statement is not visible here; the visible
 * accumulation suggests the byte count "ret" is returned -- confirm.
 */
113 int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
118 for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
120 ret += snprintf(page + ret, count - ret, "%s%s",
121 ret ? sep : "", obd_connect_names[i]);
/* any bits above the last named one are lumped together as "unknown" */
123 if (flags & ~(mask - 1))
124 ret += snprintf(page + ret, count - ret,
125 "%sunknown flags %#llx",
126 ret ? sep : "", flags & ~(mask - 1));
129 EXPORT_SYMBOL(obd_connect_flags2str);
/*
 * Dump the negotiated obd_connect_data @ocd into seq_file @m in
 * YAML-like form.  Each optional field is printed only when the
 * corresponding OBD_CONNECT_* feature bit was negotiated, since the
 * field carries no meaning otherwise.
 */
131 static void obd_connect_data_seqprint(struct seq_file *m,
132 struct obd_connect_data *ocd)
137 flags = ocd->ocd_connect_flags;
139 seq_printf(m, " connect_data:\n"
142 ocd->ocd_connect_flags,
144 if (flags & OBD_CONNECT_VERSION)
145 seq_printf(m, " target_version: %u.%u.%u.%u\n",
146 OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
147 OBD_OCD_VERSION_MINOR(ocd->ocd_version),
148 OBD_OCD_VERSION_PATCH(ocd->ocd_version),
149 OBD_OCD_VERSION_FIX(ocd->ocd_version));
150 if (flags & OBD_CONNECT_MDS)
151 seq_printf(m, " mdt_index: %d\n", ocd->ocd_group);
152 if (flags & OBD_CONNECT_GRANT)
153 seq_printf(m, " initial_grant: %d\n", ocd->ocd_grant);
154 if (flags & OBD_CONNECT_INDEX)
155 seq_printf(m, " target_index: %u\n", ocd->ocd_index);
156 if (flags & OBD_CONNECT_BRW_SIZE)
157 seq_printf(m, " max_brw_size: %d\n", ocd->ocd_brw_size);
158 if (flags & OBD_CONNECT_IBITS)
159 seq_printf(m, " ibits_known: %llx\n",
160 ocd->ocd_ibits_known);
161 if (flags & OBD_CONNECT_GRANT_PARAM)
162 seq_printf(m, " grant_block_size: %d\n"
163 " grant_inode_size: %d\n"
164 " grant_extent_overhead: %d\n",
167 ocd->ocd_grant_extent);
168 if (flags & OBD_CONNECT_TRANSNO)
169 seq_printf(m, " first_transno: %llx\n",
171 if (flags & OBD_CONNECT_CKSUM)
172 seq_printf(m, " cksum_types: %#x\n",
173 ocd->ocd_cksum_types);
174 if (flags & OBD_CONNECT_MAX_EASIZE)
175 seq_printf(m, " max_easize: %d\n", ocd->ocd_max_easize);
176 if (flags & OBD_CONNECT_MAXBYTES)
177 seq_printf(m, " max_object_bytes: %llx\n",
179 if (flags & OBD_CONNECT_MULTIMODRPCS)
180 seq_printf(m, " max_mod_rpcs: %hu\n",
181 ocd->ocd_maxmodrpcs);
/*
 * Format @val scaled down by @mult into @buffer (at most @count bytes)
 * as "integer[.fraction]\n" with at most two fractional digits and
 * trailing zeroes trimmed, per the examples in the comment below.
 * NOTE(review): the return statement is outside this extract;
 * presumably the number of bytes written (prtn) -- confirm.
 */
184 int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
187 long decimal_val, frac_val;
193 decimal_val = val / mult;
194 prtn = snprintf(buffer, count, "%ld", decimal_val);
195 frac_val = val % mult;
/* only append a fraction when it is non-zero and there is room */
197 if (prtn < (count - 4) && frac_val > 0) {
199 int i, temp_mult = 1, frac_bits = 0;
201 temp_frac = frac_val * 10;
202 buffer[prtn++] = '.';
203 while (frac_bits < 2 && (temp_frac / mult) < 1) {
204 /* only reserved 2 bits fraction */
205 buffer[prtn++] = '0';
210 * Need to think these cases :
211 * 1. #echo x.00 > /sys/xxx output result : x
212 * 2. #echo x.0x > /sys/xxx output result : x.0x
213 * 3. #echo x.x0 > /sys/xxx output result : x.x
214 * 4. #echo x.xx > /sys/xxx output result : x.xx
215 * Only reserved 2 bits fraction.
217 for (i = 0; i < (5 - prtn); i++)
220 frac_bits = min((int)count - prtn, 3 - frac_bits);
221 prtn += snprintf(buffer + prtn, frac_bits, "%ld",
222 frac_val * temp_mult / mult);
/* walk back over trailing zeroes (and a bare '.') before the newline */
225 while (buffer[prtn] < '1' || buffer[prtn] > '9') {
227 if (buffer[prtn] == '.') {
234 buffer[prtn++] = '\n';
237 EXPORT_SYMBOL(lprocfs_read_frac_helper);
/*
 * Parse a user-space decimal string "D[.F]" of at most
 * sizeof(kernbuf)-1 bytes and store D*mult plus the scaled fractional
 * part into *val.  The fraction is truncated to 5 digits.
 */
239 int lprocfs_write_frac_helper(const char __user *buffer, unsigned long count,
242 char kernbuf[20], *end, *pbuf;
244 if (count > (sizeof(kernbuf) - 1))
247 if (copy_from_user(kernbuf, buffer, count))
250 kernbuf[count] = '\0';
257 *val = (int)simple_strtoul(pbuf, &end, 10) * mult;
/* optional fractional part after the decimal point */
261 if (end && *end == '.') {
262 int temp_val, pow = 1;
266 if (strlen(pbuf) > 5)
267 pbuf[5] = '\0'; /*only allow 5bits fractional*/
269 temp_val = (int)simple_strtoul(pbuf, &end, 10) * mult;
/* scale by 10^(number of fractional digits parsed) */
272 for (i = 0; i < (end - pbuf); i++)
275 *val += temp_val / pow;
280 EXPORT_SYMBOL(lprocfs_write_frac_helper);
/*
 * Module knob: when non-zero, lprocfs_alloc_stats() forces single
 * (non-per-CPU) counter allocation, trading SMP update scalability for
 * lower memory use.
 */
282 static int lprocfs_no_percpu_stats;
283 module_param(lprocfs_no_percpu_stats, int, 0644);
284 MODULE_PARM_DESC(lprocfs_no_percpu_stats, "Do not alloc percpu data for lprocfs stats");
286 #define MAX_STRING_SIZE 128
/* Thin wrapper around single_release() for single_open()-style files. */
288 int lprocfs_single_release(struct inode *inode, struct file *file)
290 return single_release(inode, file);
292 EXPORT_SYMBOL(lprocfs_single_release);
/* Thin wrapper around seq_release() for seq_open()-style files. */
294 int lprocfs_seq_release(struct inode *inode, struct file *file)
296 return seq_release(inode, file);
298 EXPORT_SYMBOL(lprocfs_seq_release);
300 /* lprocfs API calls */
/*
 * Create one debugfs file @name under @root exposing @data through
 * @fops.  A NULL result from debugfs is normalized to
 * ERR_PTR(-ENOMEM) so callers can rely on IS_ERR() alone.
 * NOTE(review): "mode" is computed on lines not visible in this
 * extract.
 */
302 struct dentry *ldebugfs_add_simple(struct dentry *root,
303 char *name, void *data,
304 const struct file_operations *fops)
306 struct dentry *entry;
309 if (!root || !name || !fops)
310 return ERR_PTR(-EINVAL);
316 entry = debugfs_create_file(name, mode, root, data, fops);
317 if (IS_ERR_OR_NULL(entry)) {
318 CERROR("LprocFS: No memory to create <debugfs> entry %s\n", name);
319 return entry ?: ERR_PTR(-ENOMEM);
323 EXPORT_SYMBOL_GPL(ldebugfs_add_simple);
/* Fallback fops for list entries that provide none: a no-op file. */
325 static const struct file_operations lprocfs_generic_fops = { };
/*
 * Add each entry of @list as a debugfs file under @parent.  The file
 * mode is taken from list->proc_mode when set, otherwise derived from
 * which fops handlers (read/write) exist.  Returns negative error on
 * failure.  NOTE(review): the loop construct and list advance are on
 * lines not visible in this extract.
 */
327 int ldebugfs_add_vars(struct dentry *parent,
328 struct lprocfs_vars *list,
331 if (IS_ERR_OR_NULL(parent) || IS_ERR_OR_NULL(list))
335 struct dentry *entry;
/* explicit mode wins; otherwise infer readable/writable from fops */
338 if (list->proc_mode != 0000) {
339 mode = list->proc_mode;
340 } else if (list->fops) {
341 if (list->fops->read)
343 if (list->fops->write)
346 entry = debugfs_create_file(list->name, mode, parent,
348 list->fops ?: &lprocfs_generic_fops
350 if (IS_ERR_OR_NULL(entry))
351 return entry ? PTR_ERR(entry) : -ENOMEM;
356 EXPORT_SYMBOL_GPL(ldebugfs_add_vars);
/* Recursively remove a debugfs subtree; *entryp is presumably cleared
 * on a line not visible here -- confirm. */
358 void ldebugfs_remove(struct dentry **entryp)
360 debugfs_remove_recursive(*entryp);
363 EXPORT_SYMBOL_GPL(ldebugfs_remove);
/*
 * Create debugfs directory @name under @parent and populate it from
 * @list via ldebugfs_add_vars().  On populate failure the directory is
 * removed again.  Returns the directory dentry or an ERR_PTR (NULL
 * from debugfs is mapped to -ENOMEM).
 */
365 struct dentry *ldebugfs_register(const char *name,
366 struct dentry *parent,
367 struct lprocfs_vars *list, void *data)
369 struct dentry *entry;
371 entry = debugfs_create_dir(name, parent);
372 if (IS_ERR_OR_NULL(entry)) {
373 entry = entry ?: ERR_PTR(-ENOMEM);
377 if (!IS_ERR_OR_NULL(list)) {
380 rc = ldebugfs_add_vars(entry, list, data);
/* roll back the half-created directory on error */
382 debugfs_remove(entry);
389 EXPORT_SYMBOL_GPL(ldebugfs_register);
391 /* Generic callbacks */
/* sysfs: print this obd_device's UUID. */
392 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
395 struct obd_device *obd = container_of(kobj, struct obd_device,
398 return sprintf(buf, "%s\n", obd->obd_uuid.uuid);
400 LUSTRE_RO_ATTR(uuid);
/* sysfs: filesystem block size from a (possibly cached) statfs of the
 * self export; cache age bounded by OBD_STATFS_CACHE_SECONDS. */
402 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
405 struct obd_device *obd = container_of(kobj, struct obd_device,
407 struct obd_statfs osfs;
408 int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
409 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
412 return sprintf(buf, "%u\n", osfs.os_bsize);
416 LUSTRE_RO_ATTR(blocksize);
/* sysfs: total space in KiB = os_blocks scaled by the block size
 * (os_bsize >> 10 gives blocksize in KiB; the shift loop applies it). */
418 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
421 struct obd_device *obd = container_of(kobj, struct obd_device,
423 struct obd_statfs osfs;
424 int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
425 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
428 __u32 blk_size = osfs.os_bsize >> 10;
429 __u64 result = osfs.os_blocks;
431 while (blk_size >>= 1)
434 return sprintf(buf, "%llu\n", result);
439 LUSTRE_RO_ATTR(kbytestotal);
/* sysfs: free space in KiB (same scaling scheme as kbytestotal_show,
 * applied to os_bfree). */
441 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
444 struct obd_device *obd = container_of(kobj, struct obd_device,
446 struct obd_statfs osfs;
447 int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
448 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
451 __u32 blk_size = osfs.os_bsize >> 10;
452 __u64 result = osfs.os_bfree;
454 while (blk_size >>= 1)
457 return sprintf(buf, "%llu\n", result);
462 LUSTRE_RO_ATTR(kbytesfree);
/* sysfs: space available to unprivileged users in KiB (os_bavail,
 * same scaling scheme as kbytestotal_show). */
464 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
467 struct obd_device *obd = container_of(kobj, struct obd_device,
469 struct obd_statfs osfs;
470 int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
471 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
474 __u32 blk_size = osfs.os_bsize >> 10;
475 __u64 result = osfs.os_bavail;
477 while (blk_size >>= 1)
480 return sprintf(buf, "%llu\n", result);
485 LUSTRE_RO_ATTR(kbytesavail);
/* sysfs: total inode count from the cached statfs. */
487 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
490 struct obd_device *obd = container_of(kobj, struct obd_device,
492 struct obd_statfs osfs;
493 int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
494 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
497 return sprintf(buf, "%llu\n", osfs.os_files);
501 LUSTRE_RO_ATTR(filestotal);
/* sysfs: free inode count from the cached statfs. */
503 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
506 struct obd_device *obd = container_of(kobj, struct obd_device,
508 struct obd_statfs osfs;
509 int rc = obd_statfs(NULL, obd->obd_self_export, &osfs,
510 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
513 return sprintf(buf, "%llu\n", osfs.os_ffree);
517 LUSTRE_RO_ATTR(filesfree);
/*
 * seq_file: print the client import's target name, import state, and
 * a DEACTIVATED marker.  lprocfs_climp_check() takes cl_sem (released
 * by the up_read() below) and presumably validates cl_import --
 * confirm that guarantee.
 */
519 int lprocfs_rd_server_uuid(struct seq_file *m, void *data)
521 struct obd_device *obd = data;
522 struct obd_import *imp;
523 char *imp_state_name = NULL;
527 rc = lprocfs_climp_check(obd);
531 imp = obd->u.cli.cl_import;
532 imp_state_name = ptlrpc_import_state_name(imp->imp_state);
533 seq_printf(m, "%s\t%s%s\n",
534 obd2cli_tgt(obd), imp_state_name,
535 imp->imp_deactive ? "\tDEACTIVATED" : "");
537 up_read(&obd->u.cli.cl_sem);
541 EXPORT_SYMBOL(lprocfs_rd_server_uuid);
/*
 * seq_file: print the UUID of the current import connection, or
 * "<none>" when not connected.
 */
543 int lprocfs_rd_conn_uuid(struct seq_file *m, void *data)
545 struct obd_device *obd = data;
546 struct ptlrpc_connection *conn;
551 rc = lprocfs_climp_check(obd);
/* NOTE(review): cl_import is dereferenced here but only NULL-checked
 * on the following line -- the check appears to come too late; confirm
 * whether lprocfs_climp_check() already guarantees cl_import != NULL. */
555 conn = obd->u.cli.cl_import->imp_connection;
556 if (conn && obd->u.cli.cl_import)
557 seq_printf(m, "%s\n", conn->c_remote_uuid.uuid);
559 seq_puts(m, "<none>\n");
561 up_read(&obd->u.cli.cl_sem);
565 EXPORT_SYMBOL(lprocfs_rd_conn_uuid);
568 * Lock statistics structure for access, possibly only on this CPU.
570 * The statistics struct may be allocated with per-CPU structures for
571 * efficient concurrent update (usually only on server-wide stats), or
572 * as a single global struct (e.g. for per-client or per-job statistics),
573 * so the required locking depends on the type of structure allocated.
575 * For per-CPU statistics, pin the thread to the current cpuid so that
576 * will only access the statistics for that CPU. If the stats structure
577 * for the current CPU has not been allocated (or previously freed),
578 * allocate it now. The per-CPU statistics do not need locking since
579 * the thread is pinned to the CPU during update.
581 * For global statistics, lock the stats structure to prevent concurrent update.
583 * \param[in] stats statistics structure to lock
584 * \param[in] opc type of operation:
585 * LPROCFS_GET_SMP_ID: "lock" and return current CPU index
586 * for incrementing statistics for that CPU
587 * LPROCFS_GET_NUM_CPU: "lock" and return number of used
588 * CPU indices to iterate over all indices
589 * \param[out] flags CPU interrupt saved state for IRQ-safe locking
591 * \retval cpuid of current thread or number of allocated structs
592 * \retval negative on error (only for opc LPROCFS_GET_SMP_ID + per-CPU stats)
594 int lprocfs_stats_lock(struct lprocfs_stats *stats,
595 enum lprocfs_stats_lock_ops opc,
596 unsigned long *flags)
/* global (non-per-CPU) stats: take the spinlock, IRQ-safe if flagged */
598 if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
599 if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
600 spin_lock_irqsave(&stats->ls_lock, *flags);
602 spin_lock(&stats->ls_lock);
603 return opc == LPROCFS_GET_NUM_CPU ? 1 : 0;
607 case LPROCFS_GET_SMP_ID: {
/* pin to this CPU; allocate its counter slot lazily on first use */
608 unsigned int cpuid = get_cpu();
610 if (unlikely(!stats->ls_percpu[cpuid])) {
611 int rc = lprocfs_stats_alloc_one(stats, cpuid);
620 case LPROCFS_GET_NUM_CPU:
621 return stats->ls_biggest_alloc_num;
628 * Unlock statistics structure after access.
630 * Unlock the lock acquired via lprocfs_stats_lock() for global statistics,
631 * or unpin this thread from the current cpuid for per-CPU statistics.
633 * This function must be called using the same arguments as used when calling
634 * lprocfs_stats_lock() so that the correct operation can be performed.
636 * \param[in] stats statistics structure to unlock
637 * \param[in] opc type of operation (current cpuid or number of structs)
638 * \param[in] flags CPU interrupt saved state for IRQ-safe locking
640 void lprocfs_stats_unlock(struct lprocfs_stats *stats,
641 enum lprocfs_stats_lock_ops opc,
642 unsigned long *flags)
644 if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) {
645 if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
646 spin_unlock_irqrestore(&stats->ls_lock, *flags);
648 spin_unlock(&stats->ls_lock);
/* per-CPU path: undo the get_cpu() pin taken in lprocfs_stats_lock() */
649 } else if (opc == LPROCFS_GET_SMP_ID) {
654 /** add up per-cpu counters */
/*
 * Aggregate counter @idx across all allocated per-CPU slots into @cnt:
 * sums lc_count/lc_sum/lc_sumsquare and takes min/max of lc_min/lc_max.
 */
655 void lprocfs_stats_collect(struct lprocfs_stats *stats, int idx,
656 struct lprocfs_counter *cnt)
658 unsigned int num_entry;
659 struct lprocfs_counter *percpu_cntr;
661 unsigned long flags = 0;
663 memset(cnt, 0, sizeof(*cnt));
666 /* set count to 1 to avoid divide-by-zero errs in callers */
671 cnt->lc_min = LC_MIN_INIT;
673 num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
675 for (i = 0; i < num_entry; i++) {
/* slots are allocated lazily, so some CPUs may have none */
676 if (!stats->ls_percpu[i])
678 percpu_cntr = lprocfs_stats_counter_get(stats, i, idx);
680 cnt->lc_count += percpu_cntr->lc_count;
681 cnt->lc_sum += percpu_cntr->lc_sum;
682 if (percpu_cntr->lc_min < cnt->lc_min)
683 cnt->lc_min = percpu_cntr->lc_min;
684 if (percpu_cntr->lc_max > cnt->lc_max)
685 cnt->lc_max = percpu_cntr->lc_max;
686 cnt->lc_sumsquare += percpu_cntr->lc_sumsquare;
689 lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
693 * Append a space separated list of current set flags to str.
/* Emits ", <flag>" (or just "<flag>" when first) when imp->imp_<flag>
 * is set.  NOTE(review): "first" is presumably updated on lines not
 * visible here -- confirm. */
695 #define flag2str(flag, first) \
697 if (imp->imp_##flag) \
698 seq_printf(m, "%s" #flag, first ? "" : ", "); \
/*
 * Print the import's boolean status flags as a comma-separated list,
 * with obd_no_recov handled specially first.
 */
700 static int obd_import_flags2str(struct obd_import *imp, struct seq_file *m)
704 if (imp->imp_obd->obd_no_recov) {
705 seq_puts(m, "no_recov");
709 flag2str(invalid, first);
711 flag2str(deactive, first);
712 flag2str(replayable, first);
713 flag2str(pingable, first);
/*
 * seq_file counterpart of obd_connect_flags2str(): print the names of
 * the bits set in @flags, @sep-separated, unknown high bits lumped as
 * "unknown flags %#llx".
 */
719 static void obd_connect_seq_flags2str(struct seq_file *m, __u64 flags, char *sep)
725 for (i = 0; obd_connect_names[i]; i++, mask <<= 1) {
727 seq_printf(m, "%s%s",
728 first ? sep : "", obd_connect_names[i]);
732 if (flags & ~(mask - 1))
733 seq_printf(m, "%sunknown flags %#llx",
734 first ? sep : "", flags & ~(mask - 1));
/*
 * seq_file: full YAML-like dump of the client import: identity and
 * state, connect/import flags, negotiated connect data, failover NID
 * list, connection counters, RPC/timeout statistics, and read/write
 * RPC averages derived from obd_svc_stats.  Holds cl_sem for the
 * duration (taken in lprocfs_climp_check(), released at the end) and
 * imp_lock around the connection-list walk.
 */
737 int lprocfs_rd_import(struct seq_file *m, void *data)
739 char nidstr[LNET_NIDSTR_SIZE];
740 struct lprocfs_counter ret;
741 struct lprocfs_counter_header *header;
742 struct obd_device *obd = data;
743 struct obd_import *imp;
744 struct obd_import_conn *conn;
745 struct obd_connect_data *ocd;
752 rc = lprocfs_climp_check(obd);
756 imp = obd->u.cli.cl_import;
757 ocd = &imp->imp_connect_data;
759 seq_printf(m, "import:\n"
764 " connect_flags: [ ",
767 ptlrpc_import_state_name(imp->imp_state),
768 imp->imp_connect_data.ocd_instance);
769 obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
772 obd_connect_data_seqprint(m, ocd);
773 seq_puts(m, " import_flags: [ ");
774 obd_import_flags2str(imp, m);
779 " failover_nids: [ ");
/* imp_lock protects the connection list and imp_connection */
780 spin_lock(&imp->imp_lock);
782 list_for_each_entry(conn, &imp->imp_conn_list, oic_item) {
783 libcfs_nid2str_r(conn->oic_conn->c_peer.nid,
784 nidstr, sizeof(nidstr));
785 seq_printf(m, "%s%s", j ? ", " : "", nidstr);
788 if (imp->imp_connection)
789 libcfs_nid2str_r(imp->imp_connection->c_peer.nid,
790 nidstr, sizeof(nidstr));
792 strncpy(nidstr, "<none>", sizeof(nidstr));
795 " current_connection: %s\n"
796 " connection_attempts: %u\n"
798 " in-progress_invalidations: %u\n",
802 atomic_read(&imp->imp_inval_count));
803 spin_unlock(&imp->imp_lock);
/* the remainder needs service stats; bail out if none allocated */
805 if (!obd->obd_svc_stats)
808 header = &obd->obd_svc_stats->ls_cnt_header[PTLRPC_REQWAIT_CNTR];
809 lprocfs_stats_collect(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR, &ret);
810 if (ret.lc_count != 0) {
811 /* first argument to do_div MUST be __u64 */
812 __u64 sum = ret.lc_sum;
814 do_div(sum, ret.lc_count);
822 " unregistering: %u\n"
824 " avg_waittime: %llu %s\n",
825 atomic_read(&imp->imp_inflight),
826 atomic_read(&imp->imp_unregistering),
827 atomic_read(&imp->imp_timeouts),
828 ret.lc_sum, header->lc_units);
/* worst-case service estimate across all active portals */
831 for (j = 0; j < IMP_AT_MAX_PORTALS; j++) {
832 if (imp->imp_at.iat_portal[j] == 0)
834 k = max_t(unsigned int, k,
835 at_get(&imp->imp_at.iat_service_estimate[j]));
838 " service_estimates:\n"
839 " services: %u sec\n"
840 " network: %u sec\n",
842 at_get(&imp->imp_at.iat_net_latency));
846 " last_replay: %llu\n"
847 " peer_committed: %llu\n"
848 " last_checked: %llu\n",
849 imp->imp_last_replay_transno,
850 imp->imp_peer_committed_transno,
851 imp->imp_last_transno_checked);
/* per-direction (read rw=0, write rw=1) RPC averages */
854 for (rw = 0; rw <= 1; rw++) {
855 lprocfs_stats_collect(obd->obd_svc_stats,
856 PTLRPC_LAST_CNTR + BRW_READ_BYTES + rw,
858 if (ret.lc_sum > 0 && ret.lc_count > 0) {
859 /* first argument to do_div MUST be __u64 */
860 __u64 sum = ret.lc_sum;
862 do_div(sum, ret.lc_count);
865 " %s_data_averages:\n"
866 " bytes_per_rpc: %llu\n",
867 rw ? "write" : "read",
871 j = opcode_offset(OST_READ + rw) + EXTRA_MAX_OPCODES;
872 header = &obd->obd_svc_stats->ls_cnt_header[j];
873 lprocfs_stats_collect(obd->obd_svc_stats, j, &ret);
874 if (ret.lc_sum > 0 && ret.lc_count != 0) {
875 /* first argument to do_div MUST be __u64 */
876 __u64 sum = ret.lc_sum;
878 do_div(sum, ret.lc_count);
881 " %s_per_rpc: %llu\n",
882 header->lc_units, ret.lc_sum);
886 " MB_per_sec: %u.%.02u\n",
887 k / j, (100 * k / j) % 100);
892 up_read(&obd->u.cli.cl_sem);
895 EXPORT_SYMBOL(lprocfs_rd_import);
/*
 * seq_file: print the import's current state plus its ring-buffer
 * state history (oldest first, starting at imp_state_hist_idx),
 * skipping unused (zero-state) slots.
 */
897 int lprocfs_rd_state(struct seq_file *m, void *data)
899 struct obd_device *obd = data;
900 struct obd_import *imp;
904 rc = lprocfs_climp_check(obd);
908 imp = obd->u.cli.cl_import;
910 seq_printf(m, "current_state: %s\n",
911 ptlrpc_import_state_name(imp->imp_state));
912 seq_puts(m, "state_history:\n");
913 k = imp->imp_state_hist_idx;
914 for (j = 0; j < IMP_STATE_HIST_LEN; j++) {
915 struct import_state_hist *ish =
916 &imp->imp_state_hist[(k + j) % IMP_STATE_HIST_LEN];
917 if (ish->ish_state == 0)
919 seq_printf(m, " - [ %lld, %s ]\n", (s64)ish->ish_time,
920 ptlrpc_import_state_name(ish->ish_state));
923 up_read(&obd->u.cli.cl_sem);
926 EXPORT_SYMBOL(lprocfs_rd_state);
/* Print the adaptive-timeout histogram bins of @at on one line. */
928 int lprocfs_at_hist_helper(struct seq_file *m, struct adaptive_timeout *at)
932 for (i = 0; i < AT_BINS; i++)
933 seq_printf(m, "%3u ", at->at_hist[i]);
937 EXPORT_SYMBOL(lprocfs_at_hist_helper);
939 /* See also ptlrpc_lprocfs_rd_timeouts */
/*
 * seq_file: report adaptive-timeout health -- time since last reply,
 * then current/worst network latency and per-portal service estimates,
 * each with its histogram via lprocfs_at_hist_helper().
 */
940 int lprocfs_rd_timeouts(struct seq_file *m, void *data)
942 struct obd_device *obd = data;
943 struct obd_import *imp;
944 unsigned int cur, worst;
945 time64_t now, worstt;
950 rc = lprocfs_climp_check(obd);
954 imp = obd->u.cli.cl_import;
956 now = ktime_get_real_seconds();
958 /* Some network health info for kicks */
959 s2dhms(&ts, now - imp->imp_last_reply_time);
960 seq_printf(m, "%-10s : %lld, " DHMS_FMT " ago\n",
961 "last reply", (s64)imp->imp_last_reply_time, DHMS_VARS(&ts));
963 cur = at_get(&imp->imp_at.iat_net_latency);
964 worst = imp->imp_at.iat_net_latency.at_worst_ever;
965 worstt = imp->imp_at.iat_net_latency.at_worst_time;
966 s2dhms(&ts, now - worstt);
967 seq_printf(m, "%-10s : cur %3u worst %3u (at %lld, " DHMS_FMT " ago) ",
968 "network", cur, worst, (s64)worstt, DHMS_VARS(&ts));
969 lprocfs_at_hist_helper(m, &imp->imp_at.iat_net_latency);
/* per-portal service estimates; portal 0 means slot unused */
971 for (i = 0; i < IMP_AT_MAX_PORTALS; i++) {
972 if (imp->imp_at.iat_portal[i] == 0)
974 cur = at_get(&imp->imp_at.iat_service_estimate[i]);
975 worst = imp->imp_at.iat_service_estimate[i].at_worst_ever;
976 worstt = imp->imp_at.iat_service_estimate[i].at_worst_time;
977 s2dhms(&ts, now - worstt);
978 seq_printf(m, "portal %-2d : cur %3u worst %3u (at %lld, "
979 DHMS_FMT " ago) ", imp->imp_at.iat_portal[i],
980 cur, worst, (s64)worstt, DHMS_VARS(&ts));
981 lprocfs_at_hist_helper(m, &imp->imp_at.iat_service_estimate[i]);
984 up_read(&obd->u.cli.cl_sem);
987 EXPORT_SYMBOL(lprocfs_rd_timeouts);
/* seq_file: print the raw connect-flag mask, then one flag name per
 * line (newline is passed as the separator). */
989 int lprocfs_rd_connect_flags(struct seq_file *m, void *data)
991 struct obd_device *obd = data;
995 rc = lprocfs_climp_check(obd);
999 flags = obd->u.cli.cl_import->imp_connect_data.ocd_connect_flags;
1000 seq_printf(m, "flags=%#llx\n", flags);
1001 obd_connect_seq_flags2str(m, flags, "\n");
1003 up_read(&obd->u.cli.cl_sem);
1006 EXPORT_SYMBOL(lprocfs_rd_connect_flags);
/* Default sysfs attributes attached to every obd_device kobject. */
1008 static struct attribute *obd_def_attrs[] = {
1009 &lustre_attr_blocksize.attr,
1010 &lustre_attr_kbytestotal.attr,
1011 &lustre_attr_kbytesfree.attr,
1012 &lustre_attr_kbytesavail.attr,
1013 &lustre_attr_filestotal.attr,
1014 &lustre_attr_filesfree.attr,
1015 &lustre_attr_uuid.attr,
/* kobject release: signal lprocfs_obd_cleanup()'s wait_for_completion
 * that the last reference is gone. */
1019 static void obd_sysfs_release(struct kobject *kobj)
1021 struct obd_device *obd = container_of(kobj, struct obd_device,
1024 complete(&obd->obd_kobj_unregister);
/* kobj_type binding the default attributes and release hook above. */
1027 static struct kobj_type obd_ktype = {
1028 .default_attrs = obd_def_attrs,
1029 .sysfs_ops = &lustre_sysfs_ops,
1030 .release = obd_sysfs_release,
/*
 * Register the obd device's sysfs kobject (plus optional extra @attrs
 * group) and its debugfs directory populated from @list.  A debugfs
 * failure is logged and tolerated (obd_debugfs_entry left NULL);
 * sysfs failures unwind with kobject_put().
 */
1033 int lprocfs_obd_setup(struct obd_device *obd, struct lprocfs_vars *list,
1034 const struct attribute_group *attrs)
1038 init_completion(&obd->obd_kobj_unregister);
1039 rc = kobject_init_and_add(&obd->obd_kobj, &obd_ktype,
1040 obd->obd_type->typ_kobj,
1041 "%s", obd->obd_name);
1046 rc = sysfs_create_group(&obd->obd_kobj, attrs);
/* drop the reference taken by kobject_init_and_add() on failure */
1048 kobject_put(&obd->obd_kobj);
1053 obd->obd_debugfs_entry = ldebugfs_register(obd->obd_name,
1054 obd->obd_type->typ_debugfs_entry,
1056 if (IS_ERR_OR_NULL(obd->obd_debugfs_entry)) {
1057 rc = obd->obd_debugfs_entry ? PTR_ERR(obd->obd_debugfs_entry)
1059 CERROR("error %d setting up lprocfs for %s\n",
1061 obd->obd_debugfs_entry = NULL;
1066 EXPORT_SYMBOL_GPL(lprocfs_obd_setup);
/*
 * Tear down what lprocfs_obd_setup() created: remove the debugfs tree
 * and drop the kobject, blocking until obd_sysfs_release() confirms
 * the final reference is gone.
 */
1068 int lprocfs_obd_cleanup(struct obd_device *obd)
1073 if (!IS_ERR_OR_NULL(obd->obd_debugfs_entry))
1074 ldebugfs_remove(&obd->obd_debugfs_entry);
1076 kobject_put(&obd->obd_kobj);
1077 wait_for_completion(&obd->obd_kobj_unregister);
1081 EXPORT_SYMBOL_GPL(lprocfs_obd_cleanup);
/*
 * Lazily allocate the per-CPU counter array for slot @cpuid (per-CPU
 * stats only), widening ls_biggest_alloc_num under ls_lock so readers
 * iterating by LPROCFS_GET_NUM_CPU see the new slot, and initializing
 * each counter's lc_min to LC_MIN_INIT.
 */
1083 int lprocfs_stats_alloc_one(struct lprocfs_stats *stats, unsigned int cpuid)
1085 struct lprocfs_counter *cntr;
1086 unsigned int percpusize;
1088 unsigned long flags = 0;
1091 LASSERT(!stats->ls_percpu[cpuid]);
1092 LASSERT((stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU) == 0);
1094 percpusize = lprocfs_stats_counter_size(stats);
/* atomic allocation: may be called with a CPU pinned (get_cpu()) */
1095 LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[cpuid], percpusize);
1096 if (stats->ls_percpu[cpuid]) {
1098 if (unlikely(stats->ls_biggest_alloc_num <= cpuid)) {
1099 if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
1100 spin_lock_irqsave(&stats->ls_lock, flags);
1102 spin_lock(&stats->ls_lock);
/* re-check under the lock: another CPU may have raced us */
1103 if (stats->ls_biggest_alloc_num <= cpuid)
1104 stats->ls_biggest_alloc_num = cpuid + 1;
1105 if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
1106 spin_unlock_irqrestore(&stats->ls_lock, flags);
1108 spin_unlock(&stats->ls_lock);
1110 /* initialize the ls_percpu[cpuid] non-zero counter */
1111 for (i = 0; i < stats->ls_num; ++i) {
1112 cntr = lprocfs_stats_counter_get(stats, cpuid, i);
1113 cntr->lc_min = LC_MIN_INIT;
/*
 * Allocate an lprocfs_stats with @num counters.  Layout depends on
 * @flags (and the lprocfs_no_percpu_stats module knob, which forces
 * NOPERCPU): one shared counter set for NOPERCPU, eagerly-allocated
 * per-CPU sets for IRQ_SAFE, otherwise per-CPU sets allocated lazily
 * by lprocfs_stats_lock().  Returns NULL on allocation failure
 * (cleanup via lprocfs_free_stats()).
 */
1119 struct lprocfs_stats *lprocfs_alloc_stats(unsigned int num,
1120 enum lprocfs_stats_flags flags)
1122 struct lprocfs_stats *stats;
1123 unsigned int num_entry;
1124 unsigned int percpusize = 0;
1130 if (lprocfs_no_percpu_stats != 0)
1131 flags |= LPROCFS_STATS_FLAG_NOPERCPU;
1133 if (flags & LPROCFS_STATS_FLAG_NOPERCPU)
1136 num_entry = num_possible_cpus();
1138 /* alloc percpu pointers for all possible cpu slots */
1139 LIBCFS_ALLOC(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
1143 stats->ls_num = num;
1144 stats->ls_flags = flags;
1145 spin_lock_init(&stats->ls_lock);
1147 /* alloc num of counter headers */
1148 LIBCFS_ALLOC(stats->ls_cnt_header,
1149 stats->ls_num * sizeof(struct lprocfs_counter_header));
1150 if (!stats->ls_cnt_header)
1153 if ((flags & LPROCFS_STATS_FLAG_NOPERCPU) != 0) {
1154 /* contains only one set counters */
1155 percpusize = lprocfs_stats_counter_size(stats);
1156 LIBCFS_ALLOC_ATOMIC(stats->ls_percpu[0], percpusize);
1157 if (!stats->ls_percpu[0])
1159 stats->ls_biggest_alloc_num = 1;
1160 } else if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0) {
1161 /* alloc all percpu data */
1162 for (i = 0; i < num_entry; ++i)
1163 if (lprocfs_stats_alloc_one(stats, i) < 0)
/* error path: release everything allocated so far */
1170 lprocfs_free_stats(&stats);
1173 EXPORT_SYMBOL(lprocfs_alloc_stats);
/*
 * Free a stats structure allocated by lprocfs_alloc_stats(): every
 * allocated per-CPU counter set, the header array, and the struct
 * itself.  Safe on a NULL/empty handle.
 */
1175 void lprocfs_free_stats(struct lprocfs_stats **statsh)
1177 struct lprocfs_stats *stats = *statsh;
1178 unsigned int num_entry;
1179 unsigned int percpusize;
1182 if (!stats || stats->ls_num == 0)
1186 if (stats->ls_flags & LPROCFS_STATS_FLAG_NOPERCPU)
1189 num_entry = num_possible_cpus();
1191 percpusize = lprocfs_stats_counter_size(stats);
1192 for (i = 0; i < num_entry; i++)
1193 if (stats->ls_percpu[i])
1194 LIBCFS_FREE(stats->ls_percpu[i], percpusize);
1195 if (stats->ls_cnt_header)
1196 LIBCFS_FREE(stats->ls_cnt_header, stats->ls_num *
1197 sizeof(struct lprocfs_counter_header));
1198 LIBCFS_FREE(stats, offsetof(typeof(*stats), ls_percpu[num_entry]));
1200 EXPORT_SYMBOL(lprocfs_free_stats);
/*
 * Sum the @field value of counter @idx over all allocated per-CPU
 * slots (via lprocfs_read_helper()) and return the total.
 */
1202 __u64 lprocfs_stats_collector(struct lprocfs_stats *stats, int idx,
1203 enum lprocfs_fields_flags field)
1206 unsigned int num_cpu;
1207 unsigned long flags = 0;
1212 num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
1213 for (i = 0; i < num_cpu; i++) {
1214 if (!stats->ls_percpu[i])
1216 ret += lprocfs_read_helper(
1217 lprocfs_stats_counter_get(stats, i, idx),
1218 &stats->ls_cnt_header[idx], stats->ls_flags,
1221 lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
/*
 * Reset every counter in every allocated per-CPU slot to its initial
 * state (lc_min back to LC_MIN_INIT, everything else to zero).
 */
1226 void lprocfs_clear_stats(struct lprocfs_stats *stats)
1228 struct lprocfs_counter *percpu_cntr;
1231 unsigned int num_entry;
1232 unsigned long flags = 0;
1234 num_entry = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
1236 for (i = 0; i < num_entry; i++) {
1237 if (!stats->ls_percpu[i])
1239 for (j = 0; j < stats->ls_num; j++) {
1240 percpu_cntr = lprocfs_stats_counter_get(stats, i, j);
1241 percpu_cntr->lc_count = 0;
1242 percpu_cntr->lc_min = LC_MIN_INIT;
1243 percpu_cntr->lc_max = 0;
1244 percpu_cntr->lc_sumsquare = 0;
1245 percpu_cntr->lc_sum = 0;
1246 if (stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE)
1247 percpu_cntr->lc_sum_irq = 0;
1251 lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
/* Any write to a stats file clears all counters; the data written is
 * ignored. */
1255 static ssize_t lprocfs_stats_seq_write(struct file *file,
1256 const char __user *buf,
1257 size_t len, loff_t *off)
1259 struct seq_file *seq = file->private_data;
1260 struct lprocfs_stats *stats = seq->private;
1262 lprocfs_clear_stats(stats);
/* seq iterator start: positions index counters; NULL ends iteration. */
1267 static void *lprocfs_stats_seq_start(struct seq_file *p, loff_t *pos)
1269 struct lprocfs_stats *stats = p->private;
1271 return (*pos < stats->ls_num) ? pos : NULL;
/* seq iterator stop: nothing to release. */
1274 static void lprocfs_stats_seq_stop(struct seq_file *p, void *v)
/* seq iterator next: advance *pos (on a line not visible here --
 * confirm) and revalidate via start. */
1278 static void *lprocfs_stats_seq_next(struct seq_file *p, void *v, loff_t *pos)
1281 return lprocfs_stats_seq_start(p, pos);
1284 /* seq file export of one lprocfs counter */
/*
 * Show one counter line: "name count samples [units] [min max sum
 * [sumsquare]]".  Index 0 additionally emits a snapshot_time header.
 */
1285 static int lprocfs_stats_seq_show(struct seq_file *p, void *v)
1287 struct lprocfs_stats *stats = p->private;
1288 struct lprocfs_counter_header *hdr;
1289 struct lprocfs_counter ctr;
1290 int idx = *(loff_t *)v;
1293 struct timespec64 now;
1295 ktime_get_real_ts64(&now);
1296 seq_printf(p, "%-25s %llu.%9lu secs.usecs\n",
1298 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1301 hdr = &stats->ls_cnt_header[idx];
1302 lprocfs_stats_collect(stats, idx, &ctr);
/* counters that never fired are omitted entirely */
1304 if (ctr.lc_count != 0) {
1305 seq_printf(p, "%-25s %lld samples [%s]",
1306 hdr->lc_name, ctr.lc_count, hdr->lc_units);
1308 if ((hdr->lc_config & LPROCFS_CNTR_AVGMINMAX) &&
1309 (ctr.lc_count > 0)) {
1310 seq_printf(p, " %lld %lld %lld",
1311 ctr.lc_min, ctr.lc_max, ctr.lc_sum);
1312 if (hdr->lc_config & LPROCFS_CNTR_STDDEV)
1313 seq_printf(p, " %lld", ctr.lc_sumsquare);
/* seq_file iterator: one record per counter index. */
1321 static const struct seq_operations lprocfs_stats_seq_sops = {
1322 .start = lprocfs_stats_seq_start,
1323 .stop = lprocfs_stats_seq_stop,
1324 .next = lprocfs_stats_seq_next,
1325 .show = lprocfs_stats_seq_show,
/* open: hook the stats struct stashed in i_private onto the seq_file. */
1328 static int lprocfs_stats_seq_open(struct inode *inode, struct file *file)
1330 struct seq_file *seq;
1333 rc = seq_open(file, &lprocfs_stats_seq_sops);
1337 seq = file->private_data;
1338 seq->private = inode->i_private;
/* fops for a stats file: read via seq_file, write clears counters. */
1343 static const struct file_operations lprocfs_stats_seq_fops = {
1344 .owner = THIS_MODULE,
1345 .open = lprocfs_stats_seq_open,
1347 .write = lprocfs_stats_seq_write,
1348 .llseek = seq_lseek,
1349 .release = lprocfs_seq_release,
/*
 * Expose @stats as debugfs file @name under @parent using the stats
 * seq fops; NULL from debugfs is mapped to -ENOMEM.
 */
1352 int ldebugfs_register_stats(struct dentry *parent, const char *name,
1353 struct lprocfs_stats *stats)
1355 struct dentry *entry;
1357 LASSERT(!IS_ERR_OR_NULL(parent));
1359 entry = debugfs_create_file(name, 0644, parent, stats,
1360 &lprocfs_stats_seq_fops);
1361 if (IS_ERR_OR_NULL(entry))
1362 return entry ? PTR_ERR(entry) : -ENOMEM;
1366 EXPORT_SYMBOL_GPL(ldebugfs_register_stats);
/*
 * Initialize counter slot @index: fill its header (config/name/units)
 * and reset the counter in every already-allocated per-CPU set.
 */
1368 void lprocfs_counter_init(struct lprocfs_stats *stats, int index,
1369 unsigned int conf, const char *name,
1372 struct lprocfs_counter_header *header;
1373 struct lprocfs_counter *percpu_cntr;
1374 unsigned long flags = 0;
1376 unsigned int num_cpu;
1378 header = &stats->ls_cnt_header[index];
1379 LASSERTF(header, "Failed to allocate stats header:[%d]%s/%s\n",
1380 index, name, units);
1382 header->lc_config = conf;
1383 header->lc_name = name;
1384 header->lc_units = units;
1386 num_cpu = lprocfs_stats_lock(stats, LPROCFS_GET_NUM_CPU, &flags);
1387 for (i = 0; i < num_cpu; ++i) {
1388 if (!stats->ls_percpu[i])
1390 percpu_cntr = lprocfs_stats_counter_get(stats, i, index);
1391 percpu_cntr->lc_count = 0;
1392 percpu_cntr->lc_min = LC_MIN_INIT;
1393 percpu_cntr->lc_max = 0;
1394 percpu_cntr->lc_sumsquare = 0;
1395 percpu_cntr->lc_sum = 0;
1396 if ((stats->ls_flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
1397 percpu_cntr->lc_sum_irq = 0;
1399 lprocfs_stats_unlock(stats, LPROCFS_GET_NUM_CPU, &flags);
/* Export cleanup hook: currently has nothing to undo. */
1403 int lprocfs_exp_cleanup(struct obd_export *exp)
1407 EXPORT_SYMBOL(lprocfs_exp_cleanup);
/*
 * Extract one @field from counter @lc / @header: config, sum (plus
 * lc_sum_irq for IRQ-safe stats), min, max, avg (approximated as
 * (max-min)/2), sumsquare, or count.
 */
1409 __s64 lprocfs_read_helper(struct lprocfs_counter *lc,
1410 struct lprocfs_counter_header *header,
1411 enum lprocfs_stats_flags flags,
1412 enum lprocfs_fields_flags field)
1420 case LPROCFS_FIELDS_FLAGS_CONFIG:
1421 ret = header->lc_config;
1423 case LPROCFS_FIELDS_FLAGS_SUM:
/* IRQ-context updates are accumulated separately; fold them in */
1425 if ((flags & LPROCFS_STATS_FLAG_IRQ_SAFE) != 0)
1426 ret += lc->lc_sum_irq;
1428 case LPROCFS_FIELDS_FLAGS_MIN:
1431 case LPROCFS_FIELDS_FLAGS_MAX:
1434 case LPROCFS_FIELDS_FLAGS_AVG:
1435 ret = (lc->lc_max - lc->lc_min) / 2;
1437 case LPROCFS_FIELDS_FLAGS_SUMSQUARE:
1438 ret = lc->lc_sumsquare;
1440 case LPROCFS_FIELDS_FLAGS_COUNT:
1449 EXPORT_SYMBOL(lprocfs_read_helper);
/* Integer parse: the frac helper with multiplier 1. */
1451 int lprocfs_write_helper(const char __user *buffer, unsigned long count,
1454 return lprocfs_write_frac_helper(buffer, count, val, 1);
1456 EXPORT_SYMBOL(lprocfs_write_helper);
1458 int lprocfs_write_u64_helper(const char __user *buffer, unsigned long count,
1461 return lprocfs_write_frac_u64_helper(buffer, count, val, 1);
1463 EXPORT_SYMBOL(lprocfs_write_u64_helper);
/*
 * lprocfs_write_frac_u64_helper - parse a user-space decimal string
 * (optionally fractional, e.g. "1.5", optionally with a k/m/g/t/p unit
 * suffix) into *val, scaled by @mult.
 * @buffer:	user-space buffer holding the ASCII number
 * @count:	number of bytes in @buffer
 * @val:	result stored here on success
 * @mult:	scale factor; a unit suffix in the input overrides it
 *
 * Returns 0 on success, negative errno on bad input or a faulting copy.
 */
int lprocfs_write_frac_u64_helper(const char __user *buffer,
				  unsigned long count, __u64 *val, int mult)
	/* 22 bytes is enough for a sign, 20 decimal digits and a NUL */
	char kernbuf[22], *end, *pbuf;
	__u64 whole, frac = 0, units;
	unsigned frac_d = 1;

	/* leave room for the terminating NUL */
	if (count > (sizeof(kernbuf) - 1))

	if (copy_from_user(kernbuf, buffer, count))

	kernbuf[count] = '\0';

	/* integer part, parsed up to the first non-digit */
	whole = simple_strtoull(pbuf, &end, 10);

	/* need to limit frac_d to a __u32 */
	if (strlen(pbuf) > 10)

	/* fractional digits after the '.' */
	frac = simple_strtoull(pbuf, &end, 10);
	/* count decimal places */
	for (i = 0; i < (end - pbuf); i++)

	/* optional binary unit suffix, case-insensitive */
	switch (tolower(*end)) {

	/* Specified units override the multiplier */

	/* scale the fraction down by the number of decimal places seen */
	do_div(frac, frac_d);
	*val = sign * (whole * mult + frac);
EXPORT_SYMBOL(lprocfs_write_frac_u64_helper);
/*
 * lprocfs_strnstr - bounded substring search (local strnstr() equivalent).
 * @s1:		haystack; need not be NUL-terminated within @len
 * @s2:		NUL-terminated needle
 * @len:	maximum number of bytes of @s1 to consider
 *
 * Returns a pointer to the first occurrence of @s2 fully contained in the
 * first @len bytes of @s1, @s1 itself when @s2 is empty, or NULL when not
 * found.
 */
static char *lprocfs_strnstr(const char *s1, const char *s2, size_t len)
{
	size_t l2;

	l2 = strlen(s2);
	if (!l2)
		return (char *)s1;

	/* slide a window of l2 bytes over the first len bytes of s1 */
	while (len >= l2) {
		len--;
		if (!memcmp(s1, s2, l2))
			return (char *)s1;
		s1++;
	}
	return NULL;
}
/**
 * Find the string \a name in the input \a buffer, and return a pointer to the
 * value immediately following \a name, reducing \a count appropriately.
 * If \a name is not found the original \a buffer is returned.
 */
char *lprocfs_find_named_value(const char *buffer, const char *name,
			       size_t *count)
{
	char *val;
	size_t buflen = *count;

	/* there is no strnstr() in rhel5 and ubuntu kernels */
	val = lprocfs_strnstr(buffer, name, buflen);
	if (!val)
		return (char *)buffer;

	val += strlen(name);			       /* skip prefix */
	while (val < buffer + buflen && isspace(*val)) /* skip separator */
		val++;

	/* measure the alphanumeric value token and report it via *count */
	*count = 0;
	while (val < buffer + buflen && isalnum(*val)) {
		++*count;
		++val;
	}

	/* return the start of the token just measured */
	return val - *count;
}
EXPORT_SYMBOL(lprocfs_find_named_value);
1577 int ldebugfs_seq_create(struct dentry *parent, const char *name,
1578 umode_t mode, const struct file_operations *seq_fops,
1581 struct dentry *entry;
1583 /* Disallow secretly (un)writable entries. */
1584 LASSERT((seq_fops->write == NULL) == ((mode & 0222) == 0));
1586 entry = debugfs_create_file(name, mode, parent, data, seq_fops);
1587 if (IS_ERR_OR_NULL(entry))
1588 return entry ? PTR_ERR(entry) : -ENOMEM;
1592 EXPORT_SYMBOL_GPL(ldebugfs_seq_create);
1594 int ldebugfs_obd_seq_create(struct obd_device *dev,
1597 const struct file_operations *seq_fops,
1600 return ldebugfs_seq_create(dev->obd_debugfs_entry, name,
1601 mode, seq_fops, data);
1603 EXPORT_SYMBOL_GPL(ldebugfs_obd_seq_create);
1605 void lprocfs_oh_tally(struct obd_histogram *oh, unsigned int value)
1607 if (value >= OBD_HIST_MAX)
1608 value = OBD_HIST_MAX - 1;
1610 spin_lock(&oh->oh_lock);
1611 oh->oh_buckets[value]++;
1612 spin_unlock(&oh->oh_lock);
1614 EXPORT_SYMBOL(lprocfs_oh_tally);
1616 void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
1618 unsigned int val = 0;
1620 if (likely(value != 0))
1621 val = min(fls(value - 1), OBD_HIST_MAX);
1623 lprocfs_oh_tally(oh, val);
1625 EXPORT_SYMBOL(lprocfs_oh_tally_log2);
1627 unsigned long lprocfs_oh_sum(struct obd_histogram *oh)
1629 unsigned long ret = 0;
1632 for (i = 0; i < OBD_HIST_MAX; i++)
1633 ret += oh->oh_buckets[i];
1636 EXPORT_SYMBOL(lprocfs_oh_sum);
1638 void lprocfs_oh_clear(struct obd_histogram *oh)
1640 spin_lock(&oh->oh_lock);
1641 memset(oh->oh_buckets, 0, sizeof(oh->oh_buckets));
1642 spin_unlock(&oh->oh_lock);
1644 EXPORT_SYMBOL(lprocfs_oh_clear);
/*
 * lprocfs_wr_root_squash - parse and apply a "uid:gid" root-squash setting.
 * @buffer:	user-space buffer containing the new "uid:gid" string
 * @count:	length of @buffer in bytes
 * @squash:	root-squash state to update
 * @name:	device name, used only in log messages
 *
 * On success the parsed ids are stored in @squash; on failure a warning
 * naming the reason is logged and a negative errno is returned.
 */
int lprocfs_wr_root_squash(const char __user *buffer, unsigned long count,
			   struct root_squash_info *squash, char *name)
	char kernbuf[64], *tmp, *errmsg;
	unsigned long uid, gid;

	/* leave room for the terminating NUL */
	if (count >= sizeof(kernbuf)) {
		errmsg = "string too long";
		/* skip the CWARN that echoes kernbuf: it was never filled */
		goto failed_noprint;
	if (copy_from_user(kernbuf, buffer, count)) {
		errmsg = "bad address";
		goto failed_noprint;
	kernbuf[count] = '\0';

	/* look for uid gid separator */
	tmp = strchr(kernbuf, ':');
		errmsg = "needs uid:gid format";

	/* parse the uid before the ':' */
	if (kstrtoul(kernbuf, 0, &uid) != 0) {
	/* parse the gid after the ':' */
	if (kstrtoul(tmp, 0, &gid) != 0) {

	squash->rsi_uid = uid;
	squash->rsi_gid = gid;

	LCONSOLE_INFO("%s: root_squash is set to %u:%u\n",
		      name, squash->rsi_uid, squash->rsi_gid);

	/* failure path for input that was successfully copied in */
	CWARN("%s: failed to set root_squash to \"%s\", %s, rc = %d\n",
	      name, kernbuf, errmsg, rc);
	/* failure path before kernbuf holds anything printable */
	CWARN("%s: failed to set root_squash due to %s, rc = %d\n",
EXPORT_SYMBOL(lprocfs_wr_root_squash);
/*
 * lprocfs_wr_nosquash_nids - parse and install the nosquash NID list.
 * @buffer:	user-space buffer holding a NID list, or "NONE"/"clear"
 * @count:	length of @buffer in bytes
 * @squash:	root-squash state whose rsi_nosquash_nids list is replaced
 * @name:	device name, used only in log messages
 *
 * "NONE" or "clear" empties the list; otherwise the buffer is parsed
 * with cfs_parse_nidlist() and the result replaces the current list
 * under rsi_sem.  On failure a warning is logged and a negative errno
 * is returned.
 */
int lprocfs_wr_nosquash_nids(const char __user *buffer, unsigned long count,
			     struct root_squash_info *squash, char *name)
	char *kernbuf = NULL, *errmsg;
	struct list_head tmp;

		errmsg = "string too long";

	/* +1 for the terminating NUL */
	kernbuf = kzalloc(count + 1, GFP_NOFS);
		errmsg = "no memory";

	if (copy_from_user(kernbuf, buffer, count)) {
		errmsg = "bad address";
	kernbuf[count] = '\0';

	/* ignore a single trailing newline when matching keywords */
	if (count > 0 && kernbuf[count - 1] == '\n')

	if ((len == 4 && !strncmp(kernbuf, "NONE", len)) ||
	    (len == 5 && !strncmp(kernbuf, "clear", len))) {
		/* empty string is special case */
		down_write(&squash->rsi_sem);
		if (!list_empty(&squash->rsi_nosquash_nids))
			cfs_free_nidlist(&squash->rsi_nosquash_nids);
		up_write(&squash->rsi_sem);
		LCONSOLE_INFO("%s: nosquash_nids is cleared\n", name);

	/* build the new list on a local head first */
	INIT_LIST_HEAD(&tmp);
	if (cfs_parse_nidlist(kernbuf, count, &tmp) <= 0) {
		errmsg = "can't parse";
	LCONSOLE_INFO("%s: nosquash_nids set to %s\n",

	/* swap in the new list atomically w.r.t. readers of rsi_sem */
	down_write(&squash->rsi_sem);
	if (!list_empty(&squash->rsi_nosquash_nids))
		cfs_free_nidlist(&squash->rsi_nosquash_nids);
	list_splice(&tmp, &squash->rsi_nosquash_nids);
	up_write(&squash->rsi_sem);

	/* failure path for input that was successfully copied in */
	CWARN("%s: failed to set nosquash_nids to \"%s\", %s rc = %d\n",
	      name, kernbuf, errmsg, rc);
	/* failure path before kernbuf was allocated/filled */
	CWARN("%s: failed to set nosquash_nids due to %s rc = %d\n",
EXPORT_SYMBOL(lprocfs_wr_nosquash_nids);
1786 static ssize_t lustre_attr_show(struct kobject *kobj,
1787 struct attribute *attr, char *buf)
1789 struct lustre_attr *a = container_of(attr, struct lustre_attr, attr);
1791 return a->show ? a->show(kobj, attr, buf) : 0;
1794 static ssize_t lustre_attr_store(struct kobject *kobj, struct attribute *attr,
1795 const char *buf, size_t len)
1797 struct lustre_attr *a = container_of(attr, struct lustre_attr, attr);
1799 return a->store ? a->store(kobj, attr, buf, len) : len;
1802 const struct sysfs_ops lustre_sysfs_ops = {
1803 .show = lustre_attr_show,
1804 .store = lustre_attr_store,
1806 EXPORT_SYMBOL_GPL(lustre_sysfs_ops);