4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * lustre/include/lustre/lustre_idl.h
34 * Lustre wire protocol definitions.
37 /** \defgroup lustreidl lustreidl
39 * Lustre wire protocol definitions.
41 * ALL structs passing over the wire should be declared here. Structs
42 * that are used in interfaces with userspace should go in lustre_user.h.
44 * All structs being declared here should be built from simple fixed-size
45 * types (__u8, __u16, __u32, __u64) or be built from other types or
46 * structs also declared in this file. Similarly, all flags and magic
47 * values in those structs should also be declared here. This ensures
48 * that the Lustre wire protocol is not influenced by external dependencies.
50 * The only other acceptable items in this file are VERY SIMPLE accessor
51 * functions to avoid callers grubbing inside the structures, and the
52 * prototypes of the swabber functions for each struct. Nothing that
53 * depends on external functions or definitions should be in here.
55 * Structs must be properly aligned to put 64-bit values on an 8-byte
56 * boundary. Any structs being added here must also be added to
57 * utils/wirecheck.c and "make newwiretest" run to regenerate the
58 * utils/wiretest.c sources. This allows us to verify that wire structs
59 * have the proper alignment/size on all architectures.
61 * DO NOT CHANGE any of the structs, flags, values declared here and used
62 * in released Lustre versions. Some structs may have padding fields that
63 * can be used. Some structs might allow addition at the end (verify this
64 * in the code to ensure that new/old clients that see this larger struct
65 * do not fail, otherwise you need to implement protocol compatibility).
67 * We assume all nodes are either little-endian or big-endian, and we
68 * always send messages in the sender's native format. The receiver
69 * detects the message format by checking the 'magic' field of the message
70 * (see lustre_msg_swabbed() below).
72 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
73 * implemented either here, inline (trivial implementations) or in
74 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
75 * endian, in-place in the message buffer.
77 * A swabber takes a single pointer argument. The caller must already have
78 * verified that the length of the message buffer >= sizeof (type).
80 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
81 * may be defined that swabs just the variable part, after the caller has
82 * verified that the message buffer is large enough.
87 #ifndef _LUSTRE_IDL_H_
88 #define _LUSTRE_IDL_H_
90 #include "../../../include/linux/libcfs/libcfs.h"
91 #include "../../../include/linux/lnet/types.h"
93 /* Defn's shared with user-space. */
94 #include "lustre_user.h"
95 #include "lustre_errno.h"
96 #include "../lustre_ver.h"
101 /* FOO_REQUEST_PORTAL is for incoming requests on the FOO
102 * FOO_REPLY_PORTAL is for incoming replies on the FOO
103 * FOO_BULK_PORTAL is for incoming bulk on the FOO
106 /* Lustre service names follow the format
107  * service name + MDT + seq name
109 #define LUSTRE_MDT_MAXNAMELEN 80
111 #define CONNMGR_REQUEST_PORTAL 1
112 #define CONNMGR_REPLY_PORTAL 2
113 /*#define OSC_REQUEST_PORTAL 3 */
114 #define OSC_REPLY_PORTAL 4
115 /*#define OSC_BULK_PORTAL 5 */
116 #define OST_IO_PORTAL 6
117 #define OST_CREATE_PORTAL 7
118 #define OST_BULK_PORTAL 8
119 /*#define MDC_REQUEST_PORTAL 9 */
120 #define MDC_REPLY_PORTAL 10
121 /*#define MDC_BULK_PORTAL 11 */
122 #define MDS_REQUEST_PORTAL 12
123 /*#define MDS_REPLY_PORTAL 13 */
124 #define MDS_BULK_PORTAL 14
125 #define LDLM_CB_REQUEST_PORTAL 15
126 #define LDLM_CB_REPLY_PORTAL 16
127 #define LDLM_CANCEL_REQUEST_PORTAL 17
128 #define LDLM_CANCEL_REPLY_PORTAL 18
129 /*#define PTLBD_REQUEST_PORTAL 19 */
130 /*#define PTLBD_REPLY_PORTAL 20 */
131 /*#define PTLBD_BULK_PORTAL 21 */
132 #define MDS_SETATTR_PORTAL 22
133 #define MDS_READPAGE_PORTAL 23
134 #define OUT_PORTAL 24
136 #define MGC_REPLY_PORTAL 25
137 #define MGS_REQUEST_PORTAL 26
138 #define MGS_REPLY_PORTAL 27
139 #define OST_REQUEST_PORTAL 28
140 #define FLD_REQUEST_PORTAL 29
141 #define SEQ_METADATA_PORTAL 30
142 #define SEQ_DATA_PORTAL 31
143 #define SEQ_CONTROLLER_PORTAL 32
144 #define MGS_BULK_PORTAL 33
146 /* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
151 #define PTL_RPC_MSG_REQUEST 4711
152 #define PTL_RPC_MSG_ERR 4712
153 #define PTL_RPC_MSG_REPLY 4713
155 /* DON'T use swabbed values of MAGIC as magic! */
156 #define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
157 #define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B
159 #define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
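/*
 * A minimal illustrative sketch (the helper name is hypothetical, not part
 * of the protocol definitions): as described at the top of this file, the
 * receiver recognizes a message from an opposite-endian peer by the
 * byte-swapped magic value.
 */
static inline bool lustre_msg_magic_needs_swab(__u32 magic)
{
	/* a LUSTRE_MSG_MAGIC_V2 message from an opposite-endian sender
	 * arrives with its bytes reversed
	 */
	return magic == LUSTRE_MSG_MAGIC_V2_SWABBED;
}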
161 #define PTLRPC_MSG_VERSION 0x00000003
162 #define LUSTRE_VERSION_MASK 0xffff0000
163 #define LUSTRE_OBD_VERSION 0x00010000
164 #define LUSTRE_MDS_VERSION 0x00020000
165 #define LUSTRE_OST_VERSION 0x00030000
166 #define LUSTRE_DLM_VERSION 0x00040000
167 #define LUSTRE_LOG_VERSION 0x00050000
168 #define LUSTRE_MGS_VERSION 0x00060000
171 * Describes a range of sequences; lsr_start is included in the range but lsr_end is not.
173 * The same structure is used in the fld module, where the lsr_index field holds the mdt id.
176 struct lu_seq_range {
183 struct lu_seq_range_array {
186 struct lu_seq_range lsra_lsr[0];
189 #define LU_SEQ_RANGE_MDT 0x0
190 #define LU_SEQ_RANGE_OST 0x1
191 #define LU_SEQ_RANGE_ANY 0x3
193 #define LU_SEQ_RANGE_MASK 0x3
195 static inline unsigned fld_range_type(const struct lu_seq_range *range)
197 return range->lsr_flags & LU_SEQ_RANGE_MASK;
200 static inline bool fld_range_is_ost(const struct lu_seq_range *range)
202 return fld_range_type(range) == LU_SEQ_RANGE_OST;
205 static inline bool fld_range_is_mdt(const struct lu_seq_range *range)
207 return fld_range_type(range) == LU_SEQ_RANGE_MDT;
211 * The ANY range type is only used when the fld client sends an fld query request
212 * but does not know whether the seq is MDT or OST, so it sends the request
213 * with the ANY type, meaning either seq type returned from the lookup is acceptable.
216 static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
218 return fld_range_type(range) == LU_SEQ_RANGE_ANY;
221 static inline void fld_range_set_type(struct lu_seq_range *range,
224 range->lsr_flags |= flags;
227 static inline void fld_range_set_mdt(struct lu_seq_range *range)
229 fld_range_set_type(range, LU_SEQ_RANGE_MDT);
232 static inline void fld_range_set_ost(struct lu_seq_range *range)
234 fld_range_set_type(range, LU_SEQ_RANGE_OST);
237 static inline void fld_range_set_any(struct lu_seq_range *range)
239 fld_range_set_type(range, LU_SEQ_RANGE_ANY);
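/*
 * Minimal usage sketch (illustration only) of the fld_range helpers above:
 *
 *	struct lu_seq_range range;
 *
 *	range_init(&range);                (range_init() is defined below)
 *	fld_range_set_ost(&range);
 *	LASSERT(fld_range_is_ost(&range));
 */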
243 * returns the width of the given range \a range
246 static inline __u64 range_space(const struct lu_seq_range *range)
248 return range->lsr_end - range->lsr_start;
252 * initialize range to zero
255 static inline void range_init(struct lu_seq_range *range)
257 memset(range, 0, sizeof(*range));
261 * check if the given seq id \a s is within the given range \a range
264 static inline bool range_within(const struct lu_seq_range *range,
267 return s >= range->lsr_start && s < range->lsr_end;
270 static inline bool range_is_sane(const struct lu_seq_range *range)
272 return (range->lsr_end >= range->lsr_start);
275 static inline bool range_is_zero(const struct lu_seq_range *range)
277 return (range->lsr_start == 0 && range->lsr_end == 0);
280 static inline bool range_is_exhausted(const struct lu_seq_range *range)
283 return range_space(range) == 0;
286 /* return 0 if the two ranges have the same location */
287 static inline int range_compare_loc(const struct lu_seq_range *r1,
288 const struct lu_seq_range *r2)
290 return r1->lsr_index != r2->lsr_index ||
291 r1->lsr_flags != r2->lsr_flags;
294 #define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
296 #define PRANGE(range) \
297 (range)->lsr_start, \
299 (range)->lsr_index, \
300 fld_range_is_mdt(range) ? "mdt" : "ost"
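/*
 * Minimal usage sketch (illustration only): DRANGE is the format string and
 * PRANGE() supplies the matching arguments, e.g. in a debug message:
 *
 *	CDEBUG(D_INFO, "range "DRANGE"\n", PRANGE(range));
 */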
302 /** \defgroup lu_fid lu_fid
307 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
308 * Deprecated since HSM and SOM attributes are now stored in separate on-disk xattrs.
312 LMAC_HSM = 0x00000001,
313 LMAC_SOM = 0x00000002,
314 LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
315 LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
316 * under /O/<seq>/d<x>.
321 * Masks for all features that should be supported by a Lustre version to
322 * access a specific file.
323 * This information is stored in lustre_mdt_attrs::lma_incompat.
326 LMAI_RELEASED = 0x00000001, /* file is released */
327 LMAI_AGENT = 0x00000002, /* agent inode */
328 LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
329 * is on the remote MDT
333 #define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
339 /** LASTID file has zero OID */
340 LUSTRE_FID_LASTID_OID = 0UL,
341 /** initial fid id value */
342 LUSTRE_FID_INIT_OID = 1UL
345 /** returns fid object sequence */
346 static inline __u64 fid_seq(const struct lu_fid *fid)
351 /** returns fid object id */
352 static inline __u32 fid_oid(const struct lu_fid *fid)
357 /** returns fid object version */
358 static inline __u32 fid_ver(const struct lu_fid *fid)
363 static inline void fid_zero(struct lu_fid *fid)
365 memset(fid, 0, sizeof(*fid));
368 static inline __u64 fid_ver_oid(const struct lu_fid *fid)
370 return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
373 /* copytool uses a 32-bit bitmask field to encode archive IDs during registration:
375 * archive num = 0 => all
376 * archive num from 1 to 32
378 #define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
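/*
 * A minimal sketch of how a single archive number maps onto the copytool
 * registration bitmask described above. The helper name is hypothetical;
 * no such helper is defined by the wire protocol.
 */
static inline __u32 hsm_archive_num_to_mask(__u32 archive_num)
{
	if (archive_num == 0)			/* 0 => all archives */
		return 0xffffffffU;
	/* archives 1..LL_HSM_MAX_ARCHIVE map to bits 0..31 */
	return 1U << (archive_num - 1);
}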
381 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
382 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
383 * used for other purposes and not risk collisions with existing inodes.
385 * Different FID Format
386 * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
389 FID_SEQ_OST_MDT0 = 0,
390 FID_SEQ_LLOG = 1, /* unnamed llogs */
392 FID_SEQ_OST_MDT1 = 3,
393 FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
394 FID_SEQ_LLOG_NAME = 10, /* named llogs */
397 FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
398 FID_SEQ_IDIF = 0x100000000ULL,
399 FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
400 /* Normal FID sequence starts from this value, i.e. 1<<33 */
401 FID_SEQ_START = 0x200000000ULL,
402 /* sequence for local pre-defined FIDs listed in local_oid */
403 FID_SEQ_LOCAL_FILE = 0x200000001ULL,
404 FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
405 /* sequence is used for local named objects FIDs generated
406 * by local_object_storage library
408 FID_SEQ_LOCAL_NAME = 0x200000003ULL,
409 /* Because the current FLD will only cache the fid sequence, instead
410 * of the oid, on the client side, if a FID needs to be exposed to
411 * the client side, it must be ensured that all fids under one
412 * sequence are located on one MDT.
414 FID_SEQ_SPECIAL = 0x200000004ULL,
415 FID_SEQ_QUOTA = 0x200000005ULL,
416 FID_SEQ_QUOTA_GLB = 0x200000006ULL,
417 FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
418 FID_SEQ_NORMAL = 0x200000400ULL,
419 FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
422 #define OBIF_OID_MAX_BITS 32
423 #define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
424 #define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
425 #define IDIF_OID_MAX_BITS 48
426 #define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
427 #define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)
429 /** OID for FID_SEQ_SPECIAL */
431 /* Big Filesystem Lock to serialize rename operations */
432 FID_OID_SPECIAL_BFL = 1UL,
435 /** OID for FID_SEQ_DOT_LUSTRE */
436 enum dot_lustre_oid {
437 FID_OID_DOT_LUSTRE = 1UL,
438 FID_OID_DOT_LUSTRE_OBF = 2UL,
441 static inline bool fid_seq_is_mdt0(__u64 seq)
443 return (seq == FID_SEQ_OST_MDT0);
446 static inline bool fid_seq_is_mdt(__u64 seq)
448 return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
451 static inline bool fid_seq_is_echo(__u64 seq)
453 return (seq == FID_SEQ_ECHO);
456 static inline bool fid_is_echo(const struct lu_fid *fid)
458 return fid_seq_is_echo(fid_seq(fid));
461 static inline bool fid_seq_is_llog(__u64 seq)
463 return (seq == FID_SEQ_LLOG);
466 static inline bool fid_is_llog(const struct lu_fid *fid)
468 /* file with OID == 0 is not llog but contains last oid */
469 return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
472 static inline bool fid_seq_is_rsvd(__u64 seq)
474 return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
477 static inline bool fid_seq_is_special(__u64 seq)
479 return seq == FID_SEQ_SPECIAL;
482 static inline bool fid_seq_is_local_file(__u64 seq)
484 return seq == FID_SEQ_LOCAL_FILE ||
485 seq == FID_SEQ_LOCAL_NAME;
488 static inline bool fid_seq_is_root(__u64 seq)
490 return seq == FID_SEQ_ROOT;
493 static inline bool fid_seq_is_dot(__u64 seq)
495 return seq == FID_SEQ_DOT_LUSTRE;
498 static inline bool fid_seq_is_default(__u64 seq)
500 return seq == FID_SEQ_LOV_DEFAULT;
503 static inline bool fid_is_mdt0(const struct lu_fid *fid)
505 return fid_seq_is_mdt0(fid_seq(fid));
508 static inline void lu_root_fid(struct lu_fid *fid)
510 fid->f_seq = FID_SEQ_ROOT;
516 * Check if a fid is igif or not.
517 * \param fid the fid to be tested.
518 * \return true if the fid is a igif; otherwise false.
520 static inline bool fid_seq_is_igif(__u64 seq)
522 return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
525 static inline bool fid_is_igif(const struct lu_fid *fid)
527 return fid_seq_is_igif(fid_seq(fid));
531 * Check if a fid is idif or not.
532 * \param fid the fid to be tested.
533 * \return true if the fid is a idif; otherwise false.
535 static inline bool fid_seq_is_idif(__u64 seq)
537 return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
540 static inline bool fid_is_idif(const struct lu_fid *fid)
542 return fid_seq_is_idif(fid_seq(fid));
545 static inline bool fid_is_local_file(const struct lu_fid *fid)
547 return fid_seq_is_local_file(fid_seq(fid));
550 static inline bool fid_seq_is_norm(__u64 seq)
552 return (seq >= FID_SEQ_NORMAL);
555 static inline bool fid_is_norm(const struct lu_fid *fid)
557 return fid_seq_is_norm(fid_seq(fid));
560 /* convert an OST objid into an IDIF FID SEQ number */
561 static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
563 return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
566 /* convert a packed IDIF FID into an OST objid */
567 static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
569 return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
572 /* extract ost index from IDIF FID */
573 static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
575 return (fid_seq(fid) >> 16) & 0xffff;
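/*
 * Worked example (illustration only) of the IDIF packing above: OST object
 * id 0x123456789 on ost_idx 5 packs as
 *
 *	f_seq = fid_idif_seq(0x123456789, 5)
 *	      = 0x100000000 | (5 << 16) | 0x1 = 0x100050001
 *	f_oid = 0x23456789 (the low 32 bits of the object id)
 *
 * fid_idif_ost_idx() then recovers 5 from f_seq, and
 * fid_idif_id(0x100050001, 0x23456789, 0) recovers 0x123456789.
 */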
578 /* extract OST sequence (group) from a wire ost_id (id/seq) pair */
579 static inline __u64 ostid_seq(const struct ost_id *ostid)
581 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
582 return FID_SEQ_OST_MDT0;
584 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
585 return FID_SEQ_LOV_DEFAULT;
587 if (fid_is_idif(&ostid->oi_fid))
588 return FID_SEQ_OST_MDT0;
590 return fid_seq(&ostid->oi_fid);
593 /* extract OST objid from a wire ost_id (id/seq) pair */
594 static inline __u64 ostid_id(const struct ost_id *ostid)
596 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
597 return ostid->oi.oi_id & IDIF_OID_MASK;
599 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
600 return ostid->oi.oi_id;
602 if (fid_is_idif(&ostid->oi_fid))
603 return fid_idif_id(fid_seq(&ostid->oi_fid),
604 fid_oid(&ostid->oi_fid), 0);
606 return fid_oid(&ostid->oi_fid);
609 static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
611 if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
614 oi->oi_fid.f_seq = seq;
615 /* Note: if f_oid + f_ver is zero, we need to initialize it
616 * to 1; otherwise ostid_seq will treat this
617 * as an old ostid (oi_seq == 0)
619 if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
620 oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
624 static inline void ostid_set_seq_mdt0(struct ost_id *oi)
626 ostid_set_seq(oi, FID_SEQ_OST_MDT0);
629 static inline void ostid_set_seq_echo(struct ost_id *oi)
631 ostid_set_seq(oi, FID_SEQ_ECHO);
634 static inline void ostid_set_seq_llog(struct ost_id *oi)
636 ostid_set_seq(oi, FID_SEQ_LLOG);
640 * Note: we need to check oi_seq to decide where to set oi_id,
641 * so oi_seq should always be set ahead of oi_id.
643 static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
645 if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
646 if (oid >= IDIF_MAX_OID) {
647 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
651 } else if (fid_is_idif(&oi->oi_fid)) {
652 if (oid >= IDIF_MAX_OID) {
653 CERROR("Bad %llu to set "DOSTID"\n",
657 oi->oi_fid.f_seq = fid_idif_seq(oid,
658 fid_idif_ost_idx(&oi->oi_fid));
659 oi->oi_fid.f_oid = oid;
660 oi->oi_fid.f_ver = oid >> 48;
662 if (oid >= OBIF_MAX_OID) {
663 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
666 oi->oi_fid.f_oid = oid;
670 static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
672 if (unlikely(fid_seq_is_igif(fid->f_seq))) {
673 CERROR("bad IGIF, "DFID"\n", PFID(fid));
677 if (fid_is_idif(fid)) {
678 if (oid >= IDIF_MAX_OID) {
679 CERROR("Too large OID %#llx to set IDIF "DFID"\n",
680 (unsigned long long)oid, PFID(fid));
683 fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
685 fid->f_ver = oid >> 48;
687 if (oid >= OBIF_MAX_OID) {
688 CERROR("Too large OID %#llx to set REG "DFID"\n",
689 (unsigned long long)oid, PFID(fid));
698 * Unpack an OST object id/seq (group) into a FID. This is needed for
699 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
700 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
701 * be passed through unchanged. Only legacy OST objects in "group 0"
702 * will be mapped into the IDIF namespace so that they can fit into the
703 * struct lu_fid fields without loss. For reference see:
704 * http://wiki.old.lustre.org/index.php/Architecture_-_Interoperability_fids_zfs
706 static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
709 __u64 seq = ostid_seq(ostid);
711 if (ost_idx > 0xffff) {
712 CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
717 if (fid_seq_is_mdt0(seq)) {
718 __u64 oid = ostid_id(ostid);
720 /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
721 * that we map into the IDIF namespace. It allows up to 2^48
722 * objects per OST, as this is the object namespace that has
723 * been in production for years. This can handle create rates
724 * of 1M objects/s/OST for 9 years, or combinations thereof.
726 if (oid >= IDIF_MAX_OID) {
727 CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
728 POSTID(ostid), ost_idx);
731 fid->f_seq = fid_idif_seq(oid, ost_idx);
732 /* truncate to 32 bits by assignment */
734 /* in theory, not currently used */
735 fid->f_ver = oid >> 48;
736 } else if (likely(!fid_seq_is_default(seq))) {
737 /* This is either an IDIF object, which identifies objects across
738 * all OSTs, or a regular FID. The IDIF namespace maps legacy
739 * OST objects into the FID namespace. In both cases, we just
740 * pass the FID through, no conversion needed.
742 if (ostid->oi_fid.f_ver != 0) {
743 CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
744 POSTID(ostid), ost_idx);
747 *fid = ostid->oi_fid;
753 /* pack any OST FID into an ostid (id/seq) for the wire/disk */
754 static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
756 if (unlikely(fid_seq_is_igif(fid->f_seq))) {
757 CERROR("bad IGIF, "DFID"\n", PFID(fid));
761 if (fid_is_idif(fid)) {
762 ostid_set_seq_mdt0(ostid);
763 ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
766 ostid->oi_fid = *fid;
772 /* Check whether the fid is for LAST_ID */
773 static inline bool fid_is_last_id(const struct lu_fid *fid)
775 return (fid_oid(fid) == 0);
779 * Get inode number from a igif.
780 * \param fid a igif to get inode number from.
781 * \return inode number for the igif.
783 static inline ino_t lu_igif_ino(const struct lu_fid *fid)
788 void lustre_swab_ost_id(struct ost_id *oid);
791 * Get inode generation from a igif.
792 * \param fid a igif to get inode generation from.
793 * \return inode generation for the igif.
795 static inline __u32 lu_igif_gen(const struct lu_fid *fid)
801 * Build igif from the inode number/generation.
803 static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
811 * Fids are transmitted across the network (in the sender's byte ordering),
812 * and stored on disk in big-endian order.
814 static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
816 dst->f_seq = cpu_to_le64(fid_seq(src));
817 dst->f_oid = cpu_to_le32(fid_oid(src));
818 dst->f_ver = cpu_to_le32(fid_ver(src));
821 static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
823 dst->f_seq = le64_to_cpu(fid_seq(src));
824 dst->f_oid = le32_to_cpu(fid_oid(src));
825 dst->f_ver = le32_to_cpu(fid_ver(src));
828 static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
830 dst->f_seq = cpu_to_be64(fid_seq(src));
831 dst->f_oid = cpu_to_be32(fid_oid(src));
832 dst->f_ver = cpu_to_be32(fid_ver(src));
835 static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
837 dst->f_seq = be64_to_cpu(fid_seq(src));
838 dst->f_oid = be32_to_cpu(fid_oid(src));
839 dst->f_ver = be32_to_cpu(fid_ver(src));
842 static inline bool fid_is_sane(const struct lu_fid *fid)
845 ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
846 fid_is_igif(fid) || fid_is_idif(fid) ||
847 fid_seq_is_rsvd(fid_seq(fid)));
850 void lustre_swab_lu_fid(struct lu_fid *fid);
851 void lustre_swab_lu_seq_range(struct lu_seq_range *range);
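/*
 * A minimal sketch of what an in-place FID swabber does. The real
 * lustre_swab_lu_fid() is implemented in ptlrpc/pack_generic.c (see the
 * note at the top of this file); this is only an illustration using the
 * kernel's generic __swab helpers:
 *
 *	fid->f_seq = __swab64(fid->f_seq);
 *	fid->f_oid = __swab32(fid->f_oid);
 *	fid->f_ver = __swab32(fid->f_ver);
 */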
853 static inline bool lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
855 return memcmp(f0, f1, sizeof(*f0)) == 0;
858 #define __diff_normalize(val0, val1) \
860 typeof(val0) __val0 = (val0); \
861 typeof(val1) __val1 = (val1); \
863 (__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1); \
866 static inline int lu_fid_cmp(const struct lu_fid *f0,
867 const struct lu_fid *f1)
870 __diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
871 __diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
872 __diff_normalize(fid_ver(f0), fid_ver(f1));
875 static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
876 struct ost_id *dst_oi)
878 if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
879 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
880 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
882 fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
886 static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
887 struct ost_id *dst_oi)
889 if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
890 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
891 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
893 fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
899 /** \defgroup lu_dir lu_dir
904 * Enumeration of possible directory entry attributes.
906 * Attributes follow the directory entry header in the order they appear in this enumeration.
909 enum lu_dirent_attrs {
912 LUDA_64BITHASH = 0x0004,
916 * Layout of readdir pages, as transmitted on wire.
919 /** valid if LUDA_FID is set. */
920 struct lu_fid lde_fid;
921 /** a unique entry identifier: a hash or an offset. */
923 /** total record length, including all attributes. */
927 /** optional variable size attributes following this entry.
928 * taken from enum lu_dirent_attrs.
931 /** name is followed by the attributes indicated in ->lde_attrs, in
932 * their natural order. After the last attribute, padding bytes are
933 * added to make ->lde_reclen a multiple of 8.
939 * Definitions of optional directory entry attributes formats.
941 * Individual attributes do not have their length encoded in a generic way. It
942 * is assumed that the consumer of an attribute knows its format. This means that
943 * it is impossible to skip over an unknown attribute, except by skipping over all
944 * remaining attributes (by using ->lde_reclen), which is not too
945 * constraining, because new server versions will append new attributes at
946 * the end of an entry.
950 * Fid directory attribute: a fid of an object referenced by the entry. This
951 * will almost always be requested by the client and supplied by the server.
953 * Aligned to 8 bytes.
955 /* To have compatibility with 1.8, let's have the fid in the lu_dirent struct. */
960 * Aligned to 2 bytes.
971 #define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
974 #define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
978 __u64 ldp_hash_start;
982 struct lu_dirent ldp_entries[0];
985 enum lu_dirpage_flags {
987 * dirpage contains no entry.
991 * last entry's lde_hash equals ldp_hash_end.
996 static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
998 if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
1001 return dp->ldp_entries;
1004 static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
1006 struct lu_dirent *next;
1008 if (le16_to_cpu(ent->lde_reclen) != 0)
1009 next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
1016 static inline size_t lu_dirent_calc_size(size_t namelen, __u16 attr)
1020 if (attr & LUDA_TYPE) {
1021 const size_t align = sizeof(struct luda_type) - 1;
1023 size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
1024 size += sizeof(struct luda_type);
1026 size = sizeof(struct lu_dirent) + namelen;
1029 return (size + 7) & ~7;
1032 #define MDS_DIR_END_OFF 0xfffffffffffffffeULL
1035 * MDS_READPAGE page size
1037 * This is the directory page size packed in MDS_READPAGE RPC.
1038 * It differs from PAGE_SIZE because the client needs to
1039 * access the struct lu_dirpage header packed at the beginning of
1040 * the "page", and without this there isn't any way to find where the
1041 * lu_dirpage header is if the client and server PAGE_SIZE differ.
1043 #define LU_PAGE_SHIFT 12
1044 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
1045 #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
1047 #define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
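/*
 * Example (illustration only): on a client with 64 KiB pages (PAGE_SHIFT
 * == 16), LU_PAGE_COUNT = 1 << (16 - 12) = 16, i.e. each kernel page holds
 * 16 lu_dirpage-sized chunks of MDS_READPAGE data.
 */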
1051 struct lustre_handle {
1055 #define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
1057 static inline bool lustre_handle_is_used(const struct lustre_handle *lh)
1059 return lh->cookie != 0ull;
1062 static inline bool lustre_handle_equal(const struct lustre_handle *lh1,
1063 const struct lustre_handle *lh2)
1065 return lh1->cookie == lh2->cookie;
1068 static inline void lustre_handle_copy(struct lustre_handle *tgt,
1069 const struct lustre_handle *src)
1071 tgt->cookie = src->cookie;
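/*
 * Minimal usage sketch (illustration only): handles are tested, compared
 * and copied purely by their opaque cookie.
 *
 *	if (lustre_handle_is_used(&lockh) &&
 *	    lustre_handle_equal(&lockh, &remoteh))
 *		lustre_handle_copy(&savedh, &lockh);
 */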
1074 /* flags for lm_flags */
1075 #define MSGHDR_AT_SUPPORT 0x1
1076 #define MSGHDR_CKSUM_INCOMPAT18 0x2
1078 #define lustre_msg lustre_msg_v2
1079 /* we depend on this structure to be 8-byte aligned */
1080 /* this type is only endian-adjusted in lustre_unpack_msg() */
1081 struct lustre_msg_v2 {
1090 __u32 lm_buflens[0];
1093 /* without gss, ptlrpc_body is put at the first buffer. */
1094 #define PTLRPC_NUM_VERSIONS 4
1096 struct ptlrpc_body_v3 {
1097 struct lustre_handle pb_handle;
1104 __u64 pb_last_committed;
1109 __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
1110 __u32 pb_service_time; /* for rep, actual service time */
1113 /* VBR: pre-versions */
1114 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1115 /* padding for future needs */
1116 __u64 pb_padding[4];
1117 char pb_jobid[LUSTRE_JOBID_SIZE];
1120 #define ptlrpc_body ptlrpc_body_v3
1122 struct ptlrpc_body_v2 {
1123 struct lustre_handle pb_handle;
1130 __u64 pb_last_committed;
1135 __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
1136 __u32 pb_service_time; /* for rep, actual service time, also used for
1137 * net_latency of req
1141 /* VBR: pre-versions */
1142 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1143 /* padding for future needs */
1144 __u64 pb_padding[4];
1147 void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1149 /* message body offset for lustre_msg_v2 */
1150 /* ptlrpc body offset in all request/reply messages */
1151 #define MSG_PTLRPC_BODY_OFF 0
1153 /* normal request/reply message record offset */
1154 #define REQ_REC_OFF 1
1155 #define REPLY_REC_OFF 1
1157 /* ldlm request message body offset */
1158 #define DLM_LOCKREQ_OFF 1 /* lockreq offset */
1159 #define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */
1161 /* ldlm intent lock message body offset */
1162 #define DLM_INTENT_IT_OFF 2 /* intent lock it offset */
1163 #define DLM_INTENT_REC_OFF 3 /* intent lock record offset */
1165 /* ldlm reply message body offset */
1166 #define DLM_LOCKREPLY_OFF 1 /* lockrep offset */
1167 #define DLM_REPLY_REC_OFF 2 /* reply record offset */
1169 /** only use in req->rq_{req,rep}_swab_mask */
1170 #define MSG_PTLRPC_HEADER_OFF 31
1172 /* Flags that are operation-specific go in the top 16 bits. */
1173 #define MSG_OP_FLAG_MASK 0xffff0000
1174 #define MSG_OP_FLAG_SHIFT 16
1176 /* Flags that apply to all requests are in the bottom 16 bits */
1177 #define MSG_GEN_FLAG_MASK 0x0000ffff
1178 #define MSG_LAST_REPLAY 0x0001
1179 #define MSG_RESENT 0x0002
1180 #define MSG_REPLAY 0x0004
1181 /* #define MSG_AT_SUPPORT 0x0008
1182 * This was used in early prototypes of adaptive timeouts, and while there
1183 * shouldn't be any users of that code, there also isn't a need for using this
1184 * bit. Defer usage until at least 1.10 to avoid potential conflict.
1186 #define MSG_DELAY_REPLAY 0x0010
1187 #define MSG_VERSION_REPLAY 0x0020
1188 #define MSG_REQ_REPLAY_DONE 0x0040
1189 #define MSG_LOCK_REPLAY_DONE 0x0080
1192 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
1195 #define MSG_CONNECT_RECOVERING 0x00000001
1196 #define MSG_CONNECT_RECONNECT 0x00000002
1197 #define MSG_CONNECT_REPLAYABLE 0x00000004
1198 /*#define MSG_CONNECT_PEER 0x8 */
1199 #define MSG_CONNECT_LIBCLIENT 0x00000010
1200 #define MSG_CONNECT_INITIAL 0x00000020
1201 #define MSG_CONNECT_ASYNC 0x00000040
1202 #define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
1203 #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
1206 #define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
1207 #define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
1208 #define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
1209 #define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
1210 #define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
1211 #define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
1212 #define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
1213 #define OBD_CONNECT_ACL 0x80ULL /*access control lists */
1214 #define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
1215 #define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
1216 #define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
1217 #define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
1218 #define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
1219 #define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated.
1220 *We do not support JOIN FILE
1221 *anymore; reserve this flag
1222 *just to prevent this bit from being reused
1225 #define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
1226 #define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
1227 #define OBD_CONNECT_RMT_CLIENT 0x10000ULL /* Remote client, never used
1228 * in production. Removed in
1229 * 2.9. Keep this flag to
1232 #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /* Remote client by force,
1233 * never used in production.
1234 * Removed in 2.9. Keep this
1235 * flag to avoid reuse
1237 #define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
1238 #define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
1239 #define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
1240 #define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
1241 #define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
1242 #define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
1243 #define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
1244 #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
1245 #define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
1246 #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */
1247 #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
1248 #define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
1249 #define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
1250 #define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
1251 #define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
1252 #define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
1253 #define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
1254 #define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
1255 #define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
1256 #define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
1257 #define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits
1260 #define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
1261 #define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
1262 #define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
1263 #define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
1264 #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
1265 * RPC error properly
1267 #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
1268 * finer space reservation
1270 #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
1271 * policy and 2.x server
1273 #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
1274 #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
1275 #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
1276 #define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
1277 #define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
1278 #define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
1279 #define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
1280 #define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
1283 #define OBD_CONNECT_LFSCK 0x40000000000000ULL/* support online LFSCK */
1284 #define OBD_CONNECT_UNLINK_CLOSE 0x100000000000000ULL/* close file in unlink */
1285 #define OBD_CONNECT_DIR_STRIPE 0x400000000000000ULL/* striped DNE dir */
1288 * Please DO NOT add flag values here before first ensuring that this same
1289 * flag value is not in use on some other branch. Please clear any such
1290 * changes with senior engineers before starting to use a new flag. Then,
1291 * submit a small patch against EVERY branch that ONLY adds the new flag,
1292 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
1293 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
1294 * can be approved and landed easily to reserve the flag for future use.
1297 /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
1298 * connection. It is a temporary bug fix for Imperative Recovery interop
1299 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
1300 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644.
1302 #define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS
1304 #define OCD_HAS_FLAG(ocd, flg) \
1305 (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
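/*
 * Minimal usage sketch (illustration only): the flg argument is the suffix
 * of an OBD_CONNECT_* flag name, e.g.
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *		(the peer advertised grant shrink support)
 */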
1307 /* Features required for this version of the client to work with server */
1308 #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
1311 /* This structure is used for both request and reply.
1313 * If we eventually have separate connect data for different types, which we
1314 * almost certainly will, then perhaps we stick a union in here.
1316 struct obd_connect_data_v1 {
1317 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1318 __u32 ocd_version; /* lustre release version number */
1319 __u32 ocd_grant; /* initial cache grant amount (bytes) */
1320 __u32 ocd_index; /* LOV index to connect to */
1321 __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
1322 __u64 ocd_ibits_known; /* inode bits this client understands */
1323 __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
1324 __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
1325 __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
1326 __u32 ocd_unused; /* also fix lustre_swab_connect */
1327 __u64 ocd_transno; /* first transno from client to be replayed */
1328 __u32 ocd_group; /* MDS group on OST */
1329 __u32 ocd_cksum_types; /* supported checksum algorithms */
1330 __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
1331 __u32 ocd_instance; /* also fix lustre_swab_connect */
1332 __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
1335 struct obd_connect_data {
1336 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1337 __u32 ocd_version; /* lustre release version number */
1338 __u32 ocd_grant; /* initial cache grant amount (bytes) */
1339 __u32 ocd_index; /* LOV index to connect to */
1340 __u32 ocd_brw_size; /* Maximum BRW size in bytes */
1341 __u64 ocd_ibits_known; /* inode bits this client understands */
1342 __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
1343 __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
1344 __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
1345 __u32 ocd_unused; /* also fix lustre_swab_connect */
1346 __u64 ocd_transno; /* first transno from client to be replayed */
1347 __u32 ocd_group; /* MDS group on OST */
1348 __u32 ocd_cksum_types; /* supported checksum algorithms */
1349 __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
1350 __u32 ocd_instance; /* instance # of this target */
1351 __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
1352 /* Fields after ocd_maxbytes are only accessible by the receiver
1353 * if the corresponding flag in ocd_connect_flags is set. Accessing
1354 * any field after ocd_maxbytes on the receiver without a valid flag
1355 * may result in an out-of-bounds memory access and a kernel oops.
1357 __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */
1358 __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
1359 __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
1360 __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
1361 __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
1362 __u64 padding6; /* added 2.1.0. also fix lustre_swab_connect */
1363 __u64 padding7; /* added 2.1.0. also fix lustre_swab_connect */
1364 __u64 padding8; /* added 2.1.0. also fix lustre_swab_connect */
1365 __u64 padding9; /* added 2.1.0. also fix lustre_swab_connect */
1366 __u64 paddingA; /* added 2.1.0. also fix lustre_swab_connect */
1367 __u64 paddingB; /* added 2.1.0. also fix lustre_swab_connect */
1368 __u64 paddingC; /* added 2.1.0. also fix lustre_swab_connect */
1369 __u64 paddingD; /* added 2.1.0. also fix lustre_swab_connect */
1370 __u64 paddingE; /* added 2.1.0. also fix lustre_swab_connect */
1371 __u64 paddingF; /* added 2.1.0. also fix lustre_swab_connect */
1375 * Please DO NOT use any fields here before first ensuring that this same
1376 * field is not in use on some other branch. Please clear any such changes
1377 * with senior engineers before starting to use a new field. Then, submit
1378 * a small patch against EVERY branch that ONLY adds the new field along with
1379 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
1380 * reserve the flag for future use.
1383 void lustre_swab_connect(struct obd_connect_data *ocd);
1386 * Supported checksum algorithms. Up to 32 checksum types are supported.
1387 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
1388 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
1389 * algorithm and also the OBD_FL_CKSUM* flags.
1392 OBD_CKSUM_CRC32 = 0x00000001,
1393 OBD_CKSUM_ADLER = 0x00000002,
1394 OBD_CKSUM_CRC32C = 0x00000004,
1398 * OST requests: OBDO & OBD request records
1403 OST_REPLY = 0, /* reply ? */
1419 OST_QUOTACHECK = 18,
1421 OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
1424 #define OST_FIRST_OPC OST_REPLY
1427 OBD_FL_INLINEDATA = 0x00000001,
1428 OBD_FL_OBDMDEXISTS = 0x00000002,
1429 OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
1430 OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
1431 OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
1432 OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
1433 OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
1434 OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
1435 OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
1436 OBD_FL_CREATE_CROW = 0x00000400, /* object should be created on write */
1437 OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
1438 OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
1439 OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
1440 OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
1441 OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
1442 OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
1443 OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
1444 OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
1445 * XXX: obsoleted - reserved for old
1446 * clients prior to 2.2
1448 OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
1449 OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
1450 OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
1451 OBD_FL_SHORT_IO = 0x00400000, /* short io request */
1453 /* Note that while these checksum values are currently separate bits,
1454 * in 2.x we can actually allow all values from 1-31 if we wanted.
1456 OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
1457 OBD_FL_CKSUM_CRC32C,
1459 /* mask for local-only flag, which won't be sent over network */
1460 OBD_FL_LOCAL_MASK = 0xF0000000,
1464 * All LOV EA magics should have the same postfix: if some new version of
1465 * Lustre introduces a new LOV EA magic, then after downgrading to an old
1466 * Lustre, even though the old version does not recognize such a
1467 * new magic, it can still distinguish the corrupted cases by checking
1468 * the magic's postfix.
1470 #define LOV_MAGIC_MAGIC 0x0BD0
1471 #define LOV_MAGIC_MASK 0xFFFF
1473 #define LOV_MAGIC_V1 (0x0BD10000 | LOV_MAGIC_MAGIC)
1474 #define LOV_MAGIC_JOIN_V1 (0x0BD20000 | LOV_MAGIC_MAGIC)
1475 #define LOV_MAGIC_V3 (0x0BD30000 | LOV_MAGIC_MAGIC)
1476 #define LOV_MAGIC_MIGRATE (0x0BD40000 | LOV_MAGIC_MAGIC)
1477 /* reserved for specifying OSTs */
1478 #define LOV_MAGIC_SPECIFIC (0x0BD50000 | LOV_MAGIC_MAGIC)
1479 #define LOV_MAGIC LOV_MAGIC_V1
1482 * magic for fully defined striping
1483 * the idea is that we should have different magics for striping "hints"
1484 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
1485 * lov_mds_md_v[13]). at the moment the magics are used in the wire protocol,
1486 * so we can't just change them without lengthy preparation, but we still need a
1487 * mechanism to allow LOD to differentiate hint versus ready striping.
1488 * so, at the moment we use a trick: MDT knows what to expect from a request
1489 * depending on the case (replay uses ready striping, non-replay req uses
1490 * hints), so MDT replaces the magic with the appropriate one and LOD can
1491 * easily understand what's inside -bzzz
1493 #define LOV_MAGIC_V1_DEF 0x0CD10BD0
1494 #define LOV_MAGIC_V3_DEF 0x0CD30BD0
1496 #define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
1497 #define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
1499 #define lov_ost_data lov_ost_data_v1
1500 struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
1501 struct ost_id l_ost_oi; /* OST object ID */
1502 __u32 l_ost_gen; /* generation of this l_ost_idx */
1503 __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
1506 #define lov_mds_md lov_mds_md_v1
1507 struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
1508 __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
1509 __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1510 struct ost_id lmm_oi; /* LOV object ID */
1511 __u32 lmm_stripe_size; /* size of stripe in bytes */
1512 /* lmm_stripe_count used to be __u32 */
1513 __u16 lmm_stripe_count; /* num stripes in use for this object */
1514 __u16 lmm_layout_gen; /* layout generation number */
1515 struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1519 * Sigh, because pre-2.4 uses
1520 * struct lov_mds_md_v1 {
1522 * __u64 lmm_object_id;
1523 * __u64 lmm_object_seq;
1526 * to identify the LOV(MDT) object, and lmm_object_seq will
1527 * be normal_fid, which makes it hard to combine this conversion
1528 * with ostid_to_fid(). so we will do the lmm_oi/fid conversion separately
1530 * We can tell the lmm_oi apart in the following way:
1531 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
1532 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
1533 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
1536 * But currently lmm_oi/lsm_oi does not have any "real" usages,
1537 * except for printing some information, and the user can always
1538 * get the real FID from LMA; besides, this multiple-case check might
1539 * make swabbing more complicated. So we will keep using id/seq for lmm_oi.
1542 static inline void fid_to_lmm_oi(const struct lu_fid *fid,
1545 oi->oi.oi_id = fid_oid(fid);
1546 oi->oi.oi_seq = fid_seq(fid);
1549 static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
1551 oi->oi.oi_seq = seq;
1554 static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
1559 static inline __u64 lmm_oi_id(const struct ost_id *oi)
1561 return oi->oi.oi_id;
1564 static inline __u64 lmm_oi_seq(const struct ost_id *oi)
1566 return oi->oi.oi_seq;
1569 static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
1570 const struct ost_id *src_oi)
1572 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
1573 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
1576 static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
1577 const struct ost_id *src_oi)
1579 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
1580 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
1583 /* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
1585 #define MAX_MD_SIZE \
1586 (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
1587 #define MIN_MD_SIZE \
1588 (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
1590 #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
1591 #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
1592 #define XATTR_USER_PREFIX "user."
1593 #define XATTR_TRUSTED_PREFIX "trusted."
1594 #define XATTR_SECURITY_PREFIX "security."
1595 #define XATTR_LUSTRE_PREFIX "lustre."
1597 #define XATTR_NAME_LOV "trusted.lov"
1598 #define XATTR_NAME_LMA "trusted.lma"
1599 #define XATTR_NAME_LMV "trusted.lmv"
1600 #define XATTR_NAME_DEFAULT_LMV "trusted.dmv"
1601 #define XATTR_NAME_LINK "trusted.link"
1602 #define XATTR_NAME_FID "trusted.fid"
1603 #define XATTR_NAME_VERSION "trusted.version"
1604 #define XATTR_NAME_SOM "trusted.som"
1605 #define XATTR_NAME_HSM "trusted.hsm"
1606 #define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
1608 struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
1609 __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
1610 __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1611 struct ost_id lmm_oi; /* LOV object ID */
1612 __u32 lmm_stripe_size; /* size of stripe in bytes */
1613 /* lmm_stripe_count used to be __u32 */
1614 __u16 lmm_stripe_count; /* num stripes in use for this object */
1615 __u16 lmm_layout_gen; /* layout generation number */
1616 char lmm_pool_name[LOV_MAXPOOLNAME + 1]; /* must be 32bit aligned */
1617 struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1620 static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
1622 if (lmm_magic == LOV_MAGIC_V3)
1623 return sizeof(struct lov_mds_md_v3) +
1624 stripes * sizeof(struct lov_ost_data_v1);
1626 return sizeof(struct lov_mds_md_v1) +
1627 stripes * sizeof(struct lov_ost_data_v1);
1631 lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
1633 switch (lmm_magic) {
1634 case LOV_MAGIC_V1: {
1635 struct lov_mds_md_v1 lmm;
1637 if (buf_size < sizeof(lmm))
1640 return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1642 case LOV_MAGIC_V3: {
1643 struct lov_mds_md_v3 lmm;
1645 if (buf_size < sizeof(lmm))
1648 return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1655 #define OBD_MD_FLID (0x00000001ULL) /* object ID */
1656 #define OBD_MD_FLATIME (0x00000002ULL) /* access time */
1657 #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
1658 #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
1659 #define OBD_MD_FLSIZE (0x00000010ULL) /* size */
1660 #define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
1661 #define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
1662 #define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
1663 #define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
1664 #define OBD_MD_FLUID (0x00000200ULL) /* user ID */
1665 #define OBD_MD_FLGID (0x00000400ULL) /* group ID */
1666 #define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
1667 #define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
1668 #define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
1669 /*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
1670 #define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
1671 #define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
1672 #define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
1673 #define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
1674 #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
1675 #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
1676 /*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
1677 #define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
1678 #define OBD_MD_FLGROUP (0x01000000ULL) /* group */
1679 #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
1680 #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
1681 /* ->mds if epoch opens or closes
1683 #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
1684 #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
1685 #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
1686 #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
1687 #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
1689 #define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
1690 #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
1691 #define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
1692 #define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
1694 #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
1695 #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
1696 #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
1697 #define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
1698 /* OBD_MD_FLRMTPERM (0x0000010000000000ULL) remote perm, obsolete */
1699 #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
1700 #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
1701 #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
1702 #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
1703 #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
1704 * under lock; for xattr
1705 * requests means the
1706 * client holds the lock
1708 #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
1710 /* OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) lfs lsetfacl, obsolete */
1711 /* OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) lfs lgetfacl, obsolete */
1712 /* OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) lfs rsetfacl, obsolete */
1713 /* OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) lfs rgetfacl, obsolete */
1715 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1716 #define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
1718 #define OBD_MD_DEFAULT_MEA (0x0040000000000000ULL) /* default MEA */
1720 #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1721 OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
1722 OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
1723 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1724 OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
1726 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
1728 /* don't forget obdo_fid which is way down at the bottom so it can
1729 * come after the definition of llog_cookie
1734 HSS_CLEARMASK = 0x02,
1735 HSS_ARCHIVE_ID = 0x04,
1738 struct hsm_state_set {
1740 __u32 hss_archive_id;
1742 __u64 hss_clearmask;
1745 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1746 void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1748 void lustre_swab_obd_statfs(struct obd_statfs *os);
1750 /* ost_body.data values for OST_BRW */
1752 #define OBD_BRW_READ 0x01
1753 #define OBD_BRW_WRITE 0x02
1754 #define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
1755 #define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
1756 * transfer and is not accounted in
1759 #define OBD_BRW_CHECK 0x10
1760 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
1761 #define OBD_BRW_GRANTED 0x40 /* the ost manages this */
1762 #define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
1763 #define OBD_BRW_NOQUOTA 0x100
1764 #define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
1765 #define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
1766 #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1767 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1768 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1769 #define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
1770 * that the client is running low on
1771 * space for unstable pages; asking
1772 * it to sync quickly
1775 #define OBD_OBJECT_EOF LUSTRE_EOF
1777 #define OST_MIN_PRECREATE 32
1778 #define OST_MAX_PRECREATE 20000
1781 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1782 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1783 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1784 * high 16 bits in 2.4 and later
1786 __u32 ioo_bufcnt; /* number of niobufs for this object */
1789 #define IOOBJ_MAX_BRW_BITS 16
1790 #define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1791 #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1792 #define ioobj_max_brw_set(ioo, num) \
1793 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
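/*
 * Worked example (illustration only): ioobj_max_brw_set(ioo, 16) stores 15
 * in the high 16 bits of ioo_max_brw, and ioobj_max_brw_get(ioo) then
 * returns 15 + 1 == 16; the low 16 bits stay free for the pre-2.4 o_mode
 * compatibility mentioned above.
 */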
1795 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
1797 /* multiple of 8 bytes => can array */
1798 struct niobuf_remote {
1804 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1806 /* lock value block communicated between the filter and llite */
1808 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1809 * negative, i.e. because ((MASK + rc) & MASK) != MASK.
1811 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1812 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1813 #define OST_LVB_IS_ERR(blocks) \
1814 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1815 #define OST_LVB_SET_ERR(blocks, rc) \
1816 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1817 #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
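/*
 * Illustrative sketch (not part of the wire protocol): stuffing a
 * negative return code into the LVB "blocks" field and recovering it
 * on the other side.  The rc value of -2 (-ENOENT) is only an example.
 *
 *	__u64 blocks;
 *	int rc = -2;
 *
 *	OST_LVB_SET_ERR(blocks, rc);	// blocks = 0xffbadbad80000000 - 2
 *	if (OST_LVB_IS_ERR(blocks))
 *		rc = OST_LVB_GET_ERR(blocks);	// rc == -2 again
 */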
1827 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1841 void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1844 * lquota data structures
1847 /* The lquota_id structure is a union of all the possible identifier types that
1848 * can be used with quota; this includes:
1851 * - a FID which can be used for per-directory quota in the future
1854 struct lu_fid qid_fid; /* FID for per-directory quota */
1855 __u64 qid_uid; /* user identifier */
1856 __u64 qid_gid; /* group identifier */
1859 /* quotactl management */
1860 struct obd_quotactl {
1862 __u32 qc_type; /* see Q_* flag below */
1865 struct obd_dqinfo qc_dqinfo;
1866 struct obd_dqblk qc_dqblk;
1869 void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1871 #define Q_COPY(out, in, member) (out)->member = (in)->member
1873 #define QCTL_COPY(out, in) \
1874 do { \
1875 Q_COPY(out, in, qc_cmd); \
1876 Q_COPY(out, in, qc_type); \
1877 Q_COPY(out, in, qc_id); \
1878 Q_COPY(out, in, qc_stat); \
1879 Q_COPY(out, in, qc_dqinfo); \
1880 Q_COPY(out, in, qc_dqblk); \
1881 } while (0)
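/*
 * Illustrative sketch (not part of the wire protocol): QCTL_COPY is a
 * member-by-member copy of the quotactl arguments, e.g. from a request
 * buffer into a reply buffer.  The names "oqctl" and "repoqc" are only
 * examples and are assumed to point at valid buffers.
 *
 *	struct obd_quotactl *oqctl, *repoqc;
 *
 *	QCTL_COPY(repoqc, oqctl);	// copies qc_cmd, qc_type, qc_id,
 *					// qc_stat, qc_dqinfo and qc_dqblk
 */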
1883 /* Data structures associated with the quota locks */
1885 /* Glimpse descriptor used for the index & per-ID quota locks */
1886 struct ldlm_gl_lquota_desc {
1887 union lquota_id gl_id; /* quota ID subject to the glimpse */
1888 __u64 gl_flags; /* see LQUOTA_FL* below */
1889 __u64 gl_ver; /* new index version */
1890 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1891 __u64 gl_softlimit; /* new softlimit */
1896 /* quota glimpse flags */
1897 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1899 /* LVB used with quota (global and per-ID) locks */
1901 __u64 lvb_flags; /* see LQUOTA_FL* above */
1902 __u64 lvb_id_may_rel; /* space that might be released later */
1903 __u64 lvb_id_rel; /* space released by the slave for this ID */
1904 __u64 lvb_id_qunit; /* current qunit value */
1908 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
1916 #define QUOTA_FIRST_OPC QUOTA_DQACQ
1925 MDS_GETATTR_NAME = 34,
1930 MDS_DISCONNECT = 39,
1933 MDS_PIN = 42, /* obsolete, never used in a release */
1934 MDS_UNPIN = 43, /* obsolete, never used in a release */
1936 MDS_DONE_WRITING = 45,
1938 MDS_QUOTACHECK = 47,
1941 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
1943 MDS_IS_SUBDIR = 52, /* obsolete, never used in a release */
1945 MDS_HSM_STATE_GET = 54,
1946 MDS_HSM_STATE_SET = 55,
1947 MDS_HSM_ACTION = 56,
1948 MDS_HSM_PROGRESS = 57,
1949 MDS_HSM_REQUEST = 58,
1950 MDS_HSM_CT_REGISTER = 59,
1951 MDS_HSM_CT_UNREGISTER = 60,
1952 MDS_SWAP_LAYOUTS = 61,
1956 #define MDS_FIRST_OPC MDS_GETATTR
1962 enum mdt_reint_cmd {
1975 void lustre_swab_generic_32s(__u32 *val);
1977 /* the disposition of the intent outlines what was executed */
1978 #define DISP_IT_EXECD 0x00000001
1979 #define DISP_LOOKUP_EXECD 0x00000002
1980 #define DISP_LOOKUP_NEG 0x00000004
1981 #define DISP_LOOKUP_POS 0x00000008
1982 #define DISP_OPEN_CREATE 0x00000010
1983 #define DISP_OPEN_OPEN 0x00000020
1984 #define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
1985 #define DISP_ENQ_OPEN_REF 0x00800000
1986 #define DISP_ENQ_CREATE_REF 0x01000000
1987 #define DISP_OPEN_LOCK 0x02000000
1988 #define DISP_OPEN_LEASE 0x04000000
1989 #define DISP_OPEN_STRIPE 0x08000000
1990 #define DISP_OPEN_DENY 0x10000000
1992 /* INODE LOCK PARTS */
1993 #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
1994 * was used to protect permission (mode,
1995 * owner, group etc) before 2.4.
1997 #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
1998 #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
1999 #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
2001 /* The PERM bit was added in 2.4 and is used to protect permissions (mode,
2002 * owner, group, ACL, etc.), separating the permission data from the LOOKUP lock,
2003 * because for remote directories (in DNE) these locks are granted by
2004 * different MDTs (different ldlm namespaces).
2006 * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
2007 * For a remote directory, the master MDT, where the remote directory is, will
2008 * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
2009 * will grant LOOKUP_LOCK.
2011 #define MDS_INODELOCK_PERM 0x000010
2012 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
2014 #define MDS_INODELOCK_MAXSHIFT 5
2015 /* This FULL lock is useful to take on unlink-type operations */
2016 #define MDS_INODELOCK_FULL ((1 << (MDS_INODELOCK_MAXSHIFT + 1)) - 1)
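/*
 * Illustrative sketch (not part of the wire protocol): lock bits are
 * combined with bitwise OR, and MDS_INODELOCK_FULL covers every part
 * defined above.
 *
 *	__u64 ibits = MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
 *		      MDS_INODELOCK_PERM;
 *
 *	// ibits == 0x13 and (ibits & ~MDS_INODELOCK_FULL) == 0
 */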
2018 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2019 * but was moved into name[1] along with the OID to avoid consuming the
2020 * name[2,3] fields that need to be used for the quota id (also a FID).
2023 LUSTRE_RES_ID_SEQ_OFF = 0,
2024 LUSTRE_RES_ID_VER_OID_OFF = 1,
2025 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2026 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2027 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2028 LUSTRE_RES_ID_HSH_OFF = 3
2031 #define MDS_STATUS_CONN 1
2032 #define MDS_STATUS_LOV 2
2034 /* mdt_thread_info.mti_flags. */
2036 /* The flag indicates Size-on-MDS attributes are changed. */
2037 MF_SOM_CHANGE = (1 << 0),
2038 /* Flags indicating an epoch open or close. */
2039 MF_EPOCH_OPEN = (1 << 1),
2040 MF_EPOCH_CLOSE = (1 << 2),
2041 MF_MDC_CANCEL_FID1 = (1 << 3),
2042 MF_MDC_CANCEL_FID2 = (1 << 4),
2043 MF_MDC_CANCEL_FID3 = (1 << 5),
2044 MF_MDC_CANCEL_FID4 = (1 << 6),
2045 /* There is a pending attribute update. */
2046 MF_SOM_AU = (1 << 7),
2047 /* Cancel OST locks while getting OST attributes. */
2048 MF_GETATTR_LOCK = (1 << 8),
2049 MF_GET_MDT_IDX = (1 << 9),
2052 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2054 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2056 /* These should be identical to their EXT4_*_FL counterparts; they are
2057 * redefined here only to avoid dragging in fs/ext4/ext4.h
2059 #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2060 #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2061 #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2062 #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2063 #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2065 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2066 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2067 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2068 * the S_* flags are kernel-internal values that change between kernel
2069 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2070 * See b=16526 for a full history.
2072 static inline int ll_ext_to_inode_flags(int flags)
2074 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
2075 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
2076 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
2077 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
2078 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2081 static inline int ll_inode_to_ext_flags(int iflags)
2083 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
2084 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
2085 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
2086 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
2087 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
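/*
 * Illustrative sketch (not part of the wire protocol): the two helpers
 * above are inverses on the five supported bits, so a wire flag value
 * survives a round trip through the kernel S_* representation.
 *
 *	int wire = LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL;
 *	int vfs = ll_ext_to_inode_flags(wire);	// S_APPEND | S_NOATIME
 *
 *	// ll_inode_to_ext_flags(vfs) == wire
 */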
2090 /* 64 possible states */
2091 enum md_transient_state {
2092 MS_RESTORE = (1 << 0), /* restore is running */
2096 struct lu_fid mbo_fid1;
2097 struct lu_fid mbo_fid2;
2098 struct lustre_handle mbo_handle;
2100 __u64 mbo_size; /* Offset, in the case of MDS_READPAGE */
2104 __u64 mbo_blocks; /* XID, in the case of MDS_READPAGE */
2106 __u64 mbo_t_state; /* transient file state defined in
2107 * enum md_transient_state
2108 * was "ino" until 2.4.0
2112 __u32 mbo_capability;
2118 __u32 mbo_nlink; /* #bytes to read in the case of MDS_READPAGE */
2119 __u32 mbo_unused2; /* was "generation" until 2.4.0 */
2121 __u32 mbo_eadatasize;
2123 __u32 mbo_max_mdsize;
2124 __u32 mbo_max_cookiesize;
2125 __u32 mbo_uid_h; /* high 32-bits of uid, for FUID */
2126 __u32 mbo_gid_h; /* high 32-bits of gid, for FUID */
2127 __u32 mbo_padding_5; /* also fix lustre_swab_mdt_body */
2128 __u64 mbo_padding_6;
2129 __u64 mbo_padding_7;
2130 __u64 mbo_padding_8;
2131 __u64 mbo_padding_9;
2132 __u64 mbo_padding_10;
2135 void lustre_swab_mdt_body(struct mdt_body *b);
2137 struct mdt_ioepoch {
2138 struct lustre_handle handle;
2144 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
2146 /* permissions for md_perm.mp_perm */
2148 CFS_SETUID_PERM = 0x01,
2149 CFS_SETGID_PERM = 0x02,
2150 CFS_SETGRP_PERM = 0x04,
2153 struct mdt_rec_setattr {
2163 __u32 sa_padding_1_h;
2164 struct lu_fid sa_fid;
2173 __u32 sa_attr_flags;
2175 __u32 sa_bias; /* some operation flags */
2181 void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
2184 * Attribute flags used in mdt_rec_setattr::sa_valid.
2185 * The kernel's #defines for ATTR_* should not be used over the network
2186 * since the client and MDS may run different kernels (see bug 13828).
2187 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2189 #define MDS_ATTR_MODE 0x1ULL /* = 1 */
2190 #define MDS_ATTR_UID 0x2ULL /* = 2 */
2191 #define MDS_ATTR_GID 0x4ULL /* = 4 */
2192 #define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2193 #define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2194 #define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2195 #define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2196 #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2197 #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2198 #define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change, but force the change anyway */
2199 #define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2200 #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2201 #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2202 #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2203 #define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path,
2206 #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2208 #define MDS_FMODE_CLOSED 00000000
2209 #define MDS_FMODE_EXEC 00000004
2210 /* IO Epoch is opened on a closed file. */
2211 #define MDS_FMODE_EPOCH 01000000
2212 /* IO Epoch is opened on a file truncate. */
2213 #define MDS_FMODE_TRUNC 02000000
2214 /* Size-on-MDS Attribute Update is pending. */
2215 #define MDS_FMODE_SOM 04000000
2217 #define MDS_OPEN_CREATED 00000010
2218 #define MDS_OPEN_CROSS 00000020
2220 #define MDS_OPEN_CREAT 00000100
2221 #define MDS_OPEN_EXCL 00000200
2222 #define MDS_OPEN_TRUNC 00001000
2223 #define MDS_OPEN_APPEND 00002000
2224 #define MDS_OPEN_SYNC 00010000
2225 #define MDS_OPEN_DIRECTORY 00200000
2227 #define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2228 #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2229 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2230 #define MDS_OPEN_JOIN_FILE 0400000000000 /* open for join file.
2231 * We do not support JOIN FILE
2232 * anymore; this flag is reserved
2233 * just to prevent the bit
2237 #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2238 #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2239 #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA, the objects exist */
2240 #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2241 #define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
2243 #define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
2245 #define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
2246 * delegation, succeed if it's not
2247 * being opened with conflict mode.
2249 #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
2251 #define MDS_OPEN_FL_INTERNAL (MDS_OPEN_HAS_EA | MDS_OPEN_HAS_OBJS | \
2252 MDS_OPEN_OWNEROVERRIDE | MDS_OPEN_LOCK | \
2253 MDS_OPEN_BY_FID | MDS_OPEN_LEASE | \
2257 MDS_CHECK_SPLIT = 1 << 0,
2258 MDS_CROSS_REF = 1 << 1,
2259 MDS_VTX_BYPASS = 1 << 2,
2260 MDS_PERM_BYPASS = 1 << 3,
2262 MDS_QUOTA_IGNORE = 1 << 5,
2263 MDS_CLOSE_CLEANUP = 1 << 6,
2264 MDS_KEEP_ORPHAN = 1 << 7,
2265 MDS_RECOV_OPEN = 1 << 8,
2266 MDS_DATA_MODIFIED = 1 << 9,
2267 MDS_CREATE_VOLATILE = 1 << 10,
2268 MDS_OWNEROVERRIDE = 1 << 11,
2269 MDS_HSM_RELEASE = 1 << 12,
2270 MDS_RENAME_MIGRATE = BIT(13),
2273 /* instance of mdt_reint_rec */
2274 struct mdt_rec_create {
2282 __u32 cr_suppgid1_h;
2284 __u32 cr_suppgid2_h;
2285 struct lu_fid cr_fid1;
2286 struct lu_fid cr_fid2;
2287 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2291 __u64 cr_padding_1; /* rr_blocks */
2294 /* The helpers set/get_mrc_cr_flags() are needed to access the
2295 * 64-bit cr_flags [cr_flags_l, cr_flags_h]; the split is done to
2296 * extend the cr_flags size without breaking 1.8 compatibility
2298 __u32 cr_flags_l; /* for use with open, low 32 bits */
2299 __u32 cr_flags_h; /* for use with open, high 32 bits */
2300 __u32 cr_umask; /* umask for create */
2301 __u32 cr_padding_4; /* rr_padding_4 */
2304 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2306 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
2307 mrc->cr_flags_h = (__u32)(flags >> 32);
2310 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2312 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
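/*
 * Illustrative sketch (not part of the wire protocol): open flags above
 * bit 31, such as MDS_OPEN_VOLATILE, only survive because of the high
 * word, so callers must go through the helpers above.
 *
 *	struct mdt_rec_create mrc = { 0 };
 *	__u64 flags = MDS_OPEN_CREAT | MDS_OPEN_VOLATILE;
 *
 *	set_mrc_cr_flags(&mrc, flags);
 *	// mrc.cr_flags_l holds the low 32 bits, mrc.cr_flags_h the rest,
 *	// and get_mrc_cr_flags(&mrc) == flags
 */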
2315 /* instance of mdt_reint_rec */
2316 struct mdt_rec_link {
2324 __u32 lk_suppgid1_h;
2326 __u32 lk_suppgid2_h;
2327 struct lu_fid lk_fid1;
2328 struct lu_fid lk_fid2;
2330 __u64 lk_padding_1; /* rr_atime */
2331 __u64 lk_padding_2; /* rr_ctime */
2332 __u64 lk_padding_3; /* rr_size */
2333 __u64 lk_padding_4; /* rr_blocks */
2335 __u32 lk_padding_5; /* rr_mode */
2336 __u32 lk_padding_6; /* rr_flags */
2337 __u32 lk_padding_7; /* rr_padding_2 */
2338 __u32 lk_padding_8; /* rr_padding_3 */
2339 __u32 lk_padding_9; /* rr_padding_4 */
2342 /* instance of mdt_reint_rec */
2343 struct mdt_rec_unlink {
2351 __u32 ul_suppgid1_h;
2353 __u32 ul_suppgid2_h;
2354 struct lu_fid ul_fid1;
2355 struct lu_fid ul_fid2;
2357 __u64 ul_padding_2; /* rr_atime */
2358 __u64 ul_padding_3; /* rr_ctime */
2359 __u64 ul_padding_4; /* rr_size */
2360 __u64 ul_padding_5; /* rr_blocks */
2363 __u32 ul_padding_6; /* rr_flags */
2364 __u32 ul_padding_7; /* rr_padding_2 */
2365 __u32 ul_padding_8; /* rr_padding_3 */
2366 __u32 ul_padding_9; /* rr_padding_4 */
2369 /* instance of mdt_reint_rec */
2370 struct mdt_rec_rename {
2378 __u32 rn_suppgid1_h;
2380 __u32 rn_suppgid2_h;
2381 struct lu_fid rn_fid1;
2382 struct lu_fid rn_fid2;
2384 __u64 rn_padding_1; /* rr_atime */
2385 __u64 rn_padding_2; /* rr_ctime */
2386 __u64 rn_padding_3; /* rr_size */
2387 __u64 rn_padding_4; /* rr_blocks */
2388 __u32 rn_bias; /* some operation flags */
2389 __u32 rn_mode; /* cross-ref rename has mode */
2390 __u32 rn_padding_5; /* rr_flags */
2391 __u32 rn_padding_6; /* rr_padding_2 */
2392 __u32 rn_padding_7; /* rr_padding_3 */
2393 __u32 rn_padding_8; /* rr_padding_4 */
2396 /* instance of mdt_reint_rec */
2397 struct mdt_rec_setxattr {
2405 __u32 sx_suppgid1_h;
2407 __u32 sx_suppgid2_h;
2408 struct lu_fid sx_fid;
2409 __u64 sx_padding_1; /* These three are rr_fid2 */
2414 __u64 sx_padding_5; /* rr_ctime */
2415 __u64 sx_padding_6; /* rr_size */
2416 __u64 sx_padding_7; /* rr_blocks */
2419 __u32 sx_padding_8; /* rr_flags */
2420 __u32 sx_padding_9; /* rr_padding_2 */
2421 __u32 sx_padding_10; /* rr_padding_3 */
2422 __u32 sx_padding_11; /* rr_padding_4 */
2426 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2427 * Do NOT change the size of various members, otherwise the value
2428 * will be broken in lustre_swab_mdt_rec_reint().
2430 * If you add new members in other mdt_reint_xxx structures and need to use the
2431 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2433 struct mdt_rec_reint {
2441 __u32 rr_suppgid1_h;
2443 __u32 rr_suppgid2_h;
2444 struct lu_fid rr_fid1;
2445 struct lu_fid rr_fid2;
2456 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2459 void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2461 /* lmv structures */
2463 __u32 ld_tgt_count; /* how many MDS's */
2464 __u32 ld_active_tgt_count; /* how many active */
2465 __u32 ld_default_stripe_count; /* how many objects are used */
2466 __u32 ld_pattern; /* default hash pattern */
2467 __u64 ld_default_hash_size;
2468 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2469 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
2470 __u32 ld_qos_maxage; /* in seconds */
2471 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2472 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2473 struct obd_uuid ld_uuid;
2476 /* LMV layout EA; it is stored in both the master and slave objects */
2477 struct lmv_mds_md_v1 {
2479 __u32 lmv_stripe_count;
2480 __u32 lmv_master_mdt_index; /* On the master object this is the
2481 * master MDT index; on a slave object
2482 * it is the stripe index of that slave
2484 __u32 lmv_hash_type; /* dir stripe policy, i.e. which
2485 * hash function is to be used.
2486 * Note: only the lower 16 bits are
2487 * being used for now. The higher 16
2488 * bits will be used to mark the object
2489 * status, for example migrating or dead.
2491 __u32 lmv_layout_version; /* Used for directory restriping */
2495 char lmv_pool_name[LOV_MAXPOOLNAME + 1];/* pool name */
2496 struct lu_fid lmv_stripe_fids[0]; /* FIDs for each stripe */
2499 #define LMV_MAGIC_V1 0x0CD20CD0 /* normal stripe lmv magic */
2500 #define LMV_MAGIC LMV_MAGIC_V1
2502 /* #define LMV_USER_MAGIC 0x0CD30CD0 */
2503 #define LMV_MAGIC_STRIPE 0x0CD40CD0 /* magic for dir sub_stripe */
2506 * Right now only the lower part (bits 0-15) of lmv_hash_type is being used,
2507 * and the higher part will hold the flags indicating the status of the
2508 * object, for example that it is being migrated. The hash function
2509 * might be interpreted differently depending on the flags.
2511 #define LMV_HASH_TYPE_MASK 0x0000ffff
2513 #define LMV_HASH_FLAG_MIGRATION 0x80000000
2514 #define LMV_HASH_FLAG_DEAD 0x40000000
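/*
 * Illustrative sketch (not part of the wire protocol): splitting
 * lmv_hash_type into the hash function and the status flags ("lmm1" is
 * an example pointer to a little-endian struct lmv_mds_md_v1).
 *
 *	__u32 hash_type = le32_to_cpu(lmm1->lmv_hash_type);
 *	__u32 hash_func = hash_type & LMV_HASH_TYPE_MASK;
 *	int migrating = !!(hash_type & LMV_HASH_FLAG_MIGRATION);
 */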
2517 * The FNV-1a hash algorithm is as follows:
2518 * hash = FNV_offset_basis
2519 * for each octet_of_data to be hashed
2520 * hash = hash XOR octet_of_data
2521 * hash = hash × FNV_prime
2523 * http://en.wikipedia.org/wiki/Fowler–Noll–Vo_hash_function#FNV-1a_hash
2525 * http://www.isthe.com/chongo/tech/comp/fnv/index.html#FNV-reference-source
2526 * FNV_prime is 2^40 + 2^8 + 0xb3 = 0x100000001b3ULL
2528 #define LUSTRE_FNV_1A_64_PRIME 0x100000001b3ULL
2529 #define LUSTRE_FNV_1A_64_OFFSET_BIAS 0xcbf29ce484222325ULL
2530 static inline __u64 lustre_hash_fnv_1a_64(const void *buf, size_t size)
2532 __u64 hash = LUSTRE_FNV_1A_64_OFFSET_BIAS;
2533 const unsigned char *p = buf;
2534 size_t i;
2536 for (i = 0; i < size; i++) {
2537 hash ^= p[i];
2538 hash *= LUSTRE_FNV_1A_64_PRIME;
2539 }
2541 return hash;
2542 }
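/*
 * Illustrative sketch (not part of the wire protocol): hashing a name
 * to pick one stripe of an N-stripe directory.  "name", "namelen" and
 * "stripe_count" are example inputs, and the simple modulo placement is
 * only one possible policy.
 *
 *	__u64 hash = lustre_hash_fnv_1a_64(name, namelen);
 *	unsigned int stripe = hash % stripe_count;
 */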
2546 struct lmv_mds_md_v1 lmv_md_v1;
2547 struct lmv_user_md lmv_user_md;
2550 void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
2552 static inline ssize_t lmv_mds_md_size(int stripe_count, unsigned int lmm_magic)
2554 ssize_t len = -EINVAL;
2556 switch (lmm_magic) {
2557 case LMV_MAGIC_V1: {
2558 struct lmv_mds_md_v1 *lmm1;
2560 len = sizeof(*lmm1);
2561 len += stripe_count * sizeof(lmm1->lmv_stripe_fids[0]);
2569 static inline int lmv_mds_md_stripe_count_get(const union lmv_mds_md *lmm)
2571 switch (le32_to_cpu(lmm->lmv_magic)) {
2573 return le32_to_cpu(lmm->lmv_md_v1.lmv_stripe_count);
2574 case LMV_USER_MAGIC:
2575 return le32_to_cpu(lmm->lmv_user_md.lum_stripe_count);
2581 static inline int lmv_mds_md_stripe_count_set(union lmv_mds_md *lmm,
2582 unsigned int stripe_count)
2586 switch (le32_to_cpu(lmm->lmv_magic)) {
2588 lmm->lmv_md_v1.lmv_stripe_count = cpu_to_le32(stripe_count);
2590 case LMV_USER_MAGIC:
2591 lmm->lmv_user_md.lum_stripe_count = cpu_to_le32(stripe_count);
2604 FLD_FIRST_OPC = FLD_QUERY
2610 SEQ_FIRST_OPC = SEQ_QUERY
2614 SEQ_ALLOC_SUPER = 0,
2625 * LOV data structures
2628 #define LOV_MAX_UUID_BUFFER_SIZE 8192
2629 /* The size of the buffer the lov/mdc reserves for the
2630 * array of UUIDs returned by the MDS. With the current
2631 * protocol, this will limit the max number of OSTs per LOV
2634 #define LOV_DESC_MAGIC 0xB0CCDE5C
2635 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2636 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2638 /* LOV settings descriptor (should only contain static info) */
2640 __u32 ld_tgt_count; /* how many OBD's */
2641 __u32 ld_active_tgt_count; /* how many active */
2642 __u32 ld_default_stripe_count; /* how many objects are used */
2643 __u32 ld_pattern; /* default PATTERN_RAID0 */
2644 __u64 ld_default_stripe_size; /* in bytes */
2645 __u64 ld_default_stripe_offset; /* in bytes */
2646 __u32 ld_padding_0; /* unused */
2647 __u32 ld_qos_maxage; /* in seconds */
2648 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2649 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2650 struct obd_uuid ld_uuid;
2653 #define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2655 void lustre_swab_lov_desc(struct lov_desc *ld);
2660 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2665 LDLM_BL_CALLBACK = 104,
2666 LDLM_CP_CALLBACK = 105,
2667 LDLM_GL_CALLBACK = 106,
2668 LDLM_SET_INFO = 107,
2671 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2673 #define RES_NAME_SIZE 4
2674 struct ldlm_res_id {
2675 __u64 name[RES_NAME_SIZE];
2678 #define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
2679 #define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
2680 (res)->lr_name.name[2], (res)->lr_name.name[3]
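/*
 * Illustrative sketch (not part of the wire protocol): DLDLMRES and
 * PLDLMRES are meant to be used together as a format string/argument
 * pair ("res" is an example pointer to an object with an lr_name member
 * of type struct ldlm_res_id, e.g. a resource).
 *
 *	printk("resource: "DLDLMRES"\n", PLDLMRES(res));
 */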
2682 static inline bool ldlm_res_eq(const struct ldlm_res_id *res0,
2683 const struct ldlm_res_id *res1)
2685 return !memcmp(res0, res1, sizeof(*res0));
2702 #define LCK_MODE_NUM 8
2712 #define LDLM_MIN_TYPE LDLM_PLAIN
2714 struct ldlm_extent {
2720 static inline int ldlm_extent_overlap(const struct ldlm_extent *ex1,
2721 const struct ldlm_extent *ex2)
2723 return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2726 /* check if @ex1 contains @ex2 */
2727 static inline int ldlm_extent_contain(const struct ldlm_extent *ex1,
2728 const struct ldlm_extent *ex2)
2730 return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
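/*
 * Illustrative sketch (not part of the wire protocol): extents are
 * inclusive [start, end] ranges, so two adjacent 4KiB pages neither
 * overlap nor contain one another.
 *
 *	struct ldlm_extent a = { .start = 0, .end = 4095 };
 *	struct ldlm_extent b = { .start = 4096, .end = 8191 };
 *
 *	// ldlm_extent_overlap(&a, &b) == 0
 *	// ldlm_extent_contain(&a, &b) == 0
 */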
2733 struct ldlm_inodebits {
2737 struct ldlm_flock_wire {
2745 /* It's important that the fields of the ldlm_extent structure match
2746 * the first fields of the ldlm_flock structure because there is only
2747 * one ldlm_swab routine to process the ldlm_policy_data_t union. If
2748 * this ever changes we will need to swab the union differently based
2749 * on the resource type.
2753 struct ldlm_extent l_extent;
2754 struct ldlm_flock_wire l_flock;
2755 struct ldlm_inodebits l_inodebits;
2756 } ldlm_wire_policy_data_t;
2758 union ldlm_gl_desc {
2759 struct ldlm_gl_lquota_desc lquota_desc;
2762 void lustre_swab_gl_desc(union ldlm_gl_desc *);
2764 struct ldlm_intent {
2768 void lustre_swab_ldlm_intent(struct ldlm_intent *i);
2770 struct ldlm_resource_desc {
2771 enum ldlm_type lr_type;
2772 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
2773 struct ldlm_res_id lr_name;
2776 struct ldlm_lock_desc {
2777 struct ldlm_resource_desc l_resource;
2778 enum ldlm_mode l_req_mode;
2779 enum ldlm_mode l_granted_mode;
2780 ldlm_wire_policy_data_t l_policy_data;
2783 #define LDLM_LOCKREQ_HANDLES 2
2784 #define LDLM_ENQUEUE_CANCEL_OFF 1
2786 struct ldlm_request {
2789 struct ldlm_lock_desc lock_desc;
2790 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2793 void lustre_swab_ldlm_request(struct ldlm_request *rq);
2795 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2796 * Otherwise, 2 are available.
2798 #define ldlm_request_bufsize(count, type) \
2799 ({ \
2800 int _avail = LDLM_LOCKREQ_HANDLES; \
2801 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2802 sizeof(struct ldlm_request) + \
2803 (count > _avail ? count - _avail : 0) * \
2804 sizeof(struct lustre_handle); \
2805 })
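/*
 * Illustrative sketch (not part of the wire protocol): an LDLM_ENQUEUE
 * carrying 5 cancel handles still has one free slot in the embedded
 * lock_handle[] array, so only 4 extra handles are appended.
 *
 *	int size = ldlm_request_bufsize(5, LDLM_ENQUEUE);
 *	// size == sizeof(struct ldlm_request) +
 *	//	   4 * sizeof(struct lustre_handle)
 */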
2809 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2810 struct ldlm_lock_desc lock_desc;
2811 struct lustre_handle lock_handle;
2812 __u64 lock_policy_res1;
2813 __u64 lock_policy_res2;
2816 void lustre_swab_ldlm_reply(struct ldlm_reply *r);
2818 #define ldlm_flags_to_wire(flags) ((__u32)(flags))
2819 #define ldlm_flags_from_wire(flags) ((__u64)(flags))
2822 * Opcodes for mountconf (mgs and mgc)
2827 MGS_EXCEPTION, /* node died, etc. */
2828 MGS_TARGET_REG, /* whenever target starts up */
2834 #define MGS_FIRST_OPC MGS_CONNECT
2836 #define MGS_PARAM_MAXLEN 1024
2837 #define KEY_SET_INFO "set_info"
2839 struct mgs_send_param {
2840 char mgs_param[MGS_PARAM_MAXLEN];
2843 /* We pass this info to the MGS so it can write config logs */
2844 #define MTI_NAME_MAXLEN 64
2845 #define MTI_PARAM_MAXLEN 4096
2846 #define MTI_NIDS_MAX 32
2847 struct mgs_target_info {
2848 __u32 mti_lustre_ver;
2849 __u32 mti_stripe_index;
2850 __u32 mti_config_ver;
2852 __u32 mti_nid_count;
2853 __u32 mti_instance; /* Running instance of target */
2854 char mti_fsname[MTI_NAME_MAXLEN];
2855 char mti_svname[MTI_NAME_MAXLEN];
2856 char mti_uuid[sizeof(struct obd_uuid)];
2857 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
2858 char mti_params[MTI_PARAM_MAXLEN];
2861 void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2863 struct mgs_nidtbl_entry {
2864 __u64 mne_version; /* table version of this entry */
2865 __u32 mne_instance; /* target instance # */
2866 __u32 mne_index; /* target index */
2867 __u32 mne_length; /* length of this entry, in bytes */
2868 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
2869 __u8 mne_nid_type; /* type of NID (must be zero); reserved for IPv6 */
2870 __u8 mne_nid_size; /* size of each NID, in bytes */
2871 __u8 mne_nid_count; /* # of NIDs in buffer */
2873 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
2877 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2879 struct mgs_config_body {
2880 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2881 __u64 mcb_offset; /* next index of config log to request */
2882 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2884 __u8 mcb_bits; /* bits unit size of config log */
2885 __u32 mcb_units; /* # of units for bulk transfer */
2888 void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2890 struct mgs_config_res {
2891 __u64 mcr_offset; /* index of last config log */
2892 __u64 mcr_size; /* size of the log */
2895 void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2897 /* Config marker flags (in config log) */
2898 #define CM_START 0x01
2900 #define CM_SKIP 0x04
2901 #define CM_UPGRADE146 0x08
2902 #define CM_EXCLUDE 0x10
2903 #define CM_START_SKIP (CM_START | CM_SKIP)
2906 __u32 cm_step; /* aka config version */
2908 __u32 cm_vers; /* lustre release version number */
2909 __u32 cm_padding; /* 64 bit align */
2910 __s64 cm_createtime; /* when this record was first created */
2911 __s64 cm_canceltime; /* when this record is no longer valid */
2912 char cm_tgtname[MTI_NAME_MAXLEN];
2913 char cm_comment[MTI_NAME_MAXLEN];
2916 void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
2919 * Opcodes for multiple servers.
2929 #define OBD_FIRST_OPC OBD_PING
2932 * llog contexts indices.
2934 * There is a compatibility problem with the indices below: they are not
2935 * contiguous and must keep their numbers for compatibility reasons.
2936 * See LU-5218 for details.
2939 LLOG_CONFIG_ORIG_CTXT = 0,
2940 LLOG_CONFIG_REPL_CTXT = 1,
2941 LLOG_MDS_OST_ORIG_CTXT = 2,
2942 LLOG_MDS_OST_REPL_CTXT = 3, /* kept just to avoid re-assignment */
2943 LLOG_SIZE_ORIG_CTXT = 4,
2944 LLOG_SIZE_REPL_CTXT = 5,
2945 LLOG_TEST_ORIG_CTXT = 8,
2946 LLOG_TEST_REPL_CTXT = 9, /* kept just to avoid re-assignment */
2947 LLOG_CHANGELOG_ORIG_CTXT = 12, /**< changelog generation on mdd */
2948 LLOG_CHANGELOG_REPL_CTXT = 13, /**< changelog access on clients */
2949 /* for multiple changelog consumers */
2950 LLOG_CHANGELOG_USER_ORIG_CTXT = 14,
2951 LLOG_AGENT_ORIG_CTXT = 15, /**< agent requests generation on cdt */
2955 /** Identifier for a single log object */
2957 struct ost_id lgl_oi;
2961 /** Records written to the CATALOGS list */
2962 #define CATLIST "CATALOGS"
2964 struct llog_logid lci_logid;
2970 /* Log data record types - there is no specific reason that these need to
2971 * be related to the RPC opcodes, but no reason not to (may be handy later?)
2973 #define LLOG_OP_MAGIC 0x10600000
2974 #define LLOG_OP_MASK 0xfff00000
2977 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
2978 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
2979 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
2980 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2981 REINT_UNLINK, /* obsolete after 2.5.0 */
2982 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2984 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2985 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2987 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
2988 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2989 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
2990 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
2991 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
2992 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
2993 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
2994 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
2995 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
2998 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2999 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
3001 /** Log record header - stored in little endian order.
3002 * Each record must start with this struct, end with a llog_rec_tail,
3003 * and be a multiple of 256 bits in size.
3005 struct llog_rec_hdr {
3012 struct llog_rec_tail {
3017 /* Where the data follows, just after the header */
3018 #define REC_DATA(ptr) \
3019 ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
3021 #define REC_DATA_LEN(rec) \
3022 (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
3023 sizeof(struct llog_rec_tail))
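/*
 * Illustrative sketch (not part of the wire protocol): walking from a
 * record header to its payload ("rec" is an example pointer to a
 * struct llog_rec_hdr at the start of a complete record).
 *
 *	void *payload = REC_DATA(rec);
 *	int payload_len = REC_DATA_LEN(rec);	// lrh_len minus header
 *						// and tail sizes
 */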
3025 struct llog_logid_rec {
3026 struct llog_rec_hdr lid_hdr;
3027 struct llog_logid lid_id;
3031 struct llog_rec_tail lid_tail;
3034 struct llog_unlink_rec {
3035 struct llog_rec_hdr lur_hdr;
3039 struct llog_rec_tail lur_tail;
3042 struct llog_unlink64_rec {
3043 struct llog_rec_hdr lur_hdr;
3044 struct lu_fid lur_fid;
3045 __u32 lur_count; /* to destroy the lost precreated */
3049 struct llog_rec_tail lur_tail;
3052 struct llog_setattr64_rec {
3053 struct llog_rec_hdr lsr_hdr;
3054 struct ost_id lsr_oi;
3060 struct llog_rec_tail lsr_tail;
3063 struct llog_size_change_rec {
3064 struct llog_rec_hdr lsc_hdr;
3065 struct ll_fid lsc_fid;
3070 struct llog_rec_tail lsc_tail;
3073 /* changelog llog name, needed by client replicators */
3074 #define CHANGELOG_CATALOG "changelog_catalog"
3076 struct changelog_setinfo {
3081 /** changelog record */
3082 struct llog_changelog_rec {
3083 struct llog_rec_hdr cr_hdr;
3084 struct changelog_rec cr; /**< Variable length field */
3085 struct llog_rec_tail cr_do_not_use; /**< for_sizezof_only */
3088 struct llog_changelog_user_rec {
3089 struct llog_rec_hdr cur_hdr;
3093 struct llog_rec_tail cur_tail;
3096 enum agent_req_status {
3104 static inline const char *agent_req_status2name(const enum agent_req_status ars)
3122 static inline bool agent_req_in_final_state(enum agent_req_status ars)
3124 return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
3125 (ars == ARS_CANCELED));
3128 struct llog_agent_req_rec {
3129 struct llog_rec_hdr arr_hdr; /**< record header */
3130 __u32 arr_status; /**< status of the request */
3134 __u32 arr_archive_id; /**< backend archive number */
3135 __u64 arr_flags; /**< req flags */
3136 __u64 arr_compound_id;/**< compound cookie */
3137 __u64 arr_req_create; /**< req. creation time */
3138 __u64 arr_req_change; /**< req. status change time */
3139 struct hsm_action_item arr_hai; /**< req. to the agent */
3140 struct llog_rec_tail arr_tail; /**< record tail for_sizezof_only */
3143 /* Old llog gen for compatibility */
3149 struct llog_gen_rec {
3150 struct llog_rec_hdr lgr_hdr;
3151 struct llog_gen lgr_gen;
3155 struct llog_rec_tail lgr_tail;
3158 /* On-disk header structure of each log object, stored in little endian order */
3159 #define LLOG_CHUNK_SIZE 8192
3160 #define LLOG_HEADER_SIZE (96)
3161 #define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3163 #define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3165 /* flags for the logs */
3167 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3168 LLOG_F_IS_CAT = 0x2,
3169 LLOG_F_IS_PLAIN = 0x4,
3170 LLOG_F_EXT_JOBID = BIT(3),
3172 LLOG_F_EXT_MASK = LLOG_F_EXT_JOBID,
3175 struct llog_log_hdr {
3176 struct llog_rec_hdr llh_hdr;
3177 __s64 llh_timestamp;
3179 __u32 llh_bitmap_offset;
3183 /* for a catalog the first plain slot is next to it */
3184 struct obd_uuid llh_tgtuuid;
3185 __u32 llh_reserved[LLOG_HEADER_SIZE / sizeof(__u32) - 23];
3186 __u32 llh_bitmap[LLOG_BITMAP_BYTES / sizeof(__u32)];
3187 struct llog_rec_tail llh_tail;
3190 #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3191 llh->llh_bitmap_offset - \
3192 sizeof(llh->llh_tail)) * 8)
3194 /** log cookies are used to reference a specific log file and a record
3197 struct llog_cookie {
3198 struct llog_logid lgc_lgl;
3204 /** llog protocol */
3205 enum llogd_rpc_ops {
3206 LLOG_ORIGIN_HANDLE_CREATE = 501,
3207 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3208 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3209 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3210 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3211 LLOG_ORIGIN_CONNECT = 506,
3212 LLOG_CATINFO = 507, /* deprecated */
3213 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3214 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3216 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3220 struct llog_logid lgd_logid;
3222 __u32 lgd_llh_flags;
3224 __u32 lgd_saved_index;
3226 __u64 lgd_cur_offset;
3229 struct llogd_conn_body {
3230 struct llog_gen lgdc_gen;
3231 struct llog_logid lgdc_logid;
3232 __u32 lgdc_ctxt_idx;
3235 /* Note: 64-bit types are 64-bit aligned in structure */
3237 __u64 o_valid; /* hot fields in this obdo */
3240 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3244 __u64 o_blocks; /* brw: cli sent cached bytes */
3247 /* 32-bit fields start here: keep an even number of them via padding */
3248 __u32 o_blksize; /* optimal IO blocksize */
3249 __u32 o_mode; /* brw: cli sent cache remain */
3253 __u32 o_nlink; /* brw: checksum */
3255 __u32 o_misc; /* brw: o_dropped */
3257 __u64 o_ioepoch; /* epoch in ost writes */
3258 __u32 o_stripe_idx; /* holds stripe idx */
3260 struct lustre_handle o_handle; /* brw: lock handle to prolong locks
3262 struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS
3267 __u64 o_data_version; /* getattr: sum of iversion for
3269 * brw: grant space consumed on
3270 * the client for the write
3277 #define o_dirty o_blocks
3278 #define o_undirty o_mode
3279 #define o_dropped o_misc
3280 #define o_cksum o_nlink
3281 #define o_grant_used o_data_version
3283 static inline void lustre_set_wire_obdo(const struct obd_connect_data *ocd,
3285 const struct obdo *lobdo)
3288 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3292 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3293 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3294 /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3295 * client communicates with a pre-2.4 server
3297 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3298 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3302 static inline void lustre_get_wire_obdo(const struct obd_connect_data *ocd,
3304 const struct obdo *wobdo)
3306 __u32 local_flags = 0;
3308 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3309 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3312 if (local_flags != 0) {
3313 lobdo->o_valid |= OBD_MD_FLFLAGS;
3314 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3315 lobdo->o_flags |= local_flags;
3320 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3321 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3323 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3324 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3325 lobdo->o_oi.oi_fid.f_ver = 0;
3329 /* request structure for OST's */
3334 /* Key for FIEMAP to be used in get_info calls */
3335 struct ll_fiemap_info_key {
3338 struct ll_user_fiemap fiemap;
3341 void lustre_swab_ost_body(struct ost_body *b);
3342 void lustre_swab_ost_last_id(__u64 *id);
3343 void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3345 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3346 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3347 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3349 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3352 void lustre_swab_llogd_body(struct llogd_body *d);
3353 void lustre_swab_llog_hdr(struct llog_log_hdr *h);
3354 void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
3355 void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3358 void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3360 /* Functions for dumping PTLRPC fields */
3361 void dump_rniobuf(struct niobuf_remote *rnb);
3362 void dump_ioo(struct obd_ioobj *nb);
3363 void dump_ost_body(struct ost_body *ob);
3364 void dump_rcs(__u32 *rc);
3366 /* security opcodes */
3369 SEC_CTX_INIT_CONT = 802,
3372 SEC_FIRST_OPC = SEC_CTX_INIT
3376 * capa related definitions
3378 #define CAPA_HMAC_MAX_LEN 64
3379 #define CAPA_HMAC_KEY_MAX_LEN 56
3381 /* NB take care when changing the sequence of elements in this struct,
3382 * because the offset info is used in find_capa()
3384 struct lustre_capa {
3385 struct lu_fid lc_fid; /** fid */
3386 __u64 lc_opc; /** operations allowed */
3387 __u64 lc_uid; /** file owner */
3388 __u64 lc_gid; /** file group */
3389 __u32 lc_flags; /** HMAC algorithm & flags */
3390 __u32 lc_keyid; /** key# used for the capability */
3391 __u32 lc_timeout; /** capa timeout value (sec) */
3392 /* FIXME: y2038 time_t overflow: */
3393 __u32 lc_expiry; /** expiry time (sec) */
3394 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3397 void lustre_swab_lustre_capa(struct lustre_capa *c);
3399 /** lustre_capa::lc_opc */
3401 CAPA_OPC_BODY_WRITE = 1 << 0, /**< write object data */
3402 CAPA_OPC_BODY_READ = 1 << 1, /**< read object data */
3403 CAPA_OPC_INDEX_LOOKUP = 1 << 2, /**< lookup object fid */
3404 CAPA_OPC_INDEX_INSERT = 1 << 3, /**< insert object fid */
3405 CAPA_OPC_INDEX_DELETE = 1 << 4, /**< delete object fid */
3406 CAPA_OPC_OSS_WRITE = 1 << 5, /**< write oss object data */
3407 CAPA_OPC_OSS_READ = 1 << 6, /**< read oss object data */
3408 CAPA_OPC_OSS_TRUNC = 1 << 7, /**< truncate oss object */
3409 CAPA_OPC_OSS_DESTROY = 1 << 8, /**< destroy oss object */
3410 CAPA_OPC_META_WRITE = 1 << 9, /**< write object meta data */
3411 CAPA_OPC_META_READ = 1 << 10, /**< read object meta data */
3414 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3415 #define CAPA_OPC_MDS_ONLY \
3416 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3417 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3418 #define CAPA_OPC_OSS_ONLY \
3419 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3420 CAPA_OPC_OSS_DESTROY)
3421 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3422 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
3424 struct lustre_capa_key {
3425 __u64 lk_seq; /**< mds# */
3426 __u32 lk_keyid; /**< key# */
3428 __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
3431 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3432 #define LINK_EA_MAGIC 0x11EAF1DFUL
3433 struct link_ea_header {
3436 __u64 leh_len; /* total size */
3442 /** Hardlink data is name and parent fid.
3443 * Stored in this crazy struct for maximum packing and endian-neutrality
3445 struct link_ea_entry {
3446 /** __u16 stored big-endian, unaligned */
3447 unsigned char lee_reclen[2];
3448 unsigned char lee_parent_fid[sizeof(struct lu_fid)];
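/*
 * Illustrative sketch (not part of the wire protocol): lee_reclen is a
 * 16-bit big-endian value stored one byte at a time so that entries
 * need no alignment; the record length can be recovered as below
 * ("lee" is an example pointer to a struct link_ea_entry).
 *
 *	int reclen = (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
 */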
3452 /** fid2path request/reply structure */
3453 struct getinfo_fid2path {
3454 struct lu_fid gf_fid;
3461 void lustre_swab_fid2path(struct getinfo_fid2path *gf);
3463 /** path2parent request/reply structures */
3465 struct lu_fid gp_fid; /**< parent FID */
3466 __u32 gp_linkno; /**< hardlink number */
3467 __u32 gp_name_size; /**< size of the name field */
3468 char gp_name[0]; /**< zero-terminated link name */
3472 LAYOUT_INTENT_ACCESS = 0,
3473 LAYOUT_INTENT_READ = 1,
3474 LAYOUT_INTENT_WRITE = 2,
3475 LAYOUT_INTENT_GLIMPSE = 3,
3476 LAYOUT_INTENT_TRUNC = 4,
3477 LAYOUT_INTENT_RELEASE = 5,
3478 LAYOUT_INTENT_RESTORE = 6
3481 /* enqueue layout lock with intent */
3482 struct layout_intent {
3483 __u32 li_opc; /* intent operation for enqueue, read, write etc */
3489 void lustre_swab_layout_intent(struct layout_intent *li);
3492 * On the wire version of hsm_progress structure.
3494 * Contains the userspace hsm_progress and some internal fields.
3496 struct hsm_progress_kernel {
3497 /* Field taken from struct hsm_progress */
3498 struct lu_fid hpk_fid;
3500 struct hsm_extent hpk_extent;
3502 __u16 hpk_errval; /* positive val */
3504 /* Additional fields */
3505 __u64 hpk_data_version;
3509 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3510 void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3511 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3513 void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3514 void lustre_swab_hsm_request(struct hsm_request *hr);
3516 /** layout swap request structure
3517 * fid1 and fid2 are in mdt_body
3519 struct mdc_swap_layouts {
3523 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
3526 struct lustre_handle cd_handle;
3527 struct lu_fid cd_fid;
3528 __u64 cd_data_version;
3529 __u64 cd_reserved[8];
3532 void lustre_swab_close_data(struct close_data *data);