1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (c) 2000-2005 Silicon Graphics, Inc.
4  * Copyright (c) 2013 Red Hat, Inc.
5  * All Rights Reserved.
6  */
7 #include "xfs.h"
8 #include "xfs_fs.h"
9 #include "xfs_shared.h"
10 #include "xfs_format.h"
11 #include "xfs_log_format.h"
12 #include "xfs_trans_resv.h"
13 #include "xfs_sb.h"
14 #include "xfs_mount.h"
15 #include "xfs_da_format.h"
16 #include "xfs_da_btree.h"
17 #include "xfs_inode.h"
18 #include "xfs_trans.h"
19 #include "xfs_bmap_btree.h"
20 #include "xfs_bmap.h"
21 #include "xfs_attr_sf.h"
22 #include "xfs_attr.h"
23 #include "xfs_attr_remote.h"
24 #include "xfs_attr_leaf.h"
25 #include "xfs_error.h"
26 #include "xfs_trace.h"
27 #include "xfs_buf_item.h"
28 #include "xfs_dir2.h"
29 #include "xfs_log.h"
30 #include "xfs_ag.h"
31 #include "xfs_errortag.h"
32
33
34 /*
35  * xfs_attr_leaf.c
36  *
37  * Routines to implement leaf blocks of attributes as Btrees of hashed names.
38  */
39
40 /*========================================================================
41  * Function prototypes for the kernel.
42  *========================================================================*/
43
44 /*
45  * Routines used for growing the Btree.
46  */
47 STATIC int xfs_attr3_leaf_create(struct xfs_da_args *args,
48                                  xfs_dablk_t which_block, struct xfs_buf **bpp);
49 STATIC int xfs_attr3_leaf_add_work(struct xfs_buf *leaf_buffer,
50                                    struct xfs_attr3_icleaf_hdr *ichdr,
51                                    struct xfs_da_args *args, int freemap_index);
52 STATIC void xfs_attr3_leaf_compact(struct xfs_da_args *args,
53                                    struct xfs_attr3_icleaf_hdr *ichdr,
54                                    struct xfs_buf *leaf_buffer);
55 STATIC void xfs_attr3_leaf_rebalance(xfs_da_state_t *state,
56                                                    xfs_da_state_blk_t *blk1,
57                                                    xfs_da_state_blk_t *blk2);
58 STATIC int xfs_attr3_leaf_figure_balance(xfs_da_state_t *state,
59                         xfs_da_state_blk_t *leaf_blk_1,
60                         struct xfs_attr3_icleaf_hdr *ichdr1,
61                         xfs_da_state_blk_t *leaf_blk_2,
62                         struct xfs_attr3_icleaf_hdr *ichdr2,
63                         int *number_entries_in_blk1,
64                         int *number_usedbytes_in_blk1);
65
66 /*
67  * Utility routines.
68  */
69 STATIC void xfs_attr3_leaf_moveents(struct xfs_da_args *args,
70                         struct xfs_attr_leafblock *src_leaf,
71                         struct xfs_attr3_icleaf_hdr *src_ichdr, int src_start,
72                         struct xfs_attr_leafblock *dst_leaf,
73                         struct xfs_attr3_icleaf_hdr *dst_ichdr, int dst_start,
74                         int move_count);
75 STATIC int xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index);
76
77 /*
78  * attr3 block 'firstused' conversion helpers.
79  *
80  * firstused refers to the offset of the first used byte of the nameval region
81  * of an attr leaf block. The region starts at the tail of the block and expands
82  * backwards towards the middle. As such, firstused is initialized to the block
83  * size for an empty leaf block and is reduced from there.
84  *
85  * The attr3 block size is pegged to the fsb size and the maximum fsb is 64k.
86  * The in-core firstused field is 32-bit and thus supports the maximum fsb size.
87  * The on-disk field is only 16-bit, however, and overflows at 64k. Since this
88  * only occurs at exactly 64k, we use zero as a magic on-disk value to represent
89  * the attr block size. The following helpers manage the conversion between the
90  * in-core and on-disk formats.
91  */
92
93 static void
94 xfs_attr3_leaf_firstused_from_disk(
95         struct xfs_da_geometry          *geo,
96         struct xfs_attr3_icleaf_hdr     *to,
97         struct xfs_attr_leafblock       *from)
98 {
99         struct xfs_attr3_leaf_hdr       *hdr3;
100
101         if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
102                 hdr3 = (struct xfs_attr3_leaf_hdr *) from;
103                 to->firstused = be16_to_cpu(hdr3->firstused);
104         } else {
105                 to->firstused = be16_to_cpu(from->hdr.firstused);
106         }
107
108         /*
109          * Convert from the magic fsb size value to actual blocksize. This
110          * should only occur for empty blocks when the block size overflows
111          * 16-bits.
112          */
113         if (to->firstused == XFS_ATTR3_LEAF_NULLOFF) {
114                 ASSERT(!to->count && !to->usedbytes);
115                 ASSERT(geo->blksize > USHRT_MAX);
116                 to->firstused = geo->blksize;
117         }
118 }
119
120 static void
121 xfs_attr3_leaf_firstused_to_disk(
122         struct xfs_da_geometry          *geo,
123         struct xfs_attr_leafblock       *to,
124         struct xfs_attr3_icleaf_hdr     *from)
125 {
126         struct xfs_attr3_leaf_hdr       *hdr3;
127         uint32_t                        firstused;
128
129         /* magic value should only be seen on disk */
130         ASSERT(from->firstused != XFS_ATTR3_LEAF_NULLOFF);
131
132         /*
133          * Scale down the 32-bit in-core firstused value to the 16-bit on-disk
134          * value. This only overflows at the max supported value of 64k. Use the
135          * magic on-disk value to represent block size in this case.
136          */
137         firstused = from->firstused;
138         if (firstused > USHRT_MAX) {
139                 ASSERT(from->firstused == geo->blksize);
140                 firstused = XFS_ATTR3_LEAF_NULLOFF;
141         }
142
143         if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
144                 hdr3 = (struct xfs_attr3_leaf_hdr *) to;
145                 hdr3->firstused = cpu_to_be16(firstused);
146         } else {
147                 to->hdr.firstused = cpu_to_be16(firstused);
148         }
149 }
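
/*
 * Worked example of the conversion above (illustrative only): with a 64k
 * attr block geometry an empty leaf has an in-core firstused of 65536,
 * which does not fit in the 16-bit on-disk field, so it is written as
 * XFS_ATTR3_LEAF_NULLOFF (0) and converted back to geo->blksize on read.
 * For any smaller block size the value round-trips unchanged.
 */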
150
151 void
152 xfs_attr3_leaf_hdr_from_disk(
153         struct xfs_da_geometry          *geo,
154         struct xfs_attr3_icleaf_hdr     *to,
155         struct xfs_attr_leafblock       *from)
156 {
157         int     i;
158
159         ASSERT(from->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC) ||
160                from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC));
161
162         if (from->hdr.info.magic == cpu_to_be16(XFS_ATTR3_LEAF_MAGIC)) {
163                 struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)from;
164
165                 to->forw = be32_to_cpu(hdr3->info.hdr.forw);
166                 to->back = be32_to_cpu(hdr3->info.hdr.back);
167                 to->magic = be16_to_cpu(hdr3->info.hdr.magic);
168                 to->count = be16_to_cpu(hdr3->count);
169                 to->usedbytes = be16_to_cpu(hdr3->usedbytes);
170                 xfs_attr3_leaf_firstused_from_disk(geo, to, from);
171                 to->holes = hdr3->holes;
172
173                 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
174                         to->freemap[i].base = be16_to_cpu(hdr3->freemap[i].base);
175                         to->freemap[i].size = be16_to_cpu(hdr3->freemap[i].size);
176                 }
177                 return;
178         }
179         to->forw = be32_to_cpu(from->hdr.info.forw);
180         to->back = be32_to_cpu(from->hdr.info.back);
181         to->magic = be16_to_cpu(from->hdr.info.magic);
182         to->count = be16_to_cpu(from->hdr.count);
183         to->usedbytes = be16_to_cpu(from->hdr.usedbytes);
184         xfs_attr3_leaf_firstused_from_disk(geo, to, from);
185         to->holes = from->hdr.holes;
186
187         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
188                 to->freemap[i].base = be16_to_cpu(from->hdr.freemap[i].base);
189                 to->freemap[i].size = be16_to_cpu(from->hdr.freemap[i].size);
190         }
191 }
192
193 void
194 xfs_attr3_leaf_hdr_to_disk(
195         struct xfs_da_geometry          *geo,
196         struct xfs_attr_leafblock       *to,
197         struct xfs_attr3_icleaf_hdr     *from)
198 {
199         int                             i;
200
201         ASSERT(from->magic == XFS_ATTR_LEAF_MAGIC ||
202                from->magic == XFS_ATTR3_LEAF_MAGIC);
203
204         if (from->magic == XFS_ATTR3_LEAF_MAGIC) {
205                 struct xfs_attr3_leaf_hdr *hdr3 = (struct xfs_attr3_leaf_hdr *)to;
206
207                 hdr3->info.hdr.forw = cpu_to_be32(from->forw);
208                 hdr3->info.hdr.back = cpu_to_be32(from->back);
209                 hdr3->info.hdr.magic = cpu_to_be16(from->magic);
210                 hdr3->count = cpu_to_be16(from->count);
211                 hdr3->usedbytes = cpu_to_be16(from->usedbytes);
212                 xfs_attr3_leaf_firstused_to_disk(geo, to, from);
213                 hdr3->holes = from->holes;
214                 hdr3->pad1 = 0;
215
216                 for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
217                         hdr3->freemap[i].base = cpu_to_be16(from->freemap[i].base);
218                         hdr3->freemap[i].size = cpu_to_be16(from->freemap[i].size);
219                 }
220                 return;
221         }
222         to->hdr.info.forw = cpu_to_be32(from->forw);
223         to->hdr.info.back = cpu_to_be32(from->back);
224         to->hdr.info.magic = cpu_to_be16(from->magic);
225         to->hdr.count = cpu_to_be16(from->count);
226         to->hdr.usedbytes = cpu_to_be16(from->usedbytes);
227         xfs_attr3_leaf_firstused_to_disk(geo, to, from);
228         to->hdr.holes = from->holes;
229         to->hdr.pad1 = 0;
230
231         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
232                 to->hdr.freemap[i].base = cpu_to_be16(from->freemap[i].base);
233                 to->hdr.freemap[i].size = cpu_to_be16(from->freemap[i].size);
234         }
235 }
236
237 static xfs_failaddr_t
238 xfs_attr3_leaf_verify_entry(
239         struct xfs_mount                        *mp,
240         char                                    *buf_end,
241         struct xfs_attr_leafblock               *leaf,
242         struct xfs_attr3_icleaf_hdr             *leafhdr,
243         struct xfs_attr_leaf_entry              *ent,
244         int                                     idx,
245         __u32                                   *last_hashval)
246 {
247         struct xfs_attr_leaf_name_local         *lentry;
248         struct xfs_attr_leaf_name_remote        *rentry;
249         char                                    *name_end;
250         unsigned int                            nameidx;
251         unsigned int                            namesize;
252         __u32                                   hashval;
253
254         /* hash order check */
255         hashval = be32_to_cpu(ent->hashval);
256         if (hashval < *last_hashval)
257                 return __this_address;
258         *last_hashval = hashval;
259
260         nameidx = be16_to_cpu(ent->nameidx);
261         if (nameidx < leafhdr->firstused || nameidx >= mp->m_attr_geo->blksize)
262                 return __this_address;
263
264         /*
265          * Check the name information.  The namelen fields are u8 so we can't
266          * possibly exceed the maximum name length of 255 bytes.
267          */
268         if (ent->flags & XFS_ATTR_LOCAL) {
269                 lentry = xfs_attr3_leaf_name_local(leaf, idx);
270                 namesize = xfs_attr_leaf_entsize_local(lentry->namelen,
271                                 be16_to_cpu(lentry->valuelen));
272                 name_end = (char *)lentry + namesize;
273                 if (lentry->namelen == 0)
274                         return __this_address;
275         } else {
276                 rentry = xfs_attr3_leaf_name_remote(leaf, idx);
277                 namesize = xfs_attr_leaf_entsize_remote(rentry->namelen);
278                 name_end = (char *)rentry + namesize;
279                 if (rentry->namelen == 0)
280                         return __this_address;
281                 if (!(ent->flags & XFS_ATTR_INCOMPLETE) &&
282                     rentry->valueblk == 0)
283                         return __this_address;
284         }
285
286         if (name_end > buf_end)
287                 return __this_address;
288
289         return NULL;
290 }
291
292 /*
293  * Validate an attribute leaf block.
294  *
295  * Empty leaf blocks can occur under the following circumstances:
296  *
297  * 1. setxattr adds a new extended attribute to a file;
298  * 2. The file has zero existing attributes;
299  * 3. The attribute is too large to fit in the attribute fork;
300  * 4. The attribute is small enough to fit in a leaf block;
301  * 5. A log flush occurs after committing the transaction that creates
302  *    the (empty) leaf block; and
303  * 6. The filesystem goes down after the log flush but before the new
304  *    attribute can be committed to the leaf block.
305  *
306  * Hence we need to ensure that we don't fail the validation purely
307  * because the leaf is empty.
308  */
309 static xfs_failaddr_t
310 xfs_attr3_leaf_verify(
311         struct xfs_buf                  *bp)
312 {
313         struct xfs_attr3_icleaf_hdr     ichdr;
314         struct xfs_mount                *mp = bp->b_mount;
315         struct xfs_attr_leafblock       *leaf = bp->b_addr;
316         struct xfs_attr_leaf_entry      *entries;
317         struct xfs_attr_leaf_entry      *ent;
318         char                            *buf_end;
319         uint32_t                        end;    /* must be 32bit - see below */
320         __u32                           last_hashval = 0;
321         int                             i;
322         xfs_failaddr_t                  fa;
323
324         xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf);
325
326         fa = xfs_da3_blkinfo_verify(bp, bp->b_addr);
327         if (fa)
328                 return fa;
329
330         /*
331          * firstused is the block offset of the first name info structure.
332          * Make sure it doesn't go off the block or crash into the header.
333          */
334         if (ichdr.firstused > mp->m_attr_geo->blksize)
335                 return __this_address;
336         if (ichdr.firstused < xfs_attr3_leaf_hdr_size(leaf))
337                 return __this_address;
338
339         /* Make sure the entries array doesn't crash into the name info. */
340         entries = xfs_attr3_leaf_entryp(bp->b_addr);
341         if ((char *)&entries[ichdr.count] >
342             (char *)bp->b_addr + ichdr.firstused)
343                 return __this_address;
344
345         /*
346          * NOTE: This verifier historically failed empty leaf buffers because
347          * we expect the fork to be in another format. Empty attr fork format
348          * conversions are possible during xattr set, however, and format
349          * conversion is not atomic with the xattr set that triggers it. We
350          * cannot assume leaf blocks are non-empty until that is addressed.
351         */
352         buf_end = (char *)bp->b_addr + mp->m_attr_geo->blksize;
353         for (i = 0, ent = entries; i < ichdr.count; ent++, i++) {
354                 fa = xfs_attr3_leaf_verify_entry(mp, buf_end, leaf, &ichdr,
355                                 ent, i, &last_hashval);
356                 if (fa)
357                         return fa;
358         }
359
360         /*
361          * Quickly check the freemap information.  Attribute data has to be
362          * aligned to 4-byte boundaries, and likewise for the free space.
363          *
364          * Note that for 64k block size filesystems, the freemap entries cannot
365  * overflow as they are only be16 fields. However, when checking the end
366          * pointer of the freemap, we have to be careful to detect overflows and
367          * so use uint32_t for those checks.
368          */
369         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
370                 if (ichdr.freemap[i].base > mp->m_attr_geo->blksize)
371                         return __this_address;
372                 if (ichdr.freemap[i].base & 0x3)
373                         return __this_address;
374                 if (ichdr.freemap[i].size > mp->m_attr_geo->blksize)
375                         return __this_address;
376                 if (ichdr.freemap[i].size & 0x3)
377                         return __this_address;
378
379                 /* beware of 16 bit overflows here */
380                 end = (uint32_t)ichdr.freemap[i].base + ichdr.freemap[i].size;
381                 if (end < ichdr.freemap[i].base)
382                         return __this_address;
383                 if (end > mp->m_attr_geo->blksize)
384                         return __this_address;
385         }
386
387         return NULL;
388 }
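
/*
 * Illustrative note on the freemap end check above: on a 64k block a
 * valid entry such as base 0xfff8, size 0x8 has an end of 65536, which
 * would wrap to zero in a 16-bit sum; computing "end" in a uint32_t keeps
 * the end > blksize comparison meaningful.
 */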
389
390 static void
391 xfs_attr3_leaf_write_verify(
392         struct xfs_buf  *bp)
393 {
394         struct xfs_mount        *mp = bp->b_mount;
395         struct xfs_buf_log_item *bip = bp->b_log_item;
396         struct xfs_attr3_leaf_hdr *hdr3 = bp->b_addr;
397         xfs_failaddr_t          fa;
398
399         fa = xfs_attr3_leaf_verify(bp);
400         if (fa) {
401                 xfs_verifier_error(bp, -EFSCORRUPTED, fa);
402                 return;
403         }
404
405         if (!xfs_has_crc(mp))
406                 return;
407
408         if (bip)
409                 hdr3->info.lsn = cpu_to_be64(bip->bli_item.li_lsn);
410
411         xfs_buf_update_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF);
412 }
413
414 /*
415  * leaf/node format detection on trees is sketchy, so a node read can be done on
416  * leaf level blocks when detection identifies the tree as a node format tree
417  * leaf level blocks when detection incorrectly identifies the tree as a node
418  * format tree. In this case, we need to swap the verifier to match the correct
419  */
420 static void
421 xfs_attr3_leaf_read_verify(
422         struct xfs_buf          *bp)
423 {
424         struct xfs_mount        *mp = bp->b_mount;
425         xfs_failaddr_t          fa;
426
427         if (xfs_has_crc(mp) &&
428              !xfs_buf_verify_cksum(bp, XFS_ATTR3_LEAF_CRC_OFF))
429                 xfs_verifier_error(bp, -EFSBADCRC, __this_address);
430         else {
431                 fa = xfs_attr3_leaf_verify(bp);
432                 if (fa)
433                         xfs_verifier_error(bp, -EFSCORRUPTED, fa);
434         }
435 }
436
437 const struct xfs_buf_ops xfs_attr3_leaf_buf_ops = {
438         .name = "xfs_attr3_leaf",
439         .magic16 = { cpu_to_be16(XFS_ATTR_LEAF_MAGIC),
440                      cpu_to_be16(XFS_ATTR3_LEAF_MAGIC) },
441         .verify_read = xfs_attr3_leaf_read_verify,
442         .verify_write = xfs_attr3_leaf_write_verify,
443         .verify_struct = xfs_attr3_leaf_verify,
444 };
445
446 int
447 xfs_attr3_leaf_read(
448         struct xfs_trans        *tp,
449         struct xfs_inode        *dp,
450         xfs_dablk_t             bno,
451         struct xfs_buf          **bpp)
452 {
453         int                     err;
454
455         err = xfs_da_read_buf(tp, dp, bno, 0, bpp, XFS_ATTR_FORK,
456                         &xfs_attr3_leaf_buf_ops);
457         if (!err && tp && *bpp)
458                 xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_ATTR_LEAF_BUF);
459         return err;
460 }
461
462 /*========================================================================
463  * Namespace helper routines
464  *========================================================================*/
465
466 /*
467  * If we are in log recovery, then we want the lookup to ignore the INCOMPLETE
468  * flag on disk - if there's an incomplete attr then recovery needs to tear it
469  * down. If there's no incomplete attr, then recovery needs to tear that attr
470  * down to replace it with the attr that has been logged. In this case, the
471  * INCOMPLETE flag will not be set in attr->attr_filter, but rather
472  * XFS_DA_OP_RECOVERY will be set in args->op_flags.
473  */
474 static bool
475 xfs_attr_match(
476         struct xfs_da_args      *args,
477         uint8_t                 namelen,
478         unsigned char           *name,
479         int                     flags)
480 {
481
482         if (args->namelen != namelen)
483                 return false;
484         if (memcmp(args->name, name, namelen) != 0)
485                 return false;
486
487         /* Recovery ignores the INCOMPLETE flag. */
488         if ((args->op_flags & XFS_DA_OP_RECOVERY) &&
489             args->attr_filter == (flags & XFS_ATTR_NSP_ONDISK_MASK))
490                 return true;
491
492         /* All remaining matches need to be filtered by INCOMPLETE state. */
493         if (args->attr_filter !=
494             (flags & (XFS_ATTR_NSP_ONDISK_MASK | XFS_ATTR_INCOMPLETE)))
495                 return false;
496         return true;
497 }
498
499 static int
500 xfs_attr_copy_value(
501         struct xfs_da_args      *args,
502         unsigned char           *value,
503         int                     valuelen)
504 {
505         /*
506          * No copy if all we have to do is get the length
507          */
508         if (!args->valuelen) {
509                 args->valuelen = valuelen;
510                 return 0;
511         }
512
513         /*
514          * No copy if the length of the existing buffer is too small
515          */
516         if (args->valuelen < valuelen) {
517                 args->valuelen = valuelen;
518                 return -ERANGE;
519         }
520
521         if (!args->value) {
522                 args->value = kvmalloc(valuelen, GFP_KERNEL | __GFP_NOLOCKDEP);
523                 if (!args->value)
524                         return -ENOMEM;
525         }
526         args->valuelen = valuelen;
527
528         /* remote block xattr requires IO for copy-in */
529         if (args->rmtblkno)
530                 return xfs_attr_rmtval_get(args);
531
532         /*
533          * This is to prevent a GCC warning because the remote xattr case
534          * doesn't have a value to pass in. In that case, we never reach here,
535          * but GCC can't work that out and so throws a "passing NULL to
536          * memcpy" warning.
537          */
538         if (!value)
539                 return -EINVAL;
540         memcpy(args->value, value, valuelen);
541         return 0;
542 }
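
/*
 * Calling convention implied by the checks above (summary, not new code):
 * a caller that only wants the value size passes args->valuelen == 0; a
 * caller whose buffer is too small gets -ERANGE with args->valuelen set
 * to the required size; and a NULL args->value asks this helper to
 * allocate the buffer itself.
 */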
543
544 /*========================================================================
545  * External routines when attribute fork size < XFS_LITINO(mp).
546  *========================================================================*/
547
548 /*
549  * Query whether the total requested number of attr fork bytes of extended
550  * attribute space will be able to fit inline.
551  *
552  * Returns zero if not, else the i_forkoff fork offset to be used in the
553  * literal area for attribute data once the new bytes have been added.
554  *
555  * i_forkoff must be 8 byte aligned, hence is stored as a >>3 value;
556  * special case for dev/uuid inodes, they have fixed size data forks.
557  */
558 int
559 xfs_attr_shortform_bytesfit(
560         struct xfs_inode        *dp,
561         int                     bytes)
562 {
563         struct xfs_mount        *mp = dp->i_mount;
564         int64_t                 dsize;
565         int                     minforkoff;
566         int                     maxforkoff;
567         int                     offset;
568
569         /*
570          * Check if the new size could fit at all first:
571          */
572         if (bytes > XFS_LITINO(mp))
573                 return 0;
574
575         /* rounded down */
576         offset = (XFS_LITINO(mp) - bytes) >> 3;
577
578         if (dp->i_df.if_format == XFS_DINODE_FMT_DEV) {
579                 minforkoff = roundup(sizeof(xfs_dev_t), 8) >> 3;
580                 return (offset >= minforkoff) ? minforkoff : 0;
581         }
582
583         /*
584          * If the requested number of bytes is smaller than or equal to the
585          * current attribute fork size we can always proceed.
586          *
587          * Note that if_bytes in the data fork might actually be larger than
588          * the current data fork size due to delalloc extents. In that
589          * case either the extent count will go down when they are converted
590          * to real extents, or the delalloc conversion will take care of the
591          * literal area rebalancing.
592          */
593         if (bytes <= XFS_IFORK_ASIZE(dp))
594                 return dp->i_forkoff;
595
596         /*
597          * For attr2 we can try to move the forkoff if there is space in the
598          * literal area, but for the old format we are done if there is no
599          * space in the fixed attribute fork.
600          */
601         if (!xfs_has_attr2(mp))
602                 return 0;
603
604         dsize = dp->i_df.if_bytes;
605
606         switch (dp->i_df.if_format) {
607         case XFS_DINODE_FMT_EXTENTS:
608                 /*
609                  * If there is no attr fork and the data fork is extents,
610                  * determine if creating the default attr fork will result
611                  * in the extents form migrating to btree. If so, the
612                  * minimum offset only needs to be the space required for
613                  * the btree root.
614                  */
615                 if (!dp->i_forkoff && dp->i_df.if_bytes >
616                     xfs_default_attroffset(dp))
617                         dsize = XFS_BMDR_SPACE_CALC(MINDBTPTRS);
618                 break;
619         case XFS_DINODE_FMT_BTREE:
620                 /*
621                  * If we have a data btree then keep the forkoff if we already
622                  * have one; otherwise we are adding a new attr, so set
623                  * minforkoff to where the btree root can finish so we have
624                  * plenty of room for attrs.
625                  */
626                 if (dp->i_forkoff) {
627                         if (offset < dp->i_forkoff)
628                                 return 0;
629                         return dp->i_forkoff;
630                 }
631                 dsize = XFS_BMAP_BROOT_SPACE(mp, dp->i_df.if_broot);
632                 break;
633         }
634
635         /*
636          * A data fork btree root must have space for at least
637          * MINDBTPTRS key/ptr pairs if the data fork is small or empty.
638          */
639         minforkoff = max_t(int64_t, dsize, XFS_BMDR_SPACE_CALC(MINDBTPTRS));
640         minforkoff = roundup(minforkoff, 8) >> 3;
641
642         /* attr fork btree root can have at least this many key/ptr pairs */
643         maxforkoff = XFS_LITINO(mp) - XFS_BMDR_SPACE_CALC(MINABTPTRS);
644         maxforkoff = maxforkoff >> 3;   /* rounded down */
645
646         if (offset >= maxforkoff)
647                 return maxforkoff;
648         if (offset >= minforkoff)
649                 return offset;
650         return 0;
651 }
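
/*
 * Example of the >>3 encoding used above (illustrative only): a returned
 * forkoff of 15 means the attribute fork begins 15 * 8 = 120 bytes into
 * the inode literal area, leaving the first 120 bytes of that area for
 * the data fork.
 */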
652
653 /*
654  * Switch on the ATTR2 superblock bit (implies also FEATURES2) unless:
655  * - noattr2 mount option is set,
656  * - on-disk version bit says it is already set, or
657  * - the attr2 mount option is not set to enable automatic upgrade from attr1.
658  */
659 STATIC void
660 xfs_sbversion_add_attr2(
661         struct xfs_mount        *mp,
662         struct xfs_trans        *tp)
663 {
664         if (xfs_has_noattr2(mp))
665                 return;
666         if (mp->m_sb.sb_features2 & XFS_SB_VERSION2_ATTR2BIT)
667                 return;
668         if (!xfs_has_attr2(mp))
669                 return;
670
671         spin_lock(&mp->m_sb_lock);
672         xfs_add_attr2(mp);
673         spin_unlock(&mp->m_sb_lock);
674         xfs_log_sb(tp);
675 }
676
677 /*
678  * Create the initial contents of a shortform attribute list.
679  */
680 void
681 xfs_attr_shortform_create(
682         struct xfs_da_args      *args)
683 {
684         struct xfs_inode        *dp = args->dp;
685         struct xfs_ifork        *ifp = dp->i_afp;
686         struct xfs_attr_sf_hdr  *hdr;
687
688         trace_xfs_attr_sf_create(args);
689
690         ASSERT(ifp->if_bytes == 0);
691         if (ifp->if_format == XFS_DINODE_FMT_EXTENTS)
692                 ifp->if_format = XFS_DINODE_FMT_LOCAL;
693         xfs_idata_realloc(dp, sizeof(*hdr), XFS_ATTR_FORK);
694         hdr = (struct xfs_attr_sf_hdr *)ifp->if_u1.if_data;
695         memset(hdr, 0, sizeof(*hdr));
696         hdr->totsize = cpu_to_be16(sizeof(*hdr));
697         xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
698 }
699
700 /*
701  * Return -EEXIST if attr is found, or -ENOATTR if not
702  * args:  args containing attribute name and namelen
703  * sfep:  If not null, pointer will be set to the last attr entry found on
704  *        -EEXIST.  On -ENOATTR pointer is left at the last entry in the list
705  * basep: If not null, pointer is set to the byte offset of the entry in the
706  *        list on -EEXIST.  On -ENOATTR, pointer is left at the byte offset of
707  *        the last entry in the list
708  */
709 int
710 xfs_attr_sf_findname(
711         struct xfs_da_args       *args,
712         struct xfs_attr_sf_entry **sfep,
713         unsigned int             *basep)
714 {
715         struct xfs_attr_shortform *sf;
716         struct xfs_attr_sf_entry *sfe;
717         unsigned int            base = sizeof(struct xfs_attr_sf_hdr);
718         int                     size = 0;
719         int                     end;
720         int                     i;
721
722         sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data;
723         sfe = &sf->list[0];
724         end = sf->hdr.count;
725         for (i = 0; i < end; sfe = xfs_attr_sf_nextentry(sfe),
726                              base += size, i++) {
727                 size = xfs_attr_sf_entsize(sfe);
728                 if (!xfs_attr_match(args, sfe->namelen, sfe->nameval,
729                                     sfe->flags))
730                         continue;
731                 break;
732         }
733
734         if (sfep != NULL)
735                 *sfep = sfe;
736
737         if (basep != NULL)
738                 *basep = base;
739
740         if (i == end)
741                 return -ENOATTR;
742         return -EEXIST;
743 }
744
745 /*
746  * Add a name/value pair to the shortform attribute list.
747  * Overflow from the inode has already been checked for.
748  */
749 void
750 xfs_attr_shortform_add(
751         struct xfs_da_args              *args,
752         int                             forkoff)
753 {
754         struct xfs_attr_shortform       *sf;
755         struct xfs_attr_sf_entry        *sfe;
756         int                             offset, size;
757         struct xfs_mount                *mp;
758         struct xfs_inode                *dp;
759         struct xfs_ifork                *ifp;
760
761         trace_xfs_attr_sf_add(args);
762
763         dp = args->dp;
764         mp = dp->i_mount;
765         dp->i_forkoff = forkoff;
766
767         ifp = dp->i_afp;
768         ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
769         sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
770         if (xfs_attr_sf_findname(args, &sfe, NULL) == -EEXIST)
771                 ASSERT(0);
772
773         offset = (char *)sfe - (char *)sf;
774         size = xfs_attr_sf_entsize_byname(args->namelen, args->valuelen);
775         xfs_idata_realloc(dp, size, XFS_ATTR_FORK);
776         sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
777         sfe = (struct xfs_attr_sf_entry *)((char *)sf + offset);
778
779         sfe->namelen = args->namelen;
780         sfe->valuelen = args->valuelen;
781         sfe->flags = args->attr_filter;
782         memcpy(sfe->nameval, args->name, args->namelen);
783         memcpy(&sfe->nameval[args->namelen], args->value, args->valuelen);
784         sf->hdr.count++;
785         be16_add_cpu(&sf->hdr.totsize, size);
786         xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_ADATA);
787
788         xfs_sbversion_add_attr2(mp, args->trans);
789 }
790
791 /*
792  * After the last attribute is removed, revert to the original inode format,
793  * making the entire literal area available to the data fork once more.
794  */
795 void
796 xfs_attr_fork_remove(
797         struct xfs_inode        *ip,
798         struct xfs_trans        *tp)
799 {
800         ASSERT(ip->i_afp->if_nextents == 0);
801
802         xfs_idestroy_fork(ip->i_afp);
803         kmem_cache_free(xfs_ifork_cache, ip->i_afp);
804         ip->i_afp = NULL;
805         ip->i_forkoff = 0;
806         xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
807 }
808
809 /*
810  * Remove an attribute from the shortform attribute list structure.
811  */
812 int
813 xfs_attr_sf_removename(
814         struct xfs_da_args              *args)
815 {
816         struct xfs_attr_shortform       *sf;
817         struct xfs_attr_sf_entry        *sfe;
818         int                             size = 0, end, totsize;
819         unsigned int                    base;
820         struct xfs_mount                *mp;
821         struct xfs_inode                *dp;
822         int                             error;
823
824         trace_xfs_attr_sf_remove(args);
825
826         dp = args->dp;
827         mp = dp->i_mount;
828         sf = (struct xfs_attr_shortform *)dp->i_afp->if_u1.if_data;
829
830         error = xfs_attr_sf_findname(args, &sfe, &base);
831
832         /*
833          * If we are recovering an operation, finding nothing to
834          * remove is not an error - it just means there was nothing
835          * to clean up.
836          */
837         if (error == -ENOATTR && (args->op_flags & XFS_DA_OP_RECOVERY))
838                 return 0;
839         if (error != -EEXIST)
840                 return error;
841         size = xfs_attr_sf_entsize(sfe);
842
843         /*
844          * Fix up the attribute fork data, covering the hole
845          */
846         end = base + size;
847         totsize = be16_to_cpu(sf->hdr.totsize);
848         if (end != totsize)
849                 memmove(&((char *)sf)[base], &((char *)sf)[end], totsize - end);
850         sf->hdr.count--;
851         be16_add_cpu(&sf->hdr.totsize, -size);
852
853         /*
854          * Fix up the start offset of the attribute fork
855          */
856         totsize -= size;
857         if (totsize == sizeof(xfs_attr_sf_hdr_t) && xfs_has_attr2(mp) &&
858             (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) &&
859             !(args->op_flags & (XFS_DA_OP_ADDNAME | XFS_DA_OP_REPLACE))) {
860                 xfs_attr_fork_remove(dp, args->trans);
861         } else {
862                 xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
863                 dp->i_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
864                 ASSERT(dp->i_forkoff);
865                 ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
866                                 (args->op_flags & XFS_DA_OP_ADDNAME) ||
867                                 !xfs_has_attr2(mp) ||
868                                 dp->i_df.if_format == XFS_DINODE_FMT_BTREE);
869                 xfs_trans_log_inode(args->trans, dp,
870                                         XFS_ILOG_CORE | XFS_ILOG_ADATA);
871         }
872
873         xfs_sbversion_add_attr2(mp, args->trans);
874
875         return 0;
876 }
877
878 /*
879  * Look up a name in a shortform attribute list structure.
880  */
881 /*ARGSUSED*/
882 int
883 xfs_attr_shortform_lookup(xfs_da_args_t *args)
884 {
885         struct xfs_attr_shortform *sf;
886         struct xfs_attr_sf_entry *sfe;
887         int i;
888         struct xfs_ifork *ifp;
889
890         trace_xfs_attr_sf_lookup(args);
891
892         ifp = args->dp->i_afp;
893         ASSERT(ifp->if_format == XFS_DINODE_FMT_LOCAL);
894         sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
895         sfe = &sf->list[0];
896         for (i = 0; i < sf->hdr.count;
897                                 sfe = xfs_attr_sf_nextentry(sfe), i++) {
898                 if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
899                                 sfe->flags))
900                         return -EEXIST;
901         }
902         return -ENOATTR;
903 }
904
905 /*
906  * Retrieve the attribute value and length.
907  *
908  * If args->valuelen is zero, only the length needs to be returned.  Unlike a
909  * lookup, we only return an error if the attribute does not exist or we can't
910  * retrieve the value.
911  */
912 int
913 xfs_attr_shortform_getvalue(
914         struct xfs_da_args      *args)
915 {
916         struct xfs_attr_shortform *sf;
917         struct xfs_attr_sf_entry *sfe;
918         int                     i;
919
920         ASSERT(args->dp->i_afp->if_format == XFS_DINODE_FMT_LOCAL);
921         sf = (struct xfs_attr_shortform *)args->dp->i_afp->if_u1.if_data;
922         sfe = &sf->list[0];
923         for (i = 0; i < sf->hdr.count;
924                                 sfe = xfs_attr_sf_nextentry(sfe), i++) {
925                 if (xfs_attr_match(args, sfe->namelen, sfe->nameval,
926                                 sfe->flags))
927                         return xfs_attr_copy_value(args,
928                                 &sfe->nameval[args->namelen], sfe->valuelen);
929         }
930         return -ENOATTR;
931 }
932
933 /* Convert from using the shortform to the leaf format. */
934 int
935 xfs_attr_shortform_to_leaf(
936         struct xfs_da_args              *args)
937 {
938         struct xfs_inode                *dp;
939         struct xfs_attr_shortform       *sf;
940         struct xfs_attr_sf_entry        *sfe;
941         struct xfs_da_args              nargs;
942         char                            *tmpbuffer;
943         int                             error, i, size;
944         xfs_dablk_t                     blkno;
945         struct xfs_buf                  *bp;
946         struct xfs_ifork                *ifp;
947
948         trace_xfs_attr_sf_to_leaf(args);
949
950         dp = args->dp;
951         ifp = dp->i_afp;
952         sf = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
953         size = be16_to_cpu(sf->hdr.totsize);
954         tmpbuffer = kmem_alloc(size, 0);
955         ASSERT(tmpbuffer != NULL);
956         memcpy(tmpbuffer, ifp->if_u1.if_data, size);
957         sf = (struct xfs_attr_shortform *)tmpbuffer;
958
959         xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
960         xfs_bmap_local_to_extents_empty(args->trans, dp, XFS_ATTR_FORK);
961
962         bp = NULL;
963         error = xfs_da_grow_inode(args, &blkno);
964         if (error)
965                 goto out;
966
967         ASSERT(blkno == 0);
968         error = xfs_attr3_leaf_create(args, blkno, &bp);
969         if (error)
970                 goto out;
971
972         memset((char *)&nargs, 0, sizeof(nargs));
973         nargs.dp = dp;
974         nargs.geo = args->geo;
975         nargs.total = args->total;
976         nargs.whichfork = XFS_ATTR_FORK;
977         nargs.trans = args->trans;
978         nargs.op_flags = XFS_DA_OP_OKNOENT;
979
980         sfe = &sf->list[0];
981         for (i = 0; i < sf->hdr.count; i++) {
982                 nargs.name = sfe->nameval;
983                 nargs.namelen = sfe->namelen;
984                 nargs.value = &sfe->nameval[nargs.namelen];
985                 nargs.valuelen = sfe->valuelen;
986                 nargs.hashval = xfs_da_hashname(sfe->nameval,
987                                                 sfe->namelen);
988                 nargs.attr_filter = sfe->flags & XFS_ATTR_NSP_ONDISK_MASK;
989                 error = xfs_attr3_leaf_lookup_int(bp, &nargs); /* set a->index */
990                 ASSERT(error == -ENOATTR);
991                 error = xfs_attr3_leaf_add(bp, &nargs);
992                 ASSERT(error != -ENOSPC);
993                 if (error)
994                         goto out;
995                 sfe = xfs_attr_sf_nextentry(sfe);
996         }
997         error = 0;
998 out:
999         kmem_free(tmpbuffer);
1000         return error;
1001 }
1002
1003 /*
1004  * Check a leaf attribute block to see if all the entries would fit into
1005  * a shortform attribute list.
1006  */
1007 int
1008 xfs_attr_shortform_allfit(
1009         struct xfs_buf          *bp,
1010         struct xfs_inode        *dp)
1011 {
1012         struct xfs_attr_leafblock *leaf;
1013         struct xfs_attr_leaf_entry *entry;
1014         xfs_attr_leaf_name_local_t *name_loc;
1015         struct xfs_attr3_icleaf_hdr leafhdr;
1016         int                     bytes;
1017         int                     i;
1018         struct xfs_mount        *mp = bp->b_mount;
1019
1020         leaf = bp->b_addr;
1021         xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &leafhdr, leaf);
1022         entry = xfs_attr3_leaf_entryp(leaf);
1023
1024         bytes = sizeof(struct xfs_attr_sf_hdr);
1025         for (i = 0; i < leafhdr.count; entry++, i++) {
1026                 if (entry->flags & XFS_ATTR_INCOMPLETE)
1027                         continue;               /* don't copy partial entries */
1028                 if (!(entry->flags & XFS_ATTR_LOCAL))
1029                         return 0;
1030                 name_loc = xfs_attr3_leaf_name_local(leaf, i);
1031                 if (name_loc->namelen >= XFS_ATTR_SF_ENTSIZE_MAX)
1032                         return 0;
1033                 if (be16_to_cpu(name_loc->valuelen) >= XFS_ATTR_SF_ENTSIZE_MAX)
1034                         return 0;
1035                 bytes += xfs_attr_sf_entsize_byname(name_loc->namelen,
1036                                         be16_to_cpu(name_loc->valuelen));
1037         }
1038         if (xfs_has_attr2(dp->i_mount) &&
1039             (dp->i_df.if_format != XFS_DINODE_FMT_BTREE) &&
1040             (bytes == sizeof(struct xfs_attr_sf_hdr)))
1041                 return -1;
1042         return xfs_attr_shortform_bytesfit(dp, bytes);
1043 }
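
/*
 * Return convention of the helper above, as consumed by the leaf to
 * shortform conversion: 0 means the entries cannot be squeezed back into
 * shortform, -1 means nothing needs copying and the attr fork can be
 * removed outright, and any other value is the fork offset to pass as
 * 'forkoff' to xfs_attr3_leaf_to_shortform().
 */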
1044
1045 /* Verify the consistency of an inline attribute fork. */
1046 xfs_failaddr_t
1047 xfs_attr_shortform_verify(
1048         struct xfs_inode                *ip)
1049 {
1050         struct xfs_attr_shortform       *sfp;
1051         struct xfs_attr_sf_entry        *sfep;
1052         struct xfs_attr_sf_entry        *next_sfep;
1053         char                            *endp;
1054         struct xfs_ifork                *ifp;
1055         int                             i;
1056         int64_t                         size;
1057
1058         ASSERT(ip->i_afp->if_format == XFS_DINODE_FMT_LOCAL);
1059         ifp = XFS_IFORK_PTR(ip, XFS_ATTR_FORK);
1060         sfp = (struct xfs_attr_shortform *)ifp->if_u1.if_data;
1061         size = ifp->if_bytes;
1062
1063         /*
1064          * Give up if the attribute is way too short.
1065          */
1066         if (size < sizeof(struct xfs_attr_sf_hdr))
1067                 return __this_address;
1068
1069         endp = (char *)sfp + size;
1070
1071         /* Check all reported entries */
1072         sfep = &sfp->list[0];
1073         for (i = 0; i < sfp->hdr.count; i++) {
1074                 /*
1075                  * struct xfs_attr_sf_entry has a variable length.
1076                  * Check the fixed-offset parts of the structure are
1077                  * within the data buffer.
1078                  * xfs_attr_sf_entry is defined with a 1-byte variable
1079                  * array at the end, so we must subtract that off.
1080                  */
1081                 if (((char *)sfep + sizeof(*sfep)) >= endp)
1082                         return __this_address;
1083
1084                 /* Don't allow names with known bad length. */
1085                 if (sfep->namelen == 0)
1086                         return __this_address;
1087
1088                 /*
1089                  * Check that the variable-length part of the structure is
1090                  * within the data buffer.  The next entry starts after the
1091                  * name component, so nextentry is an acceptable test.
1092                  */
1093                 next_sfep = xfs_attr_sf_nextentry(sfep);
1094                 if ((char *)next_sfep > endp)
1095                         return __this_address;
1096
1097                 /*
1098                  * Check for unknown flags.  Short form doesn't support
1099                  * the incomplete or local bits, so we can use the namespace
1100                  * mask here.
1101                  */
1102                 if (sfep->flags & ~XFS_ATTR_NSP_ONDISK_MASK)
1103                         return __this_address;
1104
1105                 /*
1106                  * Check for invalid namespace combinations.  We only allow
1107                  * one namespace flag per xattr, so we can just count the
1108                  * bits (i.e. hweight) here.
1109                  */
1110                 if (hweight8(sfep->flags & XFS_ATTR_NSP_ONDISK_MASK) > 1)
1111                         return __this_address;
1112
1113                 sfep = next_sfep;
1114         }
1115         if ((void *)sfep != (void *)endp)
1116                 return __this_address;
1117
1118         return NULL;
1119 }
1120
1121 /*
1122  * Convert a leaf attribute list to a shortform attribute list
1123  */
1124 int
1125 xfs_attr3_leaf_to_shortform(
1126         struct xfs_buf          *bp,
1127         struct xfs_da_args      *args,
1128         int                     forkoff)
1129 {
1130         struct xfs_attr_leafblock *leaf;
1131         struct xfs_attr3_icleaf_hdr ichdr;
1132         struct xfs_attr_leaf_entry *entry;
1133         struct xfs_attr_leaf_name_local *name_loc;
1134         struct xfs_da_args      nargs;
1135         struct xfs_inode        *dp = args->dp;
1136         char                    *tmpbuffer;
1137         int                     error;
1138         int                     i;
1139
1140         trace_xfs_attr_leaf_to_sf(args);
1141
1142         tmpbuffer = kmem_alloc(args->geo->blksize, 0);
1143         if (!tmpbuffer)
1144                 return -ENOMEM;
1145
1146         memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
1147
1148         leaf = (xfs_attr_leafblock_t *)tmpbuffer;
1149         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
1150         entry = xfs_attr3_leaf_entryp(leaf);
1151
1152         /* XXX (dgc): buffer is about to be marked stale - why zero it? */
1153         memset(bp->b_addr, 0, args->geo->blksize);
1154
1155         /*
1156          * Clean out the prior contents of the attribute list.
1157          */
1158         error = xfs_da_shrink_inode(args, 0, bp);
1159         if (error)
1160                 goto out;
1161
1162         if (forkoff == -1) {
1163                 /*
1164                  * Don't remove the attr fork if this operation is the first
1165                  * part of an attr replace operation. We're going to add a new
1166                  * attr immediately, so we need to keep the attr fork around in
1167                  * this case.
1168                  */
1169                 if (!(args->op_flags & XFS_DA_OP_REPLACE)) {
1170                         ASSERT(xfs_has_attr2(dp->i_mount));
1171                         ASSERT(dp->i_df.if_format != XFS_DINODE_FMT_BTREE);
1172                         xfs_attr_fork_remove(dp, args->trans);
1173                 }
1174                 goto out;
1175         }
1176
1177         xfs_attr_shortform_create(args);
1178
1179         /*
1180          * Copy the attributes
1181          */
1182         memset((char *)&nargs, 0, sizeof(nargs));
1183         nargs.geo = args->geo;
1184         nargs.dp = dp;
1185         nargs.total = args->total;
1186         nargs.whichfork = XFS_ATTR_FORK;
1187         nargs.trans = args->trans;
1188         nargs.op_flags = XFS_DA_OP_OKNOENT;
1189
1190         for (i = 0; i < ichdr.count; entry++, i++) {
1191                 if (entry->flags & XFS_ATTR_INCOMPLETE)
1192                         continue;       /* don't copy partial entries */
1193                 if (!entry->nameidx)
1194                         continue;
1195                 ASSERT(entry->flags & XFS_ATTR_LOCAL);
1196                 name_loc = xfs_attr3_leaf_name_local(leaf, i);
1197                 nargs.name = name_loc->nameval;
1198                 nargs.namelen = name_loc->namelen;
1199                 nargs.value = &name_loc->nameval[nargs.namelen];
1200                 nargs.valuelen = be16_to_cpu(name_loc->valuelen);
1201                 nargs.hashval = be32_to_cpu(entry->hashval);
1202                 nargs.attr_filter = entry->flags & XFS_ATTR_NSP_ONDISK_MASK;
1203                 xfs_attr_shortform_add(&nargs, forkoff);
1204         }
1205         error = 0;
1206
1207 out:
1208         kmem_free(tmpbuffer);
1209         return error;
1210 }
1211
1212 /*
1213  * Convert from using a single leaf to a root node and a leaf.
1214  */
1215 int
1216 xfs_attr3_leaf_to_node(
1217         struct xfs_da_args      *args)
1218 {
1219         struct xfs_attr_leafblock *leaf;
1220         struct xfs_attr3_icleaf_hdr icleafhdr;
1221         struct xfs_attr_leaf_entry *entries;
1222         struct xfs_da3_icnode_hdr icnodehdr;
1223         struct xfs_da_intnode   *node;
1224         struct xfs_inode        *dp = args->dp;
1225         struct xfs_mount        *mp = dp->i_mount;
1226         struct xfs_buf          *bp1 = NULL;
1227         struct xfs_buf          *bp2 = NULL;
1228         xfs_dablk_t             blkno;
1229         int                     error;
1230
1231         trace_xfs_attr_leaf_to_node(args);
1232
1233         if (XFS_TEST_ERROR(false, mp, XFS_ERRTAG_ATTR_LEAF_TO_NODE)) {
1234                 error = -EIO;
1235                 goto out;
1236         }
1237
1238         error = xfs_da_grow_inode(args, &blkno);
1239         if (error)
1240                 goto out;
1241         error = xfs_attr3_leaf_read(args->trans, dp, 0, &bp1);
1242         if (error)
1243                 goto out;
1244
1245         error = xfs_da_get_buf(args->trans, dp, blkno, &bp2, XFS_ATTR_FORK);
1246         if (error)
1247                 goto out;
1248
1249         /* copy leaf to new buffer, update identifiers */
1250         xfs_trans_buf_set_type(args->trans, bp2, XFS_BLFT_ATTR_LEAF_BUF);
1251         bp2->b_ops = bp1->b_ops;
1252         memcpy(bp2->b_addr, bp1->b_addr, args->geo->blksize);
1253         if (xfs_has_crc(mp)) {
1254                 struct xfs_da3_blkinfo *hdr3 = bp2->b_addr;
1255                 hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp2));
1256         }
1257         xfs_trans_log_buf(args->trans, bp2, 0, args->geo->blksize - 1);
1258
1259         /*
1260          * Set up the new root node.
1261          */
1262         error = xfs_da3_node_create(args, 0, 1, &bp1, XFS_ATTR_FORK);
1263         if (error)
1264                 goto out;
1265         node = bp1->b_addr;
1266         xfs_da3_node_hdr_from_disk(mp, &icnodehdr, node);
1267
1268         leaf = bp2->b_addr;
1269         xfs_attr3_leaf_hdr_from_disk(args->geo, &icleafhdr, leaf);
1270         entries = xfs_attr3_leaf_entryp(leaf);
1271
1272         /* both on-disk, don't endian-flip twice */
1273         icnodehdr.btree[0].hashval = entries[icleafhdr.count - 1].hashval;
1274         icnodehdr.btree[0].before = cpu_to_be32(blkno);
1275         icnodehdr.count = 1;
1276         xfs_da3_node_hdr_to_disk(dp->i_mount, node, &icnodehdr);
1277         xfs_trans_log_buf(args->trans, bp1, 0, args->geo->blksize - 1);
1278         error = 0;
1279 out:
1280         return error;
1281 }
1282
1283 /*========================================================================
1284  * Routines used for growing the Btree.
1285  *========================================================================*/
1286
1287 /*
1288  * Create the initial contents of a leaf attribute list
1289  * or a leaf in a node attribute list.
1290  */
1291 STATIC int
1292 xfs_attr3_leaf_create(
1293         struct xfs_da_args      *args,
1294         xfs_dablk_t             blkno,
1295         struct xfs_buf          **bpp)
1296 {
1297         struct xfs_attr_leafblock *leaf;
1298         struct xfs_attr3_icleaf_hdr ichdr;
1299         struct xfs_inode        *dp = args->dp;
1300         struct xfs_mount        *mp = dp->i_mount;
1301         struct xfs_buf          *bp;
1302         int                     error;
1303
1304         trace_xfs_attr_leaf_create(args);
1305
1306         error = xfs_da_get_buf(args->trans, args->dp, blkno, &bp,
1307                                             XFS_ATTR_FORK);
1308         if (error)
1309                 return error;
1310         bp->b_ops = &xfs_attr3_leaf_buf_ops;
1311         xfs_trans_buf_set_type(args->trans, bp, XFS_BLFT_ATTR_LEAF_BUF);
1312         leaf = bp->b_addr;
1313         memset(leaf, 0, args->geo->blksize);
1314
1315         memset(&ichdr, 0, sizeof(ichdr));
1316         ichdr.firstused = args->geo->blksize;
1317
1318         if (xfs_has_crc(mp)) {
1319                 struct xfs_da3_blkinfo *hdr3 = bp->b_addr;
1320
1321                 ichdr.magic = XFS_ATTR3_LEAF_MAGIC;
1322
1323                 hdr3->blkno = cpu_to_be64(xfs_buf_daddr(bp));
1324                 hdr3->owner = cpu_to_be64(dp->i_ino);
1325                 uuid_copy(&hdr3->uuid, &mp->m_sb.sb_meta_uuid);
1326
1327                 ichdr.freemap[0].base = sizeof(struct xfs_attr3_leaf_hdr);
1328         } else {
1329                 ichdr.magic = XFS_ATTR_LEAF_MAGIC;
1330                 ichdr.freemap[0].base = sizeof(struct xfs_attr_leaf_hdr);
1331         }
1332         ichdr.freemap[0].size = ichdr.firstused - ichdr.freemap[0].base;
1333
1334         xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
1335         xfs_trans_log_buf(args->trans, bp, 0, args->geo->blksize - 1);
1336
1337         *bpp = bp;
1338         return 0;
1339 }
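
/*
 * Layout note for a freshly created leaf (as set up above): freemap[0]
 * initially spans from the end of the on-disk header to the end of the
 * block and firstused equals the block size; name/value data then grows
 * backwards from the tail while the entry table grows forwards from the
 * header.
 */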
1340
1341 /*
1342  * Split the leaf node, rebalance, then add the new entry.
1343  */
1344 int
1345 xfs_attr3_leaf_split(
1346         struct xfs_da_state     *state,
1347         struct xfs_da_state_blk *oldblk,
1348         struct xfs_da_state_blk *newblk)
1349 {
1350         xfs_dablk_t blkno;
1351         int error;
1352
1353         trace_xfs_attr_leaf_split(state->args);
1354
1355         /*
1356          * Allocate space for a new leaf node.
1357          */
1358         ASSERT(oldblk->magic == XFS_ATTR_LEAF_MAGIC);
1359         error = xfs_da_grow_inode(state->args, &blkno);
1360         if (error)
1361                 return error;
1362         error = xfs_attr3_leaf_create(state->args, blkno, &newblk->bp);
1363         if (error)
1364                 return error;
1365         newblk->blkno = blkno;
1366         newblk->magic = XFS_ATTR_LEAF_MAGIC;
1367
1368         /*
1369          * Rebalance the entries across the two leaves.
1370          * NOTE: rebalance() currently depends on the 2nd block being empty.
1371          */
1372         xfs_attr3_leaf_rebalance(state, oldblk, newblk);
1373         error = xfs_da3_blk_link(state, oldblk, newblk);
1374         if (error)
1375                 return error;
1376
1377         /*
1378          * Save info on the "old" attribute for "atomic rename" ops; leaf_add()
1379          * modifies the index/blkno/rmtblk/rmtblkcnt fields to show the
1380          * "new" attrs info.  Will need the "old" info to remove it later.
1381          *
1382          * Insert the "new" entry in the correct block.
1383          */
1384         if (state->inleaf) {
1385                 trace_xfs_attr_leaf_add_old(state->args);
1386                 error = xfs_attr3_leaf_add(oldblk->bp, state->args);
1387         } else {
1388                 trace_xfs_attr_leaf_add_new(state->args);
1389                 error = xfs_attr3_leaf_add(newblk->bp, state->args);
1390         }
1391
1392         /*
1393          * Update last hashval in each block since we added the name.
1394          */
1395         oldblk->hashval = xfs_attr_leaf_lasthash(oldblk->bp, NULL);
1396         newblk->hashval = xfs_attr_leaf_lasthash(newblk->bp, NULL);
1397         return error;
1398 }
1399
1400 /*
1401  * Add a name to the leaf attribute list structure.
1402  */
1403 int
1404 xfs_attr3_leaf_add(
1405         struct xfs_buf          *bp,
1406         struct xfs_da_args      *args)
1407 {
1408         struct xfs_attr_leafblock *leaf;
1409         struct xfs_attr3_icleaf_hdr ichdr;
1410         int                     tablesize;
1411         int                     entsize;
1412         int                     sum;
1413         int                     tmp;
1414         int                     i;
1415
1416         trace_xfs_attr_leaf_add(args);
1417
1418         leaf = bp->b_addr;
1419         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
1420         ASSERT(args->index >= 0 && args->index <= ichdr.count);
1421         entsize = xfs_attr_leaf_newentsize(args, NULL);
1422
1423         /*
1424          * Search through freemap for first-fit on new name length.
1425          * (may need to figure in size of entry struct too)
1426          */
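        /*
         * A candidate free region must hold the new name/value bytes, plus
         * one more xfs_attr_leaf_entry_t if the region borders the entry
         * table (freemap[i].base < firstused), because adding a name also
         * consumes a slot in the entry array.  Illustrative numbers only:
         * with entsize = 24 and an 8-byte entry struct, a bordering region
         * needs at least 32 bytes; a smaller region just adds its size to
         * "sum" for the compaction decision below.
         */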
1427         tablesize = (ichdr.count + 1) * sizeof(xfs_attr_leaf_entry_t)
1428                                         + xfs_attr3_leaf_hdr_size(leaf);
1429         for (sum = 0, i = XFS_ATTR_LEAF_MAPSIZE - 1; i >= 0; i--) {
1430                 if (tablesize > ichdr.firstused) {
1431                         sum += ichdr.freemap[i].size;
1432                         continue;
1433                 }
1434                 if (!ichdr.freemap[i].size)
1435                         continue;       /* no space in this map */
1436                 tmp = entsize;
1437                 if (ichdr.freemap[i].base < ichdr.firstused)
1438                         tmp += sizeof(xfs_attr_leaf_entry_t);
1439                 if (ichdr.freemap[i].size >= tmp) {
1440                         tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, i);
1441                         goto out_log_hdr;
1442                 }
1443                 sum += ichdr.freemap[i].size;
1444         }
1445
1446         /*
1447          * If there are no holes in the address space of the block,
1448          * and we don't have enough freespace, then compaction will do us
1449          * no good and we should just give up.
1450          */
1451         if (!ichdr.holes && sum < entsize)
1452                 return -ENOSPC;
1453
1454         /*
1455          * Compact the entries to coalesce free space.
1456          * This may change the hdr->count via dropping INCOMPLETE entries.
1457          */
1458         xfs_attr3_leaf_compact(args, &ichdr, bp);
1459
1460         /*
1461          * After compaction, the block is guaranteed to have only one
1462          * free region, in freemap[0].  If it is not big enough, give up.
1463          */
1464         if (ichdr.freemap[0].size < (entsize + sizeof(xfs_attr_leaf_entry_t))) {
1465                 tmp = -ENOSPC;
1466                 goto out_log_hdr;
1467         }
1468
1469         tmp = xfs_attr3_leaf_add_work(bp, &ichdr, args, 0);
1470
1471 out_log_hdr:
1472         xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
1473         xfs_trans_log_buf(args->trans, bp,
1474                 XFS_DA_LOGRANGE(leaf, &leaf->hdr,
1475                                 xfs_attr3_leaf_hdr_size(leaf)));
1476         return tmp;
1477 }
1478
1479 /*
1480  * Add a name to a leaf attribute list structure.
1481  */
1482 STATIC int
1483 xfs_attr3_leaf_add_work(
1484         struct xfs_buf          *bp,
1485         struct xfs_attr3_icleaf_hdr *ichdr,
1486         struct xfs_da_args      *args,
1487         int                     mapindex)
1488 {
1489         struct xfs_attr_leafblock *leaf;
1490         struct xfs_attr_leaf_entry *entry;
1491         struct xfs_attr_leaf_name_local *name_loc;
1492         struct xfs_attr_leaf_name_remote *name_rmt;
1493         struct xfs_mount        *mp;
1494         int                     tmp;
1495         int                     i;
1496
1497         trace_xfs_attr_leaf_add_work(args);
1498
1499         leaf = bp->b_addr;
1500         ASSERT(mapindex >= 0 && mapindex < XFS_ATTR_LEAF_MAPSIZE);
1501         ASSERT(args->index >= 0 && args->index <= ichdr->count);
1502
1503         /*
1504          * Force open some space in the entry array and fill it in.
1505          */
1506         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
1507         if (args->index < ichdr->count) {
1508                 tmp  = ichdr->count - args->index;
1509                 tmp *= sizeof(xfs_attr_leaf_entry_t);
1510                 memmove(entry + 1, entry, tmp);
1511                 xfs_trans_log_buf(args->trans, bp,
1512                     XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(*entry)));
1513         }
1514         ichdr->count++;
1515
1516         /*
1517          * Allocate space for the new string (at the end of the run).
1518          */
1519         mp = args->trans->t_mountp;
1520         ASSERT(ichdr->freemap[mapindex].base < args->geo->blksize);
1521         ASSERT((ichdr->freemap[mapindex].base & 0x3) == 0);
1522         ASSERT(ichdr->freemap[mapindex].size >=
1523                 xfs_attr_leaf_newentsize(args, NULL));
1524         ASSERT(ichdr->freemap[mapindex].size < args->geo->blksize);
1525         ASSERT((ichdr->freemap[mapindex].size & 0x3) == 0);
1526
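        /*
         * Carve the name/value space from the tail of the chosen free
         * region: the region shrinks by the new entry's size and nameidx
         * points at the start of the carved space (base + remaining size).
         * The "tmp" out-parameter of xfs_attr_leaf_newentsize() reports
         * whether the value fits locally, which drives XFS_ATTR_LOCAL
         * below.
         */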
1527         ichdr->freemap[mapindex].size -= xfs_attr_leaf_newentsize(args, &tmp);
1528
1529         entry->nameidx = cpu_to_be16(ichdr->freemap[mapindex].base +
1530                                      ichdr->freemap[mapindex].size);
1531         entry->hashval = cpu_to_be32(args->hashval);
1532         entry->flags = args->attr_filter;
1533         if (tmp)
1534                 entry->flags |= XFS_ATTR_LOCAL;
1535         if (args->op_flags & XFS_DA_OP_REPLACE) {
1536                 if (!(args->op_flags & XFS_DA_OP_LOGGED))
1537                         entry->flags |= XFS_ATTR_INCOMPLETE;
1538                 if ((args->blkno2 == args->blkno) &&
1539                     (args->index2 <= args->index)) {
1540                         args->index2++;
1541                 }
1542         }
1543         xfs_trans_log_buf(args->trans, bp,
1544                           XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
1545         ASSERT((args->index == 0) ||
1546                (be32_to_cpu(entry->hashval) >= be32_to_cpu((entry-1)->hashval)));
1547         ASSERT((args->index == ichdr->count - 1) ||
1548                (be32_to_cpu(entry->hashval) <= be32_to_cpu((entry+1)->hashval)));
1549
1550         /*
1551          * For "remote" attribute values, simply note that we need to
1552          * allocate space for the "remote" value.  We can't actually
1553          * allocate the extents in this transaction, and we can't decide
1554          * which blocks they should be as we might allocate more blocks
1555          * as part of this transaction (a split operation for example).
1556          */
1557         if (entry->flags & XFS_ATTR_LOCAL) {
1558                 name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
1559                 name_loc->namelen = args->namelen;
1560                 name_loc->valuelen = cpu_to_be16(args->valuelen);
1561                 memcpy((char *)name_loc->nameval, args->name, args->namelen);
1562                 memcpy((char *)&name_loc->nameval[args->namelen], args->value,
1563                                    be16_to_cpu(name_loc->valuelen));
1564         } else {
1565                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
1566                 name_rmt->namelen = args->namelen;
1567                 memcpy((char *)name_rmt->name, args->name, args->namelen);
1568                 entry->flags |= XFS_ATTR_INCOMPLETE;
1569                 /* just in case */
1570                 name_rmt->valuelen = 0;
1571                 name_rmt->valueblk = 0;
1572                 args->rmtblkno = 1;
1573                 args->rmtblkcnt = xfs_attr3_rmt_blocks(mp, args->valuelen);
1574                 args->rmtvaluelen = args->valuelen;
1575         }
1576         xfs_trans_log_buf(args->trans, bp,
1577              XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
1578                                    xfs_attr_leaf_entsize(leaf, args->index)));
1579
1580         /*
1581          * Update the control info for this leaf node
1582          */
1583         if (be16_to_cpu(entry->nameidx) < ichdr->firstused)
1584                 ichdr->firstused = be16_to_cpu(entry->nameidx);
1585
1586         ASSERT(ichdr->firstused >= ichdr->count * sizeof(xfs_attr_leaf_entry_t)
1587                                         + xfs_attr3_leaf_hdr_size(leaf));
1588         tmp = (ichdr->count - 1) * sizeof(xfs_attr_leaf_entry_t)
1589                                         + xfs_attr3_leaf_hdr_size(leaf);
1590
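        /*
         * "tmp" is where the entry table ended before this insertion
         * (ichdr->count has already been bumped, hence count - 1).  If a
         * free region began exactly at that old table end, the new entry
         * slot has grown into it, so advance its base and shrink it by
         * one entry's worth.
         */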
1591         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
1592                 if (ichdr->freemap[i].base == tmp) {
1593                         ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
1594                         ichdr->freemap[i].size -=
1595                                 min_t(uint16_t, ichdr->freemap[i].size,
1596                                                 sizeof(xfs_attr_leaf_entry_t));
1597                 }
1598         }
1599         ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
1600         return 0;
1601 }
1602
1603 /*
1604  * Garbage collect a leaf attribute list block by copying it to a new buffer.
1605  */
1606 STATIC void
1607 xfs_attr3_leaf_compact(
1608         struct xfs_da_args      *args,
1609         struct xfs_attr3_icleaf_hdr *ichdr_dst,
1610         struct xfs_buf          *bp)
1611 {
1612         struct xfs_attr_leafblock *leaf_src;
1613         struct xfs_attr_leafblock *leaf_dst;
1614         struct xfs_attr3_icleaf_hdr ichdr_src;
1615         struct xfs_trans        *trans = args->trans;
1616         char                    *tmpbuffer;
1617
1618         trace_xfs_attr_leaf_compact(args);
1619
1620         tmpbuffer = kmem_alloc(args->geo->blksize, 0);
1621         memcpy(tmpbuffer, bp->b_addr, args->geo->blksize);
1622         memset(bp->b_addr, 0, args->geo->blksize);
1623         leaf_src = (xfs_attr_leafblock_t *)tmpbuffer;
1624         leaf_dst = bp->b_addr;
1625
1626         /*
1627          * Copy the on-disk header back into the destination buffer to ensure
1628          * all the information in the header that is not part of the incore
1629          * header structure is preserved.
1630          */
1631         memcpy(bp->b_addr, tmpbuffer, xfs_attr3_leaf_hdr_size(leaf_src));
1632
1633         /* Initialise the incore headers */
1634         ichdr_src = *ichdr_dst; /* struct copy */
1635         ichdr_dst->firstused = args->geo->blksize;
1636         ichdr_dst->usedbytes = 0;
1637         ichdr_dst->count = 0;
1638         ichdr_dst->holes = 0;
1639         ichdr_dst->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_src);
1640         ichdr_dst->freemap[0].size = ichdr_dst->firstused -
1641                                                 ichdr_dst->freemap[0].base;
1642
1643         /* write the header back to initialise the underlying buffer */
1644         xfs_attr3_leaf_hdr_to_disk(args->geo, leaf_dst, ichdr_dst);
1645
1646         /*
1647          * Copy all entries in the same (sorted) order,
1648          * but allocate name/value pairs packed and in sequence.
1649          */
1650         xfs_attr3_leaf_moveents(args, leaf_src, &ichdr_src, 0,
1651                                 leaf_dst, ichdr_dst, 0, ichdr_src.count);
1652         /*
1653          * This logs the entire buffer, but the caller must write the header
1654          * back to the buffer when it is finished modifying it.
1655          */
1656         xfs_trans_log_buf(trans, bp, 0, args->geo->blksize - 1);
1657
1658         kmem_free(tmpbuffer);
1659 }
1660
1661 /*
1662  * Compare two leaf blocks "order".
1663  * Return 0 unless leaf2 should go before leaf1.
1664  */
1665 static int
1666 xfs_attr3_leaf_order(
1667         struct xfs_buf  *leaf1_bp,
1668         struct xfs_attr3_icleaf_hdr *leaf1hdr,
1669         struct xfs_buf  *leaf2_bp,
1670         struct xfs_attr3_icleaf_hdr *leaf2hdr)
1671 {
1672         struct xfs_attr_leaf_entry *entries1;
1673         struct xfs_attr_leaf_entry *entries2;
1674
1675         entries1 = xfs_attr3_leaf_entryp(leaf1_bp->b_addr);
1676         entries2 = xfs_attr3_leaf_entryp(leaf2_bp->b_addr);
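        /*
         * leaf2 sorts before leaf1 if either its lowest or its highest
         * hashval is smaller than the corresponding hashval in leaf1;
         * both blocks must be non-empty for the comparison to be
         * meaningful.
         */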
1677         if (leaf1hdr->count > 0 && leaf2hdr->count > 0 &&
1678             ((be32_to_cpu(entries2[0].hashval) <
1679               be32_to_cpu(entries1[0].hashval)) ||
1680              (be32_to_cpu(entries2[leaf2hdr->count - 1].hashval) <
1681               be32_to_cpu(entries1[leaf1hdr->count - 1].hashval)))) {
1682                 return 1;
1683         }
1684         return 0;
1685 }
1686
1687 int
1688 xfs_attr_leaf_order(
1689         struct xfs_buf  *leaf1_bp,
1690         struct xfs_buf  *leaf2_bp)
1691 {
1692         struct xfs_attr3_icleaf_hdr ichdr1;
1693         struct xfs_attr3_icleaf_hdr ichdr2;
1694         struct xfs_mount *mp = leaf1_bp->b_mount;
1695
1696         xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr1, leaf1_bp->b_addr);
1697         xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr2, leaf2_bp->b_addr);
1698         return xfs_attr3_leaf_order(leaf1_bp, &ichdr1, leaf2_bp, &ichdr2);
1699 }
1700
1701 /*
1702  * Redistribute the attribute list entries between two leaf nodes,
1703  * taking into account the size of the new entry.
1704  *
1705  * NOTE: if new block is empty, then it will get the upper half of the
1706  * old block.  At present, all (one) callers pass in an empty second block.
1707  *
1708  * This code adjusts the args->index/blkno and args->index2/blkno2 fields
1709  * to match what it is doing in splitting the attribute leaf block.  Those
1710  * values are used in "atomic rename" operations on attributes.  Note that
1711  * the "new" and "old" values can end up in different blocks.
1712  */
1713 STATIC void
1714 xfs_attr3_leaf_rebalance(
1715         struct xfs_da_state     *state,
1716         struct xfs_da_state_blk *blk1,
1717         struct xfs_da_state_blk *blk2)
1718 {
1719         struct xfs_da_args      *args;
1720         struct xfs_attr_leafblock *leaf1;
1721         struct xfs_attr_leafblock *leaf2;
1722         struct xfs_attr3_icleaf_hdr ichdr1;
1723         struct xfs_attr3_icleaf_hdr ichdr2;
1724         struct xfs_attr_leaf_entry *entries1;
1725         struct xfs_attr_leaf_entry *entries2;
1726         int                     count;
1727         int                     totallen;
1728         int                     max;
1729         int                     space;
1730         int                     swap;
1731
1732         /*
1733          * Set up environment.
1734          */
1735         ASSERT(blk1->magic == XFS_ATTR_LEAF_MAGIC);
1736         ASSERT(blk2->magic == XFS_ATTR_LEAF_MAGIC);
1737         leaf1 = blk1->bp->b_addr;
1738         leaf2 = blk2->bp->b_addr;
1739         xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr1, leaf1);
1740         xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, leaf2);
1741         ASSERT(ichdr2.count == 0);
1742         args = state->args;
1743
1744         trace_xfs_attr_leaf_rebalance(args);
1745
1746         /*
1747          * Check ordering of blocks, reverse if it makes things simpler.
1748          *
1749          * NOTE: Given that all (current) callers pass in an empty
1750          * second block, this code should never set "swap".
1751          */
1752         swap = 0;
1753         if (xfs_attr3_leaf_order(blk1->bp, &ichdr1, blk2->bp, &ichdr2)) {
1754                 swap(blk1, blk2);
1755
1756                 /* swap structures rather than reconverting them */
1757                 swap(ichdr1, ichdr2);
1758
1759                 leaf1 = blk1->bp->b_addr;
1760                 leaf2 = blk2->bp->b_addr;
1761                 swap = 1;
1762         }
1763
1764         /*
1765          * Examine entries until we reduce the absolute difference in
1766          * byte usage between the two blocks to a minimum.  Then get
1767          * the direction to copy and the number of elements to move.
1768          *
1769          * "inleaf" is true if the new entry should be inserted into blk1.
1770          * If "swap" is also true, then reverse the sense of "inleaf".
1771          */
1772         state->inleaf = xfs_attr3_leaf_figure_balance(state, blk1, &ichdr1,
1773                                                       blk2, &ichdr2,
1774                                                       &count, &totallen);
1775         if (swap)
1776                 state->inleaf = !state->inleaf;
1777
1778         /*
1779          * Move any entries required from leaf to leaf:
1780          */
1781         if (count < ichdr1.count) {
1782                 /*
1783                  * Figure the total bytes to be added to the destination leaf.
1784                  */
1785                 /* number of entries being moved */
1786                 count = ichdr1.count - count;
1787                 space  = ichdr1.usedbytes - totallen;
1788                 space += count * sizeof(xfs_attr_leaf_entry_t);
1789
1790                 /*
1791                  * leaf2 is the destination, compact it if it looks tight.
1792                  */
1793                 max  = ichdr2.firstused - xfs_attr3_leaf_hdr_size(leaf1);
1794                 max -= ichdr2.count * sizeof(xfs_attr_leaf_entry_t);
1795                 if (space > max)
1796                         xfs_attr3_leaf_compact(args, &ichdr2, blk2->bp);
1797
1798                 /*
1799                  * Move high entries from leaf1 to low end of leaf2.
1800                  */
1801                 xfs_attr3_leaf_moveents(args, leaf1, &ichdr1,
1802                                 ichdr1.count - count, leaf2, &ichdr2, 0, count);
1803
1804         } else if (count > ichdr1.count) {
1805                 /*
1806                  * I assert that since all callers pass in an empty
1807                  * second buffer, this code should never execute.
1808                  */
1809                 ASSERT(0);
1810
1811                 /*
1812                  * Figure the total bytes to be added to the destination leaf.
1813                  */
1814                 /* number of entries being moved */
1815                 count -= ichdr1.count;
1816                 space  = totallen - ichdr1.usedbytes;
1817                 space += count * sizeof(xfs_attr_leaf_entry_t);
1818
1819                 /*
1820                  * leaf1 is the destination, compact it if it looks tight.
1821                  */
1822                 max  = ichdr1.firstused - xfs_attr3_leaf_hdr_size(leaf1);
1823                 max -= ichdr1.count * sizeof(xfs_attr_leaf_entry_t);
1824                 if (space > max)
1825                         xfs_attr3_leaf_compact(args, &ichdr1, blk1->bp);
1826
1827                 /*
1828                  * Move low entries from leaf2 to high end of leaf1.
1829                  */
1830                 xfs_attr3_leaf_moveents(args, leaf2, &ichdr2, 0, leaf1, &ichdr1,
1831                                         ichdr1.count, count);
1832         }
1833
1834         xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf1, &ichdr1);
1835         xfs_attr3_leaf_hdr_to_disk(state->args->geo, leaf2, &ichdr2);
1836         xfs_trans_log_buf(args->trans, blk1->bp, 0, args->geo->blksize - 1);
1837         xfs_trans_log_buf(args->trans, blk2->bp, 0, args->geo->blksize - 1);
1838
1839         /*
1840          * Copy out last hashval in each block for B-tree code.
1841          */
1842         entries1 = xfs_attr3_leaf_entryp(leaf1);
1843         entries2 = xfs_attr3_leaf_entryp(leaf2);
1844         blk1->hashval = be32_to_cpu(entries1[ichdr1.count - 1].hashval);
1845         blk2->hashval = be32_to_cpu(entries2[ichdr2.count - 1].hashval);
1846
1847         /*
1848          * Adjust the expected index for insertion.
1849          * NOTE: this code depends on the (current) situation that the
1850          * second block was originally empty.
1851          *
1852          * If the insertion point moved to the 2nd block, we must adjust
1853          * the index.  We must also track the entry just following the
1854          * new entry for use in an "atomic rename" operation, that entry
1855          * is always the "old" entry and the "new" entry is what we are
1856          * inserting.  The index/blkno fields refer to the "old" entry,
1857          * while the index2/blkno2 fields refer to the "new" entry.
1858          */
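        /*
         * Illustrative numbers only: if blk1 keeps 5 entries after the
         * rebalance and the original insertion index was 7, the insertion
         * point becomes index 7 - 5 = 2 in blk2.
         */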
1859         if (blk1->index > ichdr1.count) {
1860                 ASSERT(state->inleaf == 0);
1861                 blk2->index = blk1->index - ichdr1.count;
1862                 args->index = args->index2 = blk2->index;
1863                 args->blkno = args->blkno2 = blk2->blkno;
1864         } else if (blk1->index == ichdr1.count) {
1865                 if (state->inleaf) {
1866                         args->index = blk1->index;
1867                         args->blkno = blk1->blkno;
1868                         args->index2 = 0;
1869                         args->blkno2 = blk2->blkno;
1870                 } else {
1871                         /*
1872                          * On a double leaf split, the original attr location
1873                          * is already stored in blkno2/index2, so don't
1874                          * overwrite it, otherwise we corrupt the tree.
1875                          */
1876                         blk2->index = blk1->index - ichdr1.count;
1877                         args->index = blk2->index;
1878                         args->blkno = blk2->blkno;
1879                         if (!state->extravalid) {
1880                                 /*
1881                                  * set the new attr location to match the old
1882                                  * one and let the higher level split code
1883                                  * decide where in the leaf to place it.
1884                                  */
1885                                 args->index2 = blk2->index;
1886                                 args->blkno2 = blk2->blkno;
1887                         }
1888                 }
1889         } else {
1890                 ASSERT(state->inleaf == 1);
1891                 args->index = args->index2 = blk1->index;
1892                 args->blkno = args->blkno2 = blk1->blkno;
1893         }
1894 }
1895
1896 /*
1897  * Examine entries until we reduce the absolute difference in
1898  * byte usage between the two blocks to a minimum.
1899  * GROT: Is this really necessary?  With other than a 512 byte blocksize,
1900  * GROT: there will always be enough room in either block for a new entry.
1901  * GROT: Do a double-split for this case?
1902  */
1903 STATIC int
1904 xfs_attr3_leaf_figure_balance(
1905         struct xfs_da_state             *state,
1906         struct xfs_da_state_blk         *blk1,
1907         struct xfs_attr3_icleaf_hdr     *ichdr1,
1908         struct xfs_da_state_blk         *blk2,
1909         struct xfs_attr3_icleaf_hdr     *ichdr2,
1910         int                             *countarg,
1911         int                             *usedbytesarg)
1912 {
1913         struct xfs_attr_leafblock       *leaf1 = blk1->bp->b_addr;
1914         struct xfs_attr_leafblock       *leaf2 = blk2->bp->b_addr;
1915         struct xfs_attr_leaf_entry      *entry;
1916         int                             count;
1917         int                             max;
1918         int                             index;
1919         int                             totallen = 0;
1920         int                             half;
1921         int                             lastdelta;
1922         int                             foundit = 0;
1923         int                             tmp;
1924
1925         /*
1926          * Examine entries until we reduce the absolute difference in
1927          * byte usage between the two blocks to a minimum.
1928          */
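        /*
         * "half" is the target byte load for the first block: the cost of
         * every entry slot (including the one being added) plus all
         * name/value bytes plus the new entry's bytes, divided by two.
         * The loop below tracks how far the running total strays from
         * that target and stops once the delta starts growing again.
         */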
1929         max = ichdr1->count + ichdr2->count;
1930         half = (max + 1) * sizeof(*entry);
1931         half += ichdr1->usedbytes + ichdr2->usedbytes +
1932                         xfs_attr_leaf_newentsize(state->args, NULL);
1933         half /= 2;
1934         lastdelta = state->args->geo->blksize;
1935         entry = xfs_attr3_leaf_entryp(leaf1);
1936         for (count = index = 0; count < max; entry++, index++, count++) {
1937
1938 #define XFS_ATTR_ABS(A) (((A) < 0) ? -(A) : (A))
1939                 /*
1940                  * The new entry is in the first block, account for it.
1941                  */
1942                 if (count == blk1->index) {
1943                         tmp = totallen + sizeof(*entry) +
1944                                 xfs_attr_leaf_newentsize(state->args, NULL);
1945                         if (XFS_ATTR_ABS(half - tmp) > lastdelta)
1946                                 break;
1947                         lastdelta = XFS_ATTR_ABS(half - tmp);
1948                         totallen = tmp;
1949                         foundit = 1;
1950                 }
1951
1952                 /*
1953                  * Wrap around into the second block if necessary.
1954                  */
1955                 if (count == ichdr1->count) {
1956                         leaf1 = leaf2;
1957                         entry = xfs_attr3_leaf_entryp(leaf1);
1958                         index = 0;
1959                 }
1960
1961                 /*
1962                  * Figure out if next leaf entry would be too much.
1963                  */
1964                 tmp = totallen + sizeof(*entry) + xfs_attr_leaf_entsize(leaf1,
1965                                                                         index);
1966                 if (XFS_ATTR_ABS(half - tmp) > lastdelta)
1967                         break;
1968                 lastdelta = XFS_ATTR_ABS(half - tmp);
1969                 totallen = tmp;
1970 #undef XFS_ATTR_ABS
1971         }
1972
1973         /*
1974          * Calculate the number of usedbytes that will end up in the lower
1975          * block.  If the new entry was counted in, remove its contribution.
1976          */
1977         totallen -= count * sizeof(*entry);
1978         if (foundit) {
1979                 totallen -= sizeof(*entry) +
1980                                 xfs_attr_leaf_newentsize(state->args, NULL);
1981         }
1982
1983         *countarg = count;
1984         *usedbytesarg = totallen;
1985         return foundit;
1986 }
1987
1988 /*========================================================================
1989  * Routines used for shrinking the Btree.
1990  *========================================================================*/
1991
1992 /*
1993  * Check a leaf block and its neighbors to see if the block should be
1994  * collapsed into one or the other neighbor.  Always keep the block
1995  * with the smaller block number.
1996  * If the current block is over 50% full, don't try to join it, return 0.
1997  * If the block is empty, fill in the state structure and return 2.
1998  * If it can be collapsed, fill in the state structure and return 1.
1999  * If nothing can be done, return 0.
2000  *
2001  * GROT: allow for INCOMPLETE entries in calculation.
2002  */
2003 int
2004 xfs_attr3_leaf_toosmall(
2005         struct xfs_da_state     *state,
2006         int                     *action)
2007 {
2008         struct xfs_attr_leafblock *leaf;
2009         struct xfs_da_state_blk *blk;
2010         struct xfs_attr3_icleaf_hdr ichdr;
2011         struct xfs_buf          *bp;
2012         xfs_dablk_t             blkno;
2013         int                     bytes;
2014         int                     forward;
2015         int                     error;
2016         int                     retval;
2017         int                     i;
2018
2019         trace_xfs_attr_leaf_toosmall(state->args);
2020
2021         /*
2022          * Check for the degenerate case of the block being over 50% full.
2023          * If so, it's not worth even looking to see if we might be able
2024          * to coalesce with a sibling.
2025          */
2026         blk = &state->path.blk[ state->path.active-1 ];
2027         leaf = blk->bp->b_addr;
2028         xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr, leaf);
2029         bytes = xfs_attr3_leaf_hdr_size(leaf) +
2030                 ichdr.count * sizeof(xfs_attr_leaf_entry_t) +
2031                 ichdr.usedbytes;
2032         if (bytes > (state->args->geo->blksize >> 1)) {
2033                 *action = 0;    /* blk over 50%, don't try to join */
2034                 return 0;
2035         }
2036
2037         /*
2038          * Check for the degenerate case of the block being empty.
2039          * If the block is empty, we'll simply delete it, no need to
2040          * coalesce it with a sibling block.  We choose (arbitrarily)
2041          * to merge with the forward block unless it is NULL.
2042          */
2043         if (ichdr.count == 0) {
2044                 /*
2045                  * Make altpath point to the block we want to keep and
2046                  * path point to the block we want to drop (this one).
2047                  */
2048                 forward = (ichdr.forw != 0);
2049                 memcpy(&state->altpath, &state->path, sizeof(state->path));
2050                 error = xfs_da3_path_shift(state, &state->altpath, forward,
2051                                                  0, &retval);
2052                 if (error)
2053                         return error;
2054                 if (retval) {
2055                         *action = 0;
2056                 } else {
2057                         *action = 2;
2058                 }
2059                 return 0;
2060         }
2061
2062         /*
2063          * Examine each sibling block to see if we can coalesce with
2064          * at least 25% free space to spare.  We need to figure out
2065          * whether to merge with the forward or the backward block.
2066          * We prefer coalescing with the lower numbered sibling so as
2067          * to shrink an attribute list over time.
2068          */
2069         /* start with smaller blk num */
2070         forward = ichdr.forw < ichdr.back;
2071         for (i = 0; i < 2; forward = !forward, i++) {
2072                 struct xfs_attr3_icleaf_hdr ichdr2;
2073                 if (forward)
2074                         blkno = ichdr.forw;
2075                 else
2076                         blkno = ichdr.back;
2077                 if (blkno == 0)
2078                         continue;
2079                 error = xfs_attr3_leaf_read(state->args->trans, state->args->dp,
2080                                         blkno, &bp);
2081                 if (error)
2082                         return error;
2083
2084                 xfs_attr3_leaf_hdr_from_disk(state->args->geo, &ichdr2, bp->b_addr);
2085
2086                 bytes = state->args->geo->blksize -
2087                         (state->args->geo->blksize >> 2) -
2088                         ichdr.usedbytes - ichdr2.usedbytes -
2089                         ((ichdr.count + ichdr2.count) *
2090                                         sizeof(xfs_attr_leaf_entry_t)) -
2091                         xfs_attr3_leaf_hdr_size(leaf);
2092
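                /*
                 * A non-negative "bytes" means one block header plus the
                 * entry slots and name/value data of both blocks fit
                 * within 75% of a single block, leaving the required 25%
                 * free.  Illustrative numbers only: with a 4096-byte
                 * block, the merge is considered only if that combined
                 * usage is at most 3072 bytes.
                 */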
2093                 xfs_trans_brelse(state->args->trans, bp);
2094                 if (bytes >= 0)
2095                         break;  /* fits with at least 25% to spare */
2096         }
2097         if (i >= 2) {
2098                 *action = 0;
2099                 return 0;
2100         }
2101
2102         /*
2103          * Make altpath point to the block we want to keep (the lower
2104          * numbered block) and path point to the block we want to drop.
2105          */
2106         memcpy(&state->altpath, &state->path, sizeof(state->path));
2107         if (blkno < blk->blkno) {
2108                 error = xfs_da3_path_shift(state, &state->altpath, forward,
2109                                                  0, &retval);
2110         } else {
2111                 error = xfs_da3_path_shift(state, &state->path, forward,
2112                                                  0, &retval);
2113         }
2114         if (error)
2115                 return error;
2116         if (retval) {
2117                 *action = 0;
2118         } else {
2119                 *action = 1;
2120         }
2121         return 0;
2122 }
2123
2124 /*
2125  * Remove a name from the leaf attribute list structure.
2126  *
2127  * Return 1 if leaf is less than 37% full, 0 if >= 37% full.
2128  * If two leaves are 37% full, when combined they will leave 25% free.
2129  */
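/*
 * The 37% figure follows from the 25% free-space target used when joining
 * leaves: two blocks each at most 37% full combine to roughly 74% of one
 * block, leaving about 25% free after the join.  args->geo->magicpct
 * encodes that 37% threshold for the comparison at the end of this
 * function.
 */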
2130 int
2131 xfs_attr3_leaf_remove(
2132         struct xfs_buf          *bp,
2133         struct xfs_da_args      *args)
2134 {
2135         struct xfs_attr_leafblock *leaf;
2136         struct xfs_attr3_icleaf_hdr ichdr;
2137         struct xfs_attr_leaf_entry *entry;
2138         int                     before;
2139         int                     after;
2140         int                     smallest;
2141         int                     entsize;
2142         int                     tablesize;
2143         int                     tmp;
2144         int                     i;
2145
2146         trace_xfs_attr_leaf_remove(args);
2147
2148         leaf = bp->b_addr;
2149         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
2150
2151         ASSERT(ichdr.count > 0 && ichdr.count < args->geo->blksize / 8);
2152         ASSERT(args->index >= 0 && args->index < ichdr.count);
2153         ASSERT(ichdr.firstused >= ichdr.count * sizeof(*entry) +
2154                                         xfs_attr3_leaf_hdr_size(leaf));
2155
2156         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
2157
2158         ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
2159         ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize);
2160
2161         /*
2162          * Scan through free region table:
2163          *    check for adjacency of the freed entry with an existing one,
2164          *    find smallest free region in case we need to replace it,
2165          *    adjust any map that borders the entry table,
2166          */
2167         tablesize = ichdr.count * sizeof(xfs_attr_leaf_entry_t)
2168                                         + xfs_attr3_leaf_hdr_size(leaf);
2169         tmp = ichdr.freemap[0].size;
2170         before = after = -1;
2171         smallest = XFS_ATTR_LEAF_MAPSIZE - 1;
2172         entsize = xfs_attr_leaf_entsize(leaf, args->index);
2173         for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
2174                 ASSERT(ichdr.freemap[i].base < args->geo->blksize);
2175                 ASSERT(ichdr.freemap[i].size < args->geo->blksize);
2176                 if (ichdr.freemap[i].base == tablesize) {
2177                         ichdr.freemap[i].base -= sizeof(xfs_attr_leaf_entry_t);
2178                         ichdr.freemap[i].size += sizeof(xfs_attr_leaf_entry_t);
2179                 }
2180
2181                 if (ichdr.freemap[i].base + ichdr.freemap[i].size ==
2182                                 be16_to_cpu(entry->nameidx)) {
2183                         before = i;
2184                 } else if (ichdr.freemap[i].base ==
2185                                 (be16_to_cpu(entry->nameidx) + entsize)) {
2186                         after = i;
2187                 } else if (ichdr.freemap[i].size < tmp) {
2188                         tmp = ichdr.freemap[i].size;
2189                         smallest = i;
2190                 }
2191         }
2192
2193         /*
2194          * Coalesce adjacent freemap regions,
2195          * or replace the smallest region.
2196          */
2197         if ((before >= 0) || (after >= 0)) {
2198                 if ((before >= 0) && (after >= 0)) {
2199                         ichdr.freemap[before].size += entsize;
2200                         ichdr.freemap[before].size += ichdr.freemap[after].size;
2201                         ichdr.freemap[after].base = 0;
2202                         ichdr.freemap[after].size = 0;
2203                 } else if (before >= 0) {
2204                         ichdr.freemap[before].size += entsize;
2205                 } else {
2206                         ichdr.freemap[after].base = be16_to_cpu(entry->nameidx);
2207                         ichdr.freemap[after].size += entsize;
2208                 }
2209         } else {
2210                 /*
2211                  * Replace the smallest region (if it is smaller than the freed entry)
2212                  */
2213                 if (ichdr.freemap[smallest].size < entsize) {
2214                         ichdr.freemap[smallest].base = be16_to_cpu(entry->nameidx);
2215                         ichdr.freemap[smallest].size = entsize;
2216                 }
2217         }
2218
2219         /*
2220          * Did we remove the first entry?
2221          */
2222         if (be16_to_cpu(entry->nameidx) == ichdr.firstused)
2223                 smallest = 1;
2224         else
2225                 smallest = 0;
2226
2227         /*
2228          * Compress the remaining entries and zero out the removed stuff.
2229          */
2230         memset(xfs_attr3_leaf_name(leaf, args->index), 0, entsize);
2231         ichdr.usedbytes -= entsize;
2232         xfs_trans_log_buf(args->trans, bp,
2233              XFS_DA_LOGRANGE(leaf, xfs_attr3_leaf_name(leaf, args->index),
2234                                    entsize));
2235
2236         tmp = (ichdr.count - args->index) * sizeof(xfs_attr_leaf_entry_t);
2237         memmove(entry, entry + 1, tmp);
2238         ichdr.count--;
2239         xfs_trans_log_buf(args->trans, bp,
2240             XFS_DA_LOGRANGE(leaf, entry, tmp + sizeof(xfs_attr_leaf_entry_t)));
2241
2242         entry = &xfs_attr3_leaf_entryp(leaf)[ichdr.count];
2243         memset(entry, 0, sizeof(xfs_attr_leaf_entry_t));
2244
2245         /*
2246          * If we removed the first entry, re-find the first used byte
2247          * in the name area.  Note that if the entry was the "firstused",
2248          * then we don't have a "hole" in our block resulting from
2249          * removing the name.
2250          */
2251         if (smallest) {
2252                 tmp = args->geo->blksize;
2253                 entry = xfs_attr3_leaf_entryp(leaf);
2254                 for (i = ichdr.count - 1; i >= 0; entry++, i--) {
2255                         ASSERT(be16_to_cpu(entry->nameidx) >= ichdr.firstused);
2256                         ASSERT(be16_to_cpu(entry->nameidx) < args->geo->blksize);
2257
2258                         if (be16_to_cpu(entry->nameidx) < tmp)
2259                                 tmp = be16_to_cpu(entry->nameidx);
2260                 }
2261                 ichdr.firstused = tmp;
2262                 ASSERT(ichdr.firstused != 0);
2263         } else {
2264                 ichdr.holes = 1;        /* mark as needing compaction */
2265         }
2266         xfs_attr3_leaf_hdr_to_disk(args->geo, leaf, &ichdr);
2267         xfs_trans_log_buf(args->trans, bp,
2268                           XFS_DA_LOGRANGE(leaf, &leaf->hdr,
2269                                           xfs_attr3_leaf_hdr_size(leaf)));
2270
2271         /*
2272          * Check if the leaf is less than 37% full; if so, the caller may
2273          * want to "join" it with a sibling.
2274          */
2275         tmp = ichdr.usedbytes + xfs_attr3_leaf_hdr_size(leaf) +
2276               ichdr.count * sizeof(xfs_attr_leaf_entry_t);
2277
2278         return tmp < args->geo->magicpct; /* leaf is < 37% full */
2279 }
2280
2281 /*
2282  * Move all the attribute list entries from drop_leaf into save_leaf.
2283  */
2284 void
2285 xfs_attr3_leaf_unbalance(
2286         struct xfs_da_state     *state,
2287         struct xfs_da_state_blk *drop_blk,
2288         struct xfs_da_state_blk *save_blk)
2289 {
2290         struct xfs_attr_leafblock *drop_leaf = drop_blk->bp->b_addr;
2291         struct xfs_attr_leafblock *save_leaf = save_blk->bp->b_addr;
2292         struct xfs_attr3_icleaf_hdr drophdr;
2293         struct xfs_attr3_icleaf_hdr savehdr;
2294         struct xfs_attr_leaf_entry *entry;
2295
2296         trace_xfs_attr_leaf_unbalance(state->args);
2297
2298         drop_leaf = drop_blk->bp->b_addr;
2299         save_leaf = save_blk->bp->b_addr;
2300         xfs_attr3_leaf_hdr_from_disk(state->args->geo, &drophdr, drop_leaf);
2301         xfs_attr3_leaf_hdr_from_disk(state->args->geo, &savehdr, save_leaf);
2302         entry = xfs_attr3_leaf_entryp(drop_leaf);
2303
2304         /*
2305          * Save last hashval from dying block for later Btree fixup.
2306          */
2307         drop_blk->hashval = be32_to_cpu(entry[drophdr.count - 1].hashval);
2308
2309         /*
2310          * Check if we need a temp buffer, or can we do it in place.
2311          * Note that we don't check "leaf" for holes because we will
2312          * always be dropping it, toosmall() decided that for us already.
2313          */
2314         if (savehdr.holes == 0) {
2315                 /*
2316                  * dest leaf has no holes, so we add there.  May need
2317                  * to make some room in the entry array.
2318                  */
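                /*
                 * Hash order decides where the dropped entries land: if
                 * the save block sorts after the drop block, prepend the
                 * dropped entries at index 0, otherwise append them after
                 * the existing savehdr.count entries.
                 */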
2319                 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
2320                                          drop_blk->bp, &drophdr)) {
2321                         xfs_attr3_leaf_moveents(state->args,
2322                                                 drop_leaf, &drophdr, 0,
2323                                                 save_leaf, &savehdr, 0,
2324                                                 drophdr.count);
2325                 } else {
2326                         xfs_attr3_leaf_moveents(state->args,
2327                                                 drop_leaf, &drophdr, 0,
2328                                                 save_leaf, &savehdr,
2329                                                 savehdr.count, drophdr.count);
2330                 }
2331         } else {
2332                 /*
2333                  * Destination has holes, so we make a temporary copy
2334                  * of the leaf and add them both to that.
2335                  */
2336                 struct xfs_attr_leafblock *tmp_leaf;
2337                 struct xfs_attr3_icleaf_hdr tmphdr;
2338
2339                 tmp_leaf = kmem_zalloc(state->args->geo->blksize, 0);
2340
2341                 /*
2342                  * Copy the header into the temp leaf so that all the stuff
2343                  * not in the incore header is present and gets copied back in
2344                  * once we've moved all the entries.
2345                  */
2346                 memcpy(tmp_leaf, save_leaf, xfs_attr3_leaf_hdr_size(save_leaf));
2347
2348                 memset(&tmphdr, 0, sizeof(tmphdr));
2349                 tmphdr.magic = savehdr.magic;
2350                 tmphdr.forw = savehdr.forw;
2351                 tmphdr.back = savehdr.back;
2352                 tmphdr.firstused = state->args->geo->blksize;
2353
2354                 /* write the header to the temp buffer to initialise it */
2355                 xfs_attr3_leaf_hdr_to_disk(state->args->geo, tmp_leaf, &tmphdr);
2356
2357                 if (xfs_attr3_leaf_order(save_blk->bp, &savehdr,
2358                                          drop_blk->bp, &drophdr)) {
2359                         xfs_attr3_leaf_moveents(state->args,
2360                                                 drop_leaf, &drophdr, 0,
2361                                                 tmp_leaf, &tmphdr, 0,
2362                                                 drophdr.count);
2363                         xfs_attr3_leaf_moveents(state->args,
2364                                                 save_leaf, &savehdr, 0,
2365                                                 tmp_leaf, &tmphdr, tmphdr.count,
2366                                                 savehdr.count);
2367                 } else {
2368                         xfs_attr3_leaf_moveents(state->args,
2369                                                 save_leaf, &savehdr, 0,
2370                                                 tmp_leaf, &tmphdr, 0,
2371                                                 savehdr.count);
2372                         xfs_attr3_leaf_moveents(state->args,
2373                                                 drop_leaf, &drophdr, 0,
2374                                                 tmp_leaf, &tmphdr, tmphdr.count,
2375                                                 drophdr.count);
2376                 }
2377                 memcpy(save_leaf, tmp_leaf, state->args->geo->blksize);
2378                 savehdr = tmphdr; /* struct copy */
2379                 kmem_free(tmp_leaf);
2380         }
2381
2382         xfs_attr3_leaf_hdr_to_disk(state->args->geo, save_leaf, &savehdr);
2383         xfs_trans_log_buf(state->args->trans, save_blk->bp, 0,
2384                                            state->args->geo->blksize - 1);
2385
2386         /*
2387          * Copy out last hashval in each block for B-tree code.
2388          */
2389         entry = xfs_attr3_leaf_entryp(save_leaf);
2390         save_blk->hashval = be32_to_cpu(entry[savehdr.count - 1].hashval);
2391 }
2392
2393 /*========================================================================
2394  * Routines used for finding things in the Btree.
2395  *========================================================================*/
2396
2397 /*
2398  * Look up a name in a leaf attribute list structure.
2399  * This is the internal routine, it uses the caller's buffer.
2400  *
2401  * Note that duplicate keys are allowed, but we only check within the
2402  * current leaf node.  The Btree code must check in adjacent leaf nodes.
2403  *
2404  * Return in args->index the index into the entry[] array of either
2405  * the found entry, or where the entry should have been (insert before
2406  * that entry).
2407  *
2408  * Don't change the args->value unless we find the attribute.
2409  */
2410 int
2411 xfs_attr3_leaf_lookup_int(
2412         struct xfs_buf          *bp,
2413         struct xfs_da_args      *args)
2414 {
2415         struct xfs_attr_leafblock *leaf;
2416         struct xfs_attr3_icleaf_hdr ichdr;
2417         struct xfs_attr_leaf_entry *entry;
2418         struct xfs_attr_leaf_entry *entries;
2419         struct xfs_attr_leaf_name_local *name_loc;
2420         struct xfs_attr_leaf_name_remote *name_rmt;
2421         xfs_dahash_t            hashval;
2422         int                     probe;
2423         int                     span;
2424
2425         trace_xfs_attr_leaf_lookup(args);
2426
2427         leaf = bp->b_addr;
2428         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
2429         entries = xfs_attr3_leaf_entryp(leaf);
2430         if (ichdr.count >= args->geo->blksize / 8) {
2431                 xfs_buf_mark_corrupt(bp);
2432                 return -EFSCORRUPTED;
2433         }
2434
2435         /*
2436          * Binary search.  (note: small blocks will skip this loop)
2437          */
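        /*
         * The search halves "span" each pass and bails out to the linear
         * walk below once span drops to 4 or fewer entries, leaving
         * "probe" inside (or adjacent to) the run of entries with the
         * wanted hashval.
         */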
2438         hashval = args->hashval;
2439         probe = span = ichdr.count / 2;
2440         for (entry = &entries[probe]; span > 4; entry = &entries[probe]) {
2441                 span /= 2;
2442                 if (be32_to_cpu(entry->hashval) < hashval)
2443                         probe += span;
2444                 else if (be32_to_cpu(entry->hashval) > hashval)
2445                         probe -= span;
2446                 else
2447                         break;
2448         }
2449         if (!(probe >= 0 && (!ichdr.count || probe < ichdr.count))) {
2450                 xfs_buf_mark_corrupt(bp);
2451                 return -EFSCORRUPTED;
2452         }
2453         if (!(span <= 4 || be32_to_cpu(entry->hashval) == hashval)) {
2454                 xfs_buf_mark_corrupt(bp);
2455                 return -EFSCORRUPTED;
2456         }
2457
2458         /*
2459          * Since we may have duplicate hashval's, find the first matching
2460          * hashval in the leaf.
2461          */
2462         while (probe > 0 && be32_to_cpu(entry->hashval) >= hashval) {
2463                 entry--;
2464                 probe--;
2465         }
2466         while (probe < ichdr.count &&
2467                be32_to_cpu(entry->hashval) < hashval) {
2468                 entry++;
2469                 probe++;
2470         }
2471         if (probe == ichdr.count || be32_to_cpu(entry->hashval) != hashval) {
2472                 args->index = probe;
2473                 return -ENOATTR;
2474         }
2475
2476         /*
2477          * Duplicate keys may be present, so search all of them for a match.
2478          */
2479         for (; probe < ichdr.count && (be32_to_cpu(entry->hashval) == hashval);
2480                         entry++, probe++) {
2481 /*
2482  * GROT: Add code to remove incomplete entries.
2483  */
2484                 if (entry->flags & XFS_ATTR_LOCAL) {
2485                         name_loc = xfs_attr3_leaf_name_local(leaf, probe);
2486                         if (!xfs_attr_match(args, name_loc->namelen,
2487                                         name_loc->nameval, entry->flags))
2488                                 continue;
2489                         args->index = probe;
2490                         return -EEXIST;
2491                 } else {
2492                         name_rmt = xfs_attr3_leaf_name_remote(leaf, probe);
2493                         if (!xfs_attr_match(args, name_rmt->namelen,
2494                                         name_rmt->name, entry->flags))
2495                                 continue;
2496                         args->index = probe;
2497                         args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
2498                         args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2499                         args->rmtblkcnt = xfs_attr3_rmt_blocks(
2500                                                         args->dp->i_mount,
2501                                                         args->rmtvaluelen);
2502                         return -EEXIST;
2503                 }
2504         }
2505         args->index = probe;
2506         return -ENOATTR;
2507 }
2508
2509 /*
2510  * Get the value associated with an attribute name from a leaf attribute
2511  * list structure.
2512  *
2513  * If args->valuelen is zero, only the length needs to be returned.  Unlike a
2514  * lookup, we only return an error if the attribute does not exist or we can't
2515  * retrieve the value.
2516  */
2517 int
2518 xfs_attr3_leaf_getvalue(
2519         struct xfs_buf          *bp,
2520         struct xfs_da_args      *args)
2521 {
2522         struct xfs_attr_leafblock *leaf;
2523         struct xfs_attr3_icleaf_hdr ichdr;
2524         struct xfs_attr_leaf_entry *entry;
2525         struct xfs_attr_leaf_name_local *name_loc;
2526         struct xfs_attr_leaf_name_remote *name_rmt;
2527
2528         leaf = bp->b_addr;
2529         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
2530         ASSERT(ichdr.count < args->geo->blksize / 8);
2531         ASSERT(args->index < ichdr.count);
2532
2533         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
2534         if (entry->flags & XFS_ATTR_LOCAL) {
2535                 name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
2536                 ASSERT(name_loc->namelen == args->namelen);
2537                 ASSERT(memcmp(args->name, name_loc->nameval, args->namelen) == 0);
2538                 return xfs_attr_copy_value(args,
2539                                         &name_loc->nameval[args->namelen],
2540                                         be16_to_cpu(name_loc->valuelen));
2541         }
2542
2543         name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2544         ASSERT(name_rmt->namelen == args->namelen);
2545         ASSERT(memcmp(args->name, name_rmt->name, args->namelen) == 0);
2546         args->rmtvaluelen = be32_to_cpu(name_rmt->valuelen);
2547         args->rmtblkno = be32_to_cpu(name_rmt->valueblk);
2548         args->rmtblkcnt = xfs_attr3_rmt_blocks(args->dp->i_mount,
2549                                                args->rmtvaluelen);
2550         return xfs_attr_copy_value(args, NULL, args->rmtvaluelen);
2551 }
2552
2553 /*========================================================================
2554  * Utility routines.
2555  *========================================================================*/
2556
2557 /*
2558  * Move the indicated entries from one leaf to another.
2559  * NOTE: this routine modifies both source and destination leaves.
2560  */
2561 /*ARGSUSED*/
2562 STATIC void
2563 xfs_attr3_leaf_moveents(
2564         struct xfs_da_args              *args,
2565         struct xfs_attr_leafblock       *leaf_s,
2566         struct xfs_attr3_icleaf_hdr     *ichdr_s,
2567         int                             start_s,
2568         struct xfs_attr_leafblock       *leaf_d,
2569         struct xfs_attr3_icleaf_hdr     *ichdr_d,
2570         int                             start_d,
2571         int                             count)
2572 {
2573         struct xfs_attr_leaf_entry      *entry_s;
2574         struct xfs_attr_leaf_entry      *entry_d;
2575         int                             desti;
2576         int                             tmp;
2577         int                             i;
2578
2579         /*
2580          * Check for nothing to do.
2581          */
2582         if (count == 0)
2583                 return;
2584
2585         /*
2586          * Set up environment.
2587          */
2588         ASSERT(ichdr_s->magic == XFS_ATTR_LEAF_MAGIC ||
2589                ichdr_s->magic == XFS_ATTR3_LEAF_MAGIC);
2590         ASSERT(ichdr_s->magic == ichdr_d->magic);
2591         ASSERT(ichdr_s->count > 0 && ichdr_s->count < args->geo->blksize / 8);
2592         ASSERT(ichdr_s->firstused >= (ichdr_s->count * sizeof(*entry_s))
2593                                         + xfs_attr3_leaf_hdr_size(leaf_s));
2594         ASSERT(ichdr_d->count < args->geo->blksize / 8);
2595         ASSERT(ichdr_d->firstused >= (ichdr_d->count * sizeof(*entry_d))
2596                                         + xfs_attr3_leaf_hdr_size(leaf_d));
2597
2598         ASSERT(start_s < ichdr_s->count);
2599         ASSERT(start_d <= ichdr_d->count);
2600         ASSERT(count <= ichdr_s->count);
2601
2602
2603         /*
2604          * Move the entries in the destination leaf up to make a hole?
2605          */
2606         if (start_d < ichdr_d->count) {
2607                 tmp  = ichdr_d->count - start_d;
2608                 tmp *= sizeof(xfs_attr_leaf_entry_t);
2609                 entry_s = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
2610                 entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d + count];
2611                 memmove(entry_d, entry_s, tmp);
2612         }
2613
2614         /*
2615          * Copy all entries in the same (sorted) order,
2616          * but allocate attribute info packed and in sequence.
2617          */
2618         entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
2619         entry_d = &xfs_attr3_leaf_entryp(leaf_d)[start_d];
2620         desti = start_d;
2621         for (i = 0; i < count; entry_s++, entry_d++, desti++, i++) {
2622                 ASSERT(be16_to_cpu(entry_s->nameidx) >= ichdr_s->firstused);
2623                 tmp = xfs_attr_leaf_entsize(leaf_s, start_s + i);
2624 #ifdef GROT
2625                 /*
2626                  * Code to drop INCOMPLETE entries.  Difficult to use as we
2627                  * may also need to change the insertion index.  Code turned
2628                  * off for 6.2, should be revisited later.
2629                  */
2630                 if (entry_s->flags & XFS_ATTR_INCOMPLETE) { /* skip partials? */
2631                         memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
2632                         ichdr_s->usedbytes -= tmp;
2633                         ichdr_s->count -= 1;
2634                         entry_d--;      /* to compensate for ++ in loop hdr */
2635                         desti--;
2636                         if ((start_s + i) < offset)
2637                                 result++;       /* insertion index adjustment */
2638                 } else {
2639 #endif /* GROT */
2640                         ichdr_d->firstused -= tmp;
2641                         /* both on-disk, don't endian flip twice */
2642                         entry_d->hashval = entry_s->hashval;
2643                         entry_d->nameidx = cpu_to_be16(ichdr_d->firstused);
2644                         entry_d->flags = entry_s->flags;
2645                         ASSERT(be16_to_cpu(entry_d->nameidx) + tmp
2646                                                         <= args->geo->blksize);
2647                         memmove(xfs_attr3_leaf_name(leaf_d, desti),
2648                                 xfs_attr3_leaf_name(leaf_s, start_s + i), tmp);
2649                         ASSERT(be16_to_cpu(entry_s->nameidx) + tmp
2650                                                         <= args->geo->blksize);
2651                         memset(xfs_attr3_leaf_name(leaf_s, start_s + i), 0, tmp);
2652                         ichdr_s->usedbytes -= tmp;
2653                         ichdr_d->usedbytes += tmp;
2654                         ichdr_s->count -= 1;
2655                         ichdr_d->count += 1;
2656                         tmp = ichdr_d->count * sizeof(xfs_attr_leaf_entry_t)
2657                                         + xfs_attr3_leaf_hdr_size(leaf_d);
2658                         ASSERT(ichdr_d->firstused >= tmp);
2659 #ifdef GROT
2660                 }
2661 #endif /* GROT */
2662         }
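
        /*
         * At this point the moved name/value data is packed at the new,
         * lower firstused offset in the destination and the source copies
         * have been zeroed; all that remains is tidying the source entry
         * table below.
         */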
2663
2664         /*
2665          * Zero out the source entry slots we just moved out of.
2666          */
2667         if (start_s == ichdr_s->count) {
2668                 tmp = count * sizeof(xfs_attr_leaf_entry_t);
2669                 entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
2670                 ASSERT(((char *)entry_s + tmp) <=
2671                        ((char *)leaf_s + args->geo->blksize));
2672                 memset(entry_s, 0, tmp);
2673         } else {
2674                 /*
2675                  * Move the remaining entries down to fill the hole,
2676                  * then zero the entries at the top.
2677                  */
2678                 tmp  = (ichdr_s->count - count) * sizeof(xfs_attr_leaf_entry_t);
2679                 entry_s = &xfs_attr3_leaf_entryp(leaf_s)[start_s + count];
2680                 entry_d = &xfs_attr3_leaf_entryp(leaf_s)[start_s];
2681                 memmove(entry_d, entry_s, tmp);
2682
2683                 tmp = count * sizeof(xfs_attr_leaf_entry_t);
2684                 entry_s = &xfs_attr3_leaf_entryp(leaf_s)[ichdr_s->count];
2685                 ASSERT(((char *)entry_s + tmp) <=
2686                        ((char *)leaf_s + args->geo->blksize));
2687                 memset(entry_s, 0, tmp);
2688         }
2689
2690         /*
2691          * Fill in the freemap information
2692          */
2693         ichdr_d->freemap[0].base = xfs_attr3_leaf_hdr_size(leaf_d);
2694         ichdr_d->freemap[0].base += ichdr_d->count * sizeof(xfs_attr_leaf_entry_t);
2695         ichdr_d->freemap[0].size = ichdr_d->firstused - ichdr_d->freemap[0].base;
2696         ichdr_d->freemap[1].base = 0;
2697         ichdr_d->freemap[2].base = 0;
2698         ichdr_d->freemap[1].size = 0;
2699         ichdr_d->freemap[2].size = 0;
2700         ichdr_s->holes = 1;     /* leaf may not be compact */
2701 }
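
/*
 * Note that xfs_attr3_leaf_moveents() only manipulates the in-core headers
 * and the raw block contents; it is up to the callers (typically the leaf
 * compact/rebalance/unbalance paths) to convert the ichdrs back to disk
 * format and log the buffers afterwards.
 */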
2702
2703 /*
2704  * Pick up the last (highest) hash value from a leaf block.
2705  */
2706 xfs_dahash_t
2707 xfs_attr_leaf_lasthash(
2708         struct xfs_buf  *bp,
2709         int             *count)
2710 {
2711         struct xfs_attr3_icleaf_hdr ichdr;
2712         struct xfs_attr_leaf_entry *entries;
2713         struct xfs_mount *mp = bp->b_mount;
2714
2715         xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, bp->b_addr);
2716         entries = xfs_attr3_leaf_entryp(bp->b_addr);
2717         if (count)
2718                 *count = ichdr.count;
2719         if (!ichdr.count)
2720                 return 0;
2721         return be32_to_cpu(entries[ichdr.count - 1].hashval);
2722 }
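
/*
 * Leaf entries are kept sorted by hashval, so the value returned above is
 * the upper bound that the parent da btree node records for this block.
 * Callers typically use it to fix up the parent's entry after a split,
 * join or rebalance, and the optional count lets them spot an empty leaf.
 */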
2723
2724 /*
2725  * Calculate the number of bytes used to store the indicated attribute
2726  * (whether local or remote, count only the bytes in this block).
2727  */
2728 STATIC int
2729 xfs_attr_leaf_entsize(xfs_attr_leafblock_t *leaf, int index)
2730 {
2731         struct xfs_attr_leaf_entry *entries;
2732         xfs_attr_leaf_name_local_t *name_loc;
2733         xfs_attr_leaf_name_remote_t *name_rmt;
2734         int size;
2735
2736         entries = xfs_attr3_leaf_entryp(leaf);
2737         if (entries[index].flags & XFS_ATTR_LOCAL) {
2738                 name_loc = xfs_attr3_leaf_name_local(leaf, index);
2739                 size = xfs_attr_leaf_entsize_local(name_loc->namelen,
2740                                                    be16_to_cpu(name_loc->valuelen));
2741         } else {
2742                 name_rmt = xfs_attr3_leaf_name_remote(leaf, index);
2743                 size = xfs_attr_leaf_entsize_remote(name_rmt->namelen);
2744         }
2745         return size;
2746 }
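
/*
 * To illustrate the split handled above: a local entry packs both the name
 * and the value into this block, so its footprint grows with namelen and
 * valuelen, whereas a remote entry keeps only the name plus the small fixed
 * header (valueblk/valuelen) here and stores the value itself in separate
 * remote blocks.  The xfs_attr_leaf_entsize_local()/_remote() helpers
 * account for the exact struct overhead and alignment rounding.
 */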
2747
2748 /*
2749  * Calculate the number of bytes that would be required to store the new
2750  * attribute (whether local or remote, count only the bytes in this block).
2751  * As a side effect, this routine decides whether the attribute will be
2752  * stored as a "local" or a "remote" attribute.
2753  */
2754 int
2755 xfs_attr_leaf_newentsize(
2756         struct xfs_da_args      *args,
2757         int                     *local)
2758 {
2759         int                     size;
2760
2761         size = xfs_attr_leaf_entsize_local(args->namelen, args->valuelen);
2762         if (size < xfs_attr_leaf_entsize_local_max(args->geo->blksize)) {
2763                 if (local)
2764                         *local = 1;
2765                 return size;
2766         }
2767         if (local)
2768                 *local = 0;
2769         return xfs_attr_leaf_entsize_remote(args->namelen);
2770 }
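
/*
 * A sketch of how this is typically consumed (illustrative only, not a
 * verbatim call site):
 *
 *	int local;
 *	int size = xfs_attr_leaf_newentsize(args, &local);
 *
 * where "size" is the space the entry will occupy in this leaf block and
 * "local" reports whether the value is stored inline (1) or must be
 * written to remote blocks (0).
 */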
2771
2772
2773 /*========================================================================
2774  * Manage the INCOMPLETE flag in a leaf entry
2775  *========================================================================*/
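
/*
 * Background on how the flag is used: an entry is created with INCOMPLETE
 * set so that lookups ignore it while its (possibly remote) value is still
 * being written; once the value is in place the flag is cleared.  The
 * replace path relies on xfs_attr3_leaf_flipflags() below to switch the
 * old and new entries' visibility within a single transaction, so a crash
 * in the middle never exposes both entries or neither.
 */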
2776
2777 /*
2778  * Clear the INCOMPLETE flag on an entry in a leaf block.
2779  */
2780 int
2781 xfs_attr3_leaf_clearflag(
2782         struct xfs_da_args      *args)
2783 {
2784         struct xfs_attr_leafblock *leaf;
2785         struct xfs_attr_leaf_entry *entry;
2786         struct xfs_attr_leaf_name_remote *name_rmt;
2787         struct xfs_buf          *bp;
2788         int                     error;
2789 #ifdef DEBUG
2790         struct xfs_attr3_icleaf_hdr ichdr;
2791         xfs_attr_leaf_name_local_t *name_loc;
2792         int namelen;
2793         char *name;
2794 #endif /* DEBUG */
2795
2796         trace_xfs_attr_leaf_clearflag(args);
2797         /*
2798          * Set up the operation.
2799          */
2800         error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
2801         if (error)
2802                 return error;
2803
2804         leaf = bp->b_addr;
2805         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
2806         ASSERT(entry->flags & XFS_ATTR_INCOMPLETE);
2807
2808 #ifdef DEBUG
2809         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
2810         ASSERT(args->index < ichdr.count);
2811         ASSERT(args->index >= 0);
2812
2813         if (entry->flags & XFS_ATTR_LOCAL) {
2814                 name_loc = xfs_attr3_leaf_name_local(leaf, args->index);
2815                 namelen = name_loc->namelen;
2816                 name = (char *)name_loc->nameval;
2817         } else {
2818                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2819                 namelen = name_rmt->namelen;
2820                 name = (char *)name_rmt->name;
2821         }
2822         ASSERT(be32_to_cpu(entry->hashval) == args->hashval);
2823         ASSERT(namelen == args->namelen);
2824         ASSERT(memcmp(name, args->name, namelen) == 0);
2825 #endif /* DEBUG */
2826
2827         entry->flags &= ~XFS_ATTR_INCOMPLETE;
2828         xfs_trans_log_buf(args->trans, bp,
2829                          XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
2830
2831         if (args->rmtblkno) {
2832                 ASSERT((entry->flags & XFS_ATTR_LOCAL) == 0);
2833                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2834                 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2835                 name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
2836                 xfs_trans_log_buf(args->trans, bp,
2837                          XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2838         }
2839
2840         return 0;
2841 }
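
/*
 * Note that for a remote attribute the valueblk/valuelen fields above are
 * refreshed from args->rmtblkno/rmtvaluelen at the same time the flag is
 * cleared, now that the remote value has actually been written, which is
 * why the name_rmt struct is logged alongside the entry.
 */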
2842
2843 /*
2844  * Set the INCOMPLETE flag on an entry in a leaf block.
2845  */
2846 int
2847 xfs_attr3_leaf_setflag(
2848         struct xfs_da_args      *args)
2849 {
2850         struct xfs_attr_leafblock *leaf;
2851         struct xfs_attr_leaf_entry *entry;
2852         struct xfs_attr_leaf_name_remote *name_rmt;
2853         struct xfs_buf          *bp;
2854         int error;
2855 #ifdef DEBUG
2856         struct xfs_attr3_icleaf_hdr ichdr;
2857 #endif
2858
2859         trace_xfs_attr_leaf_setflag(args);
2860
2861         /*
2862          * Set up the operation.
2863          */
2864         error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp);
2865         if (error)
2866                 return error;
2867
2868         leaf = bp->b_addr;
2869 #ifdef DEBUG
2870         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr, leaf);
2871         ASSERT(args->index < ichdr.count);
2872         ASSERT(args->index >= 0);
2873 #endif
2874         entry = &xfs_attr3_leaf_entryp(leaf)[args->index];
2875
2876         ASSERT((entry->flags & XFS_ATTR_INCOMPLETE) == 0);
2877         entry->flags |= XFS_ATTR_INCOMPLETE;
2878         xfs_trans_log_buf(args->trans, bp,
2879                         XFS_DA_LOGRANGE(leaf, entry, sizeof(*entry)));
2880         if ((entry->flags & XFS_ATTR_LOCAL) == 0) {
2881                 name_rmt = xfs_attr3_leaf_name_remote(leaf, args->index);
2882                 name_rmt->valueblk = 0;
2883                 name_rmt->valuelen = 0;
2884                 xfs_trans_log_buf(args->trans, bp,
2885                          XFS_DA_LOGRANGE(leaf, name_rmt, sizeof(*name_rmt)));
2886         }
2887
2888         return 0;
2889 }
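
/*
 * The mirror image of xfs_attr3_leaf_clearflag() above: besides hiding the
 * entry from lookups, the remote valueblk/valuelen are zeroed so that a
 * stale pointer to the old remote value is not left behind while the value
 * is being removed or rewritten.
 */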
2890
2891 /*
2892  * In a single transaction, clear the INCOMPLETE flag on the leaf entry
2893  * given by args->blkno/index and set the INCOMPLETE flag on the leaf
2894  * entry given by args->blkno2/index2.
2895  *
2896  * Note that they could be in different blocks, or in the same block.
2897  */
2898 int
2899 xfs_attr3_leaf_flipflags(
2900         struct xfs_da_args      *args)
2901 {
2902         struct xfs_attr_leafblock *leaf1;
2903         struct xfs_attr_leafblock *leaf2;
2904         struct xfs_attr_leaf_entry *entry1;
2905         struct xfs_attr_leaf_entry *entry2;
2906         struct xfs_attr_leaf_name_remote *name_rmt;
2907         struct xfs_buf          *bp1;
2908         struct xfs_buf          *bp2;
2909         int error;
2910 #ifdef DEBUG
2911         struct xfs_attr3_icleaf_hdr ichdr1;
2912         struct xfs_attr3_icleaf_hdr ichdr2;
2913         xfs_attr_leaf_name_local_t *name_loc;
2914         int namelen1, namelen2;
2915         char *name1, *name2;
2916 #endif /* DEBUG */
2917
2918         trace_xfs_attr_leaf_flipflags(args);
2919
2920         /*
2921          * Read the block containing the "old" attr
2922          */
2923         error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno, &bp1);
2924         if (error)
2925                 return error;
2926
2927         /*
2928          * Read the block containing the "new" attr, if it is different
2929          */
2930         if (args->blkno2 != args->blkno) {
2931                 error = xfs_attr3_leaf_read(args->trans, args->dp, args->blkno2,
2932                                            &bp2);
2933                 if (error)
2934                         return error;
2935         } else {
2936                 bp2 = bp1;
2937         }
2938
2939         leaf1 = bp1->b_addr;
2940         entry1 = &xfs_attr3_leaf_entryp(leaf1)[args->index];
2941
2942         leaf2 = bp2->b_addr;
2943         entry2 = &xfs_attr3_leaf_entryp(leaf2)[args->index2];
2944
2945 #ifdef DEBUG
2946         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr1, leaf1);
2947         ASSERT(args->index < ichdr1.count);
2948         ASSERT(args->index >= 0);
2949
2950         xfs_attr3_leaf_hdr_from_disk(args->geo, &ichdr2, leaf2);
2951         ASSERT(args->index2 < ichdr2.count);
2952         ASSERT(args->index2 >= 0);
2953
2954         if (entry1->flags & XFS_ATTR_LOCAL) {
2955                 name_loc = xfs_attr3_leaf_name_local(leaf1, args->index);
2956                 namelen1 = name_loc->namelen;
2957                 name1 = (char *)name_loc->nameval;
2958         } else {
2959                 name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
2960                 namelen1 = name_rmt->namelen;
2961                 name1 = (char *)name_rmt->name;
2962         }
2963         if (entry2->flags & XFS_ATTR_LOCAL) {
2964                 name_loc = xfs_attr3_leaf_name_local(leaf2, args->index2);
2965                 namelen2 = name_loc->namelen;
2966                 name2 = (char *)name_loc->nameval;
2967         } else {
2968                 name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
2969                 namelen2 = name_rmt->namelen;
2970                 name2 = (char *)name_rmt->name;
2971         }
2972         ASSERT(be32_to_cpu(entry1->hashval) == be32_to_cpu(entry2->hashval));
2973         ASSERT(namelen1 == namelen2);
2974         ASSERT(memcmp(name1, name2, namelen1) == 0);
2975 #endif /* DEBUG */
2976
2977         ASSERT(entry1->flags & XFS_ATTR_INCOMPLETE);
2978         ASSERT((entry2->flags & XFS_ATTR_INCOMPLETE) == 0);
2979
2980         entry1->flags &= ~XFS_ATTR_INCOMPLETE;
2981         xfs_trans_log_buf(args->trans, bp1,
2982                           XFS_DA_LOGRANGE(leaf1, entry1, sizeof(*entry1)));
2983         if (args->rmtblkno) {
2984                 ASSERT((entry1->flags & XFS_ATTR_LOCAL) == 0);
2985                 name_rmt = xfs_attr3_leaf_name_remote(leaf1, args->index);
2986                 name_rmt->valueblk = cpu_to_be32(args->rmtblkno);
2987                 name_rmt->valuelen = cpu_to_be32(args->rmtvaluelen);
2988                 xfs_trans_log_buf(args->trans, bp1,
2989                          XFS_DA_LOGRANGE(leaf1, name_rmt, sizeof(*name_rmt)));
2990         }
2991
2992         entry2->flags |= XFS_ATTR_INCOMPLETE;
2993         xfs_trans_log_buf(args->trans, bp2,
2994                           XFS_DA_LOGRANGE(leaf2, entry2, sizeof(*entry2)));
2995         if ((entry2->flags & XFS_ATTR_LOCAL) == 0) {
2996                 name_rmt = xfs_attr3_leaf_name_remote(leaf2, args->index2);
2997                 name_rmt->valueblk = 0;
2998                 name_rmt->valuelen = 0;
2999                 xfs_trans_log_buf(args->trans, bp2,
3000                          XFS_DA_LOGRANGE(leaf2, name_rmt, sizeof(*name_rmt)));
3001         }
3002
3003         return 0;
3004 }