GNU Linux-libre 4.14.330-gnu1: fs/ext4/xattr.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * linux/fs/ext4/xattr.c
4  *
5  * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de>
6  *
7  * Fix by Harrison Xing <harrison@mountainviewdata.com>.
8  * Ext4 code with a lot of help from Eric Jarman <ejarman@acm.org>.
9  * Extended attributes for symlinks and special files added per
10  *  suggestion of Luka Renko <luka.renko@hermes.si>.
11  * xattr consolidation Copyright (c) 2004 James Morris <jmorris@redhat.com>,
12  *  Red Hat Inc.
13  * ea-in-inode support by Alex Tomas <alex@clusterfs.com> aka bzzz
14  *  and Andreas Gruenbacher <agruen@suse.de>.
15  */
16
17 /*
18  * Extended attributes are stored directly in inodes (on file systems with
19  * inodes bigger than 128 bytes) and on additional disk blocks. The i_file_acl
20  * field contains the block number if an inode uses an additional block. All
21  * attributes must fit in the inode and one additional block. Blocks that
22  * contain the identical set of attributes may be shared among several inodes.
23  * Identical blocks are detected by keeping a cache of blocks that have
24  * recently been accessed.
25  *
26  * The attributes in inodes and on blocks have a different header; the entries
27  * are stored in the same format:
28  *
29  *   +------------------+
30  *   | header           |
31  *   | entry 1          | |
32  *   | entry 2          | | growing downwards
33  *   | entry 3          | v
34  *   | four null bytes  |
35  *   | . . .            |
36  *   | value 1          | ^
37  *   | value 3          | | growing upwards
38  *   | value 2          | |
39  *   +------------------+
40  *
41  * The header is followed by multiple entry descriptors. In disk blocks, the
42  * entry descriptors are kept sorted. In inodes, they are unsorted. The
43  * attribute values are aligned to the end of the block in no specific order.
44  *
45  * Locking strategy
46  * ----------------
47  * EXT4_I(inode)->i_file_acl is protected by EXT4_I(inode)->xattr_sem.
48  * EA blocks are only changed if they are exclusive to an inode, so
49  * holding xattr_sem also means that nothing but the EA block's reference
50  * count can change. Multiple writers to the same block are synchronized
51  * by the buffer lock.
52  */
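/*
 * A sketch of the canonical walk over the descriptors laid out above (the
 * helper macros are defined in xattr.h): IS_LAST_ENTRY() matches the four
 * null bytes terminating the list, and EXT4_XATTR_NEXT() advances by the
 * padded length of the current descriptor:
 *
 *	for (e = first; !IS_LAST_ENTRY(e); e = EXT4_XATTR_NEXT(e))
 *		...handle entry e...
 */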
53
54 #include <linux/init.h>
55 #include <linux/fs.h>
56 #include <linux/slab.h>
57 #include <linux/mbcache.h>
58 #include <linux/quotaops.h>
59 #include "ext4_jbd2.h"
60 #include "ext4.h"
61 #include "xattr.h"
62 #include "acl.h"
63
64 #ifdef EXT4_XATTR_DEBUG
65 # define ea_idebug(inode, fmt, ...)                                     \
66         printk(KERN_DEBUG "inode %s:%lu: " fmt "\n",                    \
67                inode->i_sb->s_id, inode->i_ino, ##__VA_ARGS__)
68 # define ea_bdebug(bh, fmt, ...)                                        \
69         printk(KERN_DEBUG "block %pg:%lu: " fmt "\n",                   \
70                bh->b_bdev, (unsigned long)bh->b_blocknr, ##__VA_ARGS__)
71 #else
72 # define ea_idebug(inode, fmt, ...)     no_printk(fmt, ##__VA_ARGS__)
73 # define ea_bdebug(bh, fmt, ...)        no_printk(fmt, ##__VA_ARGS__)
74 #endif
75
76 static void ext4_xattr_block_cache_insert(struct mb_cache *,
77                                           struct buffer_head *);
78 static struct buffer_head *
79 ext4_xattr_block_cache_find(struct inode *, struct ext4_xattr_header *,
80                             struct mb_cache_entry **);
81 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
82                                     size_t value_count);
83 static void ext4_xattr_rehash(struct ext4_xattr_header *);
84
85 static const struct xattr_handler * const ext4_xattr_handler_map[] = {
86         [EXT4_XATTR_INDEX_USER]              = &ext4_xattr_user_handler,
87 #ifdef CONFIG_EXT4_FS_POSIX_ACL
88         [EXT4_XATTR_INDEX_POSIX_ACL_ACCESS]  = &posix_acl_access_xattr_handler,
89         [EXT4_XATTR_INDEX_POSIX_ACL_DEFAULT] = &posix_acl_default_xattr_handler,
90 #endif
91         [EXT4_XATTR_INDEX_TRUSTED]           = &ext4_xattr_trusted_handler,
92 #ifdef CONFIG_EXT4_FS_SECURITY
93         [EXT4_XATTR_INDEX_SECURITY]          = &ext4_xattr_security_handler,
94 #endif
95 };
96
97 const struct xattr_handler *ext4_xattr_handlers[] = {
98         &ext4_xattr_user_handler,
99         &ext4_xattr_trusted_handler,
100 #ifdef CONFIG_EXT4_FS_POSIX_ACL
101         &posix_acl_access_xattr_handler,
102         &posix_acl_default_xattr_handler,
103 #endif
104 #ifdef CONFIG_EXT4_FS_SECURITY
105         &ext4_xattr_security_handler,
106 #endif
107         NULL
108 };
109
110 #define EA_BLOCK_CACHE(inode)   (((struct ext4_sb_info *) \
111                                 inode->i_sb->s_fs_info)->s_ea_block_cache)
112
113 #define EA_INODE_CACHE(inode)   (((struct ext4_sb_info *) \
114                                 inode->i_sb->s_fs_info)->s_ea_inode_cache)
115
116 static int
117 ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
118                         struct inode *inode);
119
120 #ifdef CONFIG_LOCKDEP
121 void ext4_xattr_inode_set_class(struct inode *ea_inode)
122 {
123         struct ext4_inode_info *ei = EXT4_I(ea_inode);
124
125         lockdep_set_subclass(&ea_inode->i_rwsem, 1);
126         (void) ei;      /* shut up clang warning if !CONFIG_LOCKDEP */
127         lockdep_set_subclass(&ei->i_data_sem, I_DATA_SEM_EA);
128 }
129 #endif
130
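/*
 * Checksum of an external xattr block: it covers the 64-bit on-disk block
 * number followed by the whole block with the h_checksum field treated as
 * zero, so both a content change and a copy of the block to a different
 * location invalidate the stored checksum.
 */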
131 static __le32 ext4_xattr_block_csum(struct inode *inode,
132                                     sector_t block_nr,
133                                     struct ext4_xattr_header *hdr)
134 {
135         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
136         __u32 csum;
137         __le64 dsk_block_nr = cpu_to_le64(block_nr);
138         __u32 dummy_csum = 0;
139         int offset = offsetof(struct ext4_xattr_header, h_checksum);
140
141         csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&dsk_block_nr,
142                            sizeof(dsk_block_nr));
143         csum = ext4_chksum(sbi, csum, (__u8 *)hdr, offset);
144         csum = ext4_chksum(sbi, csum, (__u8 *)&dummy_csum, sizeof(dummy_csum));
145         offset += sizeof(dummy_csum);
146         csum = ext4_chksum(sbi, csum, (__u8 *)hdr + offset,
147                            EXT4_BLOCK_SIZE(inode->i_sb) - offset);
148
149         return cpu_to_le32(csum);
150 }
151
152 static int ext4_xattr_block_csum_verify(struct inode *inode,
153                                         struct buffer_head *bh)
154 {
155         struct ext4_xattr_header *hdr = BHDR(bh);
156         int ret = 1;
157
158         if (ext4_has_metadata_csum(inode->i_sb)) {
159                 lock_buffer(bh);
160                 ret = (hdr->h_checksum == ext4_xattr_block_csum(inode,
161                                                         bh->b_blocknr, hdr));
162                 unlock_buffer(bh);
163         }
164         return ret;
165 }
166
167 static void ext4_xattr_block_csum_set(struct inode *inode,
168                                       struct buffer_head *bh)
169 {
170         if (ext4_has_metadata_csum(inode->i_sb))
171                 BHDR(bh)->h_checksum = ext4_xattr_block_csum(inode,
172                                                 bh->b_blocknr, BHDR(bh));
173 }
174
175 static inline const struct xattr_handler *
176 ext4_xattr_handler(int name_index)
177 {
178         const struct xattr_handler *handler = NULL;
179
180         if (name_index > 0 && name_index < ARRAY_SIZE(ext4_xattr_handler_map))
181                 handler = ext4_xattr_handler_map[name_index];
182         return handler;
183 }
184
185 static int
186 ext4_xattr_check_entries(struct ext4_xattr_entry *entry, void *end,
187                          void *value_start)
188 {
189         struct ext4_xattr_entry *e = entry;
190
191         /* Find the end of the names list */
192         while (!IS_LAST_ENTRY(e)) {
193                 struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
194                 if ((void *)next >= end)
195                         return -EFSCORRUPTED;
196                 if (strnlen(e->e_name, e->e_name_len) != e->e_name_len)
197                         return -EFSCORRUPTED;
198                 e = next;
199         }
200
201         /* Check the values */
202         while (!IS_LAST_ENTRY(entry)) {
203                 u32 size = le32_to_cpu(entry->e_value_size);
204
205                 if (size > EXT4_XATTR_SIZE_MAX)
206                         return -EFSCORRUPTED;
207
208                 if (size != 0 && entry->e_value_inum == 0) {
209                         u16 offs = le16_to_cpu(entry->e_value_offs);
210                         void *value;
211
212                         /*
213                          * The value cannot overlap the names, and the value
214                          * with padding cannot extend beyond 'end'.  Check both
215                          * the padded and unpadded sizes, since the size may
216                          * overflow to 0 when adding padding.
217                          */
218                         if (offs > end - value_start)
219                                 return -EFSCORRUPTED;
220                         value = value_start + offs;
221                         if (value < (void *)e + sizeof(u32) ||
222                             size > end - value ||
223                             EXT4_XATTR_SIZE(size) > end - value)
224                                 return -EFSCORRUPTED;
225                 }
226                 entry = EXT4_XATTR_NEXT(entry);
227         }
228
229         return 0;
230 }
231
232 static inline int
233 __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
234                          const char *function, unsigned int line)
235 {
236         int error = -EFSCORRUPTED;
237
238         if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
239             BHDR(bh)->h_blocks != cpu_to_le32(1))
240                 goto errout;
241         if (buffer_verified(bh))
242                 return 0;
243
244         error = -EFSBADCRC;
245         if (!ext4_xattr_block_csum_verify(inode, bh))
246                 goto errout;
247         error = ext4_xattr_check_entries(BFIRST(bh), bh->b_data + bh->b_size,
248                                          bh->b_data);
249 errout:
250         if (error)
251                 __ext4_error_inode(inode, function, line, 0,
252                                    "corrupted xattr block %llu",
253                                    (unsigned long long) bh->b_blocknr);
254         else
255                 set_buffer_verified(bh);
256         return error;
257 }
258
259 #define ext4_xattr_check_block(inode, bh) \
260         __ext4_xattr_check_block((inode), (bh),  __func__, __LINE__)
261
262
263 static int
264 __xattr_check_inode(struct inode *inode, struct ext4_xattr_ibody_header *header,
265                          void *end, const char *function, unsigned int line)
266 {
267         int error = -EFSCORRUPTED;
268
269         if (end - (void *)header < sizeof(*header) + sizeof(u32) ||
270             (header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)))
271                 goto errout;
272         error = ext4_xattr_check_entries(IFIRST(header), end, IFIRST(header));
273 errout:
274         if (error)
275                 __ext4_error_inode(inode, function, line, 0,
276                                    "corrupted in-inode xattr");
277         return error;
278 }
279
280 #define xattr_check_inode(inode, header, end) \
281         __xattr_check_inode((inode), (header), (end), __func__, __LINE__)
282
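/*
 * Scan the entry descriptors for (name_index, name). On success, *pentry
 * points at the matching entry and 0 is returned. On -ENODATA, *pentry is
 * left at the first entry sorting after the requested name when the
 * descriptors are sorted (the insertion point for block entries), or at the
 * terminating null entry in the unsorted in-inode case.
 */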
283 static int
284 xattr_find_entry(struct inode *inode, struct ext4_xattr_entry **pentry,
285                  void *end, int name_index, const char *name, int sorted)
286 {
287         struct ext4_xattr_entry *entry, *next;
288         size_t name_len;
289         int cmp = 1;
290
291         if (name == NULL)
292                 return -EINVAL;
293         name_len = strlen(name);
294         for (entry = *pentry; !IS_LAST_ENTRY(entry); entry = next) {
295                 next = EXT4_XATTR_NEXT(entry);
296                 if ((void *) next >= end) {
297                         EXT4_ERROR_INODE(inode, "corrupted xattr entries");
298                         return -EFSCORRUPTED;
299                 }
300                 cmp = name_index - entry->e_name_index;
301                 if (!cmp)
302                         cmp = name_len - entry->e_name_len;
303                 if (!cmp)
304                         cmp = memcmp(name, entry->e_name, name_len);
305                 if (cmp <= 0 && (sorted || cmp == 0))
306                         break;
307         }
308         *pentry = entry;
309         return cmp ? -ENODATA : 0;
310 }
311
312 static u32
313 ext4_xattr_inode_hash(struct ext4_sb_info *sbi, const void *buffer, size_t size)
314 {
315         return ext4_chksum(sbi, sbi->s_csum_seed, buffer, size);
316 }
317
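/*
 * EA inodes are internal to the filesystem and never visible in the
 * namespace, so otherwise unused fields are repurposed as storage:
 * i_ctime.tv_sec and i_version together hold the 64-bit reference count,
 * i_atime.tv_sec holds the hash of the xattr value, and, for legacy
 * Lustre-style EA inodes, i_mtime.tv_sec holds the parent inode number
 * (see EXT4_XATTR_INODE_GET_PARENT() below).
 */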
318 static u64 ext4_xattr_inode_get_ref(struct inode *ea_inode)
319 {
320         return ((u64)ea_inode->i_ctime.tv_sec << 32) |
321                ((u32)ea_inode->i_version);
322 }
323
324 static void ext4_xattr_inode_set_ref(struct inode *ea_inode, u64 ref_count)
325 {
326         ea_inode->i_ctime.tv_sec = (u32)(ref_count >> 32);
327         ea_inode->i_version = (u32)ref_count;
328 }
329
330 static u32 ext4_xattr_inode_get_hash(struct inode *ea_inode)
331 {
332         return (u32)ea_inode->i_atime.tv_sec;
333 }
334
335 static void ext4_xattr_inode_set_hash(struct inode *ea_inode, u32 hash)
336 {
337         ea_inode->i_atime.tv_sec = hash;
338 }
339
340 /*
341  * Read the EA value from an inode.
342  */
343 static int ext4_xattr_inode_read(struct inode *ea_inode, void *buf, size_t size)
344 {
345         int blocksize = 1 << ea_inode->i_blkbits;
346         int bh_count = (size + blocksize - 1) >> ea_inode->i_blkbits;
347         int tail_size = (size % blocksize) ?: blocksize;
348         struct buffer_head *bhs_inline[8];
349         struct buffer_head **bhs = bhs_inline;
350         int i, ret;
351
352         if (bh_count > ARRAY_SIZE(bhs_inline)) {
353                 bhs = kmalloc_array(bh_count, sizeof(*bhs), GFP_NOFS);
354                 if (!bhs)
355                         return -ENOMEM;
356         }
357
358         ret = ext4_bread_batch(ea_inode, 0 /* block */, bh_count,
359                                true /* wait */, bhs);
360         if (ret)
361                 goto free_bhs;
362
363         for (i = 0; i < bh_count; i++) {
364                 /* There shouldn't be any holes in ea_inode. */
365                 if (!bhs[i]) {
366                         ret = -EFSCORRUPTED;
367                         goto put_bhs;
368                 }
369                 memcpy((char *)buf + blocksize * i, bhs[i]->b_data,
370                        i < bh_count - 1 ? blocksize : tail_size);
371         }
372         ret = 0;
373 put_bhs:
374         for (i = 0; i < bh_count; i++)
375                 brelse(bhs[i]);
376 free_bhs:
377         if (bhs != bhs_inline)
378                 kfree(bhs);
379         return ret;
380 }
381
382 #define EXT4_XATTR_INODE_GET_PARENT(inode) ((__u32)(inode)->i_mtime.tv_sec)
383
384 static int ext4_xattr_inode_iget(struct inode *parent, unsigned long ea_ino,
385                                  u32 ea_inode_hash, struct inode **ea_inode)
386 {
387         struct inode *inode;
388         int err;
389
390         /*
391          * We have to check for this corruption early as otherwise
392          * iget_locked() could wait indefinitely for the state of our
393          * parent inode.
394          */
395         if (parent->i_ino == ea_ino) {
396                 ext4_error(parent->i_sb,
397                            "Parent and EA inode have the same ino %lu", ea_ino);
398                 return -EFSCORRUPTED;
399         }
400
401         inode = ext4_iget(parent->i_sb, ea_ino, EXT4_IGET_NORMAL);
402         if (IS_ERR(inode)) {
403                 err = PTR_ERR(inode);
404                 ext4_error(parent->i_sb,
405                            "error while reading EA inode %lu err=%d", ea_ino,
406                            err);
407                 return err;
408         }
409
410         if (is_bad_inode(inode)) {
411                 ext4_error(parent->i_sb,
412                            "error while reading EA inode %lu is_bad_inode",
413                            ea_ino);
414                 err = -EIO;
415                 goto error;
416         }
417
418         if (!(EXT4_I(inode)->i_flags & EXT4_EA_INODE_FL)) {
419                 ext4_error(parent->i_sb,
420                            "EA inode %lu does not have EXT4_EA_INODE_FL flag",
421                             ea_ino);
422                 err = -EINVAL;
423                 goto error;
424         }
425
426         ext4_xattr_inode_set_class(inode);
427
428         /*
429          * Check whether this is an old Lustre-style xattr inode. Lustre
430          * implementation does not have hash validation, rather it has a
431          * backpointer from ea_inode to the parent inode.
432          */
433         if (ea_inode_hash != ext4_xattr_inode_get_hash(inode) &&
434             EXT4_XATTR_INODE_GET_PARENT(inode) == parent->i_ino &&
435             inode->i_generation == parent->i_generation) {
436                 ext4_set_inode_state(inode, EXT4_STATE_LUSTRE_EA_INODE);
437                 ext4_xattr_inode_set_ref(inode, 1);
438         } else {
439                 inode_lock(inode);
440                 inode->i_flags |= S_NOQUOTA;
441                 inode_unlock(inode);
442         }
443
444         *ea_inode = inode;
445         return 0;
446 error:
447         iput(inode);
448         return err;
449 }
450
451 static int
452 ext4_xattr_inode_verify_hashes(struct inode *ea_inode,
453                                struct ext4_xattr_entry *entry, void *buffer,
454                                size_t size)
455 {
456         u32 hash;
457
458         /* Verify stored hash matches calculated hash. */
459         hash = ext4_xattr_inode_hash(EXT4_SB(ea_inode->i_sb), buffer, size);
460         if (hash != ext4_xattr_inode_get_hash(ea_inode))
461                 return -EFSCORRUPTED;
462
463         if (entry) {
464                 __le32 e_hash, tmp_data;
465
466                 /* Verify entry hash. */
467                 tmp_data = cpu_to_le32(hash);
468                 e_hash = ext4_xattr_hash_entry(entry->e_name, entry->e_name_len,
469                                                &tmp_data, 1);
470                 if (e_hash != entry->e_hash)
471                         return -EFSCORRUPTED;
472         }
473         return 0;
474 }
475
476 /*
477  * Read xattr value from the EA inode.
478  */
479 static int
480 ext4_xattr_inode_get(struct inode *inode, struct ext4_xattr_entry *entry,
481                      void *buffer, size_t size)
482 {
483         struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
484         struct inode *ea_inode;
485         int err;
486
487         err = ext4_xattr_inode_iget(inode, le32_to_cpu(entry->e_value_inum),
488                                     le32_to_cpu(entry->e_hash), &ea_inode);
489         if (err) {
490                 ea_inode = NULL;
491                 goto out;
492         }
493
494         if (i_size_read(ea_inode) != size) {
495                 ext4_warning_inode(ea_inode,
496                                    "ea_inode file size=%llu entry size=%zu",
497                                    i_size_read(ea_inode), size);
498                 err = -EFSCORRUPTED;
499                 goto out;
500         }
501
502         err = ext4_xattr_inode_read(ea_inode, buffer, size);
503         if (err)
504                 goto out;
505
506         if (!ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE)) {
507                 err = ext4_xattr_inode_verify_hashes(ea_inode, entry, buffer,
508                                                      size);
509                 if (err) {
510                         ext4_warning_inode(ea_inode,
511                                            "EA inode hash validation failed");
512                         goto out;
513                 }
514
515                 if (ea_inode_cache)
516                         mb_cache_entry_create(ea_inode_cache, GFP_NOFS,
517                                         ext4_xattr_inode_get_hash(ea_inode),
518                                         ea_inode->i_ino, true /* reusable */);
519         }
520 out:
521         iput(ea_inode);
522         return err;
523 }
524
525 static int
526 ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
527                      void *buffer, size_t buffer_size)
528 {
529         struct buffer_head *bh = NULL;
530         struct ext4_xattr_entry *entry;
531         size_t size;
532         void *end;
533         int error;
534         struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
535
536         ea_idebug(inode, "name=%d.%s, buffer=%p, buffer_size=%ld",
537                   name_index, name, buffer, (long)buffer_size);
538
539         if (!EXT4_I(inode)->i_file_acl)
540                 return -ENODATA;
541         ea_idebug(inode, "reading block %llu",
542                   (unsigned long long)EXT4_I(inode)->i_file_acl);
543         bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
544         if (IS_ERR(bh))
545                 return PTR_ERR(bh);
546         ea_bdebug(bh, "b_count=%d, refcount=%d",
547                 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
548         error = ext4_xattr_check_block(inode, bh);
549         if (error)
550                 goto cleanup;
551         ext4_xattr_block_cache_insert(ea_block_cache, bh);
552         entry = BFIRST(bh);
553         end = bh->b_data + bh->b_size;
554         error = xattr_find_entry(inode, &entry, end, name_index, name, 1);
555         if (error)
556                 goto cleanup;
557         size = le32_to_cpu(entry->e_value_size);
558         error = -ERANGE;
559         if (unlikely(size > EXT4_XATTR_SIZE_MAX))
560                 goto cleanup;
561         if (buffer) {
562                 if (size > buffer_size)
563                         goto cleanup;
564                 if (entry->e_value_inum) {
565                         error = ext4_xattr_inode_get(inode, entry, buffer,
566                                                      size);
567                         if (error)
568                                 goto cleanup;
569                 } else {
570                         u16 offset = le16_to_cpu(entry->e_value_offs);
571                         void *p = bh->b_data + offset;
572
573                         if (unlikely(p + size > end))
574                                 goto cleanup;
575                         memcpy(buffer, p, size);
576                 }
577         }
578         error = size;
579
580 cleanup:
581         brelse(bh);
582         return error;
583 }
584
585 int
586 ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
587                      void *buffer, size_t buffer_size)
588 {
589         struct ext4_xattr_ibody_header *header;
590         struct ext4_xattr_entry *entry;
591         struct ext4_inode *raw_inode;
592         struct ext4_iloc iloc;
593         size_t size;
594         void *end;
595         int error;
596
597         if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
598                 return -ENODATA;
599         error = ext4_get_inode_loc(inode, &iloc);
600         if (error)
601                 return error;
602         raw_inode = ext4_raw_inode(&iloc);
603         header = IHDR(inode, raw_inode);
604         end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
605         error = xattr_check_inode(inode, header, end);
606         if (error)
607                 goto cleanup;
608         entry = IFIRST(header);
609         error = xattr_find_entry(inode, &entry, end, name_index, name, 0);
610         if (error)
611                 goto cleanup;
612         size = le32_to_cpu(entry->e_value_size);
613         error = -ERANGE;
614         if (unlikely(size > EXT4_XATTR_SIZE_MAX))
615                 goto cleanup;
616         if (buffer) {
617                 if (size > buffer_size)
618                         goto cleanup;
619                 if (entry->e_value_inum) {
620                         error = ext4_xattr_inode_get(inode, entry, buffer,
621                                                      size);
622                         if (error)
623                                 goto cleanup;
624                 } else {
625                         u16 offset = le16_to_cpu(entry->e_value_offs);
626                         void *p = (void *)IFIRST(header) + offset;
627
628                         if (unlikely(p + size > end))
629                                 goto cleanup;
630                         memcpy(buffer, p, size);
631                 }
632         }
633         error = size;
634
635 cleanup:
636         brelse(iloc.bh);
637         return error;
638 }
639
640 /*
641  * ext4_xattr_get()
642  *
643  * Copy an extended attribute into the buffer
644  * provided, or compute the buffer size required.
645  * Buffer is NULL to compute the size of the buffer required.
646  *
647  * Returns a negative error number on failure, or the number of bytes
648  * used / required on success.
649  */
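/*
 * A hypothetical caller sketch ("foo" is only an example name; at this
 * layer it is passed without the "user." prefix, since name_index already
 * selects the namespace):
 *
 *	len = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "foo", NULL, 0);
 *	if (len > 0) {
 *		buf = kmalloc(len, GFP_NOFS);
 *		err = ext4_xattr_get(inode, EXT4_XATTR_INDEX_USER, "foo",
 *				     buf, len);
 *	}
 */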
650 int
651 ext4_xattr_get(struct inode *inode, int name_index, const char *name,
652                void *buffer, size_t buffer_size)
653 {
654         int error;
655
656         if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
657                 return -EIO;
658
659         if (strlen(name) > 255)
660                 return -ERANGE;
661
662         down_read(&EXT4_I(inode)->xattr_sem);
663         error = ext4_xattr_ibody_get(inode, name_index, name, buffer,
664                                      buffer_size);
665         if (error == -ENODATA)
666                 error = ext4_xattr_block_get(inode, name_index, name, buffer,
667                                              buffer_size);
668         up_read(&EXT4_I(inode)->xattr_sem);
669         return error;
670 }
671
672 static int
673 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
674                         char *buffer, size_t buffer_size)
675 {
676         size_t rest = buffer_size;
677
678         for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
679                 const struct xattr_handler *handler =
680                         ext4_xattr_handler(entry->e_name_index);
681
682                 if (handler && (!handler->list || handler->list(dentry))) {
683                         const char *prefix = handler->prefix ?: handler->name;
684                         size_t prefix_len = strlen(prefix);
685                         size_t size = prefix_len + entry->e_name_len + 1;
686
687                         if (buffer) {
688                                 if (size > rest)
689                                         return -ERANGE;
690                                 memcpy(buffer, prefix, prefix_len);
691                                 buffer += prefix_len;
692                                 memcpy(buffer, entry->e_name, entry->e_name_len);
693                                 buffer += entry->e_name_len;
694                                 *buffer++ = 0;
695                         }
696                         rest -= size;
697                 }
698         }
699         return buffer_size - rest;  /* total size */
700 }
701
702 static int
703 ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
704 {
705         struct inode *inode = d_inode(dentry);
706         struct buffer_head *bh = NULL;
707         int error;
708
709         ea_idebug(inode, "buffer=%p, buffer_size=%ld",
710                   buffer, (long)buffer_size);
711
712         if (!EXT4_I(inode)->i_file_acl)
713                 return 0;
714         ea_idebug(inode, "reading block %llu",
715                   (unsigned long long)EXT4_I(inode)->i_file_acl);
716         bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
717         if (IS_ERR(bh))
718                 return PTR_ERR(bh);
719         ea_bdebug(bh, "b_count=%d, refcount=%d",
720                 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
721         error = ext4_xattr_check_block(inode, bh);
722         if (error)
723                 goto cleanup;
724         ext4_xattr_block_cache_insert(EA_BLOCK_CACHE(inode), bh);
725         error = ext4_xattr_list_entries(dentry, BFIRST(bh), buffer,
726                                         buffer_size);
727 cleanup:
728         brelse(bh);
729         return error;
730 }
731
732 static int
733 ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
734 {
735         struct inode *inode = d_inode(dentry);
736         struct ext4_xattr_ibody_header *header;
737         struct ext4_inode *raw_inode;
738         struct ext4_iloc iloc;
739         void *end;
740         int error;
741
742         if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR))
743                 return 0;
744         error = ext4_get_inode_loc(inode, &iloc);
745         if (error)
746                 return error;
747         raw_inode = ext4_raw_inode(&iloc);
748         header = IHDR(inode, raw_inode);
749         end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
750         error = xattr_check_inode(inode, header, end);
751         if (error)
752                 goto cleanup;
753         error = ext4_xattr_list_entries(dentry, IFIRST(header),
754                                         buffer, buffer_size);
755
756 cleanup:
757         brelse(iloc.bh);
758         return error;
759 }
760
761 /*
762  * Inode operation listxattr()
763  *
764  * d_inode(dentry)->i_rwsem: don't care
765  *
766  * Copy a list of attribute names into the buffer
767  * provided, or compute the buffer size required.
768  * Buffer is NULL to compute the size of the buffer required.
769  *
770  * Returns a negative error number on failure, or the number of bytes
771  * used / required on success.
772  */
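/*
 * The returned list is the in-inode attribute names followed by the names
 * from the external xattr block, each emitted with its prefix and a
 * terminating NUL byte.
 */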
773 ssize_t
774 ext4_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
775 {
776         int ret, ret2;
777
778         down_read(&EXT4_I(d_inode(dentry))->xattr_sem);
779         ret = ret2 = ext4_xattr_ibody_list(dentry, buffer, buffer_size);
780         if (ret < 0)
781                 goto errout;
782         if (buffer) {
783                 buffer += ret;
784                 buffer_size -= ret;
785         }
786         ret = ext4_xattr_block_list(dentry, buffer, buffer_size);
787         if (ret < 0)
788                 goto errout;
789         ret += ret2;
790 errout:
791         up_read(&EXT4_I(d_inode(dentry))->xattr_sem);
792         return ret;
793 }
794
795 /*
796  * If the EXT4_FEATURE_COMPAT_EXT_ATTR feature of this file system is
797  * not set, set it.
798  */
799 static void ext4_xattr_update_super_block(handle_t *handle,
800                                           struct super_block *sb)
801 {
802         if (ext4_has_feature_xattr(sb))
803                 return;
804
805         BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
806         if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
807                 ext4_set_feature_xattr(sb);
808                 ext4_handle_dirty_super(handle, sb);
809         }
810 }
811
812 int ext4_get_inode_usage(struct inode *inode, qsize_t *usage)
813 {
814         struct ext4_iloc iloc = { .bh = NULL };
815         struct buffer_head *bh = NULL;
816         struct ext4_inode *raw_inode;
817         struct ext4_xattr_ibody_header *header;
818         struct ext4_xattr_entry *entry;
819         qsize_t ea_inode_refs = 0;
820         void *end;
821         int ret;
822
823         lockdep_assert_held_read(&EXT4_I(inode)->xattr_sem);
824
825         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
826                 ret = ext4_get_inode_loc(inode, &iloc);
827                 if (ret)
828                         goto out;
829                 raw_inode = ext4_raw_inode(&iloc);
830                 header = IHDR(inode, raw_inode);
831                 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
832                 ret = xattr_check_inode(inode, header, end);
833                 if (ret)
834                         goto out;
835
836                 for (entry = IFIRST(header); !IS_LAST_ENTRY(entry);
837                      entry = EXT4_XATTR_NEXT(entry))
838                         if (entry->e_value_inum)
839                                 ea_inode_refs++;
840         }
841
842         if (EXT4_I(inode)->i_file_acl) {
843                 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
844                 if (IS_ERR(bh)) {
845                         ret = PTR_ERR(bh);
846                         bh = NULL;
847                         goto out;
848                 }
849
850                 ret = ext4_xattr_check_block(inode, bh);
851                 if (ret)
852                         goto out;
853
854                 for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
855                      entry = EXT4_XATTR_NEXT(entry))
856                         if (entry->e_value_inum)
857                                 ea_inode_refs++;
858         }
859         *usage = ea_inode_refs + 1;
860         ret = 0;
861 out:
862         brelse(iloc.bh);
863         brelse(bh);
864         return ret;
865 }
866
867 static inline size_t round_up_cluster(struct inode *inode, size_t length)
868 {
869         struct super_block *sb = inode->i_sb;
870         size_t cluster_size = 1 << (EXT4_SB(sb)->s_cluster_bits +
871                                     inode->i_blkbits);
872         size_t mask = ~(cluster_size - 1);
873
874         return (length + cluster_size - 1) & mask;
875 }
876
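/*
 * Worked example: with 4 KiB blocks and no bigalloc (s_cluster_bits == 0),
 * round_up_cluster() rounds a 5000 byte value up to 8192 bytes, which is
 * what the quota helpers below charge and later free.
 */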
877 static int ext4_xattr_inode_alloc_quota(struct inode *inode, size_t len)
878 {
879         int err;
880
881         err = dquot_alloc_inode(inode);
882         if (err)
883                 return err;
884         err = dquot_alloc_space_nodirty(inode, round_up_cluster(inode, len));
885         if (err)
886                 dquot_free_inode(inode);
887         return err;
888 }
889
890 static void ext4_xattr_inode_free_quota(struct inode *parent,
891                                         struct inode *ea_inode,
892                                         size_t len)
893 {
894         if (ea_inode &&
895             ext4_test_inode_state(ea_inode, EXT4_STATE_LUSTRE_EA_INODE))
896                 return;
897         dquot_free_space_nodirty(parent, round_up_cluster(parent, len));
898         dquot_free_inode(parent);
899 }
900
901 int __ext4_xattr_set_credits(struct super_block *sb, struct inode *inode,
902                              struct buffer_head *block_bh, size_t value_len,
903                              bool is_create)
904 {
905         int credits;
906         int blocks;
907
908         /*
909          * 1) Owner inode update
910          * 2) Ref count update on old xattr block
911          * 3) new xattr block
912          * 4) block bitmap update for new xattr block
913          * 5) group descriptor for new xattr block
914          * 6) block bitmap update for old xattr block
915          * 7) group descriptor for old block
916          *
917          * 6 & 7 can happen if we have two racing threads T_a and T_b
918          * which are each trying to set an xattr on inodes I_a and I_b
919          * which were both initially sharing an xattr block.
920          */
921         credits = 7;
922
923         /* Quota updates. */
924         credits += EXT4_MAXQUOTAS_TRANS_BLOCKS(sb);
925
926         /*
927          * In case of inline data, we may push out the data to a block,
928          * so we need to reserve credits for this eventuality
929          */
930         if (inode && ext4_has_inline_data(inode))
931                 credits += ext4_writepage_trans_blocks(inode) + 1;
932
933         /* We are done if ea_inode feature is not enabled. */
934         if (!ext4_has_feature_ea_inode(sb))
935                 return credits;
936
937         /* New ea_inode, inode map, block bitmap, group descriptor. */
938         credits += 4;
939
940         /* Data blocks. */
941         blocks = (value_len + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
942
943         /* Indirection block or one level of extent tree. */
944         blocks += 1;
945
946         /* Block bitmap and group descriptor updates for each block. */
947         credits += blocks * 2;
948
949         /* Blocks themselves. */
950         credits += blocks;
951
952         if (!is_create) {
953                 /* Dereference ea_inode holding old xattr value.
954                  * Old ea_inode, inode map, block bitmap, group descriptor.
955                  */
956                 credits += 4;
957
958                 /* Data blocks for old ea_inode. */
959                 blocks = XATTR_SIZE_MAX >> sb->s_blocksize_bits;
960
961                 /* Indirection block or one level of extent tree for old
962                  * ea_inode.
963                  */
964                 blocks += 1;
965
966                 /* Block bitmap and group descriptor updates for each block. */
967                 credits += blocks * 2;
968         }
969
970         /* We may need to clone the existing xattr block in which case we need
971          * to increment ref counts for existing ea_inodes referenced by it.
972          */
973         if (block_bh) {
974                 struct ext4_xattr_entry *entry = BFIRST(block_bh);
975
976                 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry))
977                         if (entry->e_value_inum)
978                                 /* Ref count update on ea_inode. */
979                                 credits += 1;
980         }
981         return credits;
982 }
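/*
 * Example of the estimate above: on a filesystem without the ea_inode
 * feature the result is simply 7 + EXT4_MAXQUOTAS_TRANS_BLOCKS(sb), plus
 * ext4_writepage_trans_blocks(inode) + 1 when the inode still carries
 * inline data; with ea_inode enabled it additionally grows with the number
 * of blocks needed to hold the value.
 */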
983
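/*
 * Make sure the running handle has at least 'credits' buffer credits left,
 * extending it if possible and otherwise restarting it. Before a restart,
 * a dirty xattr block 'bh' is checksummed (when block_csum is set) and
 * dirtied in the current transaction, and write access to it is re-acquired
 * afterwards. This is a no-op in nojournal mode.
 */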
984 static int ext4_xattr_ensure_credits(handle_t *handle, struct inode *inode,
985                                      int credits, struct buffer_head *bh,
986                                      bool dirty, bool block_csum)
987 {
988         int error;
989
990         if (!ext4_handle_valid(handle))
991                 return 0;
992
993         if (handle->h_buffer_credits >= credits)
994                 return 0;
995
996         error = ext4_journal_extend(handle, credits - handle->h_buffer_credits);
997         if (!error)
998                 return 0;
999         if (error < 0) {
1000                 ext4_warning(inode->i_sb, "Extend journal (error %d)", error);
1001                 return error;
1002         }
1003
1004         if (bh && dirty) {
1005                 if (block_csum)
1006                         ext4_xattr_block_csum_set(inode, bh);
1007                 error = ext4_handle_dirty_metadata(handle, NULL, bh);
1008                 if (error) {
1009                         ext4_warning(inode->i_sb, "Handle metadata (error %d)",
1010                                      error);
1011                         return error;
1012                 }
1013         }
1014
1015         error = ext4_journal_restart(handle, credits);
1016         if (error) {
1017                 ext4_warning(inode->i_sb, "Restart journal (error %d)", error);
1018                 return error;
1019         }
1020
1021         if (bh) {
1022                 error = ext4_journal_get_write_access(handle, bh);
1023                 if (error) {
1024                         ext4_warning(inode->i_sb,
1025                                      "Get write access failed (error %d)",
1026                                      error);
1027                         return error;
1028                 }
1029         }
1030         return 0;
1031 }
1032
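/*
 * Adjust the reference count of an EA inode (stored in the repurposed
 * timestamp/version fields, see ext4_xattr_inode_get_ref() above). When the
 * count rises back to 1, the inode is taken off the orphan list and
 * re-added to the mbcache so the value can be shared again; when it drops
 * to 0, it is removed from the cache and put on the orphan list so it can
 * be reclaimed.
 */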
1033 static int ext4_xattr_inode_update_ref(handle_t *handle, struct inode *ea_inode,
1034                                        int ref_change)
1035 {
1036         struct mb_cache *ea_inode_cache = EA_INODE_CACHE(ea_inode);
1037         struct ext4_iloc iloc;
1038         s64 ref_count;
1039         u32 hash;
1040         int ret;
1041
1042         inode_lock(ea_inode);
1043
1044         ret = ext4_reserve_inode_write(handle, ea_inode, &iloc);
1045         if (ret) {
1046                 iloc.bh = NULL;
1047                 goto out;
1048         }
1049
1050         ref_count = ext4_xattr_inode_get_ref(ea_inode);
1051         ref_count += ref_change;
1052         ext4_xattr_inode_set_ref(ea_inode, ref_count);
1053
1054         if (ref_change > 0) {
1055                 WARN_ONCE(ref_count <= 0, "EA inode %lu ref_count=%lld",
1056                           ea_inode->i_ino, ref_count);
1057
1058                 if (ref_count == 1) {
1059                         WARN_ONCE(ea_inode->i_nlink, "EA inode %lu i_nlink=%u",
1060                                   ea_inode->i_ino, ea_inode->i_nlink);
1061
1062                         set_nlink(ea_inode, 1);
1063                         ext4_orphan_del(handle, ea_inode);
1064
1065                         if (ea_inode_cache) {
1066                                 hash = ext4_xattr_inode_get_hash(ea_inode);
1067                                 mb_cache_entry_create(ea_inode_cache,
1068                                                       GFP_NOFS, hash,
1069                                                       ea_inode->i_ino,
1070                                                       true /* reusable */);
1071                         }
1072                 }
1073         } else {
1074                 WARN_ONCE(ref_count < 0, "EA inode %lu ref_count=%lld",
1075                           ea_inode->i_ino, ref_count);
1076
1077                 if (ref_count == 0) {
1078                         WARN_ONCE(ea_inode->i_nlink != 1,
1079                                   "EA inode %lu i_nlink=%u",
1080                                   ea_inode->i_ino, ea_inode->i_nlink);
1081
1082                         clear_nlink(ea_inode);
1083                         ext4_orphan_add(handle, ea_inode);
1084
1085                         if (ea_inode_cache) {
1086                                 hash = ext4_xattr_inode_get_hash(ea_inode);
1087                                 mb_cache_entry_delete(ea_inode_cache, hash,
1088                                                       ea_inode->i_ino);
1089                         }
1090                 }
1091         }
1092
1093         ret = ext4_mark_iloc_dirty(handle, ea_inode, &iloc);
1094         iloc.bh = NULL;
1095         if (ret)
1096                 ext4_warning_inode(ea_inode,
1097                                    "ext4_mark_iloc_dirty() failed ret=%d", ret);
1098 out:
1099         brelse(iloc.bh);
1100         inode_unlock(ea_inode);
1101         return ret;
1102 }
1103
1104 static int ext4_xattr_inode_inc_ref(handle_t *handle, struct inode *ea_inode)
1105 {
1106         return ext4_xattr_inode_update_ref(handle, ea_inode, 1);
1107 }
1108
1109 static int ext4_xattr_inode_dec_ref(handle_t *handle, struct inode *ea_inode)
1110 {
1111         return ext4_xattr_inode_update_ref(handle, ea_inode, -1);
1112 }
1113
1114 static int ext4_xattr_inode_inc_ref_all(handle_t *handle, struct inode *parent,
1115                                         struct ext4_xattr_entry *first)
1116 {
1117         struct inode *ea_inode;
1118         struct ext4_xattr_entry *entry;
1119         struct ext4_xattr_entry *failed_entry;
1120         unsigned int ea_ino;
1121         int err, saved_err;
1122
1123         for (entry = first; !IS_LAST_ENTRY(entry);
1124              entry = EXT4_XATTR_NEXT(entry)) {
1125                 if (!entry->e_value_inum)
1126                         continue;
1127                 ea_ino = le32_to_cpu(entry->e_value_inum);
1128                 err = ext4_xattr_inode_iget(parent, ea_ino,
1129                                             le32_to_cpu(entry->e_hash),
1130                                             &ea_inode);
1131                 if (err)
1132                         goto cleanup;
1133                 err = ext4_xattr_inode_inc_ref(handle, ea_inode);
1134                 if (err) {
1135                         ext4_warning_inode(ea_inode, "inc ref error %d", err);
1136                         iput(ea_inode);
1137                         goto cleanup;
1138                 }
1139                 iput(ea_inode);
1140         }
1141         return 0;
1142
1143 cleanup:
1144         saved_err = err;
1145         failed_entry = entry;
1146
1147         for (entry = first; entry != failed_entry;
1148              entry = EXT4_XATTR_NEXT(entry)) {
1149                 if (!entry->e_value_inum)
1150                         continue;
1151                 ea_ino = le32_to_cpu(entry->e_value_inum);
1152                 err = ext4_xattr_inode_iget(parent, ea_ino,
1153                                             le32_to_cpu(entry->e_hash),
1154                                             &ea_inode);
1155                 if (err) {
1156                         ext4_warning(parent->i_sb,
1157                                      "cleanup ea_ino %u iget error %d", ea_ino,
1158                                      err);
1159                         continue;
1160                 }
1161                 err = ext4_xattr_inode_dec_ref(handle, ea_inode);
1162                 if (err)
1163                         ext4_warning_inode(ea_inode, "cleanup dec ref error %d",
1164                                            err);
1165                 iput(ea_inode);
1166         }
1167         return saved_err;
1168 }
1169
1170 static void
1171 ext4_xattr_inode_dec_ref_all(handle_t *handle, struct inode *parent,
1172                              struct buffer_head *bh,
1173                              struct ext4_xattr_entry *first, bool block_csum,
1174                              struct ext4_xattr_inode_array **ea_inode_array,
1175                              int extra_credits, bool skip_quota)
1176 {
1177         struct inode *ea_inode;
1178         struct ext4_xattr_entry *entry;
1179         bool dirty = false;
1180         unsigned int ea_ino;
1181         int err;
1182         int credits;
1183
1184         /* One credit for dec ref on ea_inode, one for orphan list addition. */
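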
1185         credits = 2 + extra_credits;
1186
1187         for (entry = first; !IS_LAST_ENTRY(entry);
1188              entry = EXT4_XATTR_NEXT(entry)) {
1189                 if (!entry->e_value_inum)
1190                         continue;
1191                 ea_ino = le32_to_cpu(entry->e_value_inum);
1192                 err = ext4_xattr_inode_iget(parent, ea_ino,
1193                                             le32_to_cpu(entry->e_hash),
1194                                             &ea_inode);
1195                 if (err)
1196                         continue;
1197
1198                 err = ext4_expand_inode_array(ea_inode_array, ea_inode);
1199                 if (err) {
1200                         ext4_warning_inode(ea_inode,
1201                                            "Expand inode array err=%d", err);
1202                         iput(ea_inode);
1203                         continue;
1204                 }
1205
1206                 err = ext4_xattr_ensure_credits(handle, parent, credits, bh,
1207                                                 dirty, block_csum);
1208                 if (err) {
1209                         ext4_warning_inode(ea_inode, "Ensure credits err=%d",
1210                                            err);
1211                         continue;
1212                 }
1213
1214                 err = ext4_xattr_inode_dec_ref(handle, ea_inode);
1215                 if (err) {
1216                         ext4_warning_inode(ea_inode, "ea_inode dec ref err=%d",
1217                                            err);
1218                         continue;
1219                 }
1220
1221                 if (!skip_quota)
1222                         ext4_xattr_inode_free_quota(parent, ea_inode,
1223                                               le32_to_cpu(entry->e_value_size));
1224
1225                 /*
1226                  * Forget about ea_inode within the same transaction that
1227                  * decrements the ref count. This avoids duplicate decrements in
1228                  * case the rest of the work spills over to subsequent
1229                  * transactions.
1230                  */
1231                 entry->e_value_inum = 0;
1232                 entry->e_value_size = 0;
1233
1234                 dirty = true;
1235         }
1236
1237         if (dirty) {
1238                 /*
1239                  * Note that we are deliberately skipping csum calculation for
1240                  * the final update because we do not expect any journal
1241                  * restarts until xattr block is freed.
1242                  */
1243
1244                 err = ext4_handle_dirty_metadata(handle, NULL, bh);
1245                 if (err)
1246                         ext4_warning_inode(parent,
1247                                            "handle dirty metadata err=%d", err);
1248         }
1249 }
1250
1251 /*
1252  * Release the xattr block BH: If the reference count is > 1, decrement it;
1253  * otherwise free the block.
1254  */
1255 static void
1256 ext4_xattr_release_block(handle_t *handle, struct inode *inode,
1257                          struct buffer_head *bh,
1258                          struct ext4_xattr_inode_array **ea_inode_array,
1259                          int extra_credits)
1260 {
1261         struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
1262         u32 hash, ref;
1263         int error = 0;
1264
1265         BUFFER_TRACE(bh, "get_write_access");
1266         error = ext4_journal_get_write_access(handle, bh);
1267         if (error)
1268                 goto out;
1269
1270         lock_buffer(bh);
1271         hash = le32_to_cpu(BHDR(bh)->h_hash);
1272         ref = le32_to_cpu(BHDR(bh)->h_refcount);
1273         if (ref == 1) {
1274                 ea_bdebug(bh, "refcount now=0; freeing");
1275                 /*
1276                  * This must happen under buffer lock for
1277                  * ext4_xattr_block_set() to reliably detect freed block
1278                  */
1279                 if (ea_block_cache)
1280                         mb_cache_entry_delete(ea_block_cache, hash,
1281                                               bh->b_blocknr);
1282                 get_bh(bh);
1283                 unlock_buffer(bh);
1284
1285                 if (ext4_has_feature_ea_inode(inode->i_sb))
1286                         ext4_xattr_inode_dec_ref_all(handle, inode, bh,
1287                                                      BFIRST(bh),
1288                                                      true /* block_csum */,
1289                                                      ea_inode_array,
1290                                                      extra_credits,
1291                                                      true /* skip_quota */);
1292                 ext4_free_blocks(handle, inode, bh, 0, 1,
1293                                  EXT4_FREE_BLOCKS_METADATA |
1294                                  EXT4_FREE_BLOCKS_FORGET);
1295         } else {
1296                 ref--;
1297                 BHDR(bh)->h_refcount = cpu_to_le32(ref);
1298                 if (ref == EXT4_XATTR_REFCOUNT_MAX - 1) {
1299                         struct mb_cache_entry *ce;
1300
1301                         if (ea_block_cache) {
1302                                 ce = mb_cache_entry_get(ea_block_cache, hash,
1303                                                         bh->b_blocknr);
1304                                 if (ce) {
1305                                         ce->e_reusable = 1;
1306                                         mb_cache_entry_put(ea_block_cache, ce);
1307                                 }
1308                         }
1309                 }
1310
1311                 ext4_xattr_block_csum_set(inode, bh);
1312                 /*
1313                  * Beware of this ugliness: Releasing of xattr block references
1314                  * from different inodes can race and so we have to protect
1315                  * from a race where someone else frees the block (and releases
1316                  * its journal_head) before we are done dirtying the buffer. In
1317                  * nojournal mode this race is harmless and we actually cannot
1318                  * call ext4_handle_dirty_metadata() with locked buffer as
1319                  * that function can call sync_dirty_buffer() so for that case
1320                  * we handle the dirtying after unlocking the buffer.
1321                  */
1322                 if (ext4_handle_valid(handle))
1323                         error = ext4_handle_dirty_metadata(handle, inode, bh);
1324                 unlock_buffer(bh);
1325                 if (!ext4_handle_valid(handle))
1326                         error = ext4_handle_dirty_metadata(handle, inode, bh);
1327                 if (IS_SYNC(inode))
1328                         ext4_handle_sync(handle);
1329                 dquot_free_block(inode, EXT4_C2B(EXT4_SB(inode->i_sb), 1));
1330                 ea_bdebug(bh, "refcount now=%d; releasing",
1331                           le32_to_cpu(BHDR(bh)->h_refcount));
1332         }
1333 out:
1334         ext4_std_error(inode->i_sb, error);
1335         return;
1336 }
1337
1338 /*
1339  * Find the available free space for EAs. This also returns the total number of
1340  * bytes used by EA entries.
1341  */
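/*
 * Values stored in EA inodes (e_value_inum != 0) occupy no space in the
 * block or inode body, which is why they are ignored when computing the
 * lowest value offset.
 */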
1342 static size_t ext4_xattr_free_space(struct ext4_xattr_entry *last,
1343                                     size_t *min_offs, void *base, int *total)
1344 {
1345         for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
1346                 if (!last->e_value_inum && last->e_value_size) {
1347                         size_t offs = le16_to_cpu(last->e_value_offs);
1348                         if (offs < *min_offs)
1349                                 *min_offs = offs;
1350                 }
1351                 if (total)
1352                         *total += EXT4_XATTR_LEN(last->e_name_len);
1353         }
1354         return (*min_offs - ((void *)last - base) - sizeof(__u32));
1355 }
1356
1357 /*
1358  * Write the value of the EA in an inode.
1359  */
1360 static int ext4_xattr_inode_write(handle_t *handle, struct inode *ea_inode,
1361                                   const void *buf, int bufsize)
1362 {
1363         struct buffer_head *bh = NULL;
1364         unsigned long block = 0;
1365         int blocksize = ea_inode->i_sb->s_blocksize;
1366         int max_blocks = (bufsize + blocksize - 1) >> ea_inode->i_blkbits;
1367         int csize, wsize = 0;
1368         int ret = 0;
1369         int retries = 0;
1370
1371 retry:
1372         while (ret >= 0 && ret < max_blocks) {
1373                 struct ext4_map_blocks map;
1374                 map.m_lblk = block += ret;
1375                 map.m_len = max_blocks -= ret;
1376
1377                 ret = ext4_map_blocks(handle, ea_inode, &map,
1378                                       EXT4_GET_BLOCKS_CREATE);
1379                 if (ret <= 0) {
1380                         ext4_mark_inode_dirty(handle, ea_inode);
1381                         if (ret == -ENOSPC &&
1382                             ext4_should_retry_alloc(ea_inode->i_sb, &retries)) {
1383                                 ret = 0;
1384                                 goto retry;
1385                         }
1386                         break;
1387                 }
1388         }
1389
1390         if (ret < 0)
1391                 return ret;
1392
1393         block = 0;
1394         while (wsize < bufsize) {
1395                 if (bh != NULL)
1396                         brelse(bh);
1397                 csize = (bufsize - wsize) > blocksize ? blocksize :
1398                                                                 bufsize - wsize;
1399                 bh = ext4_getblk(handle, ea_inode, block, 0);
1400                 if (IS_ERR(bh))
1401                         return PTR_ERR(bh);
1402                 if (!bh) {
1403                         WARN_ON_ONCE(1);
1404                         EXT4_ERROR_INODE(ea_inode,
1405                                          "ext4_getblk() return bh = NULL");
1406                         return -EFSCORRUPTED;
1407                 }
1408                 ret = ext4_journal_get_write_access(handle, bh);
1409                 if (ret)
1410                         goto out;
1411
1412                 memcpy(bh->b_data, buf, csize);
1413                 set_buffer_uptodate(bh);
1414                 ext4_handle_dirty_metadata(handle, ea_inode, bh);
1415
1416                 buf += csize;
1417                 wsize += csize;
1418                 block += 1;
1419         }
1420
1421         inode_lock(ea_inode);
1422         i_size_write(ea_inode, wsize);
1423         ext4_update_i_disksize(ea_inode, wsize);
1424         inode_unlock(ea_inode);
1425
1426         ext4_mark_inode_dirty(handle, ea_inode);
1427
1428 out:
1429         brelse(bh);
1430
1431         return ret;
1432 }
1433
1434 /*
1435  * Create an inode to store the value of a large EA.
1436  */
1437 static struct inode *ext4_xattr_inode_create(handle_t *handle,
1438                                              struct inode *inode, u32 hash)
1439 {
1440         struct inode *ea_inode = NULL;
1441         uid_t owner[2] = { i_uid_read(inode), i_gid_read(inode) };
1442         int err;
1443
1444         if (inode->i_sb->s_root == NULL) {
1445                 ext4_warning(inode->i_sb,
1446                              "refuse to create EA inode when umounting");
1447                 WARN_ON(1);
1448                 return ERR_PTR(-EINVAL);
1449         }
1450
1451         /*
1452          * Let the next inode be the goal, so we try to allocate the EA
1453          * inode in the same group, or a nearby one.
1454          */
1455         ea_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
1456                                   S_IFREG | 0600, NULL, inode->i_ino + 1, owner,
1457                                   EXT4_EA_INODE_FL);
1458         if (!IS_ERR(ea_inode)) {
1459                 ea_inode->i_op = &ext4_file_inode_operations;
1460                 ea_inode->i_fop = &ext4_file_operations;
1461                 ext4_set_aops(ea_inode);
1462                 ext4_xattr_inode_set_class(ea_inode);
1463                 unlock_new_inode(ea_inode);
1464                 ext4_xattr_inode_set_ref(ea_inode, 1);
1465                 ext4_xattr_inode_set_hash(ea_inode, hash);
1466                 err = ext4_mark_inode_dirty(handle, ea_inode);
1467                 if (!err)
1468                         err = ext4_inode_attach_jinode(ea_inode);
1469                 if (err) {
1470                         if (ext4_xattr_inode_dec_ref(handle, ea_inode))
1471                                 ext4_warning_inode(ea_inode,
1472                                         "cleanup dec ref error %d", err);
1473                         iput(ea_inode);
1474                         return ERR_PTR(err);
1475                 }
1476
1477                 /*
1478                  * Xattr inodes are shared, therefore quota charging is performed
1479                  * at a higher level.
1480                  */
1481                 dquot_free_inode(ea_inode);
1482                 dquot_drop(ea_inode);
1483                 inode_lock(ea_inode);
1484                 ea_inode->i_flags |= S_NOQUOTA;
1485                 inode_unlock(ea_inode);
1486         }
1487
1488         return ea_inode;
1489 }
1490
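     /*
      * Look up an EA inode in the mbcache whose contents are identical to
      * @value. Candidates are verified by reading them back and comparing
      * byte-for-byte. Returns the matching inode with a reference held, or
      * NULL if there is no match.
      */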
1491 static struct inode *
1492 ext4_xattr_inode_cache_find(struct inode *inode, const void *value,
1493                             size_t value_len, u32 hash)
1494 {
1495         struct inode *ea_inode;
1496         struct mb_cache_entry *ce;
1497         struct mb_cache *ea_inode_cache = EA_INODE_CACHE(inode);
1498         void *ea_data;
1499
1500         if (!ea_inode_cache)
1501                 return NULL;
1502
1503         ce = mb_cache_entry_find_first(ea_inode_cache, hash);
1504         if (!ce)
1505                 return NULL;
1506
1507         WARN_ON_ONCE(ext4_handle_valid(journal_current_handle()) &&
1508                      !(current->flags & PF_MEMALLOC_NOFS));
1509
1510         ea_data = ext4_kvmalloc(value_len, GFP_NOFS);
1511         if (!ea_data) {
1512                 mb_cache_entry_put(ea_inode_cache, ce);
1513                 return NULL;
1514         }
1515
1516         while (ce) {
1517                 ea_inode = ext4_iget(inode->i_sb, ce->e_value,
1518                                      EXT4_IGET_NORMAL);
1519                 if (!IS_ERR(ea_inode) &&
1520                     !is_bad_inode(ea_inode) &&
1521                     (EXT4_I(ea_inode)->i_flags & EXT4_EA_INODE_FL) &&
1522                     i_size_read(ea_inode) == value_len &&
1523                     !ext4_xattr_inode_read(ea_inode, ea_data, value_len) &&
1524                     !ext4_xattr_inode_verify_hashes(ea_inode, NULL, ea_data,
1525                                                     value_len) &&
1526                     !memcmp(value, ea_data, value_len)) {
1527                         mb_cache_entry_touch(ea_inode_cache, ce);
1528                         mb_cache_entry_put(ea_inode_cache, ce);
1529                         kvfree(ea_data);
1530                         return ea_inode;
1531                 }
1532
1533                 if (!IS_ERR(ea_inode))
1534                         iput(ea_inode);
1535                 ce = mb_cache_entry_find_next(ea_inode_cache, ce);
1536         }
1537         kvfree(ea_data);
1538         return NULL;
1539 }
1540
1541 /*
1542  * Look up or create an inode holding the value of a large EA.
1543  */
1544 static int ext4_xattr_inode_lookup_create(handle_t *handle, struct inode *inode,
1545                                           const void *value, size_t value_len,
1546                                           struct inode **ret_inode)
1547 {
1548         struct inode *ea_inode;
1549         u32 hash;
1550         int err;
1551
1552         hash = ext4_xattr_inode_hash(EXT4_SB(inode->i_sb), value, value_len);
1553         ea_inode = ext4_xattr_inode_cache_find(inode, value, value_len, hash);
1554         if (ea_inode) {
1555                 err = ext4_xattr_inode_inc_ref(handle, ea_inode);
1556                 if (err) {
1557                         iput(ea_inode);
1558                         return err;
1559                 }
1560
1561                 *ret_inode = ea_inode;
1562                 return 0;
1563         }
1564
1565         /* Create an inode for the EA value */
1566         ea_inode = ext4_xattr_inode_create(handle, inode, hash);
1567         if (IS_ERR(ea_inode))
1568                 return PTR_ERR(ea_inode);
1569
1570         err = ext4_xattr_inode_write(handle, ea_inode, value, value_len);
1571         if (err) {
1572                 ext4_xattr_inode_dec_ref(handle, ea_inode);
1573                 iput(ea_inode);
1574                 return err;
1575         }
1576
1577         if (EA_INODE_CACHE(inode))
1578                 mb_cache_entry_create(EA_INODE_CACHE(inode), GFP_NOFS, hash,
1579                                       ea_inode->i_ino, true /* reusable */);
1580
1581         *ret_inode = ea_inode;
1582         return 0;
1583 }
1584
1585 /*
1586  * Reserve min(block_size/8, 1024) bytes for xattr entries/names if ea_inode
1587  * feature is enabled.
1588  */
1589 #define EXT4_XATTR_BLOCK_RESERVE(inode) min(i_blocksize(inode)/8, 1024U)
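     /* e.g. with a 4 KiB block size this reserves min(4096 / 8, 1024) = 512 bytes */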
1590
1591 static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
1592                                 struct ext4_xattr_search *s,
1593                                 handle_t *handle, struct inode *inode,
1594                                 bool is_block)
1595 {
1596         struct ext4_xattr_entry *last, *next;
1597         struct ext4_xattr_entry *here = s->here;
1598         size_t min_offs = s->end - s->base, name_len = strlen(i->name);
1599         int in_inode = i->in_inode;
1600         struct inode *old_ea_inode = NULL;
1601         struct inode *new_ea_inode = NULL;
1602         size_t old_size, new_size;
1603         int ret;
1604
1605         /* Space used by old and new values. */
1606         old_size = (!s->not_found && !here->e_value_inum) ?
1607                         EXT4_XATTR_SIZE(le32_to_cpu(here->e_value_size)) : 0;
1608         new_size = (i->value && !in_inode) ? EXT4_XATTR_SIZE(i->value_len) : 0;
1609
1610         /*
1611          * Optimization for the simple case when old and new values have the
1612          * same padded sizes. Not applicable if external inodes are involved.
1613          */
1614         if (new_size && new_size == old_size) {
1615                 size_t offs = le16_to_cpu(here->e_value_offs);
1616                 void *val = s->base + offs;
1617
1618                 here->e_value_size = cpu_to_le32(i->value_len);
1619                 if (i->value == EXT4_ZERO_XATTR_VALUE) {
1620                         memset(val, 0, new_size);
1621                 } else {
1622                         memcpy(val, i->value, i->value_len);
1623                         /* Clear padding bytes. */
1624                         memset(val + i->value_len, 0, new_size - i->value_len);
1625                 }
1626                 goto update_hash;
1627         }
1628
1629         /* Compute min_offs and last. */
1630         last = s->first;
1631         for (; !IS_LAST_ENTRY(last); last = next) {
1632                 next = EXT4_XATTR_NEXT(last);
1633                 if ((void *)next >= s->end) {
1634                         EXT4_ERROR_INODE(inode, "corrupted xattr entries");
1635                         ret = -EFSCORRUPTED;
1636                         goto out;
1637                 }
1638                 if (!last->e_value_inum && last->e_value_size) {
1639                         size_t offs = le16_to_cpu(last->e_value_offs);
1640                         if (offs < min_offs)
1641                                 min_offs = offs;
1642                 }
1643         }
1644
1645         /* Check whether we have enough space. */
1646         if (i->value) {
1647                 size_t free;
1648
1649                 free = min_offs - ((void *)last - s->base) - sizeof(__u32);
1650                 if (!s->not_found)
1651                         free += EXT4_XATTR_LEN(name_len) + old_size;
1652
1653                 if (free < EXT4_XATTR_LEN(name_len) + new_size) {
1654                         ret = -ENOSPC;
1655                         goto out;
1656                 }
1657
1658                 /*
1659                  * If storing the value in an external inode is an option,
1660                  * reserve space for xattr entries/names in the external
1661                  * attribute block so that a long value does not occupy the
1662                  * whole space and prevent further entries from being added.
1663                  */
1664                 if (ext4_has_feature_ea_inode(inode->i_sb) &&
1665                     new_size && is_block &&
1666                     (min_offs + old_size - new_size) <
1667                                         EXT4_XATTR_BLOCK_RESERVE(inode)) {
1668                         ret = -ENOSPC;
1669                         goto out;
1670                 }
1671         }
1672
1673         /*
1674          * Getting access to old and new ea inodes is subject to failures.
1675          * Finish that work before doing any modifications to the xattr data.
1676          */
1677         if (!s->not_found && here->e_value_inum) {
1678                 ret = ext4_xattr_inode_iget(inode,
1679                                             le32_to_cpu(here->e_value_inum),
1680                                             le32_to_cpu(here->e_hash),
1681                                             &old_ea_inode);
1682                 if (ret) {
1683                         old_ea_inode = NULL;
1684                         goto out;
1685                 }
1686         }
1687         if (i->value && in_inode) {
1688                 WARN_ON_ONCE(!i->value_len);
1689
1690                 ret = ext4_xattr_inode_alloc_quota(inode, i->value_len);
1691                 if (ret)
1692                         goto out;
1693
1694                 ret = ext4_xattr_inode_lookup_create(handle, inode, i->value,
1695                                                      i->value_len,
1696                                                      &new_ea_inode);
1697                 if (ret) {
1698                         new_ea_inode = NULL;
1699                         ext4_xattr_inode_free_quota(inode, NULL, i->value_len);
1700                         goto out;
1701                 }
1702         }
1703
1704         if (old_ea_inode) {
1705                 /* We are ready to release ref count on the old_ea_inode. */
1706                 ret = ext4_xattr_inode_dec_ref(handle, old_ea_inode);
1707                 if (ret) {
1708                         /* Release newly acquired ref count on new_ea_inode. */
1709                         if (new_ea_inode) {
1710                                 int err;
1711
1712                                 err = ext4_xattr_inode_dec_ref(handle,
1713                                                                new_ea_inode);
1714                                 if (err)
1715                                         ext4_warning_inode(new_ea_inode,
1716                                                   "dec ref new_ea_inode err=%d",
1717                                                   err);
1718                                 ext4_xattr_inode_free_quota(inode, new_ea_inode,
1719                                                             i->value_len);
1720                         }
1721                         goto out;
1722                 }
1723
1724                 ext4_xattr_inode_free_quota(inode, old_ea_inode,
1725                                             le32_to_cpu(here->e_value_size));
1726         }
1727
1728         /* No failures allowed past this point. */
1729
1730         if (!s->not_found && here->e_value_size && !here->e_value_inum) {
1731                 /* Remove the old value. */
1732                 void *first_val = s->base + min_offs;
1733                 size_t offs = le16_to_cpu(here->e_value_offs);
1734                 void *val = s->base + offs;
1735
1736                 memmove(first_val + old_size, first_val, val - first_val);
1737                 memset(first_val, 0, old_size);
1738                 min_offs += old_size;
1739
1740                 /* Adjust all value offsets. */
1741                 last = s->first;
1742                 while (!IS_LAST_ENTRY(last)) {
1743                         size_t o = le16_to_cpu(last->e_value_offs);
1744
1745                         if (!last->e_value_inum &&
1746                             last->e_value_size && o < offs)
1747                                 last->e_value_offs = cpu_to_le16(o + old_size);
1748                         last = EXT4_XATTR_NEXT(last);
1749                 }
1750         }
1751
1752         if (!i->value) {
1753                 /* Remove old name. */
1754                 size_t size = EXT4_XATTR_LEN(name_len);
1755
1756                 last = ENTRY((void *)last - size);
1757                 memmove(here, (void *)here + size,
1758                         (void *)last - (void *)here + sizeof(__u32));
1759                 memset(last, 0, size);
1760
1761                 /*
1762                  * Update i_inline_off - moved ibody region might contain
1763                  * system.data attribute.  Handling a failure here won't
1764                  * cause other complications for setting an xattr.
1765                  */
1766                 if (!is_block && ext4_has_inline_data(inode)) {
1767                         ret = ext4_find_inline_data_nolock(inode);
1768                         if (ret) {
1769                                 ext4_warning_inode(inode,
1770                                         "unable to update i_inline_off");
1771                                 goto out;
1772                         }
1773                 }
1774         } else if (s->not_found) {
1775                 /* Insert new name. */
1776                 size_t size = EXT4_XATTR_LEN(name_len);
1777                 size_t rest = (void *)last - (void *)here + sizeof(__u32);
1778
1779                 memmove((void *)here + size, here, rest);
1780                 memset(here, 0, size);
1781                 here->e_name_index = i->name_index;
1782                 here->e_name_len = name_len;
1783                 memcpy(here->e_name, i->name, name_len);
1784         } else {
1785                 /* This is an update, reset value info. */
1786                 here->e_value_inum = 0;
1787                 here->e_value_offs = 0;
1788                 here->e_value_size = 0;
1789         }
1790
1791         if (i->value) {
1792                 /* Insert new value. */
1793                 if (in_inode) {
1794                         here->e_value_inum = cpu_to_le32(new_ea_inode->i_ino);
1795                 } else if (i->value_len) {
1796                         void *val = s->base + min_offs - new_size;
1797
1798                         here->e_value_offs = cpu_to_le16(min_offs - new_size);
1799                         if (i->value == EXT4_ZERO_XATTR_VALUE) {
1800                                 memset(val, 0, new_size);
1801                         } else {
1802                                 memcpy(val, i->value, i->value_len);
1803                                 /* Clear padding bytes. */
1804                                 memset(val + i->value_len, 0,
1805                                        new_size - i->value_len);
1806                         }
1807                 }
1808                 here->e_value_size = cpu_to_le32(i->value_len);
1809         }
1810
1811 update_hash:
1812         if (i->value) {
1813                 __le32 hash = 0;
1814
1815                 /* Entry hash calculation. */
1816                 if (in_inode) {
1817                         __le32 crc32c_hash;
1818
1819                         /*
1820                          * Feed crc32c hash instead of the raw value for entry
1821                          * hash calculation. This avoids walking the
1822                          * potentially long value buffer again.
1823                          */
1824                         crc32c_hash = cpu_to_le32(
1825                                        ext4_xattr_inode_get_hash(new_ea_inode));
1826                         hash = ext4_xattr_hash_entry(here->e_name,
1827                                                      here->e_name_len,
1828                                                      &crc32c_hash, 1);
1829                 } else if (is_block) {
1830                         __le32 *value = s->base + le16_to_cpu(
1831                                                         here->e_value_offs);
1832
1833                         hash = ext4_xattr_hash_entry(here->e_name,
1834                                                      here->e_name_len, value,
1835                                                      new_size >> 2);
1836                 }
1837                 here->e_hash = hash;
1838         }
1839
1840         if (is_block)
1841                 ext4_xattr_rehash((struct ext4_xattr_header *)s->base);
1842
1843         ret = 0;
1844 out:
1845         iput(old_ea_inode);
1846         iput(new_ea_inode);
1847         return ret;
1848 }
1849
1850 struct ext4_xattr_block_find {
1851         struct ext4_xattr_search s;
1852         struct buffer_head *bh;
1853 };
1854
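     /*
      * ext4_xattr_block_find()
      *
      * Look up the named attribute in the inode's external xattr block, if
      * one exists. On success the search state in bs->s describes the block
      * and bs->s.not_found indicates whether the attribute was found.
      */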
1855 static int
1856 ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
1857                       struct ext4_xattr_block_find *bs)
1858 {
1859         struct super_block *sb = inode->i_sb;
1860         int error;
1861
1862         ea_idebug(inode, "name=%d.%s, value=%p, value_len=%ld",
1863                   i->name_index, i->name, i->value, (long)i->value_len);
1864
1865         if (EXT4_I(inode)->i_file_acl) {
1866                 /* The inode already has an extended attribute block. */
1867                 bs->bh = ext4_sb_bread(sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
1868                 if (IS_ERR(bs->bh)) {
1869                         error = PTR_ERR(bs->bh);
1870                         bs->bh = NULL;
1871                         return error;
1872                 }
1873                 ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
1874                         atomic_read(&(bs->bh->b_count)),
1875                         le32_to_cpu(BHDR(bs->bh)->h_refcount));
1876                 error = ext4_xattr_check_block(inode, bs->bh);
1877                 if (error)
1878                         return error;
1879                 /* Find the named attribute. */
1880                 bs->s.base = BHDR(bs->bh);
1881                 bs->s.first = BFIRST(bs->bh);
1882                 bs->s.end = bs->bh->b_data + bs->bh->b_size;
1883                 bs->s.here = bs->s.first;
1884                 error = xattr_find_entry(inode, &bs->s.here, bs->s.end,
1885                                          i->name_index, i->name, 1);
1886                 if (error && error != -ENODATA)
1887                         return error;
1888                 bs->s.not_found = error;
1889         }
1890         return 0;
1891 }
1892
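     /*
      * ext4_xattr_block_set()
      *
      * Apply the change described by @i to the external xattr block. A block
      * referenced only by this inode is modified in place; a shared block is
      * copied first. The result is then either deduplicated against an
      * identical block found in the mbcache, kept as is, or written to a
      * newly allocated block, and i_file_acl is updated to point at it.
      */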
1893 static int
1894 ext4_xattr_block_set(handle_t *handle, struct inode *inode,
1895                      struct ext4_xattr_info *i,
1896                      struct ext4_xattr_block_find *bs)
1897 {
1898         struct super_block *sb = inode->i_sb;
1899         struct buffer_head *new_bh = NULL;
1900         struct ext4_xattr_search s_copy = bs->s;
1901         struct ext4_xattr_search *s = &s_copy;
1902         struct mb_cache_entry *ce = NULL;
1903         int error = 0;
1904         struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
1905         struct inode *ea_inode = NULL, *tmp_inode;
1906         size_t old_ea_inode_quota = 0;
1907         unsigned int ea_ino;
1908
1909
1910 #define header(x) ((struct ext4_xattr_header *)(x))
1911
1912         if (s->base) {
1913                 BUFFER_TRACE(bs->bh, "get_write_access");
1914                 error = ext4_journal_get_write_access(handle, bs->bh);
1915                 if (error)
1916                         goto cleanup;
1917                 lock_buffer(bs->bh);
1918
1919                 if (header(s->base)->h_refcount == cpu_to_le32(1)) {
1920                         __u32 hash = le32_to_cpu(BHDR(bs->bh)->h_hash);
1921
1922                         /*
1923                          * This must happen under buffer lock for
1924                          * ext4_xattr_block_set() to reliably detect a
1925                          * modified block.
1926                          */
1927                         if (ea_block_cache)
1928                                 mb_cache_entry_delete(ea_block_cache, hash,
1929                                                       bs->bh->b_blocknr);
1930                         ea_bdebug(bs->bh, "modifying in-place");
1931                         error = ext4_xattr_set_entry(i, s, handle, inode,
1932                                                      true /* is_block */);
1933                         ext4_xattr_block_csum_set(inode, bs->bh);
1934                         unlock_buffer(bs->bh);
1935                         if (error == -EFSCORRUPTED)
1936                                 goto bad_block;
1937                         if (!error)
1938                                 error = ext4_handle_dirty_metadata(handle,
1939                                                                    inode,
1940                                                                    bs->bh);
1941                         if (error)
1942                                 goto cleanup;
1943                         goto inserted;
1944                 } else {
1945                         int offset = (char *)s->here - bs->bh->b_data;
1946
1947                         unlock_buffer(bs->bh);
1948                         ea_bdebug(bs->bh, "cloning");
1949                         s->base = kmalloc(bs->bh->b_size, GFP_NOFS);
1950                         error = -ENOMEM;
1951                         if (s->base == NULL)
1952                                 goto cleanup;
1953                         memcpy(s->base, BHDR(bs->bh), bs->bh->b_size);
1954                         s->first = ENTRY(header(s->base)+1);
1955                         header(s->base)->h_refcount = cpu_to_le32(1);
1956                         s->here = ENTRY(s->base + offset);
1957                         s->end = s->base + bs->bh->b_size;
1958
1959                         /*
1960                          * If existing entry points to an xattr inode, we need
1961                          * to prevent ext4_xattr_set_entry() from decrementing
1962                          * ref count on it because the reference belongs to the
1963                          * original block. In this case, make the entry look
1964                          * like it has an empty value.
1965                          */
1966                         if (!s->not_found && s->here->e_value_inum) {
1967                                 ea_ino = le32_to_cpu(s->here->e_value_inum);
1968                                 error = ext4_xattr_inode_iget(inode, ea_ino,
1969                                               le32_to_cpu(s->here->e_hash),
1970                                               &tmp_inode);
1971                                 if (error)
1972                                         goto cleanup;
1973
1974                                 if (!ext4_test_inode_state(tmp_inode,
1975                                                 EXT4_STATE_LUSTRE_EA_INODE)) {
1976                                         /*
1977                                          * Defer quota free call for previous
1978                                          * inode until success is guaranteed.
1979                                          */
1980                                         old_ea_inode_quota = le32_to_cpu(
1981                                                         s->here->e_value_size);
1982                                 }
1983                                 iput(tmp_inode);
1984
1985                                 s->here->e_value_inum = 0;
1986                                 s->here->e_value_size = 0;
1987                         }
1988                 }
1989         } else {
1990                 /* Allocate a buffer where we construct the new block. */
1991                 s->base = kzalloc(sb->s_blocksize, GFP_NOFS);
1992                 /* assert(header == s->base) */
1993                 error = -ENOMEM;
1994                 if (s->base == NULL)
1995                         goto cleanup;
1996                 header(s->base)->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
1997                 header(s->base)->h_blocks = cpu_to_le32(1);
1998                 header(s->base)->h_refcount = cpu_to_le32(1);
1999                 s->first = ENTRY(header(s->base)+1);
2000                 s->here = ENTRY(header(s->base)+1);
2001                 s->end = s->base + sb->s_blocksize;
2002         }
2003
2004         error = ext4_xattr_set_entry(i, s, handle, inode, true /* is_block */);
2005         if (error == -EFSCORRUPTED)
2006                 goto bad_block;
2007         if (error)
2008                 goto cleanup;
2009
2010         if (i->value && s->here->e_value_inum) {
2011                 /*
2012                  * A ref count on ea_inode has been taken as part of the call to
2013                  * ext4_xattr_set_entry() above. We would like to drop this
2014                  * extra ref but we have to wait until the xattr block is
2015                  * initialized and has its own ref count on the ea_inode.
2016                  */
2017                 ea_ino = le32_to_cpu(s->here->e_value_inum);
2018                 error = ext4_xattr_inode_iget(inode, ea_ino,
2019                                               le32_to_cpu(s->here->e_hash),
2020                                               &ea_inode);
2021                 if (error) {
2022                         ea_inode = NULL;
2023                         goto cleanup;
2024                 }
2025         }
2026
2027 inserted:
2028         if (!IS_LAST_ENTRY(s->first)) {
2029                 new_bh = ext4_xattr_block_cache_find(inode, header(s->base),
2030                                                      &ce);
2031                 if (new_bh) {
2032                         /* We found an identical block in the cache. */
2033                         if (new_bh == bs->bh)
2034                                 ea_bdebug(new_bh, "keeping");
2035                         else {
2036                                 u32 ref;
2037
2038 #ifdef EXT4_XATTR_DEBUG
2039                                 WARN_ON_ONCE(dquot_initialize_needed(inode));
2040 #endif
2041                                 /* The old block is released after updating
2042                                    the inode. */
2043                                 error = dquot_alloc_block(inode,
2044                                                 EXT4_C2B(EXT4_SB(sb), 1));
2045                                 if (error)
2046                                         goto cleanup;
2047                                 BUFFER_TRACE(new_bh, "get_write_access");
2048                                 error = ext4_journal_get_write_access(handle,
2049                                                                       new_bh);
2050                                 if (error)
2051                                         goto cleanup_dquot;
2052                                 lock_buffer(new_bh);
2053                                 /*
2054                                  * We have to be careful about races with
2055                                  * freeing, rehashing or adding references to
2056                                  * xattr block. Once we hold buffer lock xattr
2057                                  * block's state is stable so we can check
2058                                  * whether the block got freed / rehashed or
2059                                  * not.  Since we unhash mbcache entry under
2060                                  * buffer lock when freeing / rehashing xattr
2061                                  * block, checking whether entry is still
2062                                  * hashed is reliable. Same rules hold for
2063                                  * e_reusable handling.
2064                                  */
2065                                 if (hlist_bl_unhashed(&ce->e_hash_list) ||
2066                                     !ce->e_reusable) {
2067                                         /*
2068                                          * Undo everything and check mbcache
2069                                          * again.
2070                                          */
2071                                         unlock_buffer(new_bh);
2072                                         dquot_free_block(inode,
2073                                                          EXT4_C2B(EXT4_SB(sb),
2074                                                                   1));
2075                                         brelse(new_bh);
2076                                         mb_cache_entry_put(ea_block_cache, ce);
2077                                         ce = NULL;
2078                                         new_bh = NULL;
2079                                         goto inserted;
2080                                 }
2081                                 ref = le32_to_cpu(BHDR(new_bh)->h_refcount) + 1;
2082                                 BHDR(new_bh)->h_refcount = cpu_to_le32(ref);
2083                                 if (ref >= EXT4_XATTR_REFCOUNT_MAX)
2084                                         ce->e_reusable = 0;
2085                                 ea_bdebug(new_bh, "reusing; refcount now=%d",
2086                                           ref);
2087                                 ext4_xattr_block_csum_set(inode, new_bh);
2088                                 unlock_buffer(new_bh);
2089                                 error = ext4_handle_dirty_metadata(handle,
2090                                                                    inode,
2091                                                                    new_bh);
2092                                 if (error)
2093                                         goto cleanup_dquot;
2094                         }
2095                         mb_cache_entry_touch(ea_block_cache, ce);
2096                         mb_cache_entry_put(ea_block_cache, ce);
2097                         ce = NULL;
2098                 } else if (bs->bh && s->base == bs->bh->b_data) {
2099                         /* We were modifying this block in-place. */
2100                         ea_bdebug(bs->bh, "keeping this block");
2101                         ext4_xattr_block_cache_insert(ea_block_cache, bs->bh);
2102                         new_bh = bs->bh;
2103                         get_bh(new_bh);
2104                 } else {
2105                         /* We need to allocate a new block */
2106                         ext4_fsblk_t goal, block;
2107
2108 #ifdef EXT4_XATTR_DEBUG
2109                         WARN_ON_ONCE(dquot_initialize_needed(inode));
2110 #endif
2111                         goal = ext4_group_first_block_no(sb,
2112                                                 EXT4_I(inode)->i_block_group);
2113                         block = ext4_new_meta_blocks(handle, inode, goal, 0,
2114                                                      NULL, &error);
2115                         if (error)
2116                                 goto cleanup;
2117
2118                         ea_idebug(inode, "creating block %llu",
2119                                   (unsigned long long)block);
2120
2121                         new_bh = sb_getblk(sb, block);
2122                         if (unlikely(!new_bh)) {
2123                                 error = -ENOMEM;
2124 getblk_failed:
2125                                 ext4_free_blocks(handle, inode, NULL, block, 1,
2126                                                  EXT4_FREE_BLOCKS_METADATA);
2127                                 goto cleanup;
2128                         }
2129                         error = ext4_xattr_inode_inc_ref_all(handle, inode,
2130                                                       ENTRY(header(s->base)+1));
2131                         if (error)
2132                                 goto getblk_failed;
2133                         if (ea_inode) {
2134                                 /* Drop the extra ref on ea_inode. */
2135                                 error = ext4_xattr_inode_dec_ref(handle,
2136                                                                  ea_inode);
2137                                 if (error)
2138                                         ext4_warning_inode(ea_inode,
2139                                                            "dec ref error=%d",
2140                                                            error);
2141                                 iput(ea_inode);
2142                                 ea_inode = NULL;
2143                         }
2144
2145                         lock_buffer(new_bh);
2146                         error = ext4_journal_get_create_access(handle, new_bh);
2147                         if (error) {
2148                                 unlock_buffer(new_bh);
2149                                 error = -EIO;
2150                                 goto getblk_failed;
2151                         }
2152                         memcpy(new_bh->b_data, s->base, new_bh->b_size);
2153                         ext4_xattr_block_csum_set(inode, new_bh);
2154                         set_buffer_uptodate(new_bh);
2155                         unlock_buffer(new_bh);
2156                         ext4_xattr_block_cache_insert(ea_block_cache, new_bh);
2157                         error = ext4_handle_dirty_metadata(handle, inode,
2158                                                            new_bh);
2159                         if (error)
2160                                 goto cleanup;
2161                 }
2162         }
2163
2164         if (old_ea_inode_quota)
2165                 ext4_xattr_inode_free_quota(inode, NULL, old_ea_inode_quota);
2166
2167         /* Update the inode. */
2168         EXT4_I(inode)->i_file_acl = new_bh ? new_bh->b_blocknr : 0;
2169
2170         /* Drop the previous xattr block. */
2171         if (bs->bh && bs->bh != new_bh) {
2172                 struct ext4_xattr_inode_array *ea_inode_array = NULL;
2173
2174                 ext4_xattr_release_block(handle, inode, bs->bh,
2175                                          &ea_inode_array,
2176                                          0 /* extra_credits */);
2177                 ext4_xattr_inode_array_free(ea_inode_array);
2178         }
2179         error = 0;
2180
2181 cleanup:
2182         if (ea_inode) {
2183                 int error2;
2184
2185                 error2 = ext4_xattr_inode_dec_ref(handle, ea_inode);
2186                 if (error2)
2187                         ext4_warning_inode(ea_inode, "dec ref error=%d",
2188                                            error2);
2189
2190                 /* If there was an error, revert the quota charge. */
2191                 if (error)
2192                         ext4_xattr_inode_free_quota(inode, ea_inode,
2193                                                     i_size_read(ea_inode));
2194                 iput(ea_inode);
2195         }
2196         if (ce)
2197                 mb_cache_entry_put(ea_block_cache, ce);
2198         brelse(new_bh);
2199         if (!(bs->bh && s->base == bs->bh->b_data))
2200                 kfree(s->base);
2201
2202         return error;
2203
2204 cleanup_dquot:
2205         dquot_free_block(inode, EXT4_C2B(EXT4_SB(sb), 1));
2206         goto cleanup;
2207
2208 bad_block:
2209         EXT4_ERROR_INODE(inode, "bad block %llu",
2210                          EXT4_I(inode)->i_file_acl);
2211         goto cleanup;
2212
2213 #undef header
2214 }
2215
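     /*
      * ext4_xattr_ibody_find()
      *
      * Look up the named attribute in the in-inode xattr area. On success
      * the search state in is->s describes that area and is->s.not_found
      * indicates whether the attribute was found.
      */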
2216 int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
2217                           struct ext4_xattr_ibody_find *is)
2218 {
2219         struct ext4_xattr_ibody_header *header;
2220         struct ext4_inode *raw_inode;
2221         int error;
2222
2223         if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
2224                 return 0;
2225
2226         raw_inode = ext4_raw_inode(&is->iloc);
2227         header = IHDR(inode, raw_inode);
2228         is->s.base = is->s.first = IFIRST(header);
2229         is->s.here = is->s.first;
2230         is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2231         if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2232                 error = xattr_check_inode(inode, header, is->s.end);
2233                 if (error)
2234                         return error;
2235                 /* Find the named attribute. */
2236                 error = xattr_find_entry(inode, &is->s.here, is->s.end,
2237                                          i->name_index, i->name, 0);
2238                 if (error && error != -ENODATA)
2239                         return error;
2240                 is->s.not_found = error;
2241         }
2242         return 0;
2243 }
2244
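     /*
      * ext4_xattr_ibody_set()
      *
      * Apply the change described by @i to the in-inode xattr area, then
      * update the header magic and the EXT4_STATE_XATTR flag to reflect
      * whether any entries remain.
      */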
2245 int ext4_xattr_ibody_set(handle_t *handle, struct inode *inode,
2246                                 struct ext4_xattr_info *i,
2247                                 struct ext4_xattr_ibody_find *is)
2248 {
2249         struct ext4_xattr_ibody_header *header;
2250         struct ext4_xattr_search *s = &is->s;
2251         int error;
2252
2253         if (!EXT4_INODE_HAS_XATTR_SPACE(inode))
2254                 return -ENOSPC;
2255
2256         error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
2257         if (error)
2258                 return error;
2259         header = IHDR(inode, ext4_raw_inode(&is->iloc));
2260         if (!IS_LAST_ENTRY(s->first)) {
2261                 header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
2262                 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
2263         } else {
2264                 header->h_magic = cpu_to_le32(0);
2265                 ext4_clear_inode_state(inode, EXT4_STATE_XATTR);
2266         }
2267         return 0;
2268 }
2269
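     /*
      * Return nonzero if the value stored in place for s->here is identical
      * to the new value in @i. Values stored in an external EA inode always
      * compare as different.
      */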
2270 static int ext4_xattr_value_same(struct ext4_xattr_search *s,
2271                                  struct ext4_xattr_info *i)
2272 {
2273         void *value;
2274
2275         /* When e_value_inum is set the value is stored externally. */
2276         if (s->here->e_value_inum)
2277                 return 0;
2278         if (le32_to_cpu(s->here->e_value_size) != i->value_len)
2279                 return 0;
2280         value = ((void *)s->base) + le16_to_cpu(s->here->e_value_offs);
2281         return !memcmp(value, i->value, i->value_len);
2282 }
2283
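     /*
      * Read and verify the inode's external xattr block. Returns NULL when
      * the inode has none, the buffer head on success, or an ERR_PTR() on
      * failure.
      */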
2284 static struct buffer_head *ext4_xattr_get_block(struct inode *inode)
2285 {
2286         struct buffer_head *bh;
2287         int error;
2288
2289         if (!EXT4_I(inode)->i_file_acl)
2290                 return NULL;
2291         bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2292         if (IS_ERR(bh))
2293                 return bh;
2294         error = ext4_xattr_check_block(inode, bh);
2295         if (error) {
2296                 brelse(bh);
2297                 return ERR_PTR(error);
2298         }
2299         return bh;
2300 }
2301
2302 /*
2303  * ext4_xattr_set_handle()
2304  *
2305  * Create, replace or remove an extended attribute for this inode.  Value
2306  * is NULL to remove an existing extended attribute, and non-NULL to
2307  * either replace an existing extended attribute, or create a new extended
2308  * attribute. The flags XATTR_REPLACE and XATTR_CREATE
2309  * specify that an extended attribute must exist and must not exist
2310  * previous to the call, respectively.
2311  * prior to the call, respectively.
2312  * Returns 0, or a negative error number on failure.
2313  */
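     /*
      * Illustrative sketch only (attribute name and value are made up): with
      * a running handle that has enough journal credits,
      *
      *   ext4_xattr_set_handle(handle, inode, EXT4_XATTR_INDEX_USER,
      *                         "foo", buf, len, 0);
      *
      * sets user.foo, while passing value == NULL removes it instead.
      */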
2314 int
2315 ext4_xattr_set_handle(handle_t *handle, struct inode *inode, int name_index,
2316                       const char *name, const void *value, size_t value_len,
2317                       int flags)
2318 {
2319         struct ext4_xattr_info i = {
2320                 .name_index = name_index,
2321                 .name = name,
2322                 .value = value,
2323                 .value_len = value_len,
2324                 .in_inode = 0,
2325         };
2326         struct ext4_xattr_ibody_find is = {
2327                 .s = { .not_found = -ENODATA, },
2328         };
2329         struct ext4_xattr_block_find bs = {
2330                 .s = { .not_found = -ENODATA, },
2331         };
2332         int no_expand;
2333         int error;
2334
2335         if (!name)
2336                 return -EINVAL;
2337         if (strlen(name) > 255)
2338                 return -ERANGE;
2339
2340         ext4_write_lock_xattr(inode, &no_expand);
2341
2342         /* Check journal credits under write lock. */
2343         if (ext4_handle_valid(handle)) {
2344                 struct buffer_head *bh;
2345                 int credits;
2346
2347                 bh = ext4_xattr_get_block(inode);
2348                 if (IS_ERR(bh)) {
2349                         error = PTR_ERR(bh);
2350                         goto cleanup;
2351                 }
2352
2353                 credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2354                                                    value_len,
2355                                                    flags & XATTR_CREATE);
2356                 brelse(bh);
2357
2358                 if (!ext4_handle_has_enough_credits(handle, credits)) {
2359                         error = -ENOSPC;
2360                         goto cleanup;
2361                 }
2362                 WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS));
2363         }
2364
2365         error = ext4_reserve_inode_write(handle, inode, &is.iloc);
2366         if (error)
2367                 goto cleanup;
2368
2369         if (ext4_test_inode_state(inode, EXT4_STATE_NEW)) {
2370                 struct ext4_inode *raw_inode = ext4_raw_inode(&is.iloc);
2371                 memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
2372                 ext4_clear_inode_state(inode, EXT4_STATE_NEW);
2373         }
2374
2375         error = ext4_xattr_ibody_find(inode, &i, &is);
2376         if (error)
2377                 goto cleanup;
2378         if (is.s.not_found)
2379                 error = ext4_xattr_block_find(inode, &i, &bs);
2380         if (error)
2381                 goto cleanup;
2382         if (is.s.not_found && bs.s.not_found) {
2383                 error = -ENODATA;
2384                 if (flags & XATTR_REPLACE)
2385                         goto cleanup;
2386                 error = 0;
2387                 if (!value)
2388                         goto cleanup;
2389         } else {
2390                 error = -EEXIST;
2391                 if (flags & XATTR_CREATE)
2392                         goto cleanup;
2393         }
2394
2395         if (!value) {
2396                 if (!is.s.not_found)
2397                         error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2398                 else if (!bs.s.not_found)
2399                         error = ext4_xattr_block_set(handle, inode, &i, &bs);
2400         } else {
2401                 error = 0;
2402                 /* Xattr value did not change? Save us some work and bail out */
2403                 if (!is.s.not_found && ext4_xattr_value_same(&is.s, &i))
2404                         goto cleanup;
2405                 if (!bs.s.not_found && ext4_xattr_value_same(&bs.s, &i))
2406                         goto cleanup;
2407
2408                 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2409                     (EXT4_XATTR_SIZE(i.value_len) >
2410                         EXT4_XATTR_MIN_LARGE_EA_SIZE(inode->i_sb->s_blocksize)))
2411                         i.in_inode = 1;
2412 retry_inode:
2413                 error = ext4_xattr_ibody_set(handle, inode, &i, &is);
2414                 if (!error && !bs.s.not_found) {
2415                         i.value = NULL;
2416                         error = ext4_xattr_block_set(handle, inode, &i, &bs);
2417                 } else if (error == -ENOSPC) {
2418                         if (EXT4_I(inode)->i_file_acl && !bs.s.base) {
2419                                 brelse(bs.bh);
2420                                 bs.bh = NULL;
2421                                 error = ext4_xattr_block_find(inode, &i, &bs);
2422                                 if (error)
2423                                         goto cleanup;
2424                         }
2425                         error = ext4_xattr_block_set(handle, inode, &i, &bs);
2426                         if (!error && !is.s.not_found) {
2427                                 i.value = NULL;
2428                                 error = ext4_xattr_ibody_set(handle, inode, &i,
2429                                                              &is);
2430                         } else if (error == -ENOSPC) {
2431                                 /*
2432                                  * Xattr does not fit in the block, store at
2433                                  * external inode if possible.
2434                                  */
2435                                 if (ext4_has_feature_ea_inode(inode->i_sb) &&
2436                                     i.value_len && !i.in_inode) {
2437                                         i.in_inode = 1;
2438                                         goto retry_inode;
2439                                 }
2440                         }
2441                 }
2442         }
2443         if (!error) {
2444                 ext4_xattr_update_super_block(handle, inode->i_sb);
2445                 inode->i_ctime = current_time(inode);
2446                 if (!value)
2447                         no_expand = 0;
2448                 error = ext4_mark_iloc_dirty(handle, inode, &is.iloc);
2449                 /*
2450                  * The bh is consumed by ext4_mark_iloc_dirty, even with
2451                  * error != 0.
2452                  */
2453                 is.iloc.bh = NULL;
2454                 if (IS_SYNC(inode))
2455                         ext4_handle_sync(handle);
2456         }
2457
2458 cleanup:
2459         brelse(is.iloc.bh);
2460         brelse(bs.bh);
2461         ext4_write_unlock_xattr(inode, &no_expand);
2462         return error;
2463 }
2464
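     /*
      * Compute the journal credits needed to set an xattr of @value_len bytes
      * on @inode, based on the current external xattr block (if any). With no
      * journal, 0 is returned and *credits is left at 0.
      */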
2465 int ext4_xattr_set_credits(struct inode *inode, size_t value_len,
2466                            bool is_create, int *credits)
2467 {
2468         struct buffer_head *bh;
2469         int err;
2470
2471         *credits = 0;
2472
2473         if (!EXT4_SB(inode->i_sb)->s_journal)
2474                 return 0;
2475
2476         down_read(&EXT4_I(inode)->xattr_sem);
2477
2478         bh = ext4_xattr_get_block(inode);
2479         if (IS_ERR(bh)) {
2480                 err = PTR_ERR(bh);
2481         } else {
2482                 *credits = __ext4_xattr_set_credits(inode->i_sb, inode, bh,
2483                                                     value_len, is_create);
2484                 brelse(bh);
2485                 err = 0;
2486         }
2487
2488         up_read(&EXT4_I(inode)->xattr_sem);
2489         return err;
2490 }
2491
2492 /*
2493  * ext4_xattr_set()
2494  *
2495  * Like ext4_xattr_set_handle, but start from an inode. This extended
2496  * attribute modification is a filesystem transaction by itself.
2497  *
2498  * Returns 0, or a negative error number on failure.
2499  */
2500 int
2501 ext4_xattr_set(struct inode *inode, int name_index, const char *name,
2502                const void *value, size_t value_len, int flags)
2503 {
2504         handle_t *handle;
2505         struct super_block *sb = inode->i_sb;
2506         int error, retries = 0;
2507         int credits;
2508
2509         error = dquot_initialize(inode);
2510         if (error)
2511                 return error;
2512
2513 retry:
2514         error = ext4_xattr_set_credits(inode, value_len, flags & XATTR_CREATE,
2515                                        &credits);
2516         if (error)
2517                 return error;
2518
2519         handle = ext4_journal_start(inode, EXT4_HT_XATTR, credits);
2520         if (IS_ERR(handle)) {
2521                 error = PTR_ERR(handle);
2522         } else {
2523                 int error2;
2524
2525                 error = ext4_xattr_set_handle(handle, inode, name_index, name,
2526                                               value, value_len, flags);
2527                 error2 = ext4_journal_stop(handle);
2528                 if (error == -ENOSPC &&
2529                     ext4_should_retry_alloc(sb, &retries))
2530                         goto retry;
2531                 if (error == 0)
2532                         error = error2;
2533         }
2534
2535         return error;
2536 }
2537
2538 /*
2539  * Shift the EA entries in the inode to create space for the increased
2540  * i_extra_isize.
2541  */
2542 static void ext4_xattr_shift_entries(struct ext4_xattr_entry *entry,
2543                                      int value_offs_shift, void *to,
2544                                      void *from, size_t n)
2545 {
2546         struct ext4_xattr_entry *last = entry;
2547         int new_offs;
2548
2549         /* We always shift xattr headers further, thus offsets get lower */
2550         BUG_ON(value_offs_shift > 0);
2551
2552         /* Adjust the value offsets of the entries */
2553         for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2554                 if (!last->e_value_inum && last->e_value_size) {
2555                         new_offs = le16_to_cpu(last->e_value_offs) +
2556                                                         value_offs_shift;
2557                         last->e_value_offs = cpu_to_le16(new_offs);
2558                 }
2559         }
2560         /* Shift the entries by n bytes */
2561         memmove(to, from, n);
2562 }
2563
2564 /*
2565  * Move xattr pointed to by 'entry' from inode into external xattr block
2566  */
2567 static int ext4_xattr_move_to_block(handle_t *handle, struct inode *inode,
2568                                     struct ext4_inode *raw_inode,
2569                                     struct ext4_xattr_entry *entry)
2570 {
2571         struct ext4_xattr_ibody_find *is = NULL;
2572         struct ext4_xattr_block_find *bs = NULL;
2573         char *buffer = NULL, *b_entry_name = NULL;
2574         size_t value_size = le32_to_cpu(entry->e_value_size);
2575         struct ext4_xattr_info i = {
2576                 .value = NULL,
2577                 .value_len = 0,
2578                 .name_index = entry->e_name_index,
2579                 .in_inode = !!entry->e_value_inum,
2580         };
2581         struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2582         int needs_kvfree = 0;
2583         int error;
2584
2585         is = kzalloc(sizeof(struct ext4_xattr_ibody_find), GFP_NOFS);
2586         bs = kzalloc(sizeof(struct ext4_xattr_block_find), GFP_NOFS);
2587         b_entry_name = kmalloc(entry->e_name_len + 1, GFP_NOFS);
2588         if (!is || !bs || !b_entry_name) {
2589                 error = -ENOMEM;
2590                 goto out;
2591         }
2592
2593         is->s.not_found = -ENODATA;
2594         bs->s.not_found = -ENODATA;
2595         is->iloc.bh = NULL;
2596         bs->bh = NULL;
2597
2598         /* Save the entry name and the entry value */
2599         if (entry->e_value_inum) {
2600                 buffer = kvmalloc(value_size, GFP_NOFS);
2601                 if (!buffer) {
2602                         error = -ENOMEM;
2603                         goto out;
2604                 }
2605                 needs_kvfree = 1;
2606                 error = ext4_xattr_inode_get(inode, entry, buffer, value_size);
2607                 if (error)
2608                         goto out;
2609         } else {
2610                 size_t value_offs = le16_to_cpu(entry->e_value_offs);
2611                 buffer = (void *)IFIRST(header) + value_offs;
2612         }
2613
2614         memcpy(b_entry_name, entry->e_name, entry->e_name_len);
2615         b_entry_name[entry->e_name_len] = '\0';
2616         i.name = b_entry_name;
2617
2618         error = ext4_get_inode_loc(inode, &is->iloc);
2619         if (error)
2620                 goto out;
2621
2622         error = ext4_xattr_ibody_find(inode, &i, is);
2623         if (error)
2624                 goto out;
2625
2626         i.value = buffer;
2627         i.value_len = value_size;
2628         error = ext4_xattr_block_find(inode, &i, bs);
2629         if (error)
2630                 goto out;
2631
2632         /* Move ea entry from the inode into the block */
2633         error = ext4_xattr_block_set(handle, inode, &i, bs);
2634         if (error)
2635                 goto out;
2636
2637         /* Remove the chosen entry from the inode */
2638         i.value = NULL;
2639         i.value_len = 0;
2640         error = ext4_xattr_ibody_set(handle, inode, &i, is);
2641
2642 out:
2643         kfree(b_entry_name);
2644         if (needs_kvfree && buffer)
2645                 kvfree(buffer);
2646         if (is)
2647                 brelse(is->iloc.bh);
2648         if (bs)
2649                 brelse(bs->bh);
2650         kfree(is);
2651         kfree(bs);
2652
2653         return error;
2654 }
2655
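     /*
      * Move in-inode xattr entries into the external xattr block until at
      * least @isize_diff bytes are free in the inode body. system.data is
      * never moved. *total_ino is reduced by the size of each moved entry
      * descriptor.
      */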
2656 static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
2657                                        struct ext4_inode *raw_inode,
2658                                        int isize_diff, size_t ifree,
2659                                        size_t bfree, int *total_ino)
2660 {
2661         struct ext4_xattr_ibody_header *header = IHDR(inode, raw_inode);
2662         struct ext4_xattr_entry *small_entry;
2663         struct ext4_xattr_entry *entry;
2664         struct ext4_xattr_entry *last;
2665         unsigned int entry_size;        /* EA entry size */
2666         unsigned int total_size;        /* EA entry size + value size */
2667         unsigned int min_total_size;
2668         int error;
2669
2670         while (isize_diff > ifree) {
2671                 entry = NULL;
2672                 small_entry = NULL;
2673                 min_total_size = ~0U;
2674                 last = IFIRST(header);
2675                 /* Find the entry best suited to be pushed into EA block */
2676                 for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
2677                         /* never move system.data out of the inode */
2678                         if ((last->e_name_len == 4) &&
2679                             (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
2680                             !memcmp(last->e_name, "data", 4))
2681                                 continue;
2682                         total_size = EXT4_XATTR_LEN(last->e_name_len);
2683                         if (!last->e_value_inum)
2684                                 total_size += EXT4_XATTR_SIZE(
2685                                                le32_to_cpu(last->e_value_size));
2686                         if (total_size <= bfree &&
2687                             total_size < min_total_size) {
2688                                 if (total_size + ifree < isize_diff) {
2689                                         small_entry = last;
2690                                 } else {
2691                                         entry = last;
2692                                         min_total_size = total_size;
2693                                 }
2694                         }
2695                 }
2696
2697                 if (entry == NULL) {
2698                         if (small_entry == NULL)
2699                                 return -ENOSPC;
2700                         entry = small_entry;
2701                 }
2702
2703                 entry_size = EXT4_XATTR_LEN(entry->e_name_len);
2704                 total_size = entry_size;
2705                 if (!entry->e_value_inum)
2706                         total_size += EXT4_XATTR_SIZE(
2707                                               le32_to_cpu(entry->e_value_size));
2708                 error = ext4_xattr_move_to_block(handle, inode, raw_inode,
2709                                                  entry);
2710                 if (error)
2711                         return error;
2712
2713                 *total_ino -= entry_size;
2714                 ifree += total_size;
2715                 bfree -= total_size;
2716         }
2717
2718         return 0;
2719 }
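
/*
 * Illustrative sketch only, not built: the accounting above charges each
 * candidate for its entry descriptor plus, when the value is stored inline
 * in the inode/block rather than in a separate EA inode, the padded value
 * bytes.  In isolation:
 */
#if 0
static unsigned int example_entry_total_size(struct ext4_xattr_entry *entry)
{
	unsigned int size = EXT4_XATTR_LEN(entry->e_name_len);

	if (!entry->e_value_inum)	/* value kept inline, not in an EA inode */
		size += EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size));
	return size;
}
#endif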
2720
2721 /*
2722  * Expand an inode by new_extra_isize bytes when EAs are present.
2723  * Returns 0 on success or negative error number on failure.
2724  */
2725 int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
2726                                struct ext4_inode *raw_inode, handle_t *handle)
2727 {
2728         struct ext4_xattr_ibody_header *header;
2729         struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2730         static unsigned int mnt_count;
2731         size_t min_offs;
2732         size_t ifree, bfree;
2733         int total_ino;
2734         void *base, *end;
2735         int error = 0, tried_min_extra_isize = 0;
2736         int s_min_extra_isize = le16_to_cpu(sbi->s_es->s_min_extra_isize);
2737         int isize_diff; /* How much do we need to grow i_extra_isize */
2738
2739 retry:
2740         isize_diff = new_extra_isize - EXT4_I(inode)->i_extra_isize;
2741         if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
2742                 return 0;
2743
2744         header = IHDR(inode, raw_inode);
2745
2746         /*
2747          * Check if enough free space is available in the inode to shift the
2748          * entries ahead by isize_diff bytes.
2749          */
2750
2751         base = IFIRST(header);
2752         end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
2753         min_offs = end - base;
2754         total_ino = sizeof(struct ext4_xattr_ibody_header) + sizeof(u32);
2755
2756         error = xattr_check_inode(inode, header, end);
2757         if (error)
2758                 goto cleanup;
2759
2760         ifree = ext4_xattr_free_space(base, &min_offs, base, &total_ino);
2761         if (ifree >= isize_diff)
2762                 goto shift;
2763
2764         /*
2765          * Enough free space isn't available in the inode, check if
2766          * EA block can hold new_extra_isize bytes.
2767          */
2768         if (EXT4_I(inode)->i_file_acl) {
2769                 struct buffer_head *bh;
2770
2771                 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2772                 if (IS_ERR(bh)) {
2773                         error = PTR_ERR(bh);
2774                         goto cleanup;
2775                 }
2776                 error = ext4_xattr_check_block(inode, bh);
2777                 if (error) {
2778                         brelse(bh);
2779                         goto cleanup;
2780                 }
2781                 base = BHDR(bh);
2782                 end = bh->b_data + bh->b_size;
2783                 min_offs = end - base;
2784                 bfree = ext4_xattr_free_space(BFIRST(bh), &min_offs, base,
2785                                               NULL);
2786                 brelse(bh);
2787                 if (bfree + ifree < isize_diff) {
2788                         if (!tried_min_extra_isize && s_min_extra_isize) {
2789                                 tried_min_extra_isize++;
2790                                 new_extra_isize = s_min_extra_isize;
2791                                 goto retry;
2792                         }
2793                         error = -ENOSPC;
2794                         goto cleanup;
2795                 }
2796         } else {
2797                 bfree = inode->i_sb->s_blocksize;
2798         }
2799
2800         error = ext4_xattr_make_inode_space(handle, inode, raw_inode,
2801                                             isize_diff, ifree, bfree,
2802                                             &total_ino);
2803         if (error) {
2804                 if (error == -ENOSPC && !tried_min_extra_isize &&
2805                     s_min_extra_isize) {
2806                         tried_min_extra_isize++;
2807                         new_extra_isize = s_min_extra_isize;
2808                         goto retry;
2809                 }
2810                 goto cleanup;
2811         }
2812 shift:
2813         /* Adjust the offsets and shift the remaining entries ahead */
2814         ext4_xattr_shift_entries(IFIRST(header), EXT4_I(inode)->i_extra_isize
2815                         - new_extra_isize, (void *)raw_inode +
2816                         EXT4_GOOD_OLD_INODE_SIZE + new_extra_isize,
2817                         (void *)header, total_ino);
2818         EXT4_I(inode)->i_extra_isize = new_extra_isize;
2819
2820         if (ext4_has_inline_data(inode))
2821                 error = ext4_find_inline_data_nolock(inode);
2822
2823 cleanup:
2824         if (error && (mnt_count != le16_to_cpu(sbi->s_es->s_mnt_count))) {
2825                 ext4_warning(inode->i_sb, "Unable to expand inode %lu. Delete some EAs or run e2fsck.",
2826                              inode->i_ino);
2827                 mnt_count = le16_to_cpu(sbi->s_es->s_mnt_count);
2828         }
2829         return error;
2830 }
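
/*
 * Illustrative sketch only, not built: the decision made above reduces to
 * "can isize_diff be covered by free space already in the inode body, or by
 * inode-body space plus EA-block space after evicting some entries?", with
 * one retry using the (smaller) s_min_extra_isize before giving up.
 */
#if 0
static int example_can_expand(size_t ifree, size_t bfree, size_t isize_diff)
{
	if (ifree >= isize_diff)
		return 1;	/* shift entries in place, nothing moves out */
	if (ifree + bfree >= isize_diff)
		return 1;	/* evict entries into the EA block first */
	return 0;		/* -ENOSPC; caller may retry with s_min_extra_isize */
}
#endif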
2831
2832 #define EIA_INCR 16 /* must be 2^n */
2833 #define EIA_MASK (EIA_INCR - 1)
2834
2835 /* Add the large xattr @inode into @ea_inode_array for deferred iput().
2836  * If @ea_inode_array is new or full it will be grown and the old
2837  * contents copied over.
2838  */
2839 static int
2840 ext4_expand_inode_array(struct ext4_xattr_inode_array **ea_inode_array,
2841                         struct inode *inode)
2842 {
2843         if (*ea_inode_array == NULL) {
2844                 /*
2845                  * Start with 15 inodes: the header plus 15 inode pointers
2846                  * (offsetof(..., inodes[EIA_MASK])) fits a power-of-two size.
2847                  */
2848                 (*ea_inode_array) =
2849                         kmalloc(offsetof(struct ext4_xattr_inode_array,
2850                                          inodes[EIA_MASK]),
2851                                 GFP_NOFS);
2852                 if (*ea_inode_array == NULL)
2853                         return -ENOMEM;
2854                 (*ea_inode_array)->count = 0;
2855         } else if (((*ea_inode_array)->count & EIA_MASK) == EIA_MASK) {
2856                 /* expand the array once all 15 + n * 16 slots are full */
2857                 struct ext4_xattr_inode_array *new_array = NULL;
2858                 int count = (*ea_inode_array)->count;
2859
2860                 /* regrow by EIA_INCR slots; sized again via offsetof() */
2861                 new_array = kmalloc(
2862                                 offsetof(struct ext4_xattr_inode_array,
2863                                          inodes[count + EIA_INCR]),
2864                                 GFP_NOFS);
2865                 if (new_array == NULL)
2866                         return -ENOMEM;
2867                 memcpy(new_array, *ea_inode_array,
2868                        offsetof(struct ext4_xattr_inode_array, inodes[count]));
2869                 kfree(*ea_inode_array);
2870                 *ea_inode_array = new_array;
2871         }
2872         (*ea_inode_array)->inodes[(*ea_inode_array)->count++] = inode;
2873         return 0;
2874 }
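
/*
 * Illustrative sketch only, not built (hypothetical helper): the array holds
 * 15 pointers after the first allocation and is regrown by EIA_INCR (16)
 * slots each time count reaches 15, 31, 47, ...; the allocation size is
 * always the offset of the one-past-the-end slot, as computed above.
 */
#if 0
static size_t example_inode_array_bytes(int count)
{
	/* only meaningful at the points where the code above (re)allocates */
	int slots = (count == 0) ? EIA_MASK : count + EIA_INCR;

	return offsetof(struct ext4_xattr_inode_array, inodes[slots]);
}
#endif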
2875
2876 /*
2877  * ext4_xattr_delete_inode()
2878  *
2879  * Free extended attribute resources associated with this inode. Traverse
2880  * all entries and decrement reference on any xattr inodes associated with this
2881  * inode. This is called immediately before an inode is freed. We have exclusive
2882  * access to the inode. If an orphan inode is deleted it will also release its
2883  * references on xattr block and xattr inodes.
2884  */
2885 int ext4_xattr_delete_inode(handle_t *handle, struct inode *inode,
2886                             struct ext4_xattr_inode_array **ea_inode_array,
2887                             int extra_credits)
2888 {
2889         struct buffer_head *bh = NULL;
2890         struct ext4_xattr_ibody_header *header;
2891         struct ext4_iloc iloc = { .bh = NULL };
2892         struct ext4_xattr_entry *entry;
2893         struct inode *ea_inode;
2894         int error;
2895
2896         error = ext4_xattr_ensure_credits(handle, inode, extra_credits,
2897                                           NULL /* bh */,
2898                                           false /* dirty */,
2899                                           false /* block_csum */);
2900         if (error) {
2901                 EXT4_ERROR_INODE(inode, "ensure credits (error %d)", error);
2902                 goto cleanup;
2903         }
2904
2905         if (ext4_has_feature_ea_inode(inode->i_sb) &&
2906             ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
2907
2908                 error = ext4_get_inode_loc(inode, &iloc);
2909                 if (error) {
2910                         EXT4_ERROR_INODE(inode, "inode loc (error %d)", error);
2911                         goto cleanup;
2912                 }
2913
2914                 error = ext4_journal_get_write_access(handle, iloc.bh);
2915                 if (error) {
2916                         EXT4_ERROR_INODE(inode, "write access (error %d)",
2917                                          error);
2918                         goto cleanup;
2919                 }
2920
2921                 header = IHDR(inode, ext4_raw_inode(&iloc));
2922                 if (header->h_magic == cpu_to_le32(EXT4_XATTR_MAGIC))
2923                         ext4_xattr_inode_dec_ref_all(handle, inode, iloc.bh,
2924                                                      IFIRST(header),
2925                                                      false /* block_csum */,
2926                                                      ea_inode_array,
2927                                                      extra_credits,
2928                                                      false /* skip_quota */);
2929         }
2930
2931         if (EXT4_I(inode)->i_file_acl) {
2932                 bh = ext4_sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl, REQ_PRIO);
2933                 if (IS_ERR(bh)) {
2934                         error = PTR_ERR(bh);
2935                         if (error == -EIO)
2936                                 EXT4_ERROR_INODE(inode, "block %llu read error",
2937                                                  EXT4_I(inode)->i_file_acl);
2938                         bh = NULL;
2939                         goto cleanup;
2940                 }
2941                 error = ext4_xattr_check_block(inode, bh);
2942                 if (error)
2943                         goto cleanup;
2944
2945                 if (ext4_has_feature_ea_inode(inode->i_sb)) {
2946                         for (entry = BFIRST(bh); !IS_LAST_ENTRY(entry);
2947                              entry = EXT4_XATTR_NEXT(entry)) {
2948                                 if (!entry->e_value_inum)
2949                                         continue;
2950                                 error = ext4_xattr_inode_iget(inode,
2951                                               le32_to_cpu(entry->e_value_inum),
2952                                               le32_to_cpu(entry->e_hash),
2953                                               &ea_inode);
2954                                 if (error)
2955                                         continue;
2956                                 ext4_xattr_inode_free_quota(inode, ea_inode,
2957                                               le32_to_cpu(entry->e_value_size));
2958                                 iput(ea_inode);
2959                         }
2960
2961                 }
2962
2963                 ext4_xattr_release_block(handle, inode, bh, ea_inode_array,
2964                                          extra_credits);
2965                 /*
2966                  * Update i_file_acl value in the same transaction that releases
2967                  * block.
2968                  */
2969                 EXT4_I(inode)->i_file_acl = 0;
2970                 error = ext4_mark_inode_dirty(handle, inode);
2971                 if (error) {
2972                         EXT4_ERROR_INODE(inode, "mark inode dirty (error %d)",
2973                                          error);
2974                         goto cleanup;
2975                 }
2976         }
2977         error = 0;
2978 cleanup:
2979         brelse(iloc.bh);
2980         brelse(bh);
2981         return error;
2982 }
2983
2984 void ext4_xattr_inode_array_free(struct ext4_xattr_inode_array *ea_inode_array)
2985 {
2986         int idx;
2987
2988         if (ea_inode_array == NULL)
2989                 return;
2990
2991         for (idx = 0; idx < ea_inode_array->count; ++idx)
2992                 iput(ea_inode_array->inodes[idx]);
2993         kfree(ea_inode_array);
2994 }
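
/*
 * Illustrative sketch only, not built: a caller such as the inode eviction
 * path (assumed here, see ext4_evict_inode) starts with a NULL array, lets
 * ext4_xattr_delete_inode() record the EA inodes while the handle is still
 * running, and performs the deferred iput()s only after the transaction has
 * been stopped.  Hypothetical shape:
 */
#if 0
static void example_evict_tail(handle_t *handle, struct inode *inode,
			       int extra_credits)
{
	struct ext4_xattr_inode_array *ea_inode_array = NULL;

	ext4_xattr_delete_inode(handle, inode, &ea_inode_array, extra_credits);
	ext4_journal_stop(handle);
	/* drops the references taken above, i.e. the deferred iput()s */
	ext4_xattr_inode_array_free(ea_inode_array);
}
#endif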
2995
2996 /*
2997  * ext4_xattr_block_cache_insert()
2998  *
2999  * Create a new entry in the extended attribute block cache, and insert
3000  * it unless such an entry is already in the cache.
3001  *
3002  * The insert is best-effort; failures are only logged, not returned.
3003  */
3004 static void
3005 ext4_xattr_block_cache_insert(struct mb_cache *ea_block_cache,
3006                               struct buffer_head *bh)
3007 {
3008         struct ext4_xattr_header *header = BHDR(bh);
3009         __u32 hash = le32_to_cpu(header->h_hash);
3010         int reusable = le32_to_cpu(header->h_refcount) <
3011                        EXT4_XATTR_REFCOUNT_MAX;
3012         int error;
3013
3014         if (!ea_block_cache)
3015                 return;
3016         error = mb_cache_entry_create(ea_block_cache, GFP_NOFS, hash,
3017                                       bh->b_blocknr, reusable);
3018         if (error) {
3019                 if (error == -EBUSY)
3020                         ea_bdebug(bh, "already in cache");
3021         } else
3022                 ea_bdebug(bh, "inserting [%x]", (int)hash);
3023 }
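
/*
 * Illustrative sketch only, not built: a block is advertised to mb_cache as
 * "reusable" only while its refcount is still below EXT4_XATTR_REFCOUNT_MAX,
 * so lookups stop offering blocks that cannot take another reference.
 */
#if 0
static int example_block_reusable(struct buffer_head *bh)
{
	return le32_to_cpu(BHDR(bh)->h_refcount) < EXT4_XATTR_REFCOUNT_MAX;
}
#endif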
3024
3025 /*
3026  * ext4_xattr_cmp()
3027  *
3028  * Compare two extended attribute blocks for equality.
3029  *
3030  * Returns 0 if the blocks are equal, 1 if they differ, and
3031  * a negative error number on errors.
3032  */
3033 static int
3034 ext4_xattr_cmp(struct ext4_xattr_header *header1,
3035                struct ext4_xattr_header *header2)
3036 {
3037         struct ext4_xattr_entry *entry1, *entry2;
3038
3039         entry1 = ENTRY(header1+1);
3040         entry2 = ENTRY(header2+1);
3041         while (!IS_LAST_ENTRY(entry1)) {
3042                 if (IS_LAST_ENTRY(entry2))
3043                         return 1;
3044                 if (entry1->e_hash != entry2->e_hash ||
3045                     entry1->e_name_index != entry2->e_name_index ||
3046                     entry1->e_name_len != entry2->e_name_len ||
3047                     entry1->e_value_size != entry2->e_value_size ||
3048                     entry1->e_value_inum != entry2->e_value_inum ||
3049                     memcmp(entry1->e_name, entry2->e_name, entry1->e_name_len))
3050                         return 1;
3051                 if (!entry1->e_value_inum &&
3052                     memcmp((char *)header1 + le16_to_cpu(entry1->e_value_offs),
3053                            (char *)header2 + le16_to_cpu(entry2->e_value_offs),
3054                            le32_to_cpu(entry1->e_value_size)))
3055                         return 1;
3056
3057                 entry1 = EXT4_XATTR_NEXT(entry1);
3058                 entry2 = EXT4_XATTR_NEXT(entry2);
3059         }
3060         if (!IS_LAST_ENTRY(entry2))
3061                 return 1;
3062         return 0;
3063 }
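
/*
 * Illustrative sketch only, not built: two entries match only if their
 * descriptors agree field for field; value bytes are compared above only
 * when the value lives in the block itself (e_value_inum == 0), since
 * values held in EA inodes are matched via the e_value_inum field rather
 * than by byte comparison.
 */
#if 0
static bool example_descriptors_match(struct ext4_xattr_entry *a,
				      struct ext4_xattr_entry *b)
{
	return a->e_hash == b->e_hash &&
	       a->e_name_index == b->e_name_index &&
	       a->e_name_len == b->e_name_len &&
	       a->e_value_size == b->e_value_size &&
	       a->e_value_inum == b->e_value_inum &&
	       !memcmp(a->e_name, b->e_name, a->e_name_len);
}
#endif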
3064
3065 /*
3066  * ext4_xattr_block_cache_find()
3067  *
3068  * Find an identical extended attribute block.
3069  *
3070  * Returns a pointer to the block found, or NULL if such a block was
3071  * not found or an error occurred.
3072  */
3073 static struct buffer_head *
3074 ext4_xattr_block_cache_find(struct inode *inode,
3075                             struct ext4_xattr_header *header,
3076                             struct mb_cache_entry **pce)
3077 {
3078         __u32 hash = le32_to_cpu(header->h_hash);
3079         struct mb_cache_entry *ce;
3080         struct mb_cache *ea_block_cache = EA_BLOCK_CACHE(inode);
3081
3082         if (!ea_block_cache)
3083                 return NULL;
3084         if (!header->h_hash)
3085                 return NULL;  /* never share */
3086         ea_idebug(inode, "looking for cached blocks [%x]", (int)hash);
3087         ce = mb_cache_entry_find_first(ea_block_cache, hash);
3088         while (ce) {
3089                 struct buffer_head *bh;
3090
3091                 bh = ext4_sb_bread(inode->i_sb, ce->e_value, REQ_PRIO);
3092                 if (IS_ERR(bh)) {
3093                         if (PTR_ERR(bh) == -ENOMEM)
3094                                 return NULL;
3095                         bh = NULL;
3096                         EXT4_ERROR_INODE(inode, "block %lu read error",
3097                                          (unsigned long)ce->e_value);
3098                 } else if (ext4_xattr_cmp(header, BHDR(bh)) == 0) {
3099                         *pce = ce;
3100                         return bh;
3101                 }
3102                 brelse(bh);
3103                 ce = mb_cache_entry_find_next(ea_block_cache, ce);
3104         }
3105         return NULL;
3106 }
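
/*
 * Illustrative sketch only, not built (hypothetical helper): mb_cache keys
 * candidate blocks by the 32-bit header hash and stores the block number in
 * ce->e_value; walking every candidate for one hash follows the same
 * find_first/find_next pattern used above.
 */
#if 0
static unsigned int example_count_candidates(struct mb_cache *cache, __u32 hash)
{
	struct mb_cache_entry *ce;
	unsigned int n = 0;

	for (ce = mb_cache_entry_find_first(cache, hash); ce != NULL;
	     ce = mb_cache_entry_find_next(cache, ce))
		n++;
	return n;
}
#endif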
3107
3108 #define NAME_HASH_SHIFT 5
3109 #define VALUE_HASH_SHIFT 16
3110
3111 /*
3112  * ext4_xattr_hash_entry()
3113  *
3114  * Compute the hash of an extended attribute.
3115  */
3116 static __le32 ext4_xattr_hash_entry(char *name, size_t name_len, __le32 *value,
3117                                     size_t value_count)
3118 {
3119         __u32 hash = 0;
3120
3121         while (name_len--) {
3122                 hash = (hash << NAME_HASH_SHIFT) ^
3123                        (hash >> (8*sizeof(hash) - NAME_HASH_SHIFT)) ^
3124                        *name++;
3125         }
3126         while (value_count--) {
3127                 hash = (hash << VALUE_HASH_SHIFT) ^
3128                        (hash >> (8*sizeof(hash) - VALUE_HASH_SHIFT)) ^
3129                        le32_to_cpu(*value++);
3130         }
3131         return cpu_to_le32(hash);
3132 }
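
/*
 * Illustrative worked example: for a two-byte name "ab" and no inline value
 * words, the rotate-and-xor above evaluates to
 *	hash = ((0    << 5) ^ (0    >> 27)) ^ 'a' = 0x61
 *	hash = ((0x61 << 5) ^ (0x61 >> 27)) ^ 'b' = 0xc42
 * A hypothetical self-check (not built):
 */
#if 0
static void example_hash_check(void)
{
	char name[] = "ab";

	WARN_ON(ext4_xattr_hash_entry(name, 2, NULL, 0) != cpu_to_le32(0xc42));
}
#endif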
3133
3134 #undef NAME_HASH_SHIFT
3135 #undef VALUE_HASH_SHIFT
3136
3137 #define BLOCK_HASH_SHIFT 16
3138
3139 /*
3140  * ext4_xattr_rehash()
3141  *
3142  * Re-compute the extended attribute block hash after an entry has changed.
3143  */
3144 static void ext4_xattr_rehash(struct ext4_xattr_header *header)
3145 {
3146         struct ext4_xattr_entry *here;
3147         __u32 hash = 0;
3148
3149         here = ENTRY(header+1);
3150         while (!IS_LAST_ENTRY(here)) {
3151                 if (!here->e_hash) {
3152                         /* Block is not shared if an entry's hash value == 0 */
3153                         hash = 0;
3154                         break;
3155                 }
3156                 hash = (hash << BLOCK_HASH_SHIFT) ^
3157                        (hash >> (8*sizeof(hash) - BLOCK_HASH_SHIFT)) ^
3158                        le32_to_cpu(here->e_hash);
3159                 here = EXT4_XATTR_NEXT(here);
3160         }
3161         header->h_hash = cpu_to_le32(hash);
3162 }
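
/*
 * Illustrative sketch only, not built: for a block with two entries whose
 * hashes are e1 and e2, the loop above yields rol32(e1, 16) ^ e2, i.e. the
 * running hash is rotated by 16 bits before each entry hash is folded in;
 * any entry hash of zero forces the block hash to zero so the block is
 * never shared.
 */
#if 0
static __u32 example_block_hash_two(__u32 e1, __u32 e2)
{
	__u32 hash = 0;

	hash = (hash << 16) ^ (hash >> 16) ^ e1;	/* = e1 */
	hash = (hash << 16) ^ (hash >> 16) ^ e2;	/* = rol32(e1, 16) ^ e2 */
	return hash;
}
#endif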
3163
3164 #undef BLOCK_HASH_SHIFT
3165
3166 #define HASH_BUCKET_BITS        10
3167
3168 struct mb_cache *
3169 ext4_xattr_create_cache(void)
3170 {
3171         return mb_cache_create(HASH_BUCKET_BITS);
3172 }
3173
3174 void ext4_xattr_destroy_cache(struct mb_cache *cache)
3175 {
3176         if (cache)
3177                 mb_cache_destroy(cache);
3178 }
3179