/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
#include <linux/posix_acl_xattr.h>
#include <linux/uaccess.h>

#include "gfs2.h"
#include "incore.h"
#include "acl.h"
#include "xattr.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "super.h"
#include "trans.h"
#include "util.h"

/**
 * ea_calc_size - returns the actual number of bytes the request will take up
 *                (not counting any unstuffed data blocks)
 *
 * Returns: 1 if the EA should be stuffed
 */
static int ea_calc_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize,
                        unsigned int *size)
{
        unsigned int jbsize = sdp->sd_jbsize;

        /* Stuffed */
        *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize + dsize, 8);

        if (*size <= jbsize)
                return 1;

        /* Unstuffed */
        *size = ALIGN(sizeof(struct gfs2_ea_header) + nsize +
                      (sizeof(__be64) * DIV_ROUND_UP(dsize, jbsize)), 8);

        return 0;
}

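/*
 * Note: a "stuffed" xattr keeps its value inline in the EA block itself;
 * an "unstuffed" one stores only a table of 64-bit block pointers there,
 * one per journaled-block-sized chunk of value data.
 */
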
static int ea_check_size(struct gfs2_sbd *sdp, unsigned int nsize, size_t dsize)
{
        unsigned int size;

        if (dsize > GFS2_EA_MAX_DATA_LEN)
                return -ERANGE;

        ea_calc_size(sdp, nsize, dsize, &size);

        /* This can only happen with 512 byte blocks */
        if (size > sdp->sd_jbsize)
                return -ERANGE;

        return 0;
}

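/*
 * All on-disk EA walking below goes through ea_foreach()/ea_foreach_i(),
 * which hand each EA header in turn to a callback of this type.
 */
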
typedef int (*ea_call_t) (struct gfs2_inode *ip, struct buffer_head *bh,
                          struct gfs2_ea_header *ea,
                          struct gfs2_ea_header *prev, void *private);

static int ea_foreach_i(struct gfs2_inode *ip, struct buffer_head *bh,
                        ea_call_t ea_call, void *data)
{
        struct gfs2_ea_header *ea, *prev = NULL;
        int error = 0;

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
                return -EIO;

        for (ea = GFS2_EA_BH2FIRST(bh);; prev = ea, ea = GFS2_EA2NEXT(ea)) {
                if (!GFS2_EA_REC_LEN(ea))
                        goto fail;
                if (!(bh->b_data <= (char *)ea && (char *)GFS2_EA2NEXT(ea) <=
                                                  bh->b_data + bh->b_size))
                        goto fail;
                if (!GFS2_EATYPE_VALID(ea->ea_type))
                        goto fail;

                error = ea_call(ip, bh, ea, prev, data);
                if (error)
                        return error;

                if (GFS2_EA_IS_LAST(ea)) {
                        if ((char *)GFS2_EA2NEXT(ea) !=
                            bh->b_data + bh->b_size)
                                goto fail;
                        break;
                }
        }

        return error;

fail:
        gfs2_consist_inode(ip);
        return -EIO;
}

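/*
 * An inode's EAs live either directly in a single EA block or, once that
 * overflows, behind an indirect block of pointers to EA blocks; the
 * GFS2_DIF_EA_INDIRECT flag records which layout is in use.
 */
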
static int ea_foreach(struct gfs2_inode *ip, ea_call_t ea_call, void *data)
{
        struct buffer_head *bh, *eabh;
        __be64 *eablk, *end;
        int error;

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &bh);
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT)) {
                error = ea_foreach_i(ip, bh, ea_call, data);
                goto out;
        }

        if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(bh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                error = gfs2_meta_read(ip->i_gl, bn, DIO_WAIT, 0, &eabh);
                if (error)
                        break;
                error = ea_foreach_i(ip, eabh, ea_call, data);
                brelse(eabh);
                if (error)
                        break;
        }
out:
        brelse(bh);
        return error;
}

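/* Context carried through ea_foreach() when looking up one attribute. */
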
struct ea_find {
        int type;
        const char *name;
        size_t namel;
        struct gfs2_ea_location *ef_el;
};

static int ea_find_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_find *ef = private;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        if (ea->ea_type == ef->type) {
                if (ea->ea_name_len == ef->namel &&
                    !memcmp(GFS2_EA2NAME(ea), ef->name, ea->ea_name_len)) {
                        struct gfs2_ea_location *el = ef->ef_el;
                        get_bh(bh);
                        el->el_bh = bh;
                        el->el_ea = ea;
                        el->el_prev = prev;
                        return 1;
                }
        }

        return 0;
}

static int gfs2_ea_find(struct gfs2_inode *ip, int type, const char *name,
                        struct gfs2_ea_location *el)
{
        struct ea_find ef;
        int error;

        ef.type = type;
        ef.name = name;
        ef.namel = strlen(name);
        ef.ef_el = el;

        memset(el, 0, sizeof(struct gfs2_ea_location));

        error = ea_foreach(ip, ea_find_i, &ef);
        if (error > 0)
                return 0;

        return error;
}

/**
 * ea_dealloc_unstuffed -
 *
 * Take advantage of the fact that all unstuffed blocks are
 * allocated from the same RG. But watch, this may not always
 * be true.
 *
 * Returns: errno
 */
static int ea_dealloc_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                                struct gfs2_ea_header *ea,
                                struct gfs2_ea_header *prev, void *private)
{
        int *leave = private;
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct gfs2_holder rg_gh;
        __be64 *dataptrs;
        u64 bn = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        if (GFS2_EA_IS_STUFFED(ea))
                return 0;

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (*dataptrs) {
                        blks++;
                        bn = be64_to_cpu(*dataptrs);
                }
        }
        if (!blks)
                return 0;

        rgd = gfs2_blk2rgrpd(sdp, bn, 1);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &rg_gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, rgd->rd_length + RES_DINODE +
                                 RES_EATTR + RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_meta(ip->i_gl, bh);

        dataptrs = GFS2_EA2DATAPTRS(ea);
        for (x = 0; x < ea->ea_num_ptrs; x++, dataptrs++) {
                if (!*dataptrs)
                        break;
                bn = be64_to_cpu(*dataptrs);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *dataptrs = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);
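
        /*
         * With the data blocks freed, unlink the header: either merge its
         * record length into the previous header's record, or mark the
         * header unused if it has no predecessor.
         */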
        if (prev && !leave) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                ea->ea_num_ptrs = 0;
        }

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&rg_gh);
        return error;
}

static int ea_remove_unstuffed(struct gfs2_inode *ip, struct buffer_head *bh,
                               struct gfs2_ea_header *ea,
                               struct gfs2_ea_header *prev, int leave)
{
        int error;

        error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
        if (error)
                return error;

        error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
        if (error)
                return error;

        error = ea_dealloc_unstuffed(ip, bh, ea, prev, (leave) ? &error : NULL);

        gfs2_quota_unhold(ip);

        return error;
}

struct ea_list {
        struct gfs2_ea_request *ei_er;
        unsigned int ei_size;
};

static int ea_list_i(struct gfs2_inode *ip, struct buffer_head *bh,
                     struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                     void *private)
{
        struct ea_list *ei = private;
        struct gfs2_ea_request *er = ei->ei_er;
        unsigned int ea_size;
        char *prefix;
        unsigned int l;

        if (ea->ea_type == GFS2_EATYPE_UNUSED)
                return 0;

        switch (ea->ea_type) {
        case GFS2_EATYPE_USR:
                prefix = "user.";
                l = 5;
                break;
        case GFS2_EATYPE_SYS:
                prefix = "system.";
                l = 7;
                break;
        case GFS2_EATYPE_SECURITY:
                prefix = "security.";
                l = 9;
                break;
        default:
                BUG();
        }

        ea_size = l + ea->ea_name_len + 1;
        if (er->er_data_len) {
                if (ei->ei_size + ea_size > er->er_data_len)
                        return -ERANGE;

                memcpy(er->er_data + ei->ei_size, prefix, l);
                memcpy(er->er_data + ei->ei_size + l, GFS2_EA2NAME(ea),
                       ea->ea_name_len);
                er->er_data[ei->ei_size + ea_size - 1] = 0;
        }

        ei->ei_size += ea_size;

        return 0;
}

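/*
 * Note that with a zero-length buffer, ea_list_i() only accumulates
 * ei_size; this is how listxattr reports the buffer size a caller needs.
 */
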
/**
 * gfs2_listxattr - List gfs2 extended attributes
 * @dentry: The dentry whose inode we are interested in
 * @buffer: The buffer to write the results
 * @size: The size of the buffer
 *
 * Returns: actual size of data on success, -errno on error
 */
ssize_t gfs2_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
        struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
        struct gfs2_ea_request er;
        struct gfs2_holder i_gh;
        int error;

        memset(&er, 0, sizeof(struct gfs2_ea_request));
        if (size) {
                er.er_data = buffer;
                er.er_data_len = size;
        }

        error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
        if (error)
                return error;

        if (ip->i_eattr) {
                struct ea_list ei = { .ei_er = &er, .ei_size = 0 };

                error = ea_foreach(ip, ea_list_i, &ei);
                if (!error)
                        error = ei.ei_size;
        }

        gfs2_glock_dq_uninit(&i_gh);

        return error;
}

/**
 * gfs2_iter_unstuffed - copies the unstuffed xattr data to/from the
 *                       request buffer
 * @ip: The GFS2 inode
 * @ea: The extended attribute header structure
 * @din: The data to be copied in
 * @dout: The data to be copied out (one of din,dout will be NULL)
 *
 * Returns: errno
 */
static int gfs2_iter_unstuffed(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                               const char *din, char *dout)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head **bh;
        unsigned int amount = GFS2_EA_DATA_LEN(ea);
        unsigned int nptrs = DIV_ROUND_UP(amount, sdp->sd_jbsize);
        __be64 *dataptrs = GFS2_EA2DATAPTRS(ea);
        unsigned int x;
        int error = 0;
        char *pos;
        unsigned int cp_size;

        bh = kcalloc(nptrs, sizeof(struct buffer_head *), GFP_NOFS);
        if (!bh)
                return -ENOMEM;
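
        /*
         * Reads for all data blocks are issued first without DIO_WAIT and
         * only waited on in the second loop, so the block reads can be in
         * flight concurrently.
         */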
        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_read(ip->i_gl, be64_to_cpu(*dataptrs), 0, 0,
                                       bh + x);
                if (error) {
                        while (x--)
                                brelse(bh[x]);
                        goto out;
                }
                dataptrs++;
        }

        for (x = 0; x < nptrs; x++) {
                error = gfs2_meta_wait(sdp, bh[x]);
                if (error) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        goto out;
                }
                if (gfs2_metatype_check(sdp, bh[x], GFS2_METATYPE_ED)) {
                        for (; x < nptrs; x++)
                                brelse(bh[x]);
                        error = -EIO;
                        goto out;
                }

                pos = bh[x]->b_data + sizeof(struct gfs2_meta_header);
                cp_size = (sdp->sd_jbsize > amount) ? amount : sdp->sd_jbsize;

                if (dout) {
                        memcpy(dout, pos, cp_size);
                        dout += sdp->sd_jbsize;
                }

                if (din) {
                        gfs2_trans_add_meta(ip->i_gl, bh[x]);
                        memcpy(pos, din, cp_size);
                        din += sdp->sd_jbsize;
                }

                amount -= sdp->sd_jbsize;
                brelse(bh[x]);
        }

out:
        kfree(bh);
        return error;
}

static int gfs2_ea_get_copy(struct gfs2_inode *ip, struct gfs2_ea_location *el,
                            char *data, size_t size)
{
        int ret;
        size_t len = GFS2_EA_DATA_LEN(el->el_ea);

        if (len > size)
                return -ERANGE;

        if (GFS2_EA_IS_STUFFED(el->el_ea)) {
                memcpy(data, GFS2_EA2DATA(el->el_ea), len);
                return len;
        }
        ret = gfs2_iter_unstuffed(ip, el->el_ea, NULL, data);
        if (ret < 0)
                return ret;
        return len;
}

int gfs2_xattr_acl_get(struct gfs2_inode *ip, const char *name, char **ppdata)
{
        struct gfs2_ea_location el;
        int error;
        int len;
        char *data;

        error = gfs2_ea_find(ip, GFS2_EATYPE_SYS, name, &el);
        if (error)
                return error;
        if (!el.el_ea)
                goto out;
        if (!GFS2_EA_DATA_LEN(el.el_ea))
                goto out;

        len = GFS2_EA_DATA_LEN(el.el_ea);
        data = kmalloc(len, GFP_NOFS);
        error = -ENOMEM;
        if (data == NULL)
                goto out;

        error = gfs2_ea_get_copy(ip, &el, data, len);
        if (error < 0)
                kfree(data);
        else {
                *ppdata = data;
                error = len;
        }
out:
        brelse(el.el_bh);
        return error;
}

/**
 * __gfs2_xattr_get - Get a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @buffer: The buffer to write the result into
 * @size: The size of the buffer
 * @type: The type of extended attribute
 *
 * Returns: actual size of data on success, -errno on error
 */
static int __gfs2_xattr_get(struct inode *inode, const char *name,
                            void *buffer, size_t size, int type)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;
        if (strlen(name) > GFS2_EA_MAX_NAME_LEN)
                return -EINVAL;

        error = gfs2_ea_find(ip, type, name, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;
        if (size)
                error = gfs2_ea_get_copy(ip, &el, buffer, size);
        else
                error = GFS2_EA_DATA_LEN(el.el_ea);
        brelse(el.el_bh);

        return error;
}

static int gfs2_xattr_get(const struct xattr_handler *handler,
                          struct dentry *unused, struct inode *inode,
                          const char *name, void *buffer, size_t size)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int ret;

        /* During lookup, SELinux calls this function with the glock locked. */

        if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &gh);
                if (ret)
                        return ret;
        } else {
                gfs2_holder_mark_uninitialized(&gh);
        }
        ret = __gfs2_xattr_get(inode, name, buffer, size, handler->flags);
        if (gfs2_holder_initialized(&gh))
                gfs2_glock_dq_uninit(&gh);
        return ret;
}

/**
 * ea_alloc_blk - allocates a new block for extended attributes.
 * @ip: A pointer to the inode that's getting extended attributes
 * @bhp: Pointer to pointer to a struct buffer_head
 *
 * Returns: errno
 */
static int ea_alloc_blk(struct gfs2_inode *ip, struct buffer_head **bhp)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_ea_header *ea;
        unsigned int n = 1;
        u64 block;
        int error;

        error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
        if (error)
                return error;
        gfs2_trans_add_unrevoke(sdp, block, 1);
        *bhp = gfs2_meta_new(ip->i_gl, block);
        gfs2_trans_add_meta(ip->i_gl, *bhp);
        gfs2_metatype_set(*bhp, GFS2_METATYPE_EA, GFS2_FORMAT_EA);
        gfs2_buffer_clear_tail(*bhp, sizeof(struct gfs2_meta_header));

        ea = GFS2_EA_BH2FIRST(*bhp);
        ea->ea_rec_len = cpu_to_be32(sdp->sd_jbsize);
        ea->ea_type = GFS2_EATYPE_UNUSED;
        ea->ea_flags = GFS2_EAFLAG_LAST;
        ea->ea_num_ptrs = 0;

        gfs2_add_inode_blocks(&ip->i_inode, 1);

        return 0;
}

/**
 * ea_write - writes the request info to an ea, creating new blocks if
 *            necessary
 * @ip: inode that is being modified
 * @ea: the location of the new ea in a block
 * @er: the write request
 *
 * Note: does not update ea_rec_len or the GFS2_EAFLAG_LAST bit of ea_flags
 *
 * Returns: errno
 */
static int ea_write(struct gfs2_inode *ip, struct gfs2_ea_header *ea,
                    struct gfs2_ea_request *er)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        int error;

        ea->ea_data_len = cpu_to_be32(er->er_data_len);
        ea->ea_name_len = er->er_name_len;
        ea->ea_type = er->er_type;

        memcpy(GFS2_EA2NAME(ea), er->er_name, er->er_name_len);

        if (GFS2_EAREQ_SIZE_STUFFED(er) <= sdp->sd_jbsize) {
                ea->ea_num_ptrs = 0;
                memcpy(GFS2_EA2DATA(ea), er->er_data, er->er_data_len);
        } else {
                __be64 *dataptr = GFS2_EA2DATAPTRS(ea);
                const char *data = er->er_data;
                unsigned int data_len = er->er_data_len;
                unsigned int copy;
                unsigned int x;

                ea->ea_num_ptrs = DIV_ROUND_UP(er->er_data_len, sdp->sd_jbsize);
                for (x = 0; x < ea->ea_num_ptrs; x++) {
                        struct buffer_head *bh;
                        u64 block;
                        int mh_size = sizeof(struct gfs2_meta_header);
                        unsigned int n = 1;

                        error = gfs2_alloc_blocks(ip, &block, &n, 0, NULL);
                        if (error)
                                return error;
                        gfs2_trans_add_unrevoke(sdp, block, 1);
                        bh = gfs2_meta_new(ip->i_gl, block);
                        gfs2_trans_add_meta(ip->i_gl, bh);
                        gfs2_metatype_set(bh, GFS2_METATYPE_ED, GFS2_FORMAT_ED);

                        gfs2_add_inode_blocks(&ip->i_inode, 1);

                        copy = data_len > sdp->sd_jbsize ? sdp->sd_jbsize :
                                                           data_len;
                        memcpy(bh->b_data + mh_size, data, copy);
                        if (copy < sdp->sd_jbsize)
                                memset(bh->b_data + mh_size + copy, 0,
                                       sdp->sd_jbsize - copy);

                        *dataptr++ = cpu_to_be64(bh->b_blocknr);
                        data += copy;
                        data_len -= copy;

                        brelse(bh);
                }

                gfs2_assert_withdraw(sdp, !data_len);
        }

        return 0;
}

typedef int (*ea_skeleton_call_t) (struct gfs2_inode *ip,
                                   struct gfs2_ea_request *er, void *private);

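/*
 * Common skeleton for any EA write that allocates blocks: update the
 * rindex, lock and check quotas, reserve space, open a transaction, then
 * run the supplied callback to do the actual work.
 */
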
static int ea_alloc_skeleton(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                             unsigned int blks,
                             ea_skeleton_call_t skeleton_call, void *private)
{
        struct gfs2_alloc_parms ap = { .target = blks };
        int error;

        error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
        if (error)
                return error;

        error = gfs2_quota_lock_check(ip, &ap);
        if (error)
                return error;

        error = gfs2_inplace_reserve(ip, &ap);
        if (error)
                goto out_gunlock_q;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode),
                                 blks + gfs2_rg_blocks(ip, blks) +
                                 RES_DINODE + RES_STATFS + RES_QUOTA, 0);
        if (error)
                goto out_ipres;

        error = skeleton_call(ip, er, private);
        if (error)
                goto out_end_trans;

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

out_end_trans:
        gfs2_trans_end(GFS2_SB(&ip->i_inode));
out_ipres:
        gfs2_inplace_release(ip);
out_gunlock_q:
        gfs2_quota_unlock(ip);
        return error;
}

static int ea_init_i(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                     void *private)
{
        struct buffer_head *bh;
        int error;

        error = ea_alloc_blk(ip, &bh);
        if (error)
                return error;

        ip->i_eattr = bh->b_blocknr;
        error = ea_write(ip, GFS2_EA_BH2FIRST(bh), er);

        brelse(bh);

        return error;
}

/**
 * ea_init - initializes a new eattr block
 *
 * Returns: errno
 */

static int ea_init(struct gfs2_inode *ip, int type, const char *name,
                   const void *data, size_t size)
{
        struct gfs2_ea_request er;
        unsigned int jbsize = GFS2_SB(&ip->i_inode)->sd_jbsize;
        unsigned int blks = 1;

        er.er_type = type;
        er.er_name = name;
        er.er_name_len = strlen(name);
        er.er_data = (void *)data;
        er.er_data_len = size;

        if (GFS2_EAREQ_SIZE_STUFFED(&er) > jbsize)
                blks += DIV_ROUND_UP(er.er_data_len, jbsize);

        return ea_alloc_skeleton(ip, &er, blks, ea_init_i, NULL);
}

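/*
 * Split one EA record into a used part of GFS2_EA_SIZE(ea) bytes and a
 * new record covering the leftover space; the GFS2_EAFLAG_LAST flag
 * migrates to whichever record is now last in the block.
 */
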
static struct gfs2_ea_header *ea_split_ea(struct gfs2_ea_header *ea)
{
        u32 ea_size = GFS2_EA_SIZE(ea);
        struct gfs2_ea_header *new = (struct gfs2_ea_header *)((char *)ea +
                                                               ea_size);
        u32 new_size = GFS2_EA_REC_LEN(ea) - ea_size;
        int last = ea->ea_flags & GFS2_EAFLAG_LAST;

        ea->ea_rec_len = cpu_to_be32(ea_size);
        ea->ea_flags ^= last;

        new->ea_rec_len = cpu_to_be32(new_size);
        new->ea_flags = last;

        return new;
}

static void ea_set_remove_stuffed(struct gfs2_inode *ip,
                                  struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        u32 len;

        gfs2_trans_add_meta(ip->i_gl, el->el_bh);

        if (!prev || !GFS2_EA_IS_STUFFED(ea)) {
                ea->ea_type = GFS2_EATYPE_UNUSED;
                return;
        } else if (GFS2_EA2NEXT(prev) != ea) {
                prev = GFS2_EA2NEXT(prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode), GFS2_EA2NEXT(prev) == ea);
        }

        len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
        prev->ea_rec_len = cpu_to_be32(len);

        if (GFS2_EA_IS_LAST(ea))
                prev->ea_flags |= GFS2_EAFLAG_LAST;
}

struct ea_set {
        int ea_split;

        struct gfs2_ea_request *es_er;
        struct gfs2_ea_location *es_el;

        struct buffer_head *es_bh;
        struct gfs2_ea_header *es_ea;
};

static int ea_set_simple_noalloc(struct gfs2_inode *ip, struct buffer_head *bh,
                                 struct gfs2_ea_header *ea, struct ea_set *es)
{
        struct gfs2_ea_request *er = es->es_er;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + 2 * RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_meta(ip->i_gl, bh);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        ea_write(ip, ea, er);

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

        gfs2_trans_end(GFS2_SB(&ip->i_inode));
        return error;
}

static int ea_set_simple_alloc(struct gfs2_inode *ip,
                               struct gfs2_ea_request *er, void *private)
{
        struct ea_set *es = private;
        struct gfs2_ea_header *ea = es->es_ea;
        int error;

        gfs2_trans_add_meta(ip->i_gl, es->es_bh);

        if (es->ea_split)
                ea = ea_split_ea(ea);

        error = ea_write(ip, ea, er);
        if (error)
                return error;

        if (es->es_el)
                ea_set_remove_stuffed(ip, es->es_el);

        return 0;
}

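/*
 * ea_set_simple() is the ea_foreach() callback for setting an attribute
 * in place: it reuses an unused slot that is large enough, or splits a
 * slot with sufficient slack. Returning 1 stops the walk, 0 keeps
 * looking; if no slot fits, the caller falls back to ea_set_block().
 */
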
static int ea_set_simple(struct gfs2_inode *ip, struct buffer_head *bh,
                         struct gfs2_ea_header *ea, struct gfs2_ea_header *prev,
                         void *private)
{
        struct ea_set *es = private;
        unsigned int size;
        int stuffed;
        int error;

        stuffed = ea_calc_size(GFS2_SB(&ip->i_inode), es->es_er->er_name_len,
                               es->es_er->er_data_len, &size);

        if (ea->ea_type == GFS2_EATYPE_UNUSED) {
                if (GFS2_EA_REC_LEN(ea) < size)
                        return 0;
                if (!GFS2_EA_IS_STUFFED(ea)) {
                        error = ea_remove_unstuffed(ip, bh, ea, prev, 1);
                        if (error)
                                return error;
                }
                es->ea_split = 0;
        } else if (GFS2_EA_REC_LEN(ea) - GFS2_EA_SIZE(ea) >= size)
                es->ea_split = 1;
        else
                return 0;

        if (stuffed) {
                error = ea_set_simple_noalloc(ip, bh, ea, es);
                if (error)
                        return error;
        } else {
                unsigned int blks;

                es->es_bh = bh;
                es->es_ea = ea;
                blks = 2 + DIV_ROUND_UP(es->es_er->er_data_len,
                                        GFS2_SB(&ip->i_inode)->sd_jbsize);

                error = ea_alloc_skeleton(ip, es->es_er, blks,
                                          ea_set_simple_alloc, es);
                if (error)
                        return error;
        }

        return 1;
}

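/*
 * No existing block had room: hang a fresh EA block off the inode,
 * converting the direct layout to an indirect pointer block on the way
 * if necessary.
 */
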
static int ea_set_block(struct gfs2_inode *ip, struct gfs2_ea_request *er,
                        void *private)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct buffer_head *indbh, *newbh;
        __be64 *eablk;
        int error;
        int mh_size = sizeof(struct gfs2_meta_header);

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                __be64 *end;

                error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0,
                                       &indbh);
                if (error)
                        return error;

                if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                        error = -EIO;
                        goto out;
                }

                eablk = (__be64 *)(indbh->b_data + mh_size);
                end = eablk + sdp->sd_inptrs;

                for (; eablk < end; eablk++)
                        if (!*eablk)
                                break;

                if (eablk == end) {
                        error = -ENOSPC;
                        goto out;
                }

                gfs2_trans_add_meta(ip->i_gl, indbh);
        } else {
                u64 blk;
                unsigned int n = 1;

                error = gfs2_alloc_blocks(ip, &blk, &n, 0, NULL);
                if (error)
                        return error;
                gfs2_trans_add_unrevoke(sdp, blk, 1);
                indbh = gfs2_meta_new(ip->i_gl, blk);
                gfs2_trans_add_meta(ip->i_gl, indbh);
                gfs2_metatype_set(indbh, GFS2_METATYPE_IN, GFS2_FORMAT_IN);
                gfs2_buffer_clear_tail(indbh, mh_size);

                eablk = (__be64 *)(indbh->b_data + mh_size);
                *eablk = cpu_to_be64(ip->i_eattr);
                ip->i_eattr = blk;
                ip->i_diskflags |= GFS2_DIF_EA_INDIRECT;
                gfs2_add_inode_blocks(&ip->i_inode, 1);

                eablk++;
        }

        error = ea_alloc_blk(ip, &newbh);
        if (error)
                goto out;

        *eablk = cpu_to_be64((u64)newbh->b_blocknr);
        error = ea_write(ip, GFS2_EA_BH2FIRST(newbh), er);
        brelse(newbh);
        if (error)
                goto out;

        if (private)
                ea_set_remove_stuffed(ip, private);

out:
        brelse(indbh);
        return error;
}

static int ea_set_i(struct gfs2_inode *ip, int type, const char *name,
                    const void *value, size_t size, struct gfs2_ea_location *el)
{
        struct gfs2_ea_request er;
        struct ea_set es;
        unsigned int blks = 2;
        int error;

        er.er_type = type;
        er.er_name = name;
        er.er_data = (void *)value;
        er.er_name_len = strlen(name);
        er.er_data_len = size;

        memset(&es, 0, sizeof(struct ea_set));
        es.es_er = &er;
        es.es_el = el;

        error = ea_foreach(ip, ea_set_simple, &es);
        if (error > 0)
                return 0;
        if (error)
                return error;

        if (!(ip->i_diskflags & GFS2_DIF_EA_INDIRECT))
                blks++;
        if (GFS2_EAREQ_SIZE_STUFFED(&er) > GFS2_SB(&ip->i_inode)->sd_jbsize)
                blks += DIV_ROUND_UP(er.er_data_len, GFS2_SB(&ip->i_inode)->sd_jbsize);

        return ea_alloc_skeleton(ip, &er, blks, ea_set_block, el);
}

static int ea_set_remove_unstuffed(struct gfs2_inode *ip,
                                   struct gfs2_ea_location *el)
{
        if (el->el_prev && GFS2_EA2NEXT(el->el_prev) != el->el_ea) {
                el->el_prev = GFS2_EA2NEXT(el->el_prev);
                gfs2_assert_withdraw(GFS2_SB(&ip->i_inode),
                                     GFS2_EA2NEXT(el->el_prev) == el->el_ea);
        }

        return ea_remove_unstuffed(ip, el->el_bh, el->el_ea, el->el_prev, 0);
}

static int ea_remove_stuffed(struct gfs2_inode *ip, struct gfs2_ea_location *el)
{
        struct gfs2_ea_header *ea = el->el_ea;
        struct gfs2_ea_header *prev = el->el_prev;
        int error;

        error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE + RES_EATTR, 0);
        if (error)
                return error;

        gfs2_trans_add_meta(ip->i_gl, el->el_bh);

        if (prev) {
                u32 len;

                len = GFS2_EA_REC_LEN(prev) + GFS2_EA_REC_LEN(ea);
                prev->ea_rec_len = cpu_to_be32(len);

                if (GFS2_EA_IS_LAST(ea))
                        prev->ea_flags |= GFS2_EAFLAG_LAST;
        } else {
                ea->ea_type = GFS2_EATYPE_UNUSED;
        }

        ip->i_inode.i_ctime = current_time(&ip->i_inode);
        __mark_inode_dirty(&ip->i_inode, I_DIRTY_DATASYNC);

        gfs2_trans_end(GFS2_SB(&ip->i_inode));

        return error;
}

/**
 * gfs2_xattr_remove - Remove a GFS2 extended attribute
 * @ip: The inode
 * @type: The type of the extended attribute
 * @name: The name of the extended attribute
 *
 * This is not called directly by the VFS since we use the (common)
 * scheme of making a "set with NULL data" mean a remove request. Note
 * that this is different from a set with zero length data.
 *
 * Returns: 0, or errno on failure
 */
static int gfs2_xattr_remove(struct gfs2_inode *ip, int type, const char *name)
{
        struct gfs2_ea_location el;
        int error;

        if (!ip->i_eattr)
                return -ENODATA;

        error = gfs2_ea_find(ip, type, name, &el);
        if (error)
                return error;
        if (!el.el_ea)
                return -ENODATA;

        if (GFS2_EA_IS_STUFFED(el.el_ea))
                error = ea_remove_stuffed(ip, &el);
        else
                error = ea_remove_unstuffed(ip, el.el_bh, el.el_ea, el.el_prev, 0);

        brelse(el.el_bh);

        return error;
}

/**
 * __gfs2_xattr_set - Set (or remove) a GFS2 extended attribute
 * @inode: The inode
 * @name: The name of the extended attribute
 * @value: The value of the extended attribute (NULL for remove)
 * @size: The size of the @value argument
 * @flags: Create or Replace
 * @type: The type of the extended attribute
 *
 * See gfs2_xattr_remove() for details of the removal of xattrs.
 *
 * Returns: 0 or errno on failure
 */
int __gfs2_xattr_set(struct inode *inode, const char *name,
                     const void *value, size_t size, int flags, int type)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_sbd *sdp = GFS2_SB(inode);
        struct gfs2_ea_location el;
        unsigned int namel = strlen(name);
        int error;

        if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
                return -EPERM;
        if (namel > GFS2_EA_MAX_NAME_LEN)
                return -ERANGE;

        if (value == NULL) {
                error = gfs2_xattr_remove(ip, type, name);
                if (error == -ENODATA && !(flags & XATTR_REPLACE))
                        error = 0;
                return error;
        }

        if (ea_check_size(sdp, namel, size))
                return -ERANGE;

        if (!ip->i_eattr) {
                if (flags & XATTR_REPLACE)
                        return -ENODATA;
                return ea_init(ip, type, name, value, size);
        }

        error = gfs2_ea_find(ip, type, name, &el);
        if (error)
                return error;

        if (el.el_ea) {
                if (ip->i_diskflags & GFS2_DIF_APPENDONLY) {
                        brelse(el.el_bh);
                        return -EPERM;
                }

                error = -EEXIST;
                if (!(flags & XATTR_CREATE)) {
                        int unstuffed = !GFS2_EA_IS_STUFFED(el.el_ea);
                        error = ea_set_i(ip, type, name, value, size, &el);
                        if (!error && unstuffed)
                                ea_set_remove_unstuffed(ip, &el);
                }

                brelse(el.el_bh);
                return error;
        }

        error = -ENODATA;
        if (!(flags & XATTR_REPLACE))
                error = ea_set_i(ip, type, name, value, size, NULL);

        return error;
}

static int gfs2_xattr_set(const struct xattr_handler *handler,
                          struct dentry *unused, struct inode *inode,
                          const char *name, const void *value,
                          size_t size, int flags)
{
        struct gfs2_inode *ip = GFS2_I(inode);
        struct gfs2_holder gh;
        int ret;

        ret = gfs2_rsqa_alloc(ip);
        if (ret)
                return ret;

        /* May be called from gfs_setattr with the glock locked. */

        if (!gfs2_glock_is_locked_by_me(ip->i_gl)) {
                ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
                if (ret)
                        return ret;
        } else {
                if (WARN_ON_ONCE(ip->i_gl->gl_state != LM_ST_EXCLUSIVE))
                        return -EIO;
                gfs2_holder_mark_uninitialized(&gh);
        }
        ret = __gfs2_xattr_set(inode, name, value, size, flags, handler->flags);
        if (gfs2_holder_initialized(&gh))
                gfs2_glock_dq_uninit(&gh);
        return ret;
}

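/*
 * Freeing the indirect layout touches several resource groups, so the
 * block runs are first collected into an rgrp list, all rgrp glocks are
 * acquired together, and only then is the transaction opened.
 */
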
static int ea_dealloc_indirect(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrp_list rlist;
        struct buffer_head *indbh, *dibh;
        __be64 *eablk, *end;
        unsigned int rg_blocks = 0;
        u64 bstart = 0;
        unsigned int blen = 0;
        unsigned int blks = 0;
        unsigned int x;
        int error;

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

        error = gfs2_meta_read(ip->i_gl, ip->i_eattr, DIO_WAIT, 0, &indbh);
        if (error)
                return error;

        if (gfs2_metatype_check(sdp, indbh, GFS2_METATYPE_IN)) {
                error = -EIO;
                goto out;
        }

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        end = eablk + sdp->sd_inptrs;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_rlist_add(ip, &rlist, bstart);
                        bstart = bn;
                        blen = 1;
                }
                blks++;
        }
        if (bstart)
                gfs2_rlist_add(ip, &rlist, bstart);
        else
                goto out;

        gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

        for (x = 0; x < rlist.rl_rgrps; x++) {
                struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);

                rg_blocks += rgd->rd_length;
        }

        error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
        if (error)
                goto out_rlist_free;

        error = gfs2_trans_begin(sdp, rg_blocks + RES_DINODE + RES_INDIRECT +
                                 RES_STATFS + RES_QUOTA, blks);
        if (error)
                goto out_gunlock;

        gfs2_trans_add_meta(ip->i_gl, indbh);

        eablk = (__be64 *)(indbh->b_data + sizeof(struct gfs2_meta_header));
        bstart = 0;
        blen = 0;

        for (; eablk < end; eablk++) {
                u64 bn;

                if (!*eablk)
                        break;
                bn = be64_to_cpu(*eablk);

                if (bstart + blen == bn)
                        blen++;
                else {
                        if (bstart)
                                gfs2_free_meta(ip, bstart, blen);
                        bstart = bn;
                        blen = 1;
                }

                *eablk = 0;
                gfs2_add_inode_blocks(&ip->i_inode, -1);
        }
        if (bstart)
                gfs2_free_meta(ip, bstart, blen);

        ip->i_diskflags &= ~GFS2_DIF_EA_INDIRECT;

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_meta(ip->i_gl, dibh);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist_free:
        gfs2_rlist_free(&rlist);
out:
        brelse(indbh);
        return error;
}

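/*
 * Finally release the one remaining (direct) EA block and clear i_eattr
 * in the dinode.
 */
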
static int ea_dealloc_block(struct gfs2_inode *ip)
{
        struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
        struct gfs2_rgrpd *rgd;
        struct buffer_head *dibh;
        struct gfs2_holder gh;
        int error;

        error = gfs2_rindex_update(sdp);
        if (error)
                return error;

        rgd = gfs2_blk2rgrpd(sdp, ip->i_eattr, 1);
        if (!rgd) {
                gfs2_consist_inode(ip);
                return -EIO;
        }

        error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, &gh);
        if (error)
                return error;

        error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_DINODE + RES_STATFS +
                                 RES_QUOTA, 1);
        if (error)
                goto out_gunlock;

        gfs2_free_meta(ip, ip->i_eattr, 1);

        ip->i_eattr = 0;
        gfs2_add_inode_blocks(&ip->i_inode, -1);

        error = gfs2_meta_inode_buffer(ip, &dibh);
        if (!error) {
                gfs2_trans_add_meta(ip->i_gl, dibh);
                gfs2_dinode_out(ip, dibh->b_data);
                brelse(dibh);
        }

        gfs2_trans_end(sdp);

out_gunlock:
        gfs2_glock_dq_uninit(&gh);
        return error;
}

/**
 * gfs2_ea_dealloc - deallocate the extended attribute fork
 * @ip: the inode
 *
 * Returns: errno
 */

int gfs2_ea_dealloc(struct gfs2_inode *ip)
{
        int error;

        error = gfs2_rindex_update(GFS2_SB(&ip->i_inode));
        if (error)
                return error;

        error = gfs2_quota_hold(ip, NO_UID_QUOTA_CHANGE, NO_GID_QUOTA_CHANGE);
        if (error)
                return error;

        error = ea_foreach(ip, ea_dealloc_unstuffed, NULL);
        if (error)
                goto out_quota;

        if (ip->i_diskflags & GFS2_DIF_EA_INDIRECT) {
                error = ea_dealloc_indirect(ip);
                if (error)
                        goto out_quota;
        }

        error = ea_dealloc_block(ip);

out_quota:
        gfs2_quota_unhold(ip);
        return error;
}

static const struct xattr_handler gfs2_xattr_user_handler = {
        .prefix = XATTR_USER_PREFIX,
        .flags  = GFS2_EATYPE_USR,
        .get    = gfs2_xattr_get,
        .set    = gfs2_xattr_set,
};

static const struct xattr_handler gfs2_xattr_security_handler = {
        .prefix = XATTR_SECURITY_PREFIX,
        .flags  = GFS2_EATYPE_SECURITY,
        .get    = gfs2_xattr_get,
        .set    = gfs2_xattr_set,
};

const struct xattr_handler *gfs2_xattr_handlers[] = {
        &gfs2_xattr_user_handler,
        &gfs2_xattr_security_handler,
        &posix_acl_access_xattr_handler,
        &posix_acl_default_xattr_handler,
        NULL,
};