1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright 2012 Xyratex Technology Limited
5 * Copyright (c) 2013, 2015, Intel Corporation.
7 * Author: Andrew Perepechko <Andrew_Perepechko@xyratex.com>
11 #define DEBUG_SUBSYSTEM S_LLITE
14 #include <linux/sched.h>
16 #include <obd_support.h>
17 #include <lustre_dlm.h>
18 #include "llite_internal.h"
20 /* If we ever have hundreds of extended attributes, we might want to consider
21 * using a hash or a tree structure instead of list for faster lookups.
/*
 * One cached extended attribute of an inode; entries are linked on
 * lli->lli_xattrs via xe_list (see ll_xattr_cache_init()/_find()/_add()).
 */
23 struct ll_xattr_entry {
24 struct list_head xe_list; /* protected with
25 * lli_xattrs_list_rwsem
27 char *xe_name; /* xattr name, \0-terminated */
28 char *xe_value; /* xattr value */
29 unsigned int xe_namelen; /* strlen(xe_name) + 1 */
30 unsigned int xe_vallen; /* xattr value length */
/* Slab cache backing all struct ll_xattr_entry allocations. */
33 static struct kmem_cache *xattr_kmem;
/* Descriptor table consumed by lu_kmem_init()/lu_kmem_fini() below. */
34 static struct lu_kmem_descr xattr_caches[] = {
36 .ckd_cache = &xattr_kmem,
37 .ckd_name = "xattr_kmem",
38 .ckd_size = sizeof(struct ll_xattr_entry)
/*
 * Module-init hook: create the "xattr_kmem" slab cache described by
 * xattr_caches[]; returns lu_kmem_init()'s status (0 on success).
 */
45 int ll_xattr_init(void)
47 return lu_kmem_init(xattr_caches);
/*
 * Module-exit hook: destroy the slab cache created by ll_xattr_init().
 * All cached entries must have been freed by now (cache must be empty).
 */
50 void ll_xattr_fini(void)
52 lu_kmem_fini(xattr_caches);
56 * Initializes xattr cache for an inode.
58 * This initializes the xattr list and marks cache presence.
/* NOTE(review): callers appear to hold lli_xattrs_list_rwsem for write
 * (see ll_xattr_cache_refill()) — confirm against full source. */
60 static void ll_xattr_cache_init(struct ll_inode_info *lli)
62 INIT_LIST_HEAD(&lli->lli_xattrs);
/* This flag is what ll_xattr_cache_valid() tests to report "filled". */
63 set_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
67 * This looks for a specific extended attribute.
69 * Find in @cache and return @xattr_name attribute in @xattr,
70 * for the NULL @xattr_name return the first cached @xattr.
73 * \retval -ENODATA if not found
/* Caller must hold lli_xattrs_list_rwsem (the list is documented as
 * protected by it in struct ll_xattr_entry). Linear scan — see the
 * hash/tree note above struct ll_xattr_entry. */
75 static int ll_xattr_cache_find(struct list_head *cache,
76 const char *xattr_name,
77 struct ll_xattr_entry **xattr)
79 struct ll_xattr_entry *entry;
81 list_for_each_entry(entry, cache, xe_list) {
82 /* xattr_name == NULL means look for any entry */
83 if (!xattr_name || strcmp(xattr_name, entry->xe_name) == 0) {
85 CDEBUG(D_CACHE, "find: [%s]=%.*s\n",
86 entry->xe_name, entry->xe_vallen,
98 * Add @xattr_name attr with @xattr_val value and @xattr_val_len length,
101 * \retval -ENOMEM if no memory could be allocated for the cached attr
102 * \retval -EPROTO if duplicate xattr is being added
/* Caller must hold lli_xattrs_list_rwsem for write. */
104 static int ll_xattr_cache_add(struct list_head *cache,
105 const char *xattr_name,
106 const char *xattr_val,
107 unsigned int xattr_val_len)
109 struct ll_xattr_entry *xattr;
/* Refuse duplicates: an existing entry means a protocol-level error. */
111 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
112 CDEBUG(D_CACHE, "duplicate xattr: [%s]\n", xattr_name);
/* GFP_NOFS: allocation may happen with fs locks held, so reclaim must
 * not recurse into the filesystem. */
116 xattr = kmem_cache_zalloc(xattr_kmem, GFP_NOFS);
118 CDEBUG(D_CACHE, "failed to allocate xattr\n");
122 xattr->xe_name = kstrdup(xattr_name, GFP_NOFS);
123 if (!xattr->xe_name) {
124 CDEBUG(D_CACHE, "failed to alloc xattr name %u\n",
128 xattr->xe_value = kmemdup(xattr_val, xattr_val_len, GFP_NOFS);
129 if (!xattr->xe_value)
132 xattr->xe_vallen = xattr_val_len;
133 list_add(&xattr->xe_list, cache);
135 CDEBUG(D_CACHE, "set: [%s]=%.*s\n", xattr_name, xattr_val_len,
/* Error unwind: free partially built entry in reverse order. */
140 kfree(xattr->xe_name);
142 kmem_cache_free(xattr_kmem, xattr);
148 * This removes an extended attribute from cache.
150 * Remove @xattr_name attribute from @cache.
153 * \retval -ENODATA if @xattr_name is not cached
/* Caller must hold lli_xattrs_list_rwsem for write. A NULL @xattr_name
 * deletes the first cached entry (ll_xattr_cache_find() semantics);
 * ll_xattr_cache_destroy_locked() relies on this to drain the list. */
155 static int ll_xattr_cache_del(struct list_head *cache,
156 const char *xattr_name)
158 struct ll_xattr_entry *xattr;
160 CDEBUG(D_CACHE, "del xattr: %s\n", xattr_name);
162 if (ll_xattr_cache_find(cache, xattr_name, &xattr) == 0) {
/* Unlink, then free name, value and the entry itself. */
163 list_del(&xattr->xe_list);
164 kfree(xattr->xe_name);
165 kfree(xattr->xe_value);
166 kmem_cache_free(xattr_kmem, xattr);
175 * This iterates cached extended attributes.
177 * Walk over cached attributes in @cache and
178 * fill in @xld_buffer or only calculate buffer
179 * size if @xld_buffer is NULL.
181 * \retval >= 0 buffer list size
182 * \retval -ENODATA if the list cannot fit @xld_size buffer
/* Caller must hold lli_xattrs_list_rwsem at least for read.
 * NOTE(review): remaining parameters (xld_buffer/xld_size and the
 * xld_tail local) are declared on lines elided from this view. */
184 static int ll_xattr_cache_list(struct list_head *cache,
188 struct ll_xattr_entry *xattr, *tmp;
191 list_for_each_entry_safe(xattr, tmp, cache, xe_list) {
192 CDEBUG(D_CACHE, "list: buffer=%p[%d] name=%s\n",
193 xld_buffer, xld_tail, xattr->xe_name);
/* xe_namelen includes the terminating NUL (strlen + 1, see the
 * struct ll_xattr_entry field comment), so names are copied with
 * their separators. */
196 xld_size -= xattr->xe_namelen;
199 memcpy(&xld_buffer[xld_tail],
200 xattr->xe_name, xattr->xe_namelen);
202 xld_tail += xattr->xe_namelen;
212 * Check if the xattr cache is initialized (filled).
214 * \retval 0 @cache is not initialized
215 * \retval 1 @cache is initialized
/* Tests the LLIF_XATTR_CACHE bit set by ll_xattr_cache_init() and
 * cleared by ll_xattr_cache_destroy_locked(). */
217 static int ll_xattr_cache_valid(struct ll_inode_info *lli)
219 return test_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
223 * This finalizes the xattr cache.
225 * Free all xattr memory. @lli is the inode info pointer.
227 * \retval 0 no error occurred
/* Caller must hold lli_xattrs_list_rwsem for write (hence "_locked"). */
229 static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
231 if (!ll_xattr_cache_valid(lli))
/* NULL name deletes the first entry each pass, draining the list. */
234 while (ll_xattr_cache_del(&lli->lli_xattrs, NULL) == 0)
237 clear_bit(LLIF_XATTR_CACHE, &lli->lli_flags);
/*
 * Public wrapper: take the list rwsem for write and tear down the
 * whole xattr cache of @inode via ll_xattr_cache_destroy_locked().
 */
242 int ll_xattr_cache_destroy(struct inode *inode)
244 struct ll_inode_info *lli = ll_i2info(inode);
247 down_write(&lli->lli_xattrs_list_rwsem);
248 rc = ll_xattr_cache_destroy_locked(lli);
249 up_write(&lli->lli_xattrs_list_rwsem);
255 * Match or enqueue a PR lock.
257 * Find or request an LDLM lock with xattr data.
258 * Since LDLM does not provide API for atomic match_or_enqueue,
259 * the function handles it with a separate enq lock.
260 * If successful, the function exits with the list lock held.
262 * \retval 0 no error occurred
263 * \retval -ENOMEM not enough memory
/* Lock ordering here: lli_xattrs_enq_lock (mutex) is taken first and
 * lli_xattrs_list_rwsem is acquired for write before the mutex is
 * dropped — see the end of the function. */
265 static int ll_xattr_find_get_lock(struct inode *inode,
266 struct lookup_intent *oit,
267 struct ptlrpc_request **req)
270 struct lustre_handle lockh = { 0 };
271 struct md_op_data *op_data;
272 struct ll_inode_info *lli = ll_i2info(inode);
273 struct ldlm_enqueue_info einfo = {
274 .ei_type = LDLM_IBITS,
275 .ei_mode = it_to_lock_mode(oit),
276 .ei_cb_bl = &ll_md_blocking_ast,
277 .ei_cb_cp = &ldlm_completion_ast,
279 struct ll_sb_info *sbi = ll_i2sbi(inode);
280 struct obd_export *exp = sbi->ll_md_exp;
283 mutex_lock(&lli->lli_xattrs_enq_lock);
284 /* inode may have been shrunk and recreated, so data is gone, match lock
285 * only when data exists.
287 if (ll_xattr_cache_valid(lli)) {
288 /* Try matching first. */
289 mode = ll_take_md_lock(inode, MDS_INODELOCK_XATTR, &lockh, 0,
292 /* fake oit in mdc_revalidate_lock() manner */
293 oit->it_lock_handle = lockh.cookie;
294 oit->it_lock_mode = mode;
299 /* Enqueue if the lock isn't cached locally. */
300 op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
301 LUSTRE_OPC_ANY, NULL);
302 if (IS_ERR(op_data)) {
303 mutex_unlock(&lli->lli_xattrs_enq_lock);
304 return PTR_ERR(op_data);
/* Request both the xattr values and the xattr name list in one RPC. */
307 op_data->op_valid = OBD_MD_FLXATTR | OBD_MD_FLXATTRLS;
309 rc = md_enqueue(exp, &einfo, NULL, oit, op_data, &lockh, 0);
310 ll_finish_md_op_data(op_data);
314 "md_intent_lock failed with %d for fid " DFID "\n",
315 rc, PFID(ll_inode2fid(inode)));
316 mutex_unlock(&lli->lli_xattrs_enq_lock);
320 *req = oit->it_request;
/* Success: hand the list lock (write) to the caller, then release
 * the enqueue mutex. */
322 down_write(&lli->lli_xattrs_list_rwsem);
323 mutex_unlock(&lli->lli_xattrs_enq_lock);
329 * Refill the xattr cache.
331 * Fetch and cache the whole of xattrs for @inode, acquiring
332 * a read or a write xattr lock depending on operation in @oit.
333 * Intent is dropped on exit unless the operation is setxattr.
335 * \retval 0 no error occurred
336 * \retval -EPROTO network protocol error
337 * \retval -ENOMEM not enough memory for the cache
/* On success the caller still holds lli_xattrs_list_rwsem for write
 * (taken in ll_xattr_find_get_lock(); ll_xattr_cache_get() downgrades
 * it); error paths up_write() before returning — see below. */
339 static int ll_xattr_cache_refill(struct inode *inode, struct lookup_intent *oit)
341 struct ll_sb_info *sbi = ll_i2sbi(inode);
342 struct ptlrpc_request *req = NULL;
343 const char *xdata, *xval, *xtail, *xvtail;
344 struct ll_inode_info *lli = ll_i2info(inode);
345 struct mdt_body *body;
349 rc = ll_xattr_find_get_lock(inode, oit, &req);
353 /* Do we have the data at this point? */
354 if (ll_xattr_cache_valid(lli)) {
355 ll_stats_ops_tally(sbi, LPROC_LL_GETXATTR_HITS, 1);
360 /* Matched but no cache? Cancelled on error by a parallel refill. */
361 if (unlikely(!req)) {
362 CDEBUG(D_CACHE, "cancelled by a parallel getxattr\n");
367 if (oit->it_status < 0) {
368 CDEBUG(D_CACHE, "getxattr intent returned %d for fid " DFID "\n",
369 oit->it_status, PFID(ll_inode2fid(inode)));
371 /* xattr data is so large that we don't want to cache it */
377 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
379 CERROR("no MDT BODY in the refill xattr reply\n");
383 /* do not need swab xattr data */
/* Reply layout: EADATA = NUL-separated names, EAVALS = concatenated
 * values, EAVALS_LENS = per-value lengths (__u32 each). */
384 xdata = req_capsule_server_sized_get(&req->rq_pill, &RMF_EADATA,
385 body->mbo_eadatasize);
386 xval = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS,
388 xsizes = req_capsule_server_sized_get(&req->rq_pill, &RMF_EAVALS_LENS,
389 body->mbo_max_mdsize * sizeof(__u32));
390 if (!xdata || !xval || !xsizes) {
/* NOTE(review): message says "setxattr" but this is the getxattr
 * refill path — looks like a copy-paste; confirm before changing. */
391 CERROR("wrong setxattr reply\n");
396 xtail = xdata + body->mbo_eadatasize;
397 xvtail = xval + body->mbo_aclsize;
399 CDEBUG(D_CACHE, "caching: xdata=%p xtail=%p\n", xdata, xtail);
401 ll_xattr_cache_init(lli);
/* mbo_max_mdsize carries the number of xattrs in this reply. */
403 for (i = 0; i < body->mbo_max_mdsize; i++) {
404 CDEBUG(D_CACHE, "caching [%s]=%.*s\n", xdata, *xsizes, xval);
405 /* Perform consistency checks: attr names and vals in pill */
406 if (!memchr(xdata, 0, xtail - xdata)) {
407 CERROR("xattr protocol violation (names are broken)\n");
409 } else if (xval + *xsizes > xvtail) {
410 CERROR("xattr protocol violation (vals are broken)\n");
412 } else if (OBD_FAIL_CHECK(OBD_FAIL_LLITE_XATTR_ENOMEM)) {
414 } else if (!strcmp(xdata, XATTR_NAME_ACL_ACCESS)) {
415 /* Filter out ACL ACCESS since it's cached separately */
416 CDEBUG(D_CACHE, "not caching %s\n",
417 XATTR_NAME_ACL_ACCESS);
419 } else if (!strcmp(xdata, "security.selinux")) {
420 /* Filter out security.selinux, it is cached in slab */
421 CDEBUG(D_CACHE, "not caching security.selinux\n");
424 rc = ll_xattr_cache_add(&lli->lli_xattrs, xdata, xval,
/* Adding failed: tear down the half-filled cache (write lock held). */
428 ll_xattr_cache_destroy_locked(lli);
/* Advance past the current name (and, on elided lines, its value). */
431 xdata += strlen(xdata) + 1;
/* Both cursors must land exactly on their tails or the reply had
 * unconsumed bytes. */
436 if (xdata != xtail || xval != xvtail)
437 CERROR("a hole in xattr data\n");
439 ll_set_lock_data(sbi->ll_md_exp, inode, oit, NULL);
444 ll_intent_drop_lock(oit);
/* Error unwind: release the list lock, the request, and cancel the
 * LDLM lock taken for the refill. */
447 up_write(&lli->lli_xattrs_list_rwsem);
449 ptlrpc_req_finished(req);
454 up_write(&lli->lli_xattrs_list_rwsem);
456 ldlm_lock_decref_and_cancel((struct lustre_handle *)
457 &oit->it_lock_handle,
464 * Get an xattr value or list xattrs using the write-through cache.
466 * Get the xattr value (@valid has OBD_MD_FLXATTR set) of @name or
467 * list xattr names (@valid has OBD_MD_FLXATTRLS set) for @inode.
468 * The resulting value/list is stored in @buffer if the former
469 * is not larger than @size.
471 * \retval 0 no error occurred
472 * \retval -EPROTO network protocol error
473 * \retval -ENOMEM not enough memory for the cache
474 * \retval -ERANGE the buffer is not large enough
475 * \retval -ENODATA no such attr or the list is empty
477 int ll_xattr_cache_get(struct inode *inode, const char *name, char *buffer,
478 size_t size, __u64 valid)
480 struct lookup_intent oit = { .it_op = IT_GETXATTR };
481 struct ll_inode_info *lli = ll_i2info(inode);
/* Exactly one of FLXATTR (get one value) / FLXATTRLS (list names). */
484 LASSERT(!!(valid & OBD_MD_FLXATTR) ^ !!(valid & OBD_MD_FLXATTRLS));
486 down_read(&lli->lli_xattrs_list_rwsem);
487 if (!ll_xattr_cache_valid(lli)) {
/* Cache miss: drop the read lock and refill. On success the refill
 * path leaves the rwsem write-held, which we downgrade to read. */
488 up_read(&lli->lli_xattrs_list_rwsem);
489 rc = ll_xattr_cache_refill(inode, &oit);
492 downgrade_write(&lli->lli_xattrs_list_rwsem);
494 ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR_HITS, 1);
497 if (valid & OBD_MD_FLXATTR) {
498 struct ll_xattr_entry *xattr;
500 rc = ll_xattr_cache_find(&lli->lli_xattrs, name, &xattr);
502 rc = xattr->xe_vallen;
/* A zero @size means the caller only wants the value's size in rc. */
505 if (size >= xattr->xe_vallen)
506 memcpy(buffer, xattr->xe_value,
512 } else if (valid & OBD_MD_FLXATTRLS) {
/* A zero @size likewise requests only the list's total size. */
513 rc = ll_xattr_cache_list(&lli->lli_xattrs,
514 size ? buffer : NULL, size);
519 up_read(&lli->lli_xattrs_list_rwsem);