/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * Client Lustre Object.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 *   Author: Jinshan Xiong <jinshan.xiong@intel.com>
 */

#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/libcfs/libcfs.h>
/* class_put_type() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_fid.h>
#include <linux/list.h>
#include <linux/libcfs/libcfs_hash.h>	/* for cfs_hash stuff */
#include <cl_object.h>
#include <lu_object.h>
#include "cl_internal.h"

static struct kmem_cache *cl_env_kmem;

/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;

/**
 * Initialize cl_object_header.
 */
int cl_object_header_init(struct cl_object_header *h)
{
	int result;

	result = lu_object_header_init(&h->coh_lu);
	if (result == 0) {
		spin_lock_init(&h->coh_attr_guard);
		lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
		h->coh_page_bufsize = 0;
	}
	return result;
}
EXPORT_SYMBOL(cl_object_header_init);

/**
 * Returns a cl_object with a given \a fid.
 *
 * Returns either cached or newly created object. Additional reference on the
 * returned object is acquired.
 *
 * \see lu_object_find(), cl_page_find(), cl_lock_find()
 */
struct cl_object *cl_object_find(const struct lu_env *env,
				 struct cl_device *cd, const struct lu_fid *fid,
				 const struct cl_object_conf *c)
{
	might_sleep();
	return lu2cl(lu_object_find_slice(env, cl2lu_dev(cd), fid, &c->coc_lu));
}
EXPORT_SYMBOL(cl_object_find);

/**
 * Releases a reference on \a o.
 *
 * When the last reference is released, the object is returned to the cache,
 * unless the lu_object_header_flags::LU_OBJECT_HEARD_BANSHEE bit is set in
 * its header.
 *
 * \see cl_page_put(), cl_lock_put().
 */
void cl_object_put(const struct lu_env *env, struct cl_object *o)
{
	lu_object_put(env, &o->co_lu);
}
EXPORT_SYMBOL(cl_object_put);

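/*
 * A minimal usage sketch, not part of the original file: pairs the reference
 * taken by cl_object_find() with cl_object_put(). The helper name and its
 * parameters are hypothetical, introduced only for illustration.
 */
static int __maybe_unused cl_object_find_example(const struct lu_env *env,
						 struct cl_device *cd,
						 const struct lu_fid *fid,
						 const struct cl_object_conf *conf)
{
	struct cl_object *obj;

	/* Look up (or create) the object; this takes a reference. */
	obj = cl_object_find(env, cd, fid, conf);
	if (IS_ERR(obj))
		return PTR_ERR(obj);
	/* ... use obj ... */
	cl_object_put(env, obj);
	return 0;
}
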
/**
 * Acquires an additional reference to the object \a o.
 *
 * This can only be used to acquire an _additional_ reference, i.e., the
 * caller must already possess at least one reference to \a o before calling
 * this.
 *
 * \see cl_page_get(), cl_lock_get().
 */
void cl_object_get(struct cl_object *o)
{
	lu_object_get(&o->co_lu);
}
EXPORT_SYMBOL(cl_object_get);

/**
 * Returns the top-object for a given \a o.
 *
 * \see cl_io_top()
 */
struct cl_object *cl_object_top(struct cl_object *o)
{
	struct cl_object_header *hdr = cl_object_header(o);
	struct cl_object *top;

	while (hdr->coh_parent)
		hdr = hdr->coh_parent;

	top = lu2cl(lu_object_top(&hdr->coh_lu));
	CDEBUG(D_TRACE, "%p -> %p\n", o, top);
	return top;
}
EXPORT_SYMBOL(cl_object_top);

/**
 * Returns a pointer to the lock protecting data-attributes for the given
 * object.
 *
 * Data-attributes are protected by the cl_object_header::coh_attr_guard
 * spin-lock in the top-object.
 *
 * \see cl_attr, cl_object_attr_lock(), cl_object_operations::coo_attr_get().
 */
static spinlock_t *cl_object_attr_guard(struct cl_object *o)
{
	return &cl_object_header(cl_object_top(o))->coh_attr_guard;
}

/**
 * Locks data-attributes.
 *
 * Prevents data-attributes from changing, until the lock is released by
 * cl_object_attr_unlock(). This has to be called before calls to
 * cl_object_attr_get() and cl_object_attr_update().
 */
void cl_object_attr_lock(struct cl_object *o)
	__acquires(cl_object_attr_guard(o))
{
	spin_lock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_lock);

/**
 * Releases the data-attributes lock, acquired by cl_object_attr_lock().
 */
void cl_object_attr_unlock(struct cl_object *o)
	__releases(cl_object_attr_guard(o))
{
	spin_unlock(cl_object_attr_guard(o));
}
EXPORT_SYMBOL(cl_object_attr_unlock);

/**
 * Returns data-attributes of an object \a obj.
 *
 * Every layer is asked (by calling cl_object_operations::coo_attr_get())
 * top-to-bottom to fill in the parts of \a attr that this layer is
 * responsible for.
 */
int cl_object_attr_get(const struct lu_env *env, struct cl_object *obj,
		       struct cl_attr *attr)
{
	struct lu_object_header *top;
	int result = 0;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_get) {
			result = obj->co_ops->coo_attr_get(env, obj, attr);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_get);

/**
 * Updates data-attributes of an object \a obj.
 *
 * Only the attributes mentioned in the validness bit-mask \a v are
 * updated. Calls cl_object_operations::coo_attr_update() on every layer,
 * bottom to top.
 */
int cl_object_attr_update(const struct lu_env *env, struct cl_object *obj,
			  const struct cl_attr *attr, unsigned int v)
{
	struct lu_object_header *top;
	int result = 0;

	assert_spin_locked(cl_object_attr_guard(obj));

	top = obj->co_lu.lo_header;
	list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_attr_update) {
			result = obj->co_ops->coo_attr_update(env, obj, attr,
							      v);
			if (result != 0) {
				if (result > 0)
					result = 0;
				break;
			}
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_attr_update);

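/*
 * A minimal sketch, not part of the original file, of how the attribute API
 * above is meant to be used: the attr guard must be held across
 * cl_object_attr_get() and cl_object_attr_update(). The helper name and the
 * use of the CAT_SIZE validness bit are illustrative assumptions.
 */
static int __maybe_unused cl_object_attr_example(const struct lu_env *env,
						 struct cl_object *obj,
						 struct cl_attr *attr,
						 loff_t new_size)
{
	int rc;

	cl_object_attr_lock(obj);
	rc = cl_object_attr_get(env, obj, attr);
	if (rc == 0 && attr->cat_size < new_size) {
		/* Push the new size down through all layers. */
		attr->cat_size = new_size;
		rc = cl_object_attr_update(env, obj, attr, CAT_SIZE);
	}
	cl_object_attr_unlock(obj);
	return rc;
}
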
/**
 * Notifies layers (bottom-to-top) that a glimpse AST was received.
 *
 * Layers have to fill \a lvb fields with information that will be shipped
 * back to the glimpse issuer.
 *
 * \see cl_lock_operations::clo_glimpse()
 */
int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
		      struct ost_lvb *lvb)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry_reverse(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_glimpse) {
			result = obj->co_ops->coo_glimpse(env, obj, lvb);
			if (result != 0)
				break;
		}
	}
	LU_OBJECT_HEADER(D_DLMTRACE, env, lu_object_top(top),
			 "size: %llu mtime: %llu atime: %llu ctime: %llu blocks: %llu\n",
			 lvb->lvb_size, lvb->lvb_mtime, lvb->lvb_atime,
			 lvb->lvb_ctime, lvb->lvb_blocks);
	return result;
}
EXPORT_SYMBOL(cl_object_glimpse);

/**
 * Updates the configuration of an object \a obj.
 */
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
		const struct cl_object_conf *conf)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_conf_set) {
			result = obj->co_ops->coo_conf_set(env, obj, conf);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_conf_set);

/**
 * Prunes caches of pages and locks for this object.
 */
int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
{
	struct lu_object_header *top;
	struct cl_object *o;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
		if (o->co_ops->coo_prune) {
			result = o->co_ops->coo_prune(env, o);
			if (result != 0)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_prune);

/**
 * Gets stripe information of this object.
 */
int cl_object_getstripe(const struct lu_env *env, struct cl_object *obj,
			struct lov_user_md __user *uarg)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_getstripe) {
			result = obj->co_ops->coo_getstripe(env, obj, uarg);
			if (result)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_getstripe);

/**
 * Gets fiemap extents from a file object.
 *
 * \param env	[in]	lustre environment
 * \param obj	[in]	file object
 * \param key	[in]	fiemap request argument
 * \param fiemap	[out]	fiemap extents mapping retrieved
 * \param buflen	[in]	max buffer length of @fiemap
 *
 * \retval 0	success
 * \retval < 0	error
 */
int cl_object_fiemap(const struct lu_env *env, struct cl_object *obj,
		     struct ll_fiemap_info_key *key,
		     struct fiemap *fiemap, size_t *buflen)
{
	struct lu_object_header *top;
	int result = 0;

	top = obj->co_lu.lo_header;
	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_fiemap) {
			result = obj->co_ops->coo_fiemap(env, obj, key, fiemap,
							 buflen);
			if (result)
				break;
		}
	}
	return result;
}
EXPORT_SYMBOL(cl_object_fiemap);

int cl_object_layout_get(const struct lu_env *env, struct cl_object *obj,
			 struct cl_layout *cl)
{
	struct lu_object_header *top = obj->co_lu.lo_header;

	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_layout_get)
			return obj->co_ops->coo_layout_get(env, obj, cl);
	}

	return -EOPNOTSUPP;
}
EXPORT_SYMBOL(cl_object_layout_get);

loff_t cl_object_maxbytes(struct cl_object *obj)
{
	struct lu_object_header *top = obj->co_lu.lo_header;
	loff_t maxbytes = LLONG_MAX;

	list_for_each_entry(obj, &top->loh_layers, co_lu.lo_linkage) {
		if (obj->co_ops->coo_maxbytes)
			maxbytes = min_t(loff_t, obj->co_ops->coo_maxbytes(obj),
					 maxbytes);
	}

	return maxbytes;
}
EXPORT_SYMBOL(cl_object_maxbytes);

/**
 * Helper function removing all object locks and marking the object for
 * deletion. All object pages must have been deleted at this point.
 *
 * This is called by cl_inode_fini() and lov_object_delete() to destroy top-
 * and sub-objects respectively.
 */
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
	struct cl_object_header *hdr = cl_object_header(obj);

	set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
}
EXPORT_SYMBOL(cl_object_kill);

void cache_stats_init(struct cache_stats *cs, const char *name)
{
	int i;

	cs->cs_name = name;
	for (i = 0; i < CS_NR; i++)
		atomic_set(&cs->cs_stats[i], 0);
}

static int cache_stats_print(const struct cache_stats *cs,
			     struct seq_file *m, int h)
{
	int i;

	/*
	 *   lookup    hit  total cached create
	 * env: ...... ...... ...... ...... ......
	 */
	if (h) {
		const char *names[CS_NR] = CS_NAMES;

		seq_printf(m, "%6s", " ");
		for (i = 0; i < CS_NR; i++)
			seq_printf(m, "%8s", names[i]);
		seq_printf(m, "\n");
	}

	seq_printf(m, "%5.5s:", cs->cs_name);
	for (i = 0; i < CS_NR; i++)
		seq_printf(m, "%8u", atomic_read(&cs->cs_stats[i]));
	return 0;
}

static void cl_env_percpu_refill(void);

/**
 * Initialize client site.
 *
 * Performs common initialization (lu_site_init()) and initializes
 * statistical counters. Also performs global initializations on the first
 * call.
 */
int cl_site_init(struct cl_site *s, struct cl_device *d)
{
	size_t i;
	int result;

	result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
	if (result == 0) {
		cache_stats_init(&s->cs_pages, "pages");
		for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
			atomic_set(&s->cs_pages_state[i], 0);
		cl_env_percpu_refill();
	}
	return result;
}
EXPORT_SYMBOL(cl_site_init);

/**
 * Finalize client site. Dual to cl_site_init().
 */
void cl_site_fini(struct cl_site *s)
{
	lu_site_fini(&s->cs_lu);
}
EXPORT_SYMBOL(cl_site_fini);

static struct cache_stats cl_env_stats = {
	.cs_name  = "envs",
	.cs_stats = { ATOMIC_INIT(0), }
};

/**
 * Outputs client site statistical counters into a buffer. Suitable for
 * ll_rd_*()-style functions.
 */
int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
{
	size_t i;
	static const char *pstate[] = {
		[CPS_CACHED]  = "c",
		[CPS_OWNED]   = "o",
		[CPS_PAGEOUT] = "w",
		[CPS_PAGEIN]  = "r",
		[CPS_FREEING] = "f"
	};
	/*
	 *        lookup    hit  total   busy create
	 * pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
	 * locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
	 *   env: ...... ...... ...... ...... ......
	 */
	lu_site_stats_print(&site->cs_lu, m);
	cache_stats_print(&site->cs_pages, m, 1);
	seq_printf(m, " [");
	for (i = 0; i < ARRAY_SIZE(site->cs_pages_state); ++i)
		seq_printf(m, "%s: %u ", pstate[i],
			   atomic_read(&site->cs_pages_state[i]));
	seq_printf(m, "]\n");
	cache_stats_print(&cl_env_stats, m, 0);
	seq_printf(m, "\n");
	return 0;
}
EXPORT_SYMBOL(cl_site_stats_print);

/*****************************************************************************
 *
 * lu_env handling on client.
 *
 */

/**
 * The most efficient way is to store the cl_env pointer in task-specific
 * structures. On Linux, it won't be easy to use task_struct->journal_info,
 * because Lustre code may call into other filesystems, which have certain
 * assumptions about journal_info. The following fields in task_struct have
 * been identified as usable for this purpose:
 *  - tux_info: only on RedHat kernels.
 *
 * \note As long as we use task_struct to store cl_env, we assume that once
 * called into Lustre, we'll never call into other parts of the kernel that
 * use those fields in task_struct without explicitly exiting Lustre.
 *
 * If no space in task_struct is available, a hash will be used.
 */

static unsigned int cl_envs_cached_max = 32; /* XXX: prototype: arbitrary
					      * limit for now.
					      */

static struct cl_env_cache {
	rwlock_t cec_guard;
	unsigned int cec_count;
	struct list_head cec_envs;
} *cl_envs;

struct cl_env {
	void *ce_magic;
	struct lu_env ce_lu;
	struct lu_context ce_ses;

	/*
	 * Linkage into global list of all client environments. Used for
	 * garbage collection.
	 */
	struct list_head ce_linkage;
	/*
	 * Reference counter.
	 */
	int ce_ref;
	/*
	 * Debugging field: address of the caller who made original
	 * allocation.
	 */
	void *ce_debug;
};

#define CL_ENV_INC(counter)
#define CL_ENV_DEC(counter)

static void cl_env_init0(struct cl_env *cle, void *debug)
{
	LASSERT(cle->ce_ref == 0);
	LASSERT(cle->ce_magic == &cl_env_init0);
	LASSERT(!cle->ce_debug);

	cle->ce_ref = 1;
	cle->ce_debug = debug;
	CL_ENV_INC(busy);
}

static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
{
	struct lu_env *env;
	struct cl_env *cle;

	cle = kmem_cache_zalloc(cl_env_kmem, GFP_NOFS);
	if (cle) {
		int rc;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		env = &cle->ce_lu;
		rc = lu_env_init(env, ctx_tags | LCT_CL_THREAD);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses,
					     ses_tags | LCT_SESSION);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
				cl_env_init0(cle, debug);
			} else {
				lu_env_fini(env);
			}
		}
		if (rc != 0) {
			kmem_cache_free(cl_env_kmem, cle);
			env = ERR_PTR(rc);
		}
	} else {
		env = ERR_PTR(-ENOMEM);
	}
	return env;
}

static void cl_env_fini(struct cl_env *cle)
{
	CL_ENV_DEC(total);
	lu_context_fini(&cle->ce_lu.le_ctx);
	lu_context_fini(&cle->ce_ses);
	kmem_cache_free(cl_env_kmem, cle);
}

static struct lu_env *cl_env_obtain(void *debug)
{
	struct cl_env *cle;
	struct lu_env *env;
	int cpu = get_cpu();

	read_lock(&cl_envs[cpu].cec_guard);
	LASSERT(equi(cl_envs[cpu].cec_count == 0,
		     list_empty(&cl_envs[cpu].cec_envs)));
	if (cl_envs[cpu].cec_count > 0) {
		int rc;

		cle = container_of(cl_envs[cpu].cec_envs.next, struct cl_env,
				   ce_linkage);
		list_del_init(&cle->ce_linkage);
		cl_envs[cpu].cec_count--;
		read_unlock(&cl_envs[cpu].cec_guard);
		put_cpu();

		env = &cle->ce_lu;
		rc = lu_env_refill(env);
		if (rc == 0) {
			cl_env_init0(cle, debug);
			lu_context_enter(&env->le_ctx);
			lu_context_enter(&cle->ce_ses);
		} else {
			cl_env_fini(cle);
			env = ERR_PTR(rc);
		}
	} else {
		read_unlock(&cl_envs[cpu].cec_guard);
		put_cpu();
		env = cl_env_new(lu_context_tags_default,
				 lu_session_tags_default, debug);
	}
	return env;
}

static inline struct cl_env *cl_env_container(struct lu_env *env)
{
	return container_of(env, struct cl_env, ce_lu);
}

/**
 * Returns lu_env: if there already is an environment associated with the
 * current thread, it is returned; otherwise, a new environment is allocated.
 *
 * Allocations are amortized through the global cache of environments.
 *
 * \param refcheck pointer to a counter used to detect environment leaks. In
 * the usual case cl_env_get() and cl_env_put() are called in the same lexical
 * scope and a pointer to the same integer is passed as \a refcheck. This is
 * used to detect missed cl_env_put().
 *
 * \see cl_env_alloc()
 */
struct lu_env *cl_env_get(u16 *refcheck)
{
	struct lu_env *env;

	env = cl_env_obtain(__builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_get);

/**
 * Forces an allocation of a fresh environment with given tags.
 *
 * \see cl_env_get()
 */
struct lu_env *cl_env_alloc(u16 *refcheck, u32 tags)
{
	struct lu_env *env;

	env = cl_env_new(tags, tags, __builtin_return_address(0));
	if (!IS_ERR(env)) {
		struct cl_env *cle;

		cle = cl_env_container(env);
		*refcheck = cle->ce_ref;
		CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	}
	return env;
}
EXPORT_SYMBOL(cl_env_alloc);

static void cl_env_exit(struct cl_env *cle)
{
	lu_context_exit(&cle->ce_lu.le_ctx);
	lu_context_exit(&cle->ce_ses);
}

/**
 * Finalizes and frees a given number of cached environments. This is done to
 * (1) free some memory (not currently hooked into VM), or (2) release
 * references to modules.
 */
unsigned int cl_env_cache_purge(unsigned int nr)
{
	struct cl_env *cle;
	unsigned int i;

	for_each_possible_cpu(i) {
		write_lock(&cl_envs[i].cec_guard);
		for (; !list_empty(&cl_envs[i].cec_envs) && nr > 0; --nr) {
			cle = container_of(cl_envs[i].cec_envs.next,
					   struct cl_env, ce_linkage);
			list_del_init(&cle->ce_linkage);
			LASSERT(cl_envs[i].cec_count > 0);
			cl_envs[i].cec_count--;
			write_unlock(&cl_envs[i].cec_guard);

			cl_env_fini(cle);
			write_lock(&cl_envs[i].cec_guard);
		}
		LASSERT(equi(cl_envs[i].cec_count == 0,
			     list_empty(&cl_envs[i].cec_envs)));
		write_unlock(&cl_envs[i].cec_guard);
	}
	return nr;
}
EXPORT_SYMBOL(cl_env_cache_purge);

/**
 * Release an environment.
 *
 * Decrement \a env reference counter. When the counter drops to 0, nothing
 * in this thread is using the environment and it is returned to the
 * allocation cache, or freed straight away if the cache is already full.
 */
void cl_env_put(struct lu_env *env, u16 *refcheck)
{
	struct cl_env *cle;

	cle = cl_env_container(env);

	LASSERT(cle->ce_ref > 0);
	LASSERT(ergo(refcheck, cle->ce_ref == *refcheck));

	CDEBUG(D_OTHER, "%d@%p\n", cle->ce_ref, cle);
	if (--cle->ce_ref == 0) {
		int cpu = get_cpu();

		CL_ENV_DEC(busy);
		cle->ce_debug = NULL;
		cl_env_exit(cle);
		/*
		 * Don't bother to take a lock here.
		 *
		 * Return environment to the cache only when it was allocated
		 * with the standard tags.
		 */
		if (cl_envs[cpu].cec_count < cl_envs_cached_max &&
		    (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
		    (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
			read_lock(&cl_envs[cpu].cec_guard);
			list_add(&cle->ce_linkage, &cl_envs[cpu].cec_envs);
			cl_envs[cpu].cec_count++;
			read_unlock(&cl_envs[cpu].cec_guard);
		} else {
			cl_env_fini(cle);
		}
		put_cpu();
	}
}
EXPORT_SYMBOL(cl_env_put);

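/*
 * A minimal usage sketch, not part of the original file: cl_env_get() and
 * cl_env_put() are paired in one lexical scope, with the same refcheck
 * counter passed to both so that a missed cl_env_put() trips the LASSERT
 * above. The helper name is hypothetical.
 */
static int __maybe_unused cl_env_usage_example(void)
{
	struct lu_env *env;
	u16 refcheck;

	env = cl_env_get(&refcheck);
	if (IS_ERR(env))
		return PTR_ERR(env);
	/* ... use env while calling into the cl_object API ... */
	cl_env_put(env, &refcheck);
	return 0;
}
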
/**
 * Converts struct ost_lvb to struct cl_attr.
 *
 * \see cl_attr2lvb
 */
void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
{
	attr->cat_size = lvb->lvb_size;
	attr->cat_mtime = lvb->lvb_mtime;
	attr->cat_atime = lvb->lvb_atime;
	attr->cat_ctime = lvb->lvb_ctime;
	attr->cat_blocks = lvb->lvb_blocks;
}
EXPORT_SYMBOL(cl_lvb2attr);

static struct cl_env cl_env_percpu[NR_CPUS];

static int cl_env_percpu_init(void)
{
	struct cl_env *cle;
	int tags = LCT_REMEMBER | LCT_NOREF;
	int i, j;
	int rc = 0;

	for_each_possible_cpu(i) {
		struct lu_env *env;

		rwlock_init(&cl_envs[i].cec_guard);
		INIT_LIST_HEAD(&cl_envs[i].cec_envs);
		cl_envs[i].cec_count = 0;

		cle = &cl_env_percpu[i];
		env = &cle->ce_lu;

		INIT_LIST_HEAD(&cle->ce_linkage);
		cle->ce_magic = &cl_env_init0;
		rc = lu_env_init(env, LCT_CL_THREAD | tags);
		if (rc == 0) {
			rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
			if (rc == 0) {
				lu_context_enter(&cle->ce_ses);
				env->le_ses = &cle->ce_ses;
			} else {
				lu_env_fini(env);
			}
		}
		if (rc != 0)
			break;
	}
	if (rc != 0) {
		/* Indices 0 to i (excluding i) were correctly initialized,
		 * thus we must uninitialize up to i, the rest are undefined.
		 */
		for (j = 0; j < i; j++) {
			cle = &cl_env_percpu[j];
			lu_context_exit(&cle->ce_ses);
			lu_context_fini(&cle->ce_ses);
			lu_env_fini(&cle->ce_lu);
		}
	}

	return rc;
}

static void cl_env_percpu_fini(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cl_env *cle = &cl_env_percpu[i];

		lu_context_exit(&cle->ce_ses);
		lu_context_fini(&cle->ce_ses);
		lu_env_fini(&cle->ce_lu);
	}
}

static void cl_env_percpu_refill(void)
{
	int i;

	for_each_possible_cpu(i)
		lu_env_refill(&cl_env_percpu[i].ce_lu);
}

void cl_env_percpu_put(struct lu_env *env)
{
	struct cl_env *cle;
	int cpu;

	cpu = smp_processor_id();
	cle = cl_env_container(env);
	LASSERT(cle == &cl_env_percpu[cpu]);

	cle->ce_ref--;
	LASSERT(cle->ce_ref == 0);

	CL_ENV_DEC(busy);
	cle->ce_debug = NULL;

	put_cpu();
}
EXPORT_SYMBOL(cl_env_percpu_put);

struct lu_env *cl_env_percpu_get(void)
{
	struct cl_env *cle;

	cle = &cl_env_percpu[get_cpu()];
	cl_env_init0(cle, __builtin_return_address(0));

	return &cle->ce_lu;
}
EXPORT_SYMBOL(cl_env_percpu_get);

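/*
 * A minimal usage sketch, not part of the original file: cl_env_percpu_get()
 * pins the current CPU via get_cpu(), and the matching cl_env_percpu_put()
 * drops the reference and calls put_cpu(), so the pair must bracket a short,
 * non-sleeping region. The helper name is hypothetical.
 */
static void __maybe_unused cl_env_percpu_example(void)
{
	struct lu_env *env = cl_env_percpu_get();

	/* ... short, non-sleeping use of env on this CPU ... */
	cl_env_percpu_put(env);
}
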
/*****************************************************************************
 *
 * Temporary prototype thing: mirror obd-devices into cl devices.
 *
 */

struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
				struct lu_device_type *ldt,
				struct lu_device *next)
{
	const char *typename;
	struct lu_device *d;

	LASSERT(ldt);

	typename = ldt->ldt_name;
	d = ldt->ldt_ops->ldto_device_alloc(env, ldt, NULL);
	if (!IS_ERR(d)) {
		int rc;

		if (site)
			d->ld_site = site;
		rc = ldt->ldt_ops->ldto_device_init(env, d, typename, next);
		if (rc == 0) {
			lu_device_get(d);
			lu_ref_add(&d->ld_reference,
				   "lu-stack", &lu_site_init);
		} else {
			ldt->ldt_ops->ldto_device_free(env, d);
			CERROR("can't init device '%s', %d\n", typename, rc);
			d = ERR_PTR(rc);
		}
	} else {
		CERROR("Cannot allocate device: '%s'\n", typename);
	}
	return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);

/**
 * Finalize device stack by calling lu_stack_fini().
 */
void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
{
	lu_stack_fini(env, cl2lu_dev(cl));
}
EXPORT_SYMBOL(cl_stack_fini);

static struct lu_context_key cl_key;

struct cl_thread_info *cl_env_info(const struct lu_env *env)
{
	return lu_context_key_get(&env->le_ctx, &cl_key);
}

/* defines cl0_key_{init,fini}() */
LU_KEY_INIT_FINI(cl0, struct cl_thread_info);

static void *cl_key_init(const struct lu_context *ctx,
			 struct lu_context_key *key)
{
	return cl0_key_init(ctx, key);
}

static void cl_key_fini(const struct lu_context *ctx,
			struct lu_context_key *key, void *data)
{
	cl0_key_fini(ctx, key, data);
}

static struct lu_context_key cl_key = {
	.lct_tags = LCT_CL_THREAD,
	.lct_init = cl_key_init,
	.lct_fini = cl_key_fini,
};

static struct lu_kmem_descr cl_object_caches[] = {
	{
		.ckd_cache = &cl_env_kmem,
		.ckd_name  = "cl_env_kmem",
		.ckd_size  = sizeof(struct cl_env)
	},
	{
		.ckd_cache = NULL
	}
};

/**
 * Global initialization of cl-data. Create kmem caches, register
 * lu_context_key's, etc.
 *
 * \see cl_global_fini()
 */
int cl_global_init(void)
{
	int result;

	cl_envs = kzalloc(sizeof(*cl_envs) * num_possible_cpus(), GFP_KERNEL);
	if (!cl_envs) {
		result = -ENOMEM;
		goto out;
	}

	result = lu_kmem_init(cl_object_caches);
	if (result)
		goto out_envs;

	LU_CONTEXT_KEY_INIT(&cl_key);
	result = lu_context_key_register(&cl_key);
	if (result)
		goto out_kmem;

	result = cl_env_percpu_init();
	if (result)
		/* no cl_env_percpu_fini on error */
		goto out_keys;

	return 0;

out_keys:
	lu_context_key_degister(&cl_key);
out_kmem:
	lu_kmem_fini(cl_object_caches);
out_envs:
	kfree(cl_envs);
out:
	return result;
}

/**
 * Finalization of global cl-data. Dual to cl_global_init().
 */
void cl_global_fini(void)
{
	cl_env_percpu_fini();
	lu_context_key_degister(&cl_key);
	lu_kmem_fini(cl_object_caches);
	kfree(cl_envs);
}