// SPDX-License-Identifier: GPL-2.0
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <pthread.h>
#include <assert.h>

#include <linux/gfp.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/radix-tree.h>
#include <urcu/uatomic.h>
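
/*
 * Userspace stand-ins for the kernel slab API, used by the radix-tree /
 * maple-tree test harness: kmem_cache is emulated with malloc() plus a
 * small per-cache freelist, and a pthread mutex replaces kernel locking.
 */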
int kmalloc_verbose;	/* log every allocation and free when set */
int nr_allocated;	/* outstanding objects across all caches */

struct kmem_cache {
	pthread_mutex_t lock;
	unsigned int size;
	unsigned int align;
	int nr_objs;		/* objects currently on the freelist */
	void *objs;		/* freelist, chained through node->parent */
	void (*ctor)(void *);
	unsigned int non_kernel;	/* budget for !__GFP_DIRECT_RECLAIM allocations */
	unsigned long nr_allocated;
	unsigned long nr_tallocated;
};
void kmem_cache_set_non_kernel(struct kmem_cache *cachep, unsigned int val)
{
	cachep->non_kernel = val;
}

unsigned long kmem_cache_get_alloc(struct kmem_cache *cachep)
{
	return cachep->size * cachep->nr_allocated;
}

unsigned long kmem_cache_nr_allocated(struct kmem_cache *cachep)
{
	return cachep->nr_allocated;
}

unsigned long kmem_cache_nr_tallocated(struct kmem_cache *cachep)
{
	return cachep->nr_tallocated;
}

void kmem_cache_zero_nr_tallocated(struct kmem_cache *cachep)
{
	cachep->nr_tallocated = 0;
}
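
/*
 * Emulates kmem_cache_alloc_lru(): a !__GFP_DIRECT_RECLAIM (non-sleeping)
 * allocation may only succeed while the cache's non_kernel budget lasts,
 * which lets tests exercise allocation-failure paths deterministically.
 * The harness's kmem_cache_alloc() is expected to wrap this with lru == NULL.
 */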
void *kmem_cache_alloc_lru(struct kmem_cache *cachep, struct list_lru *lru,
		int gfp)
{
	void *p;

	if (!(gfp & __GFP_DIRECT_RECLAIM)) {
		if (!cachep->non_kernel)
			return NULL;

		cachep->non_kernel--;
	}

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs) {
		/* Reuse an object from the freelist. */
		struct radix_tree_node *node = cachep->objs;

		cachep->nr_objs--;
		cachep->objs = node->parent;
		pthread_mutex_unlock(&cachep->lock);
		node->parent = NULL;
		p = node;
	} else {
		pthread_mutex_unlock(&cachep->lock);
		if (cachep->align)
			posix_memalign(&p, cachep->align, cachep->size);
		else
			p = malloc(cachep->size);

		if (cachep->ctor)
			cachep->ctor(p);
		else if (gfp & __GFP_ZERO)
			memset(p, 0, cachep->size);
	}

	uatomic_inc(&cachep->nr_allocated);
	uatomic_inc(&nr_allocated);
	uatomic_inc(&cachep->nr_tallocated);
	if (kmalloc_verbose)
		printf("Allocating %p from slab\n", p);
	return p;
}
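
/*
 * Keep up to ten freed objects per cache on a freelist, threaded through the
 * radix_tree_node parent pointer. Aligned caches bypass the freelist: their
 * objects are poisoned and returned to the system allocator immediately.
 */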
void __kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	assert(objp);
	if (cachep->nr_objs > 10 || cachep->align) {
		memset(objp, POISON_FREE, cachep->size);
		free(objp);
	} else {
		struct radix_tree_node *node = objp;

		cachep->nr_objs++;
		node->parent = cachep->objs;
		cachep->objs = node;
	}
}
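
/* Drop the allocation counters, then recycle or free; caller holds the lock. */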
void kmem_cache_free_locked(struct kmem_cache *cachep, void *objp)
{
	uatomic_dec(&nr_allocated);
	uatomic_dec(&cachep->nr_allocated);
	if (kmalloc_verbose)
		printf("Freeing %p to slab\n", objp);
	__kmem_cache_free_locked(cachep, objp);
}
void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	pthread_mutex_lock(&cachep->lock);
	kmem_cache_free_locked(cachep, objp);
	pthread_mutex_unlock(&cachep->lock);
}
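
/* Free a whole array of objects while taking the cache lock only once. */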
void kmem_cache_free_bulk(struct kmem_cache *cachep, size_t size, void **list)
{
	if (kmalloc_verbose)
		pr_debug("Bulk free %p[0-%lu]\n", list, size - 1);

	pthread_mutex_lock(&cachep->lock);
	for (int i = 0; i < size; i++)
		kmem_cache_free_locked(cachep, list[i]);
	pthread_mutex_unlock(&cachep->lock);
}
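
/* Nothing to do: the userspace shim has no partial slabs to reclaim. */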
void kmem_cache_shrink(struct kmem_cache *cachep)
{
}
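
/*
 * All-or-nothing bulk allocation, mirroring the kernel's
 * kmem_cache_alloc_bulk(): on any mid-loop failure every object allocated so
 * far is handed back and 0 is returned, otherwise the requested count.
 */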
int kmem_cache_alloc_bulk(struct kmem_cache *cachep, gfp_t gfp, size_t size,
			  void **p)
{
	size_t i;

	if (kmalloc_verbose)
		pr_debug("Bulk alloc %lu\n", size);

	pthread_mutex_lock(&cachep->lock);
	if (cachep->nr_objs >= size) {
		/* The freelist can satisfy the whole request. */
		struct radix_tree_node *node;

		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			node = cachep->objs;
			cachep->nr_objs--;
			cachep->objs = node->parent;
			p[i] = node;
			node->parent = NULL;
		}
		pthread_mutex_unlock(&cachep->lock);
	} else {
		pthread_mutex_unlock(&cachep->lock);
		for (i = 0; i < size; i++) {
			if (!(gfp & __GFP_DIRECT_RECLAIM)) {
				if (!cachep->non_kernel)
					break;
				cachep->non_kernel--;
			}

			if (cachep->align) {
				posix_memalign(&p[i], cachep->align,
					       cachep->size);
			} else {
				p[i] = malloc(cachep->size);
				if (!p[i])
					break;
			}

			if (cachep->ctor)
				cachep->ctor(p[i]);
			else if (gfp & __GFP_ZERO)
				memset(p[i], 0, cachep->size);
		}
	}

	if (i < size) {
		/* Partial success: give everything back and fail the call. */
		size = i;
		pthread_mutex_lock(&cachep->lock);
		for (i = 0; i < size; i++)
			__kmem_cache_free_locked(cachep, p[i]);
		pthread_mutex_unlock(&cachep->lock);
		return 0;
	}

	for (i = 0; i < size; i++) {
		uatomic_inc(&nr_allocated);
		uatomic_inc(&cachep->nr_allocated);
		uatomic_inc(&cachep->nr_tallocated);
		if (kmalloc_verbose)
			printf("Allocating %p from slab\n", p[i]);
	}

	return size;
}
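
/*
 * The name and flags arguments are accepted for API compatibility with the
 * kernel but are otherwise ignored by this emulation.
 */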
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		unsigned int flags, void (*ctor)(void *))
{
	struct kmem_cache *ret = malloc(sizeof(*ret));

	pthread_mutex_init(&ret->lock, NULL);
	ret->size = size;
	ret->align = align;
	ret->nr_objs = 0;
	ret->nr_allocated = 0;
	ret->nr_tallocated = 0;
	ret->objs = NULL;
	ret->ctor = ctor;
	ret->non_kernel = 0;
	return ret;
}
/*
 * Test the test infrastructure for kmem_cache_alloc/free and bulk counterparts.
 */
void test_kmem_cache_bulk(void)
{
	int i;
	void *list[12];
	static struct kmem_cache *test_cache, *test_cache2;

	/*
	 * Testing the bulk allocators without an aligned kmem_cache to force
	 * the bulk alloc/free functions to reuse objects from the freelist.
	 */
	test_cache = kmem_cache_create("test_cache", 256, 0, SLAB_PANIC, NULL);

	for (i = 0; i < 5; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 5; i++)
		kmem_cache_free(test_cache, list[i]);
	assert(test_cache->nr_objs == 5);

	kmem_cache_alloc_bulk(test_cache, __GFP_DIRECT_RECLAIM, 5, list);
	kmem_cache_free_bulk(test_cache, 5, list);

	for (i = 0; i < 12; i++)
		list[i] = kmem_cache_alloc(test_cache, __GFP_DIRECT_RECLAIM);

	for (i = 0; i < 12; i++)
		kmem_cache_free(test_cache, list[i]);

	/* The last free will not be kept around */
	assert(test_cache->nr_objs == 11);

	/* Aligned caches will immediately free */
	test_cache2 = kmem_cache_create("test_cache2", 128, 128, SLAB_PANIC, NULL);

	kmem_cache_alloc_bulk(test_cache2, __GFP_DIRECT_RECLAIM, 10, list);
	kmem_cache_free_bulk(test_cache2, 10, list);
	assert(!test_cache2->nr_objs);
}
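
/*
 * A minimal sketch of how this self-test could be driven standalone; in the
 * real harness the call comes from the suite's own runner, and the
 * STANDALONE_TEST guard below is purely illustrative.
 */
#ifdef STANDALONE_TEST
int main(void)
{
	test_kmem_cache_bulk();
	printf("test_kmem_cache_bulk: all assertions passed\n");
	return 0;
}
#endif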