#include <linux/atomic.h>
#include <linux/export.h>
#include <linux/generic-radix-tree.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>
/*
 * Fan-out of an interior node: one page's worth of child pointers.
 * GENRADIX_ARY_SHIFT is the number of index bits consumed per level.
 */
#define GENRADIX_ARY		(PAGE_SIZE / sizeof(struct genradix_node *))
#define GENRADIX_ARY_SHIFT	ilog2(GENRADIX_ARY)
11 struct genradix_node {
14 struct genradix_node *children[GENRADIX_ARY];
21 static inline int genradix_depth_shift(unsigned depth)
23 return PAGE_SHIFT + GENRADIX_ARY_SHIFT * depth;
27 * Returns size (of data, in bytes) that a tree of a given depth holds:
29 static inline size_t genradix_depth_size(unsigned depth)
31 return 1UL << genradix_depth_shift(depth);
/* depth that's needed for a genradix that can address up to ULONG_MAX: */
#define GENRADIX_MAX_DEPTH	\
	DIV_ROUND_UP(BITS_PER_LONG - PAGE_SHIFT, GENRADIX_ARY_SHIFT)

/*
 * The depth is stored in the low bits of the root pointer (nodes are
 * page-aligned, so those bits are otherwise zero); this mask selects it.
 */
#define GENRADIX_DEPTH_MASK				\
	((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1))
41 static inline unsigned genradix_root_to_depth(struct genradix_root *r)
43 return (unsigned long) r & GENRADIX_DEPTH_MASK;
46 static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r)
48 return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK);
52 * Returns pointer to the specified byte @offset within @radix, or NULL if not
55 void *__genradix_ptr(struct __genradix *radix, size_t offset)
57 struct genradix_root *r = READ_ONCE(radix->root);
58 struct genradix_node *n = genradix_root_to_node(r);
59 unsigned level = genradix_root_to_depth(r);
61 if (ilog2(offset) >= genradix_depth_shift(level))
72 n = n->children[offset >> genradix_depth_shift(level)];
73 offset &= genradix_depth_size(level) - 1;
76 return &n->data[offset];
78 EXPORT_SYMBOL(__genradix_ptr);
80 static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask)
82 struct genradix_node *node;
84 node = (struct genradix_node *)__get_free_page(gfp_mask|__GFP_ZERO);
87 * We're using pages (not slab allocations) directly for kernel data
88 * structures, so we need to explicitly inform kmemleak of them in order
89 * to avoid false positive memory leak reports.
91 kmemleak_alloc(node, PAGE_SIZE, 1, gfp_mask);
/* Free a node page; must un-register it from kmemleak first (it was
 * registered in genradix_alloc_node()). */
static inline void genradix_free_node(struct genradix_node *node)
{
	kmemleak_free(node);
	free_page((unsigned long)node);
}
102 * Returns pointer to the specified byte @offset within @radix, allocating it if
103 * necessary - newly allocated slots are always zeroed out:
105 void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
108 struct genradix_root *v = READ_ONCE(radix->root);
109 struct genradix_node *n, *new_node = NULL;
112 /* Increase tree depth if necessary: */
114 struct genradix_root *r = v, *new_root;
116 n = genradix_root_to_node(r);
117 level = genradix_root_to_depth(r);
119 if (n && ilog2(offset) < genradix_depth_shift(level))
123 new_node = genradix_alloc_node(gfp_mask);
128 new_node->children[0] = n;
129 new_root = ((struct genradix_root *)
130 ((unsigned long) new_node | (n ? level + 1 : 0)));
132 if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
139 struct genradix_node **p =
140 &n->children[offset >> genradix_depth_shift(level)];
141 offset &= genradix_depth_size(level) - 1;
146 new_node = genradix_alloc_node(gfp_mask);
151 if (!(n = cmpxchg_release(p, NULL, new_node)))
157 genradix_free_node(new_node);
159 return &n->data[offset];
161 EXPORT_SYMBOL(__genradix_ptr_alloc);
163 void *__genradix_iter_peek(struct genradix_iter *iter,
164 struct __genradix *radix,
165 size_t objs_per_page)
167 struct genradix_root *r;
168 struct genradix_node *n;
171 if (iter->offset == SIZE_MAX)
175 r = READ_ONCE(radix->root);
179 n = genradix_root_to_node(r);
180 level = genradix_root_to_depth(r);
182 if (ilog2(iter->offset) >= genradix_depth_shift(level))
188 i = (iter->offset >> genradix_depth_shift(level)) &
191 while (!n->children[i]) {
192 size_t objs_per_ptr = genradix_depth_size(level);
194 if (iter->offset + objs_per_ptr < iter->offset) {
195 iter->offset = SIZE_MAX;
196 iter->pos = SIZE_MAX;
201 iter->offset = round_down(iter->offset + objs_per_ptr,
203 iter->pos = (iter->offset >> PAGE_SHIFT) *
205 if (i == GENRADIX_ARY)
212 return &n->data[iter->offset & (PAGE_SIZE - 1)];
214 EXPORT_SYMBOL(__genradix_iter_peek);
216 void *__genradix_iter_peek_prev(struct genradix_iter *iter,
217 struct __genradix *radix,
218 size_t objs_per_page,
219 size_t obj_size_plus_page_remainder)
221 struct genradix_root *r;
222 struct genradix_node *n;
225 if (iter->offset == SIZE_MAX)
229 r = READ_ONCE(radix->root);
233 n = genradix_root_to_node(r);
234 level = genradix_root_to_depth(r);
236 if (ilog2(iter->offset) >= genradix_depth_shift(level)) {
237 iter->offset = genradix_depth_size(level);
238 iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
240 iter->offset -= obj_size_plus_page_remainder;
247 i = (iter->offset >> genradix_depth_shift(level)) &
250 while (!n->children[i]) {
251 size_t objs_per_ptr = genradix_depth_size(level);
253 iter->offset = round_down(iter->offset, objs_per_ptr);
254 iter->pos = (iter->offset >> PAGE_SHIFT) * objs_per_page;
259 iter->offset -= obj_size_plus_page_remainder;
270 return &n->data[iter->offset & (PAGE_SIZE - 1)];
272 EXPORT_SYMBOL(__genradix_iter_peek_prev);
274 static void genradix_free_recurse(struct genradix_node *n, unsigned level)
279 for (i = 0; i < GENRADIX_ARY; i++)
281 genradix_free_recurse(n->children[i], level - 1);
284 genradix_free_node(n);
287 int __genradix_prealloc(struct __genradix *radix, size_t size,
292 for (offset = 0; offset < size; offset += PAGE_SIZE)
293 if (!__genradix_ptr_alloc(radix, offset, gfp_mask))
298 EXPORT_SYMBOL(__genradix_prealloc);
300 void __genradix_free(struct __genradix *radix)
302 struct genradix_root *r = xchg(&radix->root, NULL);
304 genradix_free_recurse(genradix_root_to_node(r),
305 genradix_root_to_depth(r));
307 EXPORT_SYMBOL(__genradix_free);