security/selinux/ss/sidtab.c (GNU Linux-libre 6.9-gnu)
// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <stephen.smalley.work@gmail.com>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
#include "services.h"

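/*
 * Overview of the data structure (derived from the code below): a sidtab
 * keeps two indexes over the same set of entries.  A multi-level array tree
 * rooted at s->roots[] maps a SID's index to its entry; the tree only ever
 * grows, so readers can walk it without locking.  A fixed-size hashtable
 * (context_to_sid, declared in sidtab.h) maps a context back to its SID and
 * is traversed under RCU.  s->lock serializes writers only.
 */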
struct sidtab_str_cache {
        struct rcu_head rcu_member;
        struct list_head lru_member;
        struct sidtab_entry *parent;
        u32 len;
        char str[] __counted_by(len);
};

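/*
 * Tree entries are indexed from 0, while SID 0 is invalid and SIDs
 * 1..SECINITSID_NUM are reserved for the initial SIDs kept in s->isids[],
 * hence the +/- (SECINITSID_NUM + 1) conversion below.  Illustration
 * (assuming SECINITSID_NUM were 27; the real value comes from flask.h):
 *
 *	index_to_sid(0)  == 28
 *	sid_to_index(28) == 0
 */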
#define index_to_sid(index) ((index) + SECINITSID_NUM + 1)
#define sid_to_index(sid)   ((sid) - (SECINITSID_NUM + 1))

int sidtab_init(struct sidtab *s)
{
        u32 i;

        memset(s->roots, 0, sizeof(s->roots));

        for (i = 0; i < SECINITSID_NUM; i++)
                s->isids[i].set = 0;

        s->frozen = false;
        s->count = 0;
        s->convert = NULL;
        hash_init(s->context_to_sid);

        spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
        s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
        INIT_LIST_HEAD(&s->cache_lru_list);
        spin_lock_init(&s->cache_lock);
#endif

        return 0;
}

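/*
 * Look up the SID of @context under RCU.  The caller supplies the
 * precomputed hash; entry->hash is compared first as a cheap filter so the
 * full context_cmp() runs only on hash matches.  Returns 0 (never a valid
 * SID) if the context is not present.
 */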
static u32 context_to_sid(struct sidtab *s, struct context *context, u32 hash)
{
        struct sidtab_entry *entry;
        u32 sid = 0;

        rcu_read_lock();
        hash_for_each_possible_rcu(s->context_to_sid, entry, list, hash) {
                if (entry->hash != hash)
                        continue;
                if (context_cmp(&entry->context, context)) {
                        sid = entry->sid;
                        break;
                }
        }
        rcu_read_unlock();
        return sid;
}

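/*
 * Install the context of one initial SID (1..SECINITSID_NUM) into the fixed
 * isids[] array.  The context is deep-copied via context_cpy(), so the entry
 * remains valid independently of the caller's copy.
 */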
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
        struct sidtab_isid_entry *isid;
        u32 hash;
        int rc;

        if (sid == 0 || sid > SECINITSID_NUM)
                return -EINVAL;

        isid = &s->isids[sid - 1];

        rc = context_cpy(&isid->entry.context, context);
        if (rc)
                return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
        isid->entry.cache = NULL;
#endif
        isid->set = 1;

        hash = context_compute_hash(context);

        /*
         * Multiple initial sids may map to the same context. Check that this
         * context is not already represented in the context_to_sid hashtable
         * to avoid duplicate entries and long linked lists upon hash
         * collision.
         */
        if (!context_to_sid(s, context, hash)) {
                isid->entry.sid = sid;
                isid->entry.hash = hash;
                hash_add(s->context_to_sid, &isid->entry.list, hash);
        }

        return 0;
}

int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
        int i;
        int chain_len = 0;
        int slots_used = 0;
        int entries = 0;
        int max_chain_len = 0;
        int cur_bucket = 0;
        struct sidtab_entry *entry;

        rcu_read_lock();
        hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
                entries++;
                if (i == cur_bucket) {
                        chain_len++;
                        if (chain_len == 1)
                                slots_used++;
                } else {
                        cur_bucket = i;
                        if (chain_len > max_chain_len)
                                max_chain_len = chain_len;
                        /* count this entry as the first in the new bucket */
                        chain_len = 1;
                        slots_used++;
                }
        }
        rcu_read_unlock();

        if (chain_len > max_chain_len)
                max_chain_len = chain_len;

        return scnprintf(page, PAGE_SIZE,
                         "entries: %d\nbuckets used: %d/%d\n"
                         "longest chain: %d\n",
                         entries, slots_used, SIDTAB_HASH_BUCKETS,
                         max_chain_len);
}

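/*
 * A tree of level L holds SIDTAB_LEAF_ENTRIES << (L * SIDTAB_INNER_SHIFT)
 * entries.  Illustrative walk-through with made-up constants (the real ones
 * are derived from PAGE_SIZE in sidtab.h) of SIDTAB_LEAF_ENTRIES == 512 and
 * SIDTAB_INNER_SHIFT == 9:
 *
 *	count 1..512       -> level 0 (a single leaf node)
 *	count 513..262144  -> level 1 (an inner node of up to 512 leaves)
 *	count 262145..     -> level 2, and so on up to SIDTAB_MAX_LEVEL
 */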
static u32 sidtab_level_from_count(u32 count)
{
        u32 capacity = SIDTAB_LEAF_ENTRIES;
        u32 level = 0;

        while (count > capacity) {
                capacity <<= SIDTAB_INNER_SHIFT;
                ++level;
        }
        return level;
}

static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
        u32 l;

        if (!s->roots[0].ptr_leaf) {
                s->roots[0].ptr_leaf =
                        kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
                if (!s->roots[0].ptr_leaf)
                        return -ENOMEM;
        }
        for (l = 1; l <= level; ++l)
                if (!s->roots[l].ptr_inner) {
                        s->roots[l].ptr_inner =
                                kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
                        if (!s->roots[l].ptr_inner)
                                return -ENOMEM;
                        s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
                }
        return 0;
}

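/*
 * Walk (and, with @alloc != 0, build) the path from the root to the entry at
 * @index.  At each inner level the top SIDTAB_INNER_SHIFT bits of leaf_index
 * select a child and are then masked off; the remainder indexes into the
 * leaf.  With @alloc set, any missing node along the path is kzalloc()ed, so
 * a NULL return means "not allocated" on the lookup path and -ENOMEM on the
 * insert path.
 */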
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
                                             int alloc)
{
        union sidtab_entry_inner *entry;
        u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

        /* find the level of the subtree we need */
        level = sidtab_level_from_count(index + 1);
        capacity_shift = level * SIDTAB_INNER_SHIFT;

        /* allocate roots if needed */
        if (alloc && sidtab_alloc_roots(s, level) != 0)
                return NULL;

        /* lookup inside the subtree */
        entry = &s->roots[level];
        while (level != 0) {
                capacity_shift -= SIDTAB_INNER_SHIFT;
                --level;

                entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
                leaf_index &= ((u32)1 << capacity_shift) - 1;

                if (!entry->ptr_inner) {
                        if (alloc)
                                entry->ptr_inner = kzalloc(
                                        SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
                        if (!entry->ptr_inner)
                                return NULL;
                }
        }
        if (!entry->ptr_leaf) {
                if (alloc)
                        entry->ptr_leaf =
                                kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_ATOMIC);
                if (!entry->ptr_leaf)
                        return NULL;
        }
        return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}

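/*
 * The smp_load_acquire(&s->count) here pairs with the
 * smp_store_release(&s->count, count + 1) in sidtab_context_to_sid(): once
 * the new count is observed, all entry writes that preceded the release are
 * guaranteed visible, which is what makes the lockless tree walk safe.
 */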
static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
        /* read entries only after reading count */
        u32 count = smp_load_acquire(&s->count);

        if (index >= count)
                return NULL;

        return sidtab_do_lookup(s, index, 0);
}

static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
        return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}

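/*
 * An entry whose context could not be validated against the current policy
 * is kept only in string form (context.len != 0).  The "force" variant
 * returns such entries anyway; the normal variant falls back to the
 * unlabeled entry, as does any lookup of an unknown SID.
 */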
static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
                                               int force)
{
        if (sid != 0) {
                struct sidtab_entry *entry;

                if (sid > SECINITSID_NUM)
                        entry = sidtab_lookup(s, sid_to_index(sid));
                else
                        entry = sidtab_lookup_initial(s, sid);
                if (entry && (!entry->context.len || force))
                        return entry;
        }

        return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
        return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
        return sidtab_search_core(s, sid, 1);
}

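/*
 * Map @context to a SID, allocating a new entry if none exists.  The fast
 * path is the lockless context_to_sid() call; the slow path takes s->lock,
 * re-checks, and appends an entry at index s->count.  Ordering matters: the
 * entry is fully initialized first, then count is published with a release
 * store, and only then is the entry added to the hashtable, so neither
 * lockless lookup path can observe a half-built entry.
 */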
int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid)
{
        unsigned long flags;
        u32 count, hash = context_compute_hash(context);
        struct sidtab_convert_params *convert;
        struct sidtab_entry *dst, *dst_convert;
        int rc;

        *sid = context_to_sid(s, context, hash);
        if (*sid)
                return 0;

        /* lock-free search failed: lock, re-search, and insert if not found */
        spin_lock_irqsave(&s->lock, flags);

        rc = 0;
        *sid = context_to_sid(s, context, hash);
        if (*sid)
                goto out_unlock;

        if (unlikely(s->frozen)) {
                /*
                 * This sidtab is now frozen - tell the caller to abort and
                 * get the new one.
                 */
                rc = -ESTALE;
                goto out_unlock;
        }

        count = s->count;

        /* bail out if we already reached max entries */
        rc = -EOVERFLOW;
        if (count >= SIDTAB_MAX)
                goto out_unlock;

        /* insert context into new entry */
        rc = -ENOMEM;
        dst = sidtab_do_lookup(s, count, 1);
        if (!dst)
                goto out_unlock;

        dst->sid = index_to_sid(count);
        dst->hash = hash;

        rc = context_cpy(&dst->context, context);
        if (rc)
                goto out_unlock;

        /*
         * if we are building a new sidtab, we need to convert the context
         * and insert it there as well
         */
        convert = s->convert;
        if (convert) {
                struct sidtab *target = convert->target;

                rc = -ENOMEM;
                dst_convert = sidtab_do_lookup(target, count, 1);
                if (!dst_convert) {
                        context_destroy(&dst->context);
                        goto out_unlock;
                }

                rc = services_convert_context(convert->args, context,
                                              &dst_convert->context,
                                              GFP_ATOMIC);
                if (rc) {
                        context_destroy(&dst->context);
                        goto out_unlock;
                }
                dst_convert->sid = index_to_sid(count);
                dst_convert->hash = context_compute_hash(&dst_convert->context);
                target->count = count + 1;

                hash_add_rcu(target->context_to_sid, &dst_convert->list,
                             dst_convert->hash);
        }

        if (context->len)
                pr_info("SELinux:  Context %s is not valid (left unmapped).\n",
                        context->str);

        *sid = index_to_sid(count);

        /* write entries before updating count */
        smp_store_release(&s->count, count + 1);
        hash_add_rcu(s->context_to_sid, &dst->list, dst->hash);

        rc = 0;
out_unlock:
        spin_unlock_irqrestore(&s->lock, flags);
        return rc;
}

static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
        struct sidtab_entry *entry;
        u32 i;

        for (i = 0; i < count; i++) {
                entry = sidtab_do_lookup(s, i, 0);
                entry->sid = index_to_sid(i);
                entry->hash = context_compute_hash(&entry->context);

                hash_add_rcu(s->context_to_sid, &entry->list, entry->hash);
        }
}

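/*
 * Recursively mirror the source (sub)tree into the destination, converting
 * each context via services_convert_context().  *pos tracks how many leaf
 * entries have been converted so the recursion stops at @count.  Unlike the
 * GFP_ATOMIC insert path above, this runs in process context (GFP_KERNEL
 * allocations, cond_resched() after each leaf).
 */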
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
                               union sidtab_entry_inner *esrc, u32 *pos,
                               u32 count, u32 level,
                               struct sidtab_convert_params *convert)
{
        int rc;
        u32 i;

        if (level != 0) {
                if (!edst->ptr_inner) {
                        edst->ptr_inner =
                                kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_KERNEL);
                        if (!edst->ptr_inner)
                                return -ENOMEM;
                }
                i = 0;
                while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
                        rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
                                                 &esrc->ptr_inner->entries[i],
                                                 pos, count, level - 1,
                                                 convert);
                        if (rc)
                                return rc;
                        i++;
                }
        } else {
                if (!edst->ptr_leaf) {
                        edst->ptr_leaf =
                                kzalloc(SIDTAB_NODE_ALLOC_SIZE, GFP_KERNEL);
                        if (!edst->ptr_leaf)
                                return -ENOMEM;
                }
                i = 0;
                while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
                        rc = services_convert_context(
                                convert->args,
                                &esrc->ptr_leaf->entries[i].context,
                                &edst->ptr_leaf->entries[i].context,
                                GFP_KERNEL);
                        if (rc)
                                return rc;
                        (*pos)++;
                        i++;
                }
                cond_resched();
        }
        return 0;
}

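/*
 * Convert a sidtab to a new policy ("live convert").  Under s->lock the
 * target's last leaf is preallocated and s->convert is set, after which any
 * writer that appends an entry also inserts its converted twin into the
 * target (see sidtab_context_to_sid()).  The bulk of the tree is then
 * converted outside the lock, and finally the target's hashtable is rebuilt
 * under the lock.
 */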
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
        unsigned long flags;
        u32 count, level, pos;
        int rc;

        spin_lock_irqsave(&s->lock, flags);

        /* concurrent policy loads are not allowed */
        if (s->convert) {
                spin_unlock_irqrestore(&s->lock, flags);
                return -EBUSY;
        }

        count = s->count;
        level = sidtab_level_from_count(count);

        /* allocate last leaf in the new sidtab (to avoid race with
         * live convert)
         */
        rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
        if (rc) {
                spin_unlock_irqrestore(&s->lock, flags);
                return rc;
        }

        /* set count in case no new entries are added during conversion */
        params->target->count = count;

        /* enable live convert of new entries */
        s->convert = params;

        /* we can safely convert the tree outside the lock */
        spin_unlock_irqrestore(&s->lock, flags);

        pr_info("SELinux:  Converting %u SID table entries...\n", count);

        /* convert all entries not covered by live convert */
        pos = 0;
        rc = sidtab_convert_tree(&params->target->roots[level],
                                 &s->roots[level], &pos, count, level, params);
        if (rc) {
                /* we need to keep the old table - disable live convert */
                spin_lock_irqsave(&s->lock, flags);
                s->convert = NULL;
                spin_unlock_irqrestore(&s->lock, flags);
                return rc;
        }
        /*
         * The hashtable can also be modified in sidtab_context_to_sid()
         * so we must re-acquire the lock here.
         */
        spin_lock_irqsave(&s->lock, flags);
        sidtab_convert_hashtable(params->target, count);
        spin_unlock_irqrestore(&s->lock, flags);

        return 0;
}

void sidtab_cancel_convert(struct sidtab *s)
{
        unsigned long flags;

        /* cancelling policy load - disable live convert of sidtab */
        spin_lock_irqsave(&s->lock, flags);
        s->convert = NULL;
        spin_unlock_irqrestore(&s->lock, flags);
}

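/*
 * Freezing is the endgame of a successful policy load: the old sidtab stops
 * accepting new entries, and any writer racing with the switch gets -ESTALE
 * from sidtab_context_to_sid() and is expected to retry against the new
 * sidtab.  The begin/end pair lets the caller hold s->lock across the
 * policy switch.
 */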
void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags)
        __acquires(&s->lock)
{
        spin_lock_irqsave(&s->lock, *flags);
        s->frozen = true;
        s->convert = NULL;
}

void sidtab_freeze_end(struct sidtab *s, unsigned long *flags)
        __releases(&s->lock)
{
        spin_unlock_irqrestore(&s->lock, *flags);
}

static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
        context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
        kfree(rcu_dereference_raw(entry->cache));
#endif
}

static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
        u32 i;

        if (level != 0) {
                struct sidtab_node_inner *node = entry.ptr_inner;

                if (!node)
                        return;

                for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
                        sidtab_destroy_tree(node->entries[i], level - 1);
                kfree(node);
        } else {
                struct sidtab_node_leaf *node = entry.ptr_leaf;

                if (!node)
                        return;

                for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
                        sidtab_destroy_entry(&node->entries[i]);
                kfree(node);
        }
}

void sidtab_destroy(struct sidtab *s)
{
        u32 i, level;

        for (i = 0; i < SECINITSID_NUM; i++)
                if (s->isids[i].set)
                        sidtab_destroy_entry(&s->isids[i].entry);

        level = SIDTAB_MAX_LEVEL;
        while (level && !s->roots[level].ptr_inner)
                --level;

        sidtab_destroy_tree(s->roots[level], level);
        /*
         * The context_to_sid hashtable's objects are all shared
         * with the isids array and context tree, and so don't need
         * to be cleaned up here.
         */
}

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

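/*
 * SID -> context-string cache: at most
 * CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE strings are kept on an LRU list
 * guarded by s->cache_lock, while each entry's ->cache pointer is published
 * with RCU so readers in sidtab_sid2str_get() never take the lock.  Eviction
 * drops the tail of the LRU list and frees it after a grace period via
 * kfree_rcu().
 */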
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
                        const char *str, u32 str_len)
{
        struct sidtab_str_cache *cache, *victim = NULL;
        unsigned long flags;

        /* do not cache invalid contexts */
        if (entry->context.len)
                return;

        spin_lock_irqsave(&s->cache_lock, flags);

        cache = rcu_dereference_protected(entry->cache,
                                          lockdep_is_held(&s->cache_lock));
        if (cache) {
                /* entry in cache - just bump to the head of LRU list */
                list_move(&cache->lru_member, &s->cache_lru_list);
                goto out_unlock;
        }

        cache = kmalloc(struct_size(cache, str, str_len), GFP_ATOMIC);
        if (!cache)
                goto out_unlock;

        if (s->cache_free_slots == 0) {
                /* pop a cache entry from the tail and free it */
                victim = container_of(s->cache_lru_list.prev,
                                      struct sidtab_str_cache, lru_member);
                list_del(&victim->lru_member);
                rcu_assign_pointer(victim->parent->cache, NULL);
        } else {
                s->cache_free_slots--;
        }
        cache->parent = entry;
        cache->len = str_len;
        memcpy(cache->str, str, str_len);
        list_add(&cache->lru_member, &s->cache_lru_list);

        rcu_assign_pointer(entry->cache, cache);

out_unlock:
        spin_unlock_irqrestore(&s->cache_lock, flags);
        kfree_rcu(victim, rcu_member);
}

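/*
 * Fetch a cached string copy for @entry.  On a hit the string is kmemdup()ed
 * under RCU and then re-put, which bumps the entry to the LRU head; a miss
 * returns -ENOENT and the caller is expected to compute the string itself
 * and cache it with sidtab_sid2str_put().
 */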
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry, char **out,
                       u32 *out_len)
{
        struct sidtab_str_cache *cache;
        int rc = 0;

        if (entry->context.len)
                return -ENOENT; /* do not cache invalid contexts */

        rcu_read_lock();

        cache = rcu_dereference(entry->cache);
        if (!cache) {
                rc = -ENOENT;
        } else {
                *out_len = cache->len;
                if (out) {
                        *out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
                        if (!*out)
                                rc = -ENOMEM;
                }
        }

        rcu_read_unlock();

        if (!rc && out)
                sidtab_sid2str_put(s, entry, *out, *out_len);
        return rc;
}

#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */