// SPDX-License-Identifier: GPL-2.0
/*
 * Implementation of the SID table type.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/barrier.h>
#include "flask.h"
#include "security.h"
#include "sidtab.h"
struct sidtab_str_cache {
	struct rcu_head rcu_member;
	struct list_head lru_member;
	struct sidtab_entry *parent;
	u32 len;
	char str[];
};
#define index_to_sid(index) (index + SECINITSID_NUM + 1)
#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
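/*
 * Illustrative example of the index <-> SID mapping above (the exact value
 * of SECINITSID_NUM comes from flask.h and is only assumed here): with
 * SECINITSID_NUM == 27, the first dynamically allocated entry (index 0)
 * gets SID 28, and SID 30 maps back to index 2. SIDs 1..SECINITSID_NUM are
 * reserved for the initial SIDs kept in the isids array.
 */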
int sidtab_init(struct sidtab *s)
{
	u32 i;

	memset(s->roots, 0, sizeof(s->roots));

	for (i = 0; i < SECINITSID_NUM; i++)
		s->isids[i].set = 0;

	s->count = 0;
	s->convert = NULL;
	hash_init(s->context_to_sid);

	spin_lock_init(&s->lock);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
	INIT_LIST_HEAD(&s->cache_lru_list);
	spin_lock_init(&s->cache_lock);
#endif

	return 0;
}
static u32 context_to_sid(struct sidtab *s, struct context *context)
{
	struct sidtab_entry *entry;
	u32 sid = 0;

	rcu_read_lock();
	hash_for_each_possible_rcu(s->context_to_sid, entry, list,
				   context->hash) {
		if (context_cmp(&entry->context, context)) {
			sid = entry->sid;
			break;
		}
	}
	rcu_read_unlock();
	return sid;
}
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
	struct sidtab_isid_entry *isid;
	int rc;

	if (sid == 0 || sid > SECINITSID_NUM)
		return -EINVAL;

	isid = &s->isids[sid - 1];

	rc = context_cpy(&isid->entry.context, context);
	if (rc)
		return rc;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	isid->entry.cache = NULL;
#endif
	isid->set = 1;

	/*
	 * Multiple initial sids may map to the same context. Check that this
	 * context is not already represented in the context_to_sid hashtable
	 * to avoid duplicate entries and long linked lists upon hash
	 * collision.
	 */
	if (!context_to_sid(s, context)) {
		isid->entry.sid = sid;
		hash_add(s->context_to_sid, &isid->entry.list, context->hash);
	}

	return 0;
}
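/*
 * Consequence of the duplicate check above (descriptive note): when several
 * initial SIDs share one context, only the first of them is linked into the
 * context_to_sid hashtable; the remaining duplicates stay reachable through
 * the isids array only, and context-to-SID lookups resolve to the first SID.
 */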
int sidtab_hash_stats(struct sidtab *sidtab, char *page)
{
	int i;
	int chain_len = 0;
	int slots_used = 0;
	int entries = 0;
	int max_chain_len = 0;
	int cur_bucket = 0;
	struct sidtab_entry *entry;

	rcu_read_lock();
	hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
		entries++;
		if (i == cur_bucket) {
			chain_len++;
			if (chain_len == 1)
				slots_used++;
		} else {
			cur_bucket = i;
			if (chain_len > max_chain_len)
				max_chain_len = chain_len;
			chain_len = 0;
		}
	}
	rcu_read_unlock();

	if (chain_len > max_chain_len)
		max_chain_len = chain_len;

	return scnprintf(page, PAGE_SIZE, "entries: %d\nbuckets used: %d/%d\n"
			 "longest chain: %d\n", entries,
			 slots_used, SIDTAB_HASH_BUCKETS, max_chain_len);
}
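/*
 * Illustrative output of the scnprintf() above (the numbers are invented
 * and the bucket count shown is only an example; the real value is
 * SIDTAB_HASH_BUCKETS from sidtab.h):
 *
 *	entries: 142
 *	buckets used: 118/512
 *	longest chain: 3
 */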
static u32 sidtab_level_from_count(u32 count)
{
	u32 capacity = SIDTAB_LEAF_ENTRIES;
	u32 level = 0;

	while (count > capacity) {
		capacity <<= SIDTAB_INNER_SHIFT;
		++level;
	}
	return level;
}
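/*
 * Worked example for the loop above (the capacities are assumptions made
 * only for illustration; the real SIDTAB_LEAF_ENTRIES and SIDTAB_INNER_SHIFT
 * are derived from the node allocation size in sidtab.h): assuming
 * SIDTAB_LEAF_ENTRIES == 512 and SIDTAB_INNER_SHIFT == 9, a count of 400
 * fits in one leaf (level 0), while a count of 1000 exceeds 512, so the
 * capacity grows to 512 << 9 == 262144 and the result is level 1.
 */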
static int sidtab_alloc_roots(struct sidtab *s, u32 level)
{
	u32 l;

	if (!s->roots[0].ptr_leaf) {
		s->roots[0].ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					       GFP_ATOMIC);
		if (!s->roots[0].ptr_leaf)
			return -ENOMEM;
	}
	for (l = 1; l <= level; ++l)
		if (!s->roots[l].ptr_inner) {
			s->roots[l].ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
							GFP_ATOMIC);
			if (!s->roots[l].ptr_inner)
				return -ENOMEM;
			s->roots[l].ptr_inner->entries[0] = s->roots[l - 1];
		}

	return 0;
}
static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
					     int alloc)
{
	union sidtab_entry_inner *entry;
	u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;

	/* find the level of the subtree we need */
	level = sidtab_level_from_count(index + 1);
	capacity_shift = level * SIDTAB_INNER_SHIFT;

	/* allocate roots if needed */
	if (alloc && sidtab_alloc_roots(s, level) != 0)
		return NULL;

	/* lookup inside the subtree */
	entry = &s->roots[level];
	while (level != 0) {
		capacity_shift -= SIDTAB_INNER_SHIFT;
		--level;

		entry = &entry->ptr_inner->entries[leaf_index >> capacity_shift];
		leaf_index &= ((u32)1 << capacity_shift) - 1;

		if (!entry->ptr_inner) {
			if (!alloc)
				return NULL;
			entry->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						   GFP_ATOMIC);
			if (!entry->ptr_inner)
				return NULL;
		}
	}
	if (!entry->ptr_leaf) {
		if (!alloc)
			return NULL;
		entry->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
					  GFP_ATOMIC);
		if (!entry->ptr_leaf)
			return NULL;
	}
	return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}
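/*
 * Illustrative walk through the lookup above, using the same assumed
 * capacities as before (SIDTAB_LEAF_ENTRIES == 512, SIDTAB_INNER_SHIFT == 9):
 * for index == 1000, leaf_index starts at 1000 / 512 == 1 and level == 1.
 * The single loop iteration drops capacity_shift to 0, descends into child
 * entries[leaf_index >> 0] == entries[1] of the inner node, and masks
 * leaf_index down to 0. The final slot inside that leaf is
 * index % SIDTAB_LEAF_ENTRIES == 488.
 */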
static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
	/* read entries only after reading count */
	u32 count = smp_load_acquire(&s->count);

	if (index >= count)
		return NULL;

	return sidtab_do_lookup(s, index, 0);
}
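/*
 * The smp_load_acquire() above pairs with the smp_store_release() on
 * s->count in sidtab_context_to_sid(): any index below the observed count
 * refers to an entry that is already fully initialised, which is what makes
 * the lock-free sidtab_do_lookup() call here safe.
 */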
static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
	return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}
static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
					       int force)
{
	if (sid != 0) {
		struct sidtab_entry *entry;

		if (sid > SECINITSID_NUM)
			entry = sidtab_lookup(s, sid_to_index(sid));
		else
			entry = sidtab_lookup_initial(s, sid);
		if (entry && (!entry->context.len || force))
			return entry;
	}

	return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}
struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 0);
}

struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
	return sidtab_search_core(s, sid, 1);
}
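/*
 * Note on the "force" variant (descriptive): entries whose context could not
 * be interpreted under the current policy are stored with a non-zero
 * context.len (the raw string form). sidtab_search_entry() hides such
 * entries and falls back to the unlabeled entry, while
 * sidtab_search_entry_force() returns them as-is.
 */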
int sidtab_context_to_sid(struct sidtab *s, struct context *context,
			  u32 *sid)
{
	unsigned long flags;
	u32 count;
	struct sidtab_convert_params *convert;
	struct sidtab_entry *dst, *dst_convert;
	int rc;

	*sid = context_to_sid(s, context);
	if (*sid)
		return 0;

	/* lock-free search failed: lock, re-search, and insert if not found */
	spin_lock_irqsave(&s->lock, flags);

	rc = 0;
	*sid = context_to_sid(s, context);
	if (*sid)
		goto out_unlock;

	/* read entries only after reading count */
	count = smp_load_acquire(&s->count);
	convert = s->convert;

	/* bail out if we already reached max entries */
	rc = -EOVERFLOW;
	if (count >= SIDTAB_MAX)
		goto out_unlock;

	/* insert context into new entry */
	rc = -ENOMEM;
	dst = sidtab_do_lookup(s, count, 1);
	if (!dst)
		goto out_unlock;

	dst->sid = index_to_sid(count);

	rc = context_cpy(&dst->context, context);
	if (rc)
		goto out_unlock;

	/*
	 * if we are building a new sidtab, we need to convert the context
	 * and insert it there as well
	 */
	if (convert) {
		rc = -ENOMEM;
		dst_convert = sidtab_do_lookup(convert->target, count, 1);
		if (!dst_convert) {
			context_destroy(&dst->context);
			goto out_unlock;
		}

		rc = convert->func(context, &dst_convert->context,
				   convert->args);
		if (rc) {
			context_destroy(&dst->context);
			goto out_unlock;
		}
		dst_convert->sid = index_to_sid(count);
		convert->target->count = count + 1;

		hash_add_rcu(convert->target->context_to_sid,
			     &dst_convert->list, dst_convert->context.hash);
	}

	if (context->len)
		pr_info("SELinux: Context %s is not valid (left unmapped).\n",
			context->str);

	*sid = index_to_sid(count);

	/* write entries before updating count */
	smp_store_release(&s->count, count + 1);
	hash_add_rcu(s->context_to_sid, &dst->list, dst->context.hash);

	rc = 0;
out_unlock:
	spin_unlock_irqrestore(&s->lock, flags);
	return rc;
}
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
	struct sidtab_entry *entry;
	u32 i;

	for (i = 0; i < count; i++) {
		entry = sidtab_do_lookup(s, i, 0);
		entry->sid = index_to_sid(i);

		hash_add_rcu(s->context_to_sid, &entry->list,
			     entry->context.hash);
	}
}
static int sidtab_convert_tree(union sidtab_entry_inner *edst,
			       union sidtab_entry_inner *esrc,
			       u32 *pos, u32 count, u32 level,
			       struct sidtab_convert_params *convert)
{
	int rc;
	u32 i;

	if (level != 0) {
		if (!edst->ptr_inner) {
			edst->ptr_inner = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						  GFP_KERNEL);
			if (!edst->ptr_inner)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_INNER_ENTRIES && *pos < count) {
			rc = sidtab_convert_tree(&edst->ptr_inner->entries[i],
						 &esrc->ptr_inner->entries[i],
						 pos, count, level - 1,
						 convert);
			if (rc)
				return rc;
			i++;
		}
	} else {
		if (!edst->ptr_leaf) {
			edst->ptr_leaf = kzalloc(SIDTAB_NODE_ALLOC_SIZE,
						 GFP_KERNEL);
			if (!edst->ptr_leaf)
				return -ENOMEM;
		}
		i = 0;
		while (i < SIDTAB_LEAF_ENTRIES && *pos < count) {
			rc = convert->func(&esrc->ptr_leaf->entries[i].context,
					   &edst->ptr_leaf->entries[i].context,
					   convert->args);
			if (rc)
				return rc;
			(*pos)++;
			i++;
		}
		cond_resched();
	}
	return 0;
}
int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
{
	unsigned long flags;
	u32 count, level, pos;
	int rc;

	spin_lock_irqsave(&s->lock, flags);

	/* concurrent policy loads are not allowed */
	if (s->convert) {
		spin_unlock_irqrestore(&s->lock, flags);
		return -EBUSY;
	}

	count = s->count;
	level = sidtab_level_from_count(count);

	/* allocate last leaf in the new sidtab (to avoid race with
	 * live convert)
	 */
	rc = sidtab_do_lookup(params->target, count - 1, 1) ? 0 : -ENOMEM;
	if (rc) {
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}

	/* set count in case no new entries are added during conversion */
	params->target->count = count;

	/* enable live convert of new entries */
	s->convert = params;

	/* we can safely convert the tree outside the lock */
	spin_unlock_irqrestore(&s->lock, flags);

	pr_info("SELinux: Converting %u SID table entries...\n", count);

	/* convert all entries not covered by live convert */
	pos = 0;
	rc = sidtab_convert_tree(&params->target->roots[level],
				 &s->roots[level], &pos, count, level, params);
	if (rc) {
		/* we need to keep the old table - disable live convert */
		spin_lock_irqsave(&s->lock, flags);
		s->convert = NULL;
		spin_unlock_irqrestore(&s->lock, flags);
		return rc;
	}
	/*
	 * The hashtable can also be modified in sidtab_context_to_sid()
	 * so we must re-acquire the lock here.
	 */
	spin_lock_irqsave(&s->lock, flags);
	sidtab_convert_hashtable(params->target, count);
	spin_unlock_irqrestore(&s->lock, flags);

	return 0;
}
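/*
 * Descriptive summary of the scheme above: pre-existing entries are
 * converted by walking the old tree outside the lock in
 * sidtab_convert_tree(), entries added concurrently by
 * sidtab_context_to_sid() are converted "live" through s->convert, and the
 * target's reverse-lookup hashtable is filled in under the lock once the
 * tree walk has completed.
 */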
static void sidtab_destroy_entry(struct sidtab_entry *entry)
{
	context_destroy(&entry->context);
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	kfree(rcu_dereference_raw(entry->cache));
#endif
}
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
	u32 i;

	if (level != 0) {
		struct sidtab_node_inner *node = entry.ptr_inner;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_INNER_ENTRIES; i++)
			sidtab_destroy_tree(node->entries[i], level - 1);
		kfree(node);
	} else {
		struct sidtab_node_leaf *node = entry.ptr_leaf;

		if (!node)
			return;

		for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
			sidtab_destroy_entry(&node->entries[i]);
		kfree(node);
	}
}
void sidtab_destroy(struct sidtab *s)
{
	u32 i, level;

	for (i = 0; i < SECINITSID_NUM; i++)
		if (s->isids[i].set)
			sidtab_destroy_entry(&s->isids[i].entry);

	level = SIDTAB_MAX_LEVEL;
	while (level && !s->roots[level].ptr_inner)
		--level;

	sidtab_destroy_tree(s->roots[level], level);
	/*
	 * The context_to_sid hashtable's objects are all shared
	 * with the isids array and context tree, and so don't need
	 * to be cleaned up here.
	 */
}
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0

void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len)
{
	struct sidtab_str_cache *cache, *victim = NULL;
	unsigned long flags;

	/* do not cache invalid contexts */
	if (entry->context.len)
		return;

	spin_lock_irqsave(&s->cache_lock, flags);

	cache = rcu_dereference_protected(entry->cache,
					  lockdep_is_held(&s->cache_lock));
	if (cache) {
		/* entry in cache - just bump to the head of LRU list */
		list_move(&cache->lru_member, &s->cache_lru_list);
		goto out_unlock;
	}

	cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
	if (!cache)
		goto out_unlock;

	if (s->cache_free_slots == 0) {
		/* pop a cache entry from the tail and free it */
		victim = container_of(s->cache_lru_list.prev,
				      struct sidtab_str_cache, lru_member);
		list_del(&victim->lru_member);
		rcu_assign_pointer(victim->parent->cache, NULL);
	} else {
		s->cache_free_slots--;
	}
	cache->parent = entry;
	cache->len = str_len;
	memcpy(cache->str, str, str_len);
	list_add(&cache->lru_member, &s->cache_lru_list);

	rcu_assign_pointer(entry->cache, cache);

out_unlock:
	spin_unlock_irqrestore(&s->cache_lock, flags);
	kfree_rcu(victim, rcu_member);
}
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len)
{
	struct sidtab_str_cache *cache;
	int rc = 0;

	if (entry->context.len)
		return -ENOENT; /* do not cache invalid contexts */

	rcu_read_lock();

	cache = rcu_dereference(entry->cache);
	if (!cache) {
		rc = -ENOENT;
	} else {
		*out_len = cache->len;
		if (out) {
			*out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
			if (!*out)
				rc = -ENOMEM;
		}
	}
	rcu_read_unlock();

	if (!rc && out)
		sidtab_sid2str_put(s, entry, *out, *out_len);
	return rc;
}
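/*
 * Hedged usage sketch (the caller shown is an assumption, not part of this
 * file): a SID-to-string translation path would typically try
 * sidtab_sid2str_get() first and, on -ENOENT, build the string from the
 * entry's context and publish it with sidtab_sid2str_put(), e.g.:
 *
 *	rc = sidtab_sid2str_get(sidtab, entry, &str, &len);
 *	if (rc == -ENOENT) {
 *		// hypothetical: compute str/len from entry->context here
 *		sidtab_sid2str_put(sidtab, entry, str, len);
 *	}
 */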
#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */