1 // SPDX-License-Identifier: GPL-2.0-only
3 * AppArmor security module
5 * This file contains AppArmor functions for unpacking policy loaded from
8 * Copyright (C) 1998-2008 Novell/SUSE
9 * Copyright 2009-2010 Canonical Ltd.
11 * AppArmor uses a serialized binary format for loading policy. To find
12 * policy format documentation see Documentation/admin-guide/LSM/apparmor.rst
13 * All policy is validated before it is used.
16 #include <asm/unaligned.h>
17 #include <kunit/visibility.h>
18 #include <linux/ctype.h>
19 #include <linux/errno.h>
20 #include <linux/zlib.h>
22 #include "include/apparmor.h"
23 #include "include/audit.h"
24 #include "include/cred.h"
25 #include "include/crypto.h"
26 #include "include/match.h"
27 #include "include/path.h"
28 #include "include/policy.h"
29 #include "include/policy_unpack.h"
31 #define K_ABI_MASK 0x3ff
32 #define FORCE_COMPLAIN_FLAG 0x800
33 #define VERSION_LT(X, Y) (((X) & K_ABI_MASK) < ((Y) & K_ABI_MASK))
34 #define VERSION_GT(X, Y) (((X) & K_ABI_MASK) > ((Y) & K_ABI_MASK))
36 #define v5 5 /* base version */
37 #define v6 6 /* per entry policydb mediation check */
39 #define v8 8 /* full network masking */
41 /* audit callback for unpack fields */
/* audit_cb - audit callback: append policy-unpack fields to the record */
42 static void audit_cb(struct audit_buffer *ab, void *va)
44 struct common_audit_data *sa = va;
/* ns and profile names come from userspace policy; log as untrusted */
46 if (aad(sa)->iface.ns) {
47 audit_log_format(ab, " ns=");
48 audit_log_untrustedstring(ab, aad(sa)->iface.ns);
51 audit_log_format(ab, " name=");
52 audit_log_untrustedstring(ab, aad(sa)->name);
/* byte offset into the raw policy blob where the failure was detected */
54 if (aad(sa)->iface.pos)
55 audit_log_format(ab, " offset=%ld", aad(sa)->iface.pos);
59 * audit_iface - do audit message for policy unpacking/load/replace/remove
60 * @new: profile if it has been allocated (MAY BE NULL)
61 * @ns_name: name of the ns the profile is to be loaded to (MAY BE NULL)
62 * @name: name of the profile being manipulated (MAY BE NULL)
63 * @info: any extra info about the failure (MAY BE NULL)
64 * @e: buffer position info
67 * Returns: %0 or error
/*
 * audit_iface - emit an AUDIT_APPARMOR_STATUS record for a policy load
 * event, attributed to the current task's profile.
 */
69 static int audit_iface(struct aa_profile *new, const char *ns_name,
70 const char *name, const char *info, struct aa_ext *e,
73 struct aa_profile *profile = labels_profile(aa_current_raw_label());
74 DEFINE_AUDIT_DATA(sa, LSM_AUDIT_DATA_NONE, NULL);
/* record how far into the raw data the read head had advanced */
76 aad(&sa)->iface.pos = e->pos - e->start;
77 aad(&sa)->iface.ns = ns_name;
/* prefer the allocated profile's hierarchical name over raw @name */
79 aad(&sa)->name = new->base.hname;
81 aad(&sa)->name = name;
82 aad(&sa)->info = info;
83 aad(&sa)->error = error;
85 return aa_audit(AUDIT_APPARMOR_STATUS, profile, &sa, audit_cb);
/*
 * __aa_loaddata_update - advance rawdata revision to @revision.
 * Caller must hold data->ns->lock (asserted below); revision must not
 * move backwards.  mtime on the aafs dentries is refreshed so userspace
 * can observe the update.
 */
88 void __aa_loaddata_update(struct aa_loaddata *data, long revision)
92 AA_BUG(!mutex_is_locked(&data->ns->lock));
93 AA_BUG(data->revision > revision);
95 data->revision = revision;
96 if ((data->dents[AAFS_LOADDATA_REVISION])) {
97 d_inode(data->dents[AAFS_LOADDATA_DIR])->i_mtime =
98 current_time(d_inode(data->dents[AAFS_LOADDATA_DIR]));
99 d_inode(data->dents[AAFS_LOADDATA_REVISION])->i_mtime =
100 current_time(d_inode(data->dents[AAFS_LOADDATA_REVISION]));
/*
 * aa_rawdata_eq - test whether two raw policy blobs are equivalent.
 * Sizes must match; hashes are compared only when policy hashing is
 * enabled; finally the data is compared byte-for-byte (compressed form
 * when one exists, uncompressed otherwise).
 */
104 bool aa_rawdata_eq(struct aa_loaddata *l, struct aa_loaddata *r)
106 if (l->size != r->size)
108 if (l->compressed_size != r->compressed_size)
110 if (aa_g_hash_policy && memcmp(l->hash, r->hash, aa_hash_size()) != 0)
112 return memcmp(l->data, r->data, r->compressed_size ?: r->size) == 0;
116 * need to take the ns mutex lock which is NOT safe most places that
117 * put_loaddata is called, so we have to delay freeing it
/*
 * do_loaddata_free - deferred (workqueue) teardown of an aa_loaddata.
 * Runs from process context so it can safely take ns->lock to remove
 * the aafs rawdata entries (see the comment above: most put_loaddata
 * callers cannot take that lock).
 */
119 static void do_loaddata_free(struct work_struct *work)
121 struct aa_loaddata *d = container_of(work, struct aa_loaddata, work);
122 struct aa_ns *ns = aa_get_ns(d->ns);
125 mutex_lock_nested(&ns->lock, ns->level);
126 __aa_fs_remove_rawdata(d);
127 mutex_unlock(&ns->lock);
/* hash/name may hold policy-derived material; scrub before freeing */
131 kfree_sensitive(d->hash);
132 kfree_sensitive(d->name);
/*
 * aa_loaddata_kref - kref release callback; defers the actual free to a
 * workqueue because freeing needs ns->lock (see do_loaddata_free).
 */
137 void aa_loaddata_kref(struct kref *kref)
139 struct aa_loaddata *d = container_of(kref, struct aa_loaddata, count);
142 INIT_WORK(&d->work, do_loaddata_free);
143 schedule_work(&d->work);
/*
 * aa_loaddata_alloc - allocate a zeroed loaddata container plus a
 * @size byte data buffer.  Returns ERR_PTR(-ENOMEM) on failure; the
 * refcount starts at 1.
 */
147 struct aa_loaddata *aa_loaddata_alloc(size_t size)
149 struct aa_loaddata *d;
151 d = kzalloc(sizeof(*d), GFP_KERNEL);
153 return ERR_PTR(-ENOMEM);
/* kvzalloc: @size comes from userspace and may be large */
154 d->data = kvzalloc(size, GFP_KERNEL);
157 return ERR_PTR(-ENOMEM);
159 kref_init(&d->count);
160 INIT_LIST_HEAD(&d->list);
165 /* test if read will be in packed data bounds */
/* test if read will be in packed data bounds */
/* true iff @size bytes can be read from the current position without
 * running past e->end */
166 VISIBLE_IF_KUNIT bool aa_inbounds(struct aa_ext *e, size_t size)
168 return (size <= e->end - e->pos);
170 EXPORT_SYMBOL_IF_KUNIT(aa_inbounds);
/*
 * kvmemdup - duplicate @len bytes of @src into a kvmalloc'd buffer.
 * NOTE(review): the copy and NULL-check lines are elided in this view;
 * presumably returns NULL on allocation failure — confirm against full
 * source.
 */
172 static void *kvmemdup(const void *src, size_t len)
174 void *p = kvmalloc(len, GFP_KERNEL);
182 * aa_unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
183 * @e: serialized data read head (NOT NULL)
184 * @chunk: start address for chunk of data (NOT NULL)
186 * Returns: the size of chunk found with the read head at the end of the chunk.
188 VISIBLE_IF_KUNIT size_t aa_unpack_u16_chunk(struct aa_ext *e, char **chunk)
/* bounds-check the u16 length header before reading it */
193 if (!aa_inbounds(e, sizeof(u16)))
/* length is stored little-endian and may be unaligned */
195 size = le16_to_cpu(get_unaligned((__le16 *) e->pos));
196 e->pos += sizeof(__le16);
/* the chunk payload itself must also fit in the remaining data */
197 if (!aa_inbounds(e, size))
207 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u16_chunk);
209 /* unpack control byte */
/* unpack control byte */
/* consume one type-code byte iff it matches @code; otherwise the read
 * head is left in place and false is returned */
210 VISIBLE_IF_KUNIT bool aa_unpack_X(struct aa_ext *e, enum aa_code code)
212 if (!aa_inbounds(e, 1))
214 if (*(u8 *) e->pos != code)
219 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_X);
222 * aa_unpack_nameX - check is the next element is of type X with a name of @name
223 * @e: serialized data extent information (NOT NULL)
225 * @name: name to match to the serialized element. (MAYBE NULL)
227 * check that the next serialized data element is of type X and has a tag
228 * name @name. If @name is specified then there must be a matching
229 * name element in the stream. If @name is NULL any name element will be
230 * skipped and only the typecode will be tested.
232 * Returns true on success (both type code and name tests match) and the read
233 * head is advanced past the headers
235 * Returns: false if either match fails, the read head does not move
237 VISIBLE_IF_KUNIT bool aa_unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
240 * May need to reset pos if name or type doesn't match
244 * Check for presence of a tagname, and if present name size
245 * AA_NAME tag value is a u16.
247 if (aa_unpack_X(e, AA_NAME)) {
249 size_t size = aa_unpack_u16_chunk(e, &tag);
250 /* if a name is specified it must match. otherwise skip tag */
/* tag must be non-empty, NUL-terminated, and equal to @name */
251 if (name && (!size || tag[size-1] != '\0' || strcmp(name, tag)))
254 /* if a name is specified and there is no name tag fail */
258 /* now check if type code matches */
259 if (aa_unpack_X(e, code))
266 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_nameX);
/* unpack a tagged u8 value; true on success with read head advanced */
268 static bool unpack_u8(struct aa_ext *e, u8 *data, const char *name)
272 if (aa_unpack_nameX(e, AA_U8, name)) {
273 if (!aa_inbounds(e, sizeof(u8)))
276 *data = *((u8 *)e->pos);
277 e->pos += sizeof(u8);
/* unpack a tagged little-endian u32; true on success */
286 VISIBLE_IF_KUNIT bool aa_unpack_u32(struct aa_ext *e, u32 *data, const char *name)
290 if (aa_unpack_nameX(e, AA_U32, name)) {
291 if (!aa_inbounds(e, sizeof(u32)))
/* value may be unaligned within the serialized stream */
294 *data = le32_to_cpu(get_unaligned((__le32 *) e->pos));
295 e->pos += sizeof(u32);
303 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u32);
/* unpack a tagged little-endian u64; true on success */
305 VISIBLE_IF_KUNIT bool aa_unpack_u64(struct aa_ext *e, u64 *data, const char *name)
309 if (aa_unpack_nameX(e, AA_U64, name)) {
310 if (!aa_inbounds(e, sizeof(u64)))
313 *data = le64_to_cpu(get_unaligned((__le64 *) e->pos));
314 e->pos += sizeof(u64);
322 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_u64);
/* unpack an array header; returns the element count (u16 on the wire) */
324 VISIBLE_IF_KUNIT size_t aa_unpack_array(struct aa_ext *e, const char *name)
328 if (aa_unpack_nameX(e, AA_ARRAY, name)) {
330 if (!aa_inbounds(e, sizeof(u16)))
332 size = (int)le16_to_cpu(get_unaligned((__le16 *) e->pos));
333 e->pos += sizeof(u16);
341 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_array);
/*
 * unpack a tagged blob: u32 length followed by the raw bytes.  @blob is
 * pointed into the stream (no copy); returns the blob size.
 */
343 VISIBLE_IF_KUNIT size_t aa_unpack_blob(struct aa_ext *e, char **blob, const char *name)
347 if (aa_unpack_nameX(e, AA_BLOB, name)) {
349 if (!aa_inbounds(e, sizeof(u32)))
351 size = le32_to_cpu(get_unaligned((__le32 *) e->pos));
352 e->pos += sizeof(u32);
/* payload must fit within the remaining serialized data */
353 if (aa_inbounds(e, (size_t) size)) {
364 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_blob);
/*
 * unpack a tagged string; *string points into the stream (no copy).
 * The string must be NUL-terminated within its chunk.
 */
366 VISIBLE_IF_KUNIT int aa_unpack_str(struct aa_ext *e, const char **string, const char *name)
372 if (aa_unpack_nameX(e, AA_STRING, name)) {
373 size = aa_unpack_u16_chunk(e, &src_str);
375 /* strings are null terminated, length is size - 1 */
376 if (src_str[size - 1] != 0)
388 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_str);
/*
 * unpack a string and duplicate it into a kmalloc'd buffer; caller owns
 * and must free *string.  Returns the size reported by aa_unpack_str.
 */
390 VISIBLE_IF_KUNIT int aa_unpack_strdup(struct aa_ext *e, char **string, const char *name)
394 int res = aa_unpack_str(e, &tmp, name);
400 *string = kmemdup(tmp, res, GFP_KERNEL);
408 EXPORT_SYMBOL_IF_KUNIT(aa_unpack_strdup);
412 * unpack_dfa - unpack a file rule dfa
413 * @e: serialized data extent information (NOT NULL)
415 * returns dfa or ERR_PTR or NULL if no dfa
417 static struct aa_dfa *unpack_dfa(struct aa_ext *e)
421 struct aa_dfa *dfa = NULL;
423 size = aa_unpack_blob(e, &blob, "aadfa");
426 * The dfa is aligned with in the blob to 8 bytes
427 * from the beginning of the stream.
428 * alignment adjust needed by dfa unpack
/* compute padding so the dfa tables start 8-byte aligned relative to
 * the start of the stream */
430 size_t sz = blob - (char *) e->start -
431 ((e->pos - e->start) & 7);
432 size_t pad = ALIGN(sz, 8) - sz;
433 int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
434 TO_ACCEPT2_FLAG(YYTD_DATA32);
/* paranoid load requests full state verification during unpack */
435 if (aa_g_paranoid_load)
436 flags |= DFA_FLAG_VERIFY_STATES;
437 dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
448 * unpack_trans_table - unpack a profile transition table
449 * @e: serialized data extent information (NOT NULL)
450 * @profile: profile to add the accept table to (NOT NULL)
452 * Returns: true if table successfully unpacked
454 static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
/* remember pos so it can be restored on failure (table is optional) */
456 void *saved_pos = e->pos;
458 /* exec table is optional */
459 if (aa_unpack_nameX(e, AA_STRUCT, "xtable")) {
462 size = aa_unpack_array(e, NULL);
463 /* currently 4 exec bits and entries 0-3 are reserved iupcx */
466 profile->file.trans.table = kcalloc(size, sizeof(char *),
468 if (!profile->file.trans.table)
471 profile->file.trans.size = size;
/* validate each transition name before accepting the table */
472 for (i = 0; i < size; i++) {
474 int c, j, pos, size2 = aa_unpack_strdup(e, &str, NULL);
475 /* aa_unpack_strdup verifies that the last character is
476 * null termination byte.
480 profile->file.trans.table[i] = str;
481 /* verify that name doesn't start with space */
485 /* count internal # of internal \0 */
486 for (c = j = 0; j < size2 - 1; j++) {
493 /* first character after : must be valid */
496 /* beginning with : requires an embedded \0,
497 * verify that exactly 1 internal \0 exists
498 * trailing \0 already verified by aa_unpack_strdup
500 * convert \0 back to : for label_parse
507 /* fail - all other cases with embedded \0 */
510 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
512 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
/* failure path: drop any partially-built table */
518 aa_free_domain_entries(&profile->file.trans);
/* unpack the optional "xattrs" struct: an array of attachment xattr
 * names; true on success or when the struct is absent */
523 static bool unpack_xattrs(struct aa_ext *e, struct aa_profile *profile)
527 if (aa_unpack_nameX(e, AA_STRUCT, "xattrs")) {
530 size = aa_unpack_array(e, NULL);
531 profile->xattr_count = size;
532 profile->xattrs = kcalloc(size, sizeof(char *), GFP_KERNEL);
533 if (!profile->xattrs)
535 for (i = 0; i < size; i++) {
536 if (!aa_unpack_strdup(e, &profile->xattrs[i], NULL))
539 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
541 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
/* unpack the optional "secmark" struct: an array of (audit, deny,
 * label) secmark rules; frees partial state on failure */
552 static bool unpack_secmark(struct aa_ext *e, struct aa_profile *profile)
557 if (aa_unpack_nameX(e, AA_STRUCT, "secmark")) {
558 size = aa_unpack_array(e, NULL);
560 profile->secmark = kcalloc(size, sizeof(struct aa_secmark),
562 if (!profile->secmark)
565 profile->secmark_count = size;
567 for (i = 0; i < size; i++) {
568 if (!unpack_u8(e, &profile->secmark[i].audit, NULL))
570 if (!unpack_u8(e, &profile->secmark[i].deny, NULL))
572 if (!aa_unpack_strdup(e, &profile->secmark[i].label, NULL))
575 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
577 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
/* failure path: release any labels already duplicated */
584 if (profile->secmark) {
585 for (i = 0; i < size; i++)
586 kfree(profile->secmark[i].label);
587 kfree(profile->secmark);
588 profile->secmark_count = 0;
589 profile->secmark = NULL;
596 static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
600 /* rlimits are optional */
601 if (aa_unpack_nameX(e, AA_STRUCT, "rlimits")) {
604 if (!aa_unpack_u32(e, &tmp, NULL))
606 profile->rlimits.mask = tmp;
608 size = aa_unpack_array(e, NULL);
/* reject arrays larger than the kernel's rlimit table */
609 if (size > RLIM_NLIMITS)
611 for (i = 0; i < size; i++) {
/* serialized index is remapped to the kernel resource number */
613 int a = aa_map_resource(i);
614 if (!aa_unpack_u64(e, &tmp2, NULL))
616 profile->rlimits.limits[a].rlim_max = tmp2;
618 if (!aa_unpack_nameX(e, AA_ARRAYEND, NULL))
620 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
/* rhashtable hashfn: key is a (char *); hash the pointed-to string */
630 static u32 strhash(const void *data, u32 len, u32 seed)
632 const char * const *key = data;
634 return jhash(*key, strlen(*key), seed);
/* rhashtable obj_cmpfn: compare an aa_data entry's key string against
 * the lookup key; 0 on match (strcmp semantics) */
637 static int datacmp(struct rhashtable_compare_arg *arg, const void *obj)
639 const struct aa_data *data = obj;
640 const char * const *key = arg->key;
642 return strcmp(data->key, *key);
646 * unpack_profile - unpack a serialized profile
647 * @e: serialized data extent information (NOT NULL)
648 * @ns_name: pointer of newly allocated copy of %NULL in case of error
650 * NOTE: unpack profile sets audit struct if there is a failure
/*
 * unpack_profile - unpack one serialized profile from @e.
 *
 * Returns the allocated profile or ERR_PTR on failure; on failure an
 * audit record is emitted (audit_iface) and any partial profile is
 * freed.  @ns_name receives a newly allocated copy of the ns portion of
 * the profile's fully-qualified name (NULL if unqualified or on error).
 *
 * Fix applied: the rhashtable_init() call below had its "&params"
 * argument corrupted into the single character U+00B6 (a collapsed
 * "&para;" HTML entity) — restored to &params.
 */
652 static struct aa_profile *unpack_profile(struct aa_ext *e, char **ns_name)
654 struct aa_profile *profile = NULL;
655 const char *tmpname, *tmpns = NULL, *name = NULL;
656 const char *info = "failed to unpack profile";
658 struct rhashtable_params params = { 0 };
660 struct aa_data *data;
661 int i, error = -EPROTO;
667 /* check that we have the right struct being passed */
668 if (!aa_unpack_nameX(e, AA_STRUCT, "profile"))
670 if (!aa_unpack_str(e, &name, NULL))
/* split "ns//name" form; ns portion (if any) is returned to caller */
675 tmpname = aa_splitn_fqname(name, strlen(name), &tmpns, &ns_len);
677 *ns_name = kstrndup(tmpns, ns_len, GFP_KERNEL);
679 info = "out of memory";
685 profile = aa_alloc_profile(name, NULL, GFP_KERNEL);
687 return ERR_PTR(-ENOMEM);
689 /* profile renaming is optional */
690 (void) aa_unpack_str(e, &profile->rename, "rename");
692 /* attachment string is optional */
693 (void) aa_unpack_str(e, &profile->attach, "attach");
695 /* xmatch is optional and may be NULL */
696 profile->xmatch = unpack_dfa(e);
697 if (IS_ERR(profile->xmatch)) {
698 error = PTR_ERR(profile->xmatch);
699 profile->xmatch = NULL;
703 /* xmatch_len is not optional if xmatch is set */
704 if (profile->xmatch) {
705 if (!aa_unpack_u32(e, &tmp, NULL)) {
706 info = "missing xmatch len";
709 profile->xmatch_len = tmp;
712 /* disconnected attachment string is optional */
713 (void) aa_unpack_str(e, &profile->disconnected, "disconnected");
715 /* per profile debug flags (complain, audit) */
716 if (!aa_unpack_nameX(e, AA_STRUCT, "flags")) {
717 info = "profile missing flags";
720 info = "failed to unpack profile flags";
721 if (!aa_unpack_u32(e, &tmp, NULL))
723 if (tmp & PACKED_FLAG_HAT)
724 profile->label.flags |= FLAG_HAT;
725 if (tmp & PACKED_FLAG_DEBUG1)
726 profile->label.flags |= FLAG_DEBUG1;
727 if (tmp & PACKED_FLAG_DEBUG2)
728 profile->label.flags |= FLAG_DEBUG2;
/* second flags word encodes the profile mode; FORCE_COMPLAIN_FLAG in
 * the stream version overrides it to complain mode */
729 if (!aa_unpack_u32(e, &tmp, NULL))
731 if (tmp == PACKED_MODE_COMPLAIN || (e->version & FORCE_COMPLAIN_FLAG)) {
732 profile->mode = APPARMOR_COMPLAIN;
733 } else if (tmp == PACKED_MODE_ENFORCE) {
734 profile->mode = APPARMOR_ENFORCE;
735 } else if (tmp == PACKED_MODE_KILL) {
736 profile->mode = APPARMOR_KILL;
737 } else if (tmp == PACKED_MODE_UNCONFINED) {
738 profile->mode = APPARMOR_UNCONFINED;
739 profile->label.flags |= FLAG_UNCONFINED;
743 if (!aa_unpack_u32(e, &tmp, NULL))
746 profile->audit = AUDIT_ALL;
748 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
751 /* path_flags is optional */
752 if (aa_unpack_u32(e, &profile->path_flags, "path_flags"))
753 profile->path_flags |= profile->label.flags &
754 PATH_MEDIATE_DELETED;
756 /* set a default value if path_flags field is not present */
757 profile->path_flags = PATH_MEDIATE_DELETED;
/* lower 32 bits of the four capability sets are mandatory */
759 info = "failed to unpack profile capabilities";
760 if (!aa_unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
762 if (!aa_unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
764 if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
766 if (!aa_unpack_u32(e, &tmpcap.cap[0], NULL))
769 info = "failed to unpack upper profile capabilities";
770 if (aa_unpack_nameX(e, AA_STRUCT, "caps64")) {
771 /* optional upper half of 64 bit caps */
772 if (!aa_unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
774 if (!aa_unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
776 if (!aa_unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
778 if (!aa_unpack_u32(e, &(tmpcap.cap[1]), NULL))
780 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
784 info = "failed to unpack extended profile capabilities";
785 if (aa_unpack_nameX(e, AA_STRUCT, "capsx")) {
786 /* optional extended caps mediation mask */
787 if (!aa_unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
789 if (!aa_unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
791 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
795 if (!unpack_xattrs(e, profile)) {
796 info = "failed to unpack profile xattrs";
800 if (!unpack_rlimits(e, profile)) {
801 info = "failed to unpack profile rlimits";
805 if (!unpack_secmark(e, profile)) {
806 info = "failed to unpack profile secmark rules";
810 if (aa_unpack_nameX(e, AA_STRUCT, "policydb")) {
811 /* generic policy dfa - optional and may be NULL */
812 info = "failed to unpack policydb";
813 profile->policy.dfa = unpack_dfa(e);
814 if (IS_ERR(profile->policy.dfa)) {
815 error = PTR_ERR(profile->policy.dfa);
816 profile->policy.dfa = NULL;
818 } else if (!profile->policy.dfa) {
822 if (!aa_unpack_u32(e, &profile->policy.start[0], "start"))
823 /* default start state */
824 profile->policy.start[0] = DFA_START;
825 /* setup class index */
826 for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
827 profile->policy.start[i] =
828 aa_dfa_next(profile->policy.dfa,
829 profile->policy.start[0],
832 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL))
835 profile->policy.dfa = aa_get_dfa(nulldfa);
/* file dfa: optional; falls back to the policydb's AA_CLASS_FILE
 * sub-dfa, and finally to the shared null dfa */
838 profile->file.dfa = unpack_dfa(e);
839 if (IS_ERR(profile->file.dfa)) {
840 error = PTR_ERR(profile->file.dfa);
841 profile->file.dfa = NULL;
842 info = "failed to unpack profile file rules";
844 } else if (profile->file.dfa) {
845 if (!aa_unpack_u32(e, &profile->file.start, "dfa_start"))
846 /* default start state */
847 profile->file.start = DFA_START;
848 } else if (profile->policy.dfa &&
849 profile->policy.start[AA_CLASS_FILE]) {
850 profile->file.dfa = aa_get_dfa(profile->policy.dfa);
851 profile->file.start = profile->policy.start[AA_CLASS_FILE];
853 profile->file.dfa = aa_get_dfa(nulldfa);
855 if (!unpack_trans_table(e, profile)) {
856 info = "failed to unpack profile transition table";
/* optional key/value data table, stored in an rhashtable keyed by
 * the duplicated key string */
860 if (aa_unpack_nameX(e, AA_STRUCT, "data")) {
861 info = "out of memory";
862 profile->data = kzalloc(sizeof(*profile->data), GFP_KERNEL);
866 params.nelem_hint = 3;
867 params.key_len = sizeof(void *);
868 params.key_offset = offsetof(struct aa_data, key);
869 params.head_offset = offsetof(struct aa_data, head);
870 params.hashfn = strhash;
871 params.obj_cmpfn = datacmp;
873 if (rhashtable_init(profile->data, &params)) {
874 info = "failed to init key, value hash table";
878 while (aa_unpack_strdup(e, &key, NULL)) {
879 data = kzalloc(sizeof(*data), GFP_KERNEL);
881 kfree_sensitive(key);
/* blob points into the stream; take an owned copy */
886 data->size = aa_unpack_blob(e, &data->data, NULL);
887 data->data = kvmemdup(data->data, data->size);
888 if (data->size && !data->data) {
889 kfree_sensitive(data->key);
890 kfree_sensitive(data);
894 rhashtable_insert_fast(profile->data, &data->head,
898 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
899 info = "failed to unpack end of key, value data table";
904 if (!aa_unpack_nameX(e, AA_STRUCTEND, NULL)) {
905 info = "failed to unpack end of profile";
/* common failure exit: audit with accumulated @info, free profile */
916 audit_iface(profile, NULL, name, info, e, error);
917 aa_free_profile(profile);
919 return ERR_PTR(error);
923 * verify_header - unpack serialized stream header
924 * @e: serialized data read head (NOT NULL)
925 * @required: whether the header is required or optional
926 * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
928 * Returns: error or 0 if header is good
930 static int verify_header(struct aa_ext *e, int required, const char **ns)
932 int error = -EPROTONOSUPPORT;
933 const char *name = NULL;
936 /* get the interface version */
937 if (!aa_unpack_u32(e, &e->version, "version")) {
939 audit_iface(NULL, NULL, NULL, "invalid profile format",
945 /* Check that the interface version is currently supported.
946 * if not specified use previous version
947 * Mask off everything that is not kernel abi version
/* NOTE(review): v7 is referenced here; its #define is elided from this
 * view (expected alongside v5/v6/v8) — confirm against full source */
949 if (VERSION_LT(e->version, v5) || VERSION_GT(e->version, v7)) {
950 audit_iface(NULL, NULL, NULL, "unsupported interface version",
955 /* read the namespace if present */
956 if (aa_unpack_str(e, &name, "namespace")) {
958 audit_iface(NULL, NULL, NULL, "invalid namespace name",
/* all profiles in one load must target the same namespace */
962 if (*ns && strcmp(*ns, name)) {
963 audit_iface(NULL, NULL, NULL, "invalid ns change", e,
966 *ns = kstrdup(name, GFP_KERNEL);
/* true iff @xindex is valid: table-type indexes must fall inside the
 * transition table of @table_size entries */
975 static bool verify_xindex(int xindex, int table_size)
978 xtype = xindex & AA_X_TYPE_MASK;
979 index = xindex & AA_X_INDEX_MASK;
980 if (xtype == AA_X_TABLE && index >= table_size)
985 /* verify dfa xindexes are in range of transition tables */
/* verify dfa xindexes are in range of transition tables */
/* walks every accept-table entry checking both user and other xindexes */
986 static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
989 for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
990 if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
992 if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
999 * verify_profile - Do post unpack analysis to verify profile consistency
1000 * @profile: profile to verify (NOT NULL)
1002 * Returns: 0 if passes verification else error
/* post-unpack consistency check: the file dfa's named transitions must
 * all point inside the unpacked transition table */
1004 static int verify_profile(struct aa_profile *profile)
1006 if (profile->file.dfa &&
1007 !verify_dfa_xindex(profile->file.dfa,
1008 profile->file.trans.size)) {
1009 audit_iface(profile, NULL, NULL, "Invalid named transition",
/* release all profile references held by a load entry and free it */
1017 void aa_load_ent_free(struct aa_load_ent *ent)
1020 aa_put_profile(ent->rename);
1021 aa_put_profile(ent->old);
1022 aa_put_profile(ent->new);
1023 kfree(ent->ns_name);
1024 kfree_sensitive(ent);
/* allocate a zeroed load entry with an initialized list head */
1028 struct aa_load_ent *aa_load_ent_alloc(void)
1030 struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
1032 INIT_LIST_HEAD(&ent->list);
/*
 * deflate_compress - zlib-deflate @slen bytes of @src into a newly
 * allocated buffer returned via @dst (length via *dlen).  Compiled out
 * unless CONFIG_SECURITY_APPARMOR_EXPORT_BINARY.
 */
1036 static int deflate_compress(const char *src, size_t slen, char **dst,
1039 #ifdef CONFIG_SECURITY_APPARMOR_EXPORT_BINARY
1041 struct z_stream_s strm;
1042 void *stgbuf, *dstbuf;
/* worst-case output size for a single-shot deflate */
1043 size_t stglen = deflateBound(slen);
1045 memset(&strm, 0, sizeof(strm));
1050 strm.workspace = kvzalloc(zlib_deflate_workspacesize(MAX_WBITS,
1053 if (!strm.workspace)
1056 error = zlib_deflateInit(&strm, aa_g_rawdata_compression_level);
1057 if (error != Z_OK) {
1059 goto fail_deflate_init;
1062 stgbuf = kvzalloc(stglen, GFP_KERNEL);
1065 goto fail_stg_alloc;
1069 strm.avail_in = slen;
1070 strm.next_out = stgbuf;
1071 strm.avail_out = stglen;
/* Z_FINISH: single-shot compression, expect Z_STREAM_END */
1073 error = zlib_deflate(&strm, Z_FINISH);
1074 if (error != Z_STREAM_END) {
/* shrink the staging buffer to the actual compressed size */
1080 if (is_vmalloc_addr(stgbuf)) {
1081 dstbuf = kvzalloc(strm.total_out, GFP_KERNEL);
1083 memcpy(dstbuf, stgbuf, strm.total_out);
1088 * If the staging buffer was kmalloc'd, then using krealloc is
1089 * probably going to be faster. The destination buffer will
1090 * always be smaller, so it's just shrunk, avoiding a memcpy
1092 dstbuf = krealloc(stgbuf, strm.total_out, GFP_KERNEL);
1100 *dlen = strm.total_out;
1103 zlib_deflateEnd(&strm);
1105 kvfree(strm.workspace);
1110 goto fail_stg_alloc;
/*
 * compress_loaddata - compress rawdata in place (data/compressed_size
 * updated); must not already be compressed (asserted).  With
 * compression level 0 the data is left as-is and compressed_size is
 * set equal to size.
 */
1117 static int compress_loaddata(struct aa_loaddata *data)
1120 AA_BUG(data->compressed_size > 0);
1123 * Shortcut the no compression case, else we increase the amount of
1124 * storage required by a small amount
1126 if (aa_g_rawdata_compression_level != 0) {
1127 void *udata = data->data;
1128 int error = deflate_compress(udata, data->size, &data->data,
1129 &data->compressed_size);
/* free the uncompressed buffer if it was replaced */
1133 if (udata != data->data)
1136 data->compressed_size = data->size;
1142 * aa_unpack - unpack packed binary profile(s) data loaded from user space
1143 * @udata: user data copied to kmem (NOT NULL)
1144 * @lh: list to place unpacked profiles in a aa_repl_ws
1145 * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
1147 * Unpack user data and return refcounted allocated profile(s) stored in
1148 * @lh in order of discovery, with the list chain stored in base.list
1151 * Returns: profile(s) on @lh else error pointer if fails to unpack
/* NOTE(review): this function continues past the end of this view;
 * code below is left untouched, comments only */
1153 int aa_unpack(struct aa_loaddata *udata, struct list_head *lh,
1156 struct aa_load_ent *tmp, *ent;
1157 struct aa_profile *profile = NULL;
1160 .start = udata->data,
1161 .end = udata->data + udata->size,
/* iterate over all serialized profiles in the blob */
1166 while (e.pos < e.end) {
1167 char *ns_name = NULL;
/* header is required only for the first profile in the stream */
1169 error = verify_header(&e, e.pos == e.start, ns);
1174 profile = unpack_profile(&e, &ns_name);
1175 if (IS_ERR(profile)) {
1176 error = PTR_ERR(profile);
1180 error = verify_profile(profile);
1184 if (aa_g_hash_policy)
1185 error = aa_calc_profile_hash(profile, e.version, start,
1190 ent = aa_load_ent_alloc();
/* entry takes ownership of ns_name and is queued in discovery order */
1197 ent->ns_name = ns_name;
1198 list_add_tail(&ent->list, lh);
1200 udata->abi = e.version & K_ABI_MASK;
1201 if (aa_g_hash_policy) {
1202 udata->hash = aa_calc_hash(udata->data, udata->size);
1203 if (IS_ERR(udata->hash)) {
1204 error = PTR_ERR(udata->hash);
1210 if (aa_g_export_binary) {
1211 error = compress_loaddata(udata);
1218 aa_put_profile(profile);
/* failure path: tear down every entry already queued on @lh */
1221 list_for_each_entry_safe(ent, tmp, lh, list) {
1222 list_del_init(&ent->list);
1223 aa_load_ent_free(ent);