// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"
/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");
/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;
static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}
static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
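
/*
 * Usage note (added for clarity, not in the original source): since IMA
 * is built into the kernel, both knobs are typically set on the kernel
 * command line, e.g.:
 *
 *	ima.ahash_minsize=32768 ima.ahash_bufsize=1M
 *
 * memparse() above accepts the usual K/M/G suffixes for ahash_bufsize;
 * ahash_minsize is a plain byte count.
 */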
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

struct ima_algo_desc {
	struct crypto_shash *tfm;
	enum hash_algo algo;
};

int ima_sha1_idx __ro_after_init;
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and IMA default algo.
 */
int ima_extra_slots __ro_after_init;

static struct ima_algo_desc *ima_algo_array;
static int __init ima_init_ima_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}
static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc, i;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo == ima_hash_algo)
		return tfm;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
			return ima_algo_array[i].tfm;

	tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		pr_err("Can not allocate %s (reason: %d)\n",
		       hash_algo_name[algo], rc);
	}
	return tfm;
}
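
/*
 * Descriptive note (added for clarity): ima_init_crypto() below builds
 * ima_algo_array with one slot per allocated TPM bank, plus up to two
 * extra slots (counted in ima_extra_slots) appended when the TPM lacks
 * a SHA1 bank and/or a bank matching the IMA default hash algorithm, so
 * that ima_sha1_idx and ima_hash_algo_idx always index a valid slot.
 */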
int __init ima_init_crypto(void)
{
	enum hash_algo algo;
	long rc;
	int i;

	rc = ima_init_ima_crypto();
	if (rc)
		return rc;

	ima_sha1_idx = -1;
	ima_hash_algo_idx = -1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (algo == HASH_ALGO_SHA1)
			ima_sha1_idx = i;

		if (algo == ima_hash_algo)
			ima_hash_algo_idx = i;
	}

	if (ima_sha1_idx < 0) {
		ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
		if (ima_hash_algo == HASH_ALGO_SHA1)
			ima_hash_algo_idx = ima_sha1_idx;
	}

	if (ima_hash_algo_idx < 0)
		ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;

	ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
				 sizeof(*ima_algo_array), GFP_KERNEL);
	if (!ima_algo_array) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		ima_algo_array[i].algo = algo;

		/* unknown TPM algorithm */
		if (algo == HASH_ALGO__LAST)
			continue;

		if (algo == ima_hash_algo) {
			ima_algo_array[i].tfm = ima_shash_tfm;
			continue;
		}

		ima_algo_array[i].tfm = ima_alloc_tfm(algo);
		if (IS_ERR(ima_algo_array[i].tfm)) {
			if (algo == HASH_ALGO_SHA1) {
				rc = PTR_ERR(ima_algo_array[i].tfm);
				ima_algo_array[i].tfm = NULL;
				goto out_array;
			}

			ima_algo_array[i].tfm = NULL;
		}
	}

	if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
		if (ima_hash_algo == HASH_ALGO_SHA1) {
			ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
		} else {
			ima_algo_array[ima_sha1_idx].tfm =
						ima_alloc_tfm(HASH_ALGO_SHA1);
			if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
				rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
				goto out_array;
			}
		}

		ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
	}

	if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
	    ima_hash_algo_idx != ima_sha1_idx) {
		ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
		ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
	}

	return 0;
out_array:
	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (!ima_algo_array[i].tfm ||
		    ima_algo_array[i].tfm == ima_shash_tfm)
			continue;

		crypto_free_shash(ima_algo_array[i].tfm);
	}
	kfree(ima_algo_array);
out:
	crypto_free_shash(ima_shash_tfm);
	return rc;
}
static void ima_free_tfm(struct crypto_shash *tfm)
{
	int i;

	if (tfm == ima_shash_tfm)
		return;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm == tfm)
			return;

	crypto_free_shash(tfm);
}
/**
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size:       Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn:      Should the min_size allocation warn or not.
 *
 * Tries to do opportunistic allocation for memory first trying to allocate
 * max_size amount of memory and then splitting that until zero order is
 * reached. Allocation is tried without generating allocation warnings unless
 * last_warn is set. Last_warn set affects only last allocation of zero order.
 *
 * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
 *
 * Return pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */

	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}
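
/*
 * Worked example (illustrative, not in the original source): with
 * ima_maxorder = 2 (e.g. ima.ahash_bufsize=16K on 4K pages) and a 9 KiB
 * file, get_order(9K) = 2, so the loop above first attempts an order-2
 * (16 KiB) allocation, then order-1 (8 KiB), and finally falls through
 * to the single-page GFP_KERNEL allocation, which warns only when
 * last_warn is set.
 */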
/**
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr:  Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}
static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}
static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}
static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with return value
			 * from ahash_wait()
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}
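
/*
 * Design note (added for clarity): when two buffers were allocated
 * above, file reads and hash updates are pipelined. While the crypto
 * engine hashes one buffer, the next chunk is read into the other, and
 * the code only waits on the previous ahash_update() before reusing a
 * buffer. With a single buffer, the wait must happen before each new
 * read instead, serializing I/O and hashing.
 */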
static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}
/**
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with/without DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);

		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f))
			return PTR_ERR(f);

		new_file_instance = true;
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	return rc;
}
/*
 * Calculate the hash of template data
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_entry *entry,
					 int tfm_idx)
{
	SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
	struct ima_template_desc *td = entry->template_desc;
	int num_fields = entry->template_desc->num_fields;
	int rc, i;

	shash->tfm = ima_algo_array[tfm_idx].tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash =
		    !ima_canonical_fmt ? datalen : cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

	return rc;
}
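
/*
 * Illustration (added for clarity): for any template other than the
 * original "ima" one, each field is hashed as <u32 len> || <data>. An
 * "ima-ng" record with fields ("d-ng", "n-ng") therefore contributes
 * len(d-ng), d-ng, len(n-ng), n-ng to the template digest, with the
 * length words stored little-endian when ima_canonical_fmt is set.
 */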
int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_entry *entry)
{
	u16 alg_id;
	int rc, i;

	rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
	if (rc)
		return rc;

	entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (i == ima_sha1_idx)
			continue;

		if (i < NR_BANKS(ima_tpm_chip)) {
			alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
			entry->digests[i].alg_id = alg_id;
		}

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (!ima_algo_array[i].tfm) {
			memcpy(entry->digests[i].digest,
			       entry->digests[ima_sha1_idx].digest,
			       TPM_DIGEST_SIZE);
			continue;
		}

		rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
		if (rc)
			return rc;
	}

	return rc;
}
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}
static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}
static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}
int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}
static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}
/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Knowing which TPM bank is read to calculate the boot_aggregate digest
 * needs to be conveyed to a verifier. For this reason, use the same
 * hash algorithm for reading the TPM PCRs as for calculating the boot
 * aggregate digest as stored in the measurement list.
 */
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
				       struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
		if (rc != 0)
			return rc;
	}
	/*
	 * Extend cumulative digest over TPM registers 8-9, which contain
	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
	 * in a typical PCR allocation. Registers 8-9 are only included in
	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
	 */
	if (alg_id != TPM_ALG_SHA1) {
		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
			ima_pcrread(i, &d);
			rc = crypto_shash_update(shash, d.digest,
						 crypto_shash_digestsize(tfm));
		}
	}
	if (!rc)
		crypto_shash_final(shash, digest);
	return rc;
}
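
/*
 * In summary (added for clarity), the digest computed above is:
 *
 *	boot_aggregate = H(PCR0 || PCR1 || ... || PCR7)         for SHA1
 *	boot_aggregate = H(PCR0 || ... || PCR7 || PCR8 || PCR9) otherwise
 *
 * where H is the chosen bank's hash algorithm and every PCR value is
 * read from that same bank.
 */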
int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}