// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005,2006,2007,2008 IBM Corporation
 *
 * Authors:
 * Mimi Zohar <zohar@us.ibm.com>
 * Kylene Hall <kjhall@us.ibm.com>
 *
 * File: ima_crypto.c
 *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
 */

#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/ratelimit.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

#include "ima.h"

/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/* default is 0 - 1 page. */
static int ima_maxorder;
static unsigned int ima_bufsize = PAGE_SIZE;

static int param_set_bufsize(const char *val, const struct kernel_param *kp)
{
	unsigned long long size;
	int order;

	size = memparse(val, NULL);
	order = get_order(size);
	if (order >= MAX_ORDER)
		return -EINVAL;
	ima_maxorder = order;
	ima_bufsize = PAGE_SIZE << order;
	return 0;
}

static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
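
/*
 * Example: with IMA built into the kernel, both parameters can be set at
 * boot, e.g. "ima.ahash_minsize=1M ima.ahash_bufsize=64K" (memparse()
 * accepts K/M/G suffixes for the buffer size), or changed at runtime via
 * /sys/module/ima/parameters/.
 */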

static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

struct ima_algo_desc {
	struct crypto_shash *tfm;
	enum hash_algo algo;
};

int ima_sha1_idx __ro_after_init;
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and IMA default algo.
 */
int ima_extra_slots __ro_after_init;

static struct ima_algo_desc *ima_algo_array;
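
/*
 * Example: on a TPM 2.0 chip with sha1 and sha256 banks enabled and
 * ima_hash_algo set to sha512, NR_BANKS() is 2, ima_sha1_idx maps to the
 * sha1 bank slot, and one extra slot is reserved so that sha512 template
 * digests can still be computed although no TPM bank matches them.
 */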

static int __init ima_init_ima_crypto(void)
{
	long rc;

	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
	if (IS_ERR(ima_shash_tfm)) {
		rc = PTR_ERR(ima_shash_tfm);
		pr_err("Can not allocate %s (reason: %ld)\n",
		       hash_algo_name[ima_hash_algo], rc);
		return rc;
	}
	pr_info("Allocated hash algorithm: %s\n",
		hash_algo_name[ima_hash_algo]);
	return 0;
}

static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
{
	struct crypto_shash *tfm = ima_shash_tfm;
	int rc, i;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo == ima_hash_algo)
		return tfm;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
			return ima_algo_array[i].tfm;

	tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		pr_err("Can not allocate %s (reason: %d)\n",
		       hash_algo_name[algo], rc);
	}
	return tfm;
}

int __init ima_init_crypto(void)
{
	enum hash_algo algo;
	long rc;
	int i;

	rc = ima_init_ima_crypto();
	if (rc)
		return rc;

	ima_sha1_idx = -1;
	ima_hash_algo_idx = -1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (algo == HASH_ALGO_SHA1)
			ima_sha1_idx = i;

		if (algo == ima_hash_algo)
			ima_hash_algo_idx = i;
	}

	if (ima_sha1_idx < 0) {
		ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
		if (ima_hash_algo == HASH_ALGO_SHA1)
			ima_hash_algo_idx = ima_sha1_idx;
	}

	if (ima_hash_algo_idx < 0)
		ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;

	ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
				 sizeof(*ima_algo_array), GFP_KERNEL);
	if (!ima_algo_array) {
		rc = -ENOMEM;
		goto out;
	}

	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
		ima_algo_array[i].algo = algo;

		/* unknown TPM algorithm */
		if (algo == HASH_ALGO__LAST)
			continue;

		if (algo == ima_hash_algo) {
			ima_algo_array[i].tfm = ima_shash_tfm;
			continue;
		}

		ima_algo_array[i].tfm = ima_alloc_tfm(algo);
		if (IS_ERR(ima_algo_array[i].tfm)) {
			if (algo == HASH_ALGO_SHA1) {
				rc = PTR_ERR(ima_algo_array[i].tfm);
				ima_algo_array[i].tfm = NULL;
				goto out_array;
			}

			ima_algo_array[i].tfm = NULL;
		}
	}

	if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
		if (ima_hash_algo == HASH_ALGO_SHA1) {
			ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
		} else {
			ima_algo_array[ima_sha1_idx].tfm =
						ima_alloc_tfm(HASH_ALGO_SHA1);
			if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
				rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
				goto out_array;
			}
		}

		ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
	}

	if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
	    ima_hash_algo_idx != ima_sha1_idx) {
		ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
		ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
	}

	return 0;
out_array:
	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (!ima_algo_array[i].tfm ||
		    ima_algo_array[i].tfm == ima_shash_tfm)
			continue;

		crypto_free_shash(ima_algo_array[i].tfm);
	}
	kfree(ima_algo_array);
out:
	crypto_free_shash(ima_shash_tfm);
	return rc;
}

static void ima_free_tfm(struct crypto_shash *tfm)
{
	int i;

	if (tfm == ima_shash_tfm)
		return;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
		if (ima_algo_array[i].tfm == tfm)
			return;

	crypto_free_shash(tfm);
}

/*
 * ima_alloc_pages() - Allocate contiguous pages.
 * @max_size:       Maximum amount of memory to allocate.
 * @allocated_size: Returned size of actual allocation.
 * @last_warn:      Should the min_size allocation warn or not.
 *
 * Tries an opportunistic allocation: first attempt to allocate max_size
 * bytes, then retry with ever smaller orders until a zero-order (single
 * page) allocation is reached. Allocations are attempted without
 * generating allocation warnings unless last_warn is set; last_warn only
 * affects the final zero-order allocation.
 *
 * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL).
 *
 * Return pointer to allocated memory, or NULL on failure.
 */
static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
			     int last_warn)
{
	void *ptr;
	int order = ima_maxorder;
	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;

	if (order)
		order = min(get_order(max_size), order);

	for (; order; order--) {
		ptr = (void *)__get_free_pages(gfp_mask, order);
		if (ptr) {
			*allocated_size = PAGE_SIZE << order;
			return ptr;
		}
	}

	/* order is zero - one page */
	gfp_mask = GFP_KERNEL;

	if (!last_warn)
		gfp_mask |= __GFP_NOWARN;

	ptr = (void *)__get_free_pages(gfp_mask, 0);
	if (ptr) {
		*allocated_size = PAGE_SIZE;
		return ptr;
	}

	*allocated_size = 0;
	return NULL;
}

/*
 * ima_free_pages() - Free pages allocated by ima_alloc_pages().
 * @ptr:  Pointer to allocated pages.
 * @size: Size of allocated buffer.
 */
static void ima_free_pages(void *ptr, size_t size)
{
	if (!ptr)
		return;
	free_pages((unsigned long)ptr, get_order(size));
}

static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
{
	struct crypto_ahash *tfm = ima_ahash_tfm;
	int rc;

	if (algo < 0 || algo >= HASH_ALGO__LAST)
		algo = ima_hash_algo;

	if (algo != ima_hash_algo || !tfm) {
		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
		if (!IS_ERR(tfm)) {
			if (algo == ima_hash_algo)
				ima_ahash_tfm = tfm;
		} else {
			rc = PTR_ERR(tfm);
			pr_err("Can not allocate %s (reason: %d)\n",
			       hash_algo_name[algo], rc);
		}
	}
	return tfm;
}

static void ima_free_atfm(struct crypto_ahash *tfm)
{
	if (tfm != ima_ahash_tfm)
		crypto_free_ahash(tfm);
}

static inline int ahash_wait(int err, struct crypto_wait *wait)
{
	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}
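
/*
 * Compute the file digest with the async hash API, pipelining I/O and
 * hashing: while the crypto engine processes the buffer submitted by the
 * previous crypto_ahash_update(), the next chunk is read into the second
 * buffer (when one could be allocated), and the two buffers are swapped
 * on every iteration.
 */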
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with return value
			 * from ahash_wait()
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}

static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_atfm(file, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = ima_calc_file_hash_tfm(file, hash, tfm);

	ima_free_tfm(tfm);

	return rc;
}

/**
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation. If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false;

	/*
	 * For consistency, fail files opened with the O_DIRECT flag on
	 * filesystems mounted with or without the DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);

		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f))
			return PTR_ERR(f);

		new_file_instance = true;
	}

	i_size = i_size_read(file_inode(f));

	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	return rc;
}

/*
 * Calculate the hash of template data
 */
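
/*
 * For any template other than the original "ima" template, each field is
 * hashed as a u32 length (little-endian when ima_canonical_fmt is set,
 * host-endian otherwise) followed by the field data. For the "ima"
 * template only the raw field data is hashed, with the event name ("n")
 * field zero-padded to IMA_EVENT_NAME_LEN_MAX + 1 bytes.
 */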
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_entry *entry,
					 int tfm_idx)
{
	SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
	struct ima_template_desc *td = entry->template_desc;
	int num_fields = entry->template_desc->num_fields;
	int rc, i;

	shash->tfm = ima_algo_array[tfm_idx].tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		u32 datalen_to_hash = !ima_canonical_fmt ?
				datalen : (__force u32)cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

	return rc;
}

int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_entry *entry)
{
	u16 alg_id;
	int rc, i;

	rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
	if (rc)
		return rc;

	entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (i == ima_sha1_idx)
			continue;

		if (i < NR_BANKS(ima_tpm_chip)) {
			alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
			entry->digests[i].alg_id = alg_id;
		}

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (!ima_algo_array[i].tfm) {
			memcpy(entry->digests[i].digest,
			       entry->digests[ima_sha1_idx].digest,
			       TPM_DIGEST_SIZE);
			continue;
		}

		rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
		if (rc)
			return rc;
	}

	return rc;
}
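
/*
 * A remote verifier recomputes these template digests from the binary
 * measurement list when validating it against the PCR aggregate. A minimal
 * user-space sketch (illustrative only, not kernel code; assumes the
 * OpenSSL EVP API and fields already parsed from a length-prefixed record
 * such as ima-ng):
 *
 *	#include <stdint.h>
 *	#include <openssl/evp.h>
 *
 *	static int template_digest(const EVP_MD *md, int num_fields,
 *				   const unsigned char **field,
 *				   const uint32_t *len, unsigned char *out)
 *	{
 *		EVP_MD_CTX *ctx = EVP_MD_CTX_new();
 *		int i, ok = ctx != NULL;
 *
 *		ok = ok && EVP_DigestInit_ex(ctx, md, NULL);
 *		for (i = 0; ok && i < num_fields; i++) {
 *			uint32_t n = len[i];	// length prefix, as hashed above
 *			ok = EVP_DigestUpdate(ctx, &n, sizeof(n)) &&
 *			     EVP_DigestUpdate(ctx, field[i], len[i]);
 *		}
 *		ok = ok && EVP_DigestFinal_ex(ctx, out, NULL);
 *		EVP_MD_CTX_free(ctx);
 *		return ok ? 0 : -1;
 *	}
 */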

static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}

static int calc_buffer_ahash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_ahash *tfm;
	int rc;

	tfm = ima_alloc_atfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);

	ima_free_atfm(tfm);

	return rc;
}

static int calc_buffer_shash_tfm(const void *buf, loff_t size,
				 struct ima_digest_data *hash,
				 struct crypto_shash *tfm)
{
	SHASH_DESC_ON_STACK(shash, tfm);
	unsigned int len;
	int rc;

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	while (size) {
		len = size < PAGE_SIZE ? size : PAGE_SIZE;
		rc = crypto_shash_update(shash, buf, len);
		if (rc)
			break;
		buf += len;
		size -= len;
	}

	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}

static int calc_buffer_shash(const void *buf, loff_t len,
			     struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	int rc;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);

	ima_free_tfm(tfm);
	return rc;
}

int ima_calc_buffer_hash(const void *buf, loff_t len,
			 struct ima_digest_data *hash)
{
	int rc;

	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
		rc = calc_buffer_ahash(buf, len, hash);
		if (!rc)
			return 0;
	}

	return calc_buffer_shash(buf, len, hash);
}

static void ima_pcrread(u32 idx, struct tpm_digest *d)
{
	if (!ima_tpm_chip)
		return;

	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
		pr_err("Error Communicating to TPM chip\n");
}

/*
 * The boot_aggregate is a cumulative hash over TPM registers 0 - 7. With
 * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
 * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
 * allowing firmware to configure and enable different banks.
 *
 * Knowing which TPM bank is read to calculate the boot_aggregate digest
 * needs to be conveyed to a verifier. For this reason, use the same
 * hash algorithm for reading the TPM PCRs as for calculating the boot
 * aggregate digest as stored in the measurement list.
 */
static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
				       struct crypto_shash *tfm)
{
	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
	int rc;
	u32 i;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
		 d.alg_id);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	/* cumulative digest over TPM registers 0-7 */
	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
		ima_pcrread(i, &d);
		/* now accumulate with current aggregate */
		rc = crypto_shash_update(shash, d.digest,
					 crypto_shash_digestsize(tfm));
		if (rc != 0)
			return rc;
	}
	/*
	 * Extend cumulative digest over TPM registers 8-9, which contain
	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
	 * in a typical PCR allocation. Registers 8-9 are only included in
	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
	 */
	if (alg_id != TPM_ALG_SHA1) {
		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
			ima_pcrread(i, &d);
			rc = crypto_shash_update(shash, d.digest,
						crypto_shash_digestsize(tfm));
		}
	}
	if (!rc)
		crypto_shash_final(shash, digest);
	return rc;
}

int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}
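
/*
 * A verifier can recompute boot_aggregate the same way from quoted PCR
 * values, e.g. (assuming tpm2-tools) hashing the sha256 bank values of
 * PCRs 0-9 in index order, as read by
 * "tpm2_pcrread sha256:0,1,2,3,4,5,6,7,8,9", and comparing the result
 * with the boot_aggregate record at the head of the measurement list.
 */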