xref: /linux/security/integrity/ima/ima_crypto.c (revision 762f99f4f3cb41a775b5157dd761217beba65873)
1b886d83cSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only
23323eec9SMimi Zohar /*
33323eec9SMimi Zohar  * Copyright (C) 2005,2006,2007,2008 IBM Corporation
43323eec9SMimi Zohar  *
53323eec9SMimi Zohar  * Authors:
63323eec9SMimi Zohar  * Mimi Zohar <zohar@us.ibm.com>
73323eec9SMimi Zohar  * Kylene Hall <kjhall@us.ibm.com>
83323eec9SMimi Zohar  *
93323eec9SMimi Zohar  * File: ima_crypto.c
103323eec9SMimi Zohar  *	Calculates md5/sha1 file hash, template hash, boot-aggreate hash
113323eec9SMimi Zohar  */
123323eec9SMimi Zohar 
133323eec9SMimi Zohar #include <linux/kernel.h>
143bcced39SDmitry Kasatkin #include <linux/moduleparam.h>
153bcced39SDmitry Kasatkin #include <linux/ratelimit.h>
163323eec9SMimi Zohar #include <linux/file.h>
173323eec9SMimi Zohar #include <linux/crypto.h>
183323eec9SMimi Zohar #include <linux/scatterlist.h>
193323eec9SMimi Zohar #include <linux/err.h>
205a0e3ad6STejun Heo #include <linux/slab.h>
2176bb28f6SDmitry Kasatkin #include <crypto/hash.h>
221525b06dSDmitry Kasatkin 
233323eec9SMimi Zohar #include "ima.h"
243323eec9SMimi Zohar 
/* minimum file size for ahash use */
static unsigned long ima_ahash_minsize;
module_param_named(ahash_minsize, ima_ahash_minsize, ulong, 0644);
MODULE_PARM_DESC(ahash_minsize, "Minimum file size for ahash use");

/*
 * Read-buffer sizing for the ahash path, controlled by the
 * "ahash_bufsize" parameter (see param_set_bufsize() below).
 * default is 0 - 1 page.
 */
static int ima_maxorder;	/* max page allocation order for read buffers */
static unsigned int ima_bufsize = PAGE_SIZE;	/* matching size in bytes */
336edf7a89SDmitry Kasatkin 
346edf7a89SDmitry Kasatkin static int param_set_bufsize(const char *val, const struct kernel_param *kp)
356edf7a89SDmitry Kasatkin {
366edf7a89SDmitry Kasatkin 	unsigned long long size;
376edf7a89SDmitry Kasatkin 	int order;
386edf7a89SDmitry Kasatkin 
396edf7a89SDmitry Kasatkin 	size = memparse(val, NULL);
406edf7a89SDmitry Kasatkin 	order = get_order(size);
416edf7a89SDmitry Kasatkin 	if (order >= MAX_ORDER)
426edf7a89SDmitry Kasatkin 		return -EINVAL;
436edf7a89SDmitry Kasatkin 	ima_maxorder = order;
446edf7a89SDmitry Kasatkin 	ima_bufsize = PAGE_SIZE << order;
456edf7a89SDmitry Kasatkin 	return 0;
466edf7a89SDmitry Kasatkin }
476edf7a89SDmitry Kasatkin 
/*
 * Custom parameter ops so "ahash_bufsize" accepts human-readable sizes
 * via param_set_bufsize() while reading back as a plain unsigned int.
 */
static const struct kernel_param_ops param_ops_bufsize = {
	.set = param_set_bufsize,
	.get = param_get_uint,
};
/* Type check hook required by module_param_named() for the custom type. */
#define param_check_bufsize(name, p) __param_check(name, p, unsigned int)

module_param_named(ahash_bufsize, ima_bufsize, bufsize, 0644);
MODULE_PARM_DESC(ahash_bufsize, "Maximum ahash buffer size");
566edf7a89SDmitry Kasatkin 
/* Cached transforms for the default IMA hash algorithm. */
static struct crypto_shash *ima_shash_tfm;
static struct crypto_ahash *ima_ahash_tfm;

/* Pairs a cached shash transform with the algorithm it implements. */
struct ima_algo_desc {
	struct crypto_shash *tfm;
	enum hash_algo algo;
};

/* Index of the SHA1 slot in ima_algo_array (and in entry->digests). */
int ima_sha1_idx __ro_after_init;
/* Index of the default IMA algorithm's slot in ima_algo_array. */
int ima_hash_algo_idx __ro_after_init;
/*
 * Additional number of slots reserved, as needed, for SHA1
 * and IMA default algo.
 */
int ima_extra_slots __ro_after_init;

/* One entry per TPM bank plus the extra slots; set up in ima_init_crypto(). */
static struct ima_algo_desc *ima_algo_array;
746d94809aSRoberto Sassu 
756d94809aSRoberto Sassu static int __init ima_init_ima_crypto(void)
7676bb28f6SDmitry Kasatkin {
7776bb28f6SDmitry Kasatkin 	long rc;
7876bb28f6SDmitry Kasatkin 
79c7c8bb23SDmitry Kasatkin 	ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
8076bb28f6SDmitry Kasatkin 	if (IS_ERR(ima_shash_tfm)) {
8176bb28f6SDmitry Kasatkin 		rc = PTR_ERR(ima_shash_tfm);
82c7c8bb23SDmitry Kasatkin 		pr_err("Can not allocate %s (reason: %ld)\n",
83c7c8bb23SDmitry Kasatkin 		       hash_algo_name[ima_hash_algo], rc);
843323eec9SMimi Zohar 		return rc;
853323eec9SMimi Zohar 	}
86ab60368aSPetr Vorel 	pr_info("Allocated hash algorithm: %s\n",
87ab60368aSPetr Vorel 		hash_algo_name[ima_hash_algo]);
8876bb28f6SDmitry Kasatkin 	return 0;
893323eec9SMimi Zohar }
903323eec9SMimi Zohar 
91723326b9SDmitry Kasatkin static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
92723326b9SDmitry Kasatkin {
93723326b9SDmitry Kasatkin 	struct crypto_shash *tfm = ima_shash_tfm;
946d94809aSRoberto Sassu 	int rc, i;
95723326b9SDmitry Kasatkin 
9623c19e2cSDmitry Kasatkin 	if (algo < 0 || algo >= HASH_ALGO__LAST)
9723c19e2cSDmitry Kasatkin 		algo = ima_hash_algo;
9823c19e2cSDmitry Kasatkin 
996d94809aSRoberto Sassu 	if (algo == ima_hash_algo)
1006d94809aSRoberto Sassu 		return tfm;
1016d94809aSRoberto Sassu 
1026d94809aSRoberto Sassu 	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
1036d94809aSRoberto Sassu 		if (ima_algo_array[i].tfm && ima_algo_array[i].algo == algo)
1046d94809aSRoberto Sassu 			return ima_algo_array[i].tfm;
1056d94809aSRoberto Sassu 
106723326b9SDmitry Kasatkin 	tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
107723326b9SDmitry Kasatkin 	if (IS_ERR(tfm)) {
108723326b9SDmitry Kasatkin 		rc = PTR_ERR(tfm);
109723326b9SDmitry Kasatkin 		pr_err("Can not allocate %s (reason: %d)\n",
110723326b9SDmitry Kasatkin 		       hash_algo_name[algo], rc);
111723326b9SDmitry Kasatkin 	}
112723326b9SDmitry Kasatkin 	return tfm;
113723326b9SDmitry Kasatkin }
114723326b9SDmitry Kasatkin 
1156d94809aSRoberto Sassu int __init ima_init_crypto(void)
1166d94809aSRoberto Sassu {
1176d94809aSRoberto Sassu 	enum hash_algo algo;
1186d94809aSRoberto Sassu 	long rc;
1196d94809aSRoberto Sassu 	int i;
1206d94809aSRoberto Sassu 
1216d94809aSRoberto Sassu 	rc = ima_init_ima_crypto();
1226d94809aSRoberto Sassu 	if (rc)
1236d94809aSRoberto Sassu 		return rc;
1246d94809aSRoberto Sassu 
1256d94809aSRoberto Sassu 	ima_sha1_idx = -1;
1262592677cSRoberto Sassu 	ima_hash_algo_idx = -1;
1276d94809aSRoberto Sassu 
1286d94809aSRoberto Sassu 	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
1296d94809aSRoberto Sassu 		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
1306d94809aSRoberto Sassu 		if (algo == HASH_ALGO_SHA1)
1316d94809aSRoberto Sassu 			ima_sha1_idx = i;
1322592677cSRoberto Sassu 
1332592677cSRoberto Sassu 		if (algo == ima_hash_algo)
1342592677cSRoberto Sassu 			ima_hash_algo_idx = i;
1356d94809aSRoberto Sassu 	}
1366d94809aSRoberto Sassu 
1372592677cSRoberto Sassu 	if (ima_sha1_idx < 0) {
1386d94809aSRoberto Sassu 		ima_sha1_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
1392592677cSRoberto Sassu 		if (ima_hash_algo == HASH_ALGO_SHA1)
1402592677cSRoberto Sassu 			ima_hash_algo_idx = ima_sha1_idx;
1412592677cSRoberto Sassu 	}
1422592677cSRoberto Sassu 
1432592677cSRoberto Sassu 	if (ima_hash_algo_idx < 0)
1442592677cSRoberto Sassu 		ima_hash_algo_idx = NR_BANKS(ima_tpm_chip) + ima_extra_slots++;
1456d94809aSRoberto Sassu 
1466d94809aSRoberto Sassu 	ima_algo_array = kcalloc(NR_BANKS(ima_tpm_chip) + ima_extra_slots,
1476d94809aSRoberto Sassu 				 sizeof(*ima_algo_array), GFP_KERNEL);
1486d94809aSRoberto Sassu 	if (!ima_algo_array) {
1496d94809aSRoberto Sassu 		rc = -ENOMEM;
1506d94809aSRoberto Sassu 		goto out;
1516d94809aSRoberto Sassu 	}
1526d94809aSRoberto Sassu 
1536d94809aSRoberto Sassu 	for (i = 0; i < NR_BANKS(ima_tpm_chip); i++) {
1546d94809aSRoberto Sassu 		algo = ima_tpm_chip->allocated_banks[i].crypto_id;
1556d94809aSRoberto Sassu 		ima_algo_array[i].algo = algo;
1566d94809aSRoberto Sassu 
1576d94809aSRoberto Sassu 		/* unknown TPM algorithm */
1586d94809aSRoberto Sassu 		if (algo == HASH_ALGO__LAST)
1596d94809aSRoberto Sassu 			continue;
1606d94809aSRoberto Sassu 
1616d94809aSRoberto Sassu 		if (algo == ima_hash_algo) {
1626d94809aSRoberto Sassu 			ima_algo_array[i].tfm = ima_shash_tfm;
1636d94809aSRoberto Sassu 			continue;
1646d94809aSRoberto Sassu 		}
1656d94809aSRoberto Sassu 
1666d94809aSRoberto Sassu 		ima_algo_array[i].tfm = ima_alloc_tfm(algo);
1676d94809aSRoberto Sassu 		if (IS_ERR(ima_algo_array[i].tfm)) {
1686d94809aSRoberto Sassu 			if (algo == HASH_ALGO_SHA1) {
1696d94809aSRoberto Sassu 				rc = PTR_ERR(ima_algo_array[i].tfm);
1706d94809aSRoberto Sassu 				ima_algo_array[i].tfm = NULL;
1716d94809aSRoberto Sassu 				goto out_array;
1726d94809aSRoberto Sassu 			}
1736d94809aSRoberto Sassu 
1746d94809aSRoberto Sassu 			ima_algo_array[i].tfm = NULL;
1756d94809aSRoberto Sassu 		}
1766d94809aSRoberto Sassu 	}
1776d94809aSRoberto Sassu 
1786d94809aSRoberto Sassu 	if (ima_sha1_idx >= NR_BANKS(ima_tpm_chip)) {
1796d94809aSRoberto Sassu 		if (ima_hash_algo == HASH_ALGO_SHA1) {
1806d94809aSRoberto Sassu 			ima_algo_array[ima_sha1_idx].tfm = ima_shash_tfm;
1816d94809aSRoberto Sassu 		} else {
1826d94809aSRoberto Sassu 			ima_algo_array[ima_sha1_idx].tfm =
1836d94809aSRoberto Sassu 						ima_alloc_tfm(HASH_ALGO_SHA1);
1846d94809aSRoberto Sassu 			if (IS_ERR(ima_algo_array[ima_sha1_idx].tfm)) {
1856d94809aSRoberto Sassu 				rc = PTR_ERR(ima_algo_array[ima_sha1_idx].tfm);
1866d94809aSRoberto Sassu 				goto out_array;
1876d94809aSRoberto Sassu 			}
1886d94809aSRoberto Sassu 		}
1896d94809aSRoberto Sassu 
1906d94809aSRoberto Sassu 		ima_algo_array[ima_sha1_idx].algo = HASH_ALGO_SHA1;
1916d94809aSRoberto Sassu 	}
1926d94809aSRoberto Sassu 
1932592677cSRoberto Sassu 	if (ima_hash_algo_idx >= NR_BANKS(ima_tpm_chip) &&
1942592677cSRoberto Sassu 	    ima_hash_algo_idx != ima_sha1_idx) {
1952592677cSRoberto Sassu 		ima_algo_array[ima_hash_algo_idx].tfm = ima_shash_tfm;
1962592677cSRoberto Sassu 		ima_algo_array[ima_hash_algo_idx].algo = ima_hash_algo;
1972592677cSRoberto Sassu 	}
1982592677cSRoberto Sassu 
1996d94809aSRoberto Sassu 	return 0;
2006d94809aSRoberto Sassu out_array:
2016d94809aSRoberto Sassu 	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
2026d94809aSRoberto Sassu 		if (!ima_algo_array[i].tfm ||
2036d94809aSRoberto Sassu 		    ima_algo_array[i].tfm == ima_shash_tfm)
2046d94809aSRoberto Sassu 			continue;
2056d94809aSRoberto Sassu 
2066d94809aSRoberto Sassu 		crypto_free_shash(ima_algo_array[i].tfm);
2076d94809aSRoberto Sassu 	}
2086d94809aSRoberto Sassu out:
2096d94809aSRoberto Sassu 	crypto_free_shash(ima_shash_tfm);
2106d94809aSRoberto Sassu 	return rc;
2116d94809aSRoberto Sassu }
2126d94809aSRoberto Sassu 
213723326b9SDmitry Kasatkin static void ima_free_tfm(struct crypto_shash *tfm)
214723326b9SDmitry Kasatkin {
2156d94809aSRoberto Sassu 	int i;
2166d94809aSRoberto Sassu 
2176d94809aSRoberto Sassu 	if (tfm == ima_shash_tfm)
2186d94809aSRoberto Sassu 		return;
2196d94809aSRoberto Sassu 
2206d94809aSRoberto Sassu 	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++)
2216d94809aSRoberto Sassu 		if (ima_algo_array[i].tfm == tfm)
2226d94809aSRoberto Sassu 			return;
2236d94809aSRoberto Sassu 
224723326b9SDmitry Kasatkin 	crypto_free_shash(tfm);
225723326b9SDmitry Kasatkin }
226723326b9SDmitry Kasatkin 
2276edf7a89SDmitry Kasatkin /**
2286edf7a89SDmitry Kasatkin  * ima_alloc_pages() - Allocate contiguous pages.
2296edf7a89SDmitry Kasatkin  * @max_size:       Maximum amount of memory to allocate.
2306edf7a89SDmitry Kasatkin  * @allocated_size: Returned size of actual allocation.
2316edf7a89SDmitry Kasatkin  * @last_warn:      Should the min_size allocation warn or not.
2326edf7a89SDmitry Kasatkin  *
2336edf7a89SDmitry Kasatkin  * Tries to do opportunistic allocation for memory first trying to allocate
2346edf7a89SDmitry Kasatkin  * max_size amount of memory and then splitting that until zero order is
2356edf7a89SDmitry Kasatkin  * reached. Allocation is tried without generating allocation warnings unless
2366edf7a89SDmitry Kasatkin  * last_warn is set. Last_warn set affects only last allocation of zero order.
2376edf7a89SDmitry Kasatkin  *
2386edf7a89SDmitry Kasatkin  * By default, ima_maxorder is 0 and it is equivalent to kmalloc(GFP_KERNEL)
2396edf7a89SDmitry Kasatkin  *
2406edf7a89SDmitry Kasatkin  * Return pointer to allocated memory, or NULL on failure.
2416edf7a89SDmitry Kasatkin  */
2426edf7a89SDmitry Kasatkin static void *ima_alloc_pages(loff_t max_size, size_t *allocated_size,
2436edf7a89SDmitry Kasatkin 			     int last_warn)
2446edf7a89SDmitry Kasatkin {
2456edf7a89SDmitry Kasatkin 	void *ptr;
2466edf7a89SDmitry Kasatkin 	int order = ima_maxorder;
24771baba4bSMel Gorman 	gfp_t gfp_mask = __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY;
2486edf7a89SDmitry Kasatkin 
2496edf7a89SDmitry Kasatkin 	if (order)
2506edf7a89SDmitry Kasatkin 		order = min(get_order(max_size), order);
2516edf7a89SDmitry Kasatkin 
2526edf7a89SDmitry Kasatkin 	for (; order; order--) {
2536edf7a89SDmitry Kasatkin 		ptr = (void *)__get_free_pages(gfp_mask, order);
2546edf7a89SDmitry Kasatkin 		if (ptr) {
2556edf7a89SDmitry Kasatkin 			*allocated_size = PAGE_SIZE << order;
2566edf7a89SDmitry Kasatkin 			return ptr;
2576edf7a89SDmitry Kasatkin 		}
2586edf7a89SDmitry Kasatkin 	}
2596edf7a89SDmitry Kasatkin 
2606edf7a89SDmitry Kasatkin 	/* order is zero - one page */
2616edf7a89SDmitry Kasatkin 
2626edf7a89SDmitry Kasatkin 	gfp_mask = GFP_KERNEL;
2636edf7a89SDmitry Kasatkin 
2646edf7a89SDmitry Kasatkin 	if (!last_warn)
2656edf7a89SDmitry Kasatkin 		gfp_mask |= __GFP_NOWARN;
2666edf7a89SDmitry Kasatkin 
2676edf7a89SDmitry Kasatkin 	ptr = (void *)__get_free_pages(gfp_mask, 0);
2686edf7a89SDmitry Kasatkin 	if (ptr) {
2696edf7a89SDmitry Kasatkin 		*allocated_size = PAGE_SIZE;
2706edf7a89SDmitry Kasatkin 		return ptr;
2716edf7a89SDmitry Kasatkin 	}
2726edf7a89SDmitry Kasatkin 
2736edf7a89SDmitry Kasatkin 	*allocated_size = 0;
2746edf7a89SDmitry Kasatkin 	return NULL;
2756edf7a89SDmitry Kasatkin }
2766edf7a89SDmitry Kasatkin 
2776edf7a89SDmitry Kasatkin /**
2786edf7a89SDmitry Kasatkin  * ima_free_pages() - Free pages allocated by ima_alloc_pages().
2796edf7a89SDmitry Kasatkin  * @ptr:  Pointer to allocated pages.
2806edf7a89SDmitry Kasatkin  * @size: Size of allocated buffer.
2816edf7a89SDmitry Kasatkin  */
2826edf7a89SDmitry Kasatkin static void ima_free_pages(void *ptr, size_t size)
2836edf7a89SDmitry Kasatkin {
2846edf7a89SDmitry Kasatkin 	if (!ptr)
2856edf7a89SDmitry Kasatkin 		return;
2866edf7a89SDmitry Kasatkin 	free_pages((unsigned long)ptr, get_order(size));
2876edf7a89SDmitry Kasatkin }
2886edf7a89SDmitry Kasatkin 
2893bcced39SDmitry Kasatkin static struct crypto_ahash *ima_alloc_atfm(enum hash_algo algo)
2903bcced39SDmitry Kasatkin {
2913bcced39SDmitry Kasatkin 	struct crypto_ahash *tfm = ima_ahash_tfm;
2923bcced39SDmitry Kasatkin 	int rc;
2933bcced39SDmitry Kasatkin 
2949a8d289fSMimi Zohar 	if (algo < 0 || algo >= HASH_ALGO__LAST)
2959a8d289fSMimi Zohar 		algo = ima_hash_algo;
2969a8d289fSMimi Zohar 
2979a8d289fSMimi Zohar 	if (algo != ima_hash_algo || !tfm) {
2983bcced39SDmitry Kasatkin 		tfm = crypto_alloc_ahash(hash_algo_name[algo], 0, 0);
2993bcced39SDmitry Kasatkin 		if (!IS_ERR(tfm)) {
3003bcced39SDmitry Kasatkin 			if (algo == ima_hash_algo)
3013bcced39SDmitry Kasatkin 				ima_ahash_tfm = tfm;
3023bcced39SDmitry Kasatkin 		} else {
3033bcced39SDmitry Kasatkin 			rc = PTR_ERR(tfm);
3043bcced39SDmitry Kasatkin 			pr_err("Can not allocate %s (reason: %d)\n",
3053bcced39SDmitry Kasatkin 			       hash_algo_name[algo], rc);
3063bcced39SDmitry Kasatkin 		}
3073bcced39SDmitry Kasatkin 	}
3083bcced39SDmitry Kasatkin 	return tfm;
3093bcced39SDmitry Kasatkin }
3103bcced39SDmitry Kasatkin 
3113bcced39SDmitry Kasatkin static void ima_free_atfm(struct crypto_ahash *tfm)
3123bcced39SDmitry Kasatkin {
3133bcced39SDmitry Kasatkin 	if (tfm != ima_ahash_tfm)
3143bcced39SDmitry Kasatkin 		crypto_free_ahash(tfm);
3153bcced39SDmitry Kasatkin }
3163bcced39SDmitry Kasatkin 
/*
 * Wait for an in-flight ahash operation to complete.
 *
 * @err is the return value of the crypto call that was just submitted;
 * crypto_wait_req() converts -EINPROGRESS/-EBUSY into the operation's
 * final status.  Real failures are logged (ratelimited, since this can
 * fire once per file chunk).
 */
static inline int ahash_wait(int err, struct crypto_wait *wait)
{

	err = crypto_wait_req(err, wait);

	if (err)
		pr_crit_ratelimited("ahash calculation failed: err: %d\n", err);

	return err;
}
3273bcced39SDmitry Kasatkin 
/*
 * Compute @file's digest with the asynchronous hash API, using up to
 * two read buffers so that reading the next chunk can overlap with
 * hashing the previous one (double buffering).  Falls back to a single
 * buffer when the second allocation fails.
 *
 * Returns 0 on success with the digest in @hash, a negative errno
 * otherwise.
 */
static int ima_calc_file_hash_atfm(struct file *file,
				   struct ima_digest_data *hash,
				   struct crypto_ahash *tfm)
{
	loff_t i_size, offset;
	char *rbuf[2] = { NULL, };
	int rc, rbuf_len, active = 0, ahash_rc = 0;
	struct ahash_request *req;
	struct scatterlist sg[1];
	struct crypto_wait wait;
	size_t rbuf_size[2];

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out1;

	i_size = i_size_read(file_inode(file));

	/* Empty file: skip straight to finalizing the digest. */
	if (i_size == 0)
		goto out2;

	/*
	 * Try to allocate maximum size of memory.
	 * Fail if even a single page cannot be allocated.
	 */
	rbuf[0] = ima_alloc_pages(i_size, &rbuf_size[0], 1);
	if (!rbuf[0]) {
		rc = -ENOMEM;
		goto out1;
	}

	/* Only allocate one buffer if that is enough. */
	if (i_size > rbuf_size[0]) {
		/*
		 * Try to allocate secondary buffer. If that fails fallback to
		 * using single buffering. Use previous memory allocation size
		 * as baseline for possible allocation size.
		 */
		rbuf[1] = ima_alloc_pages(i_size - rbuf_size[0],
					  &rbuf_size[1], 0);
	}

	for (offset = 0; offset < i_size; offset += rbuf_len) {
		if (!rbuf[1] && offset) {
			/* Not using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}
		/* read buffer */
		rbuf_len = min_t(loff_t, i_size - offset, rbuf_size[active]);
		rc = integrity_kernel_read(file, offset, rbuf[active],
					   rbuf_len);
		if (rc != rbuf_len) {
			/* Short read or read error: treat as failure. */
			if (rc >= 0)
				rc = -EINVAL;
			/*
			 * Forward current rc, do not overwrite with return value
			 * from ahash_wait()
			 */
			ahash_wait(ahash_rc, &wait);
			goto out3;
		}

		if (rbuf[1] && offset) {
			/* Using two buffers, and it is not the first
			 * read/request, wait for the completion of the
			 * previous ahash_update() request.
			 */
			rc = ahash_wait(ahash_rc, &wait);
			if (rc)
				goto out3;
		}

		sg_init_one(&sg[0], rbuf[active], rbuf_len);
		ahash_request_set_crypt(req, sg, NULL, rbuf_len);

		/* Submit without waiting; completion is checked next round. */
		ahash_rc = crypto_ahash_update(req);

		if (rbuf[1])
			active = !active; /* swap buffers, if we use two */
	}
	/* wait for the last update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
out3:
	/* ima_free_pages() ignores @size when the pointer is NULL. */
	ima_free_pages(rbuf[0], rbuf_size[0]);
	ima_free_pages(rbuf[1], rbuf_size[1]);
out2:
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out1:
	ahash_request_free(req);
	return rc;
}
4383bcced39SDmitry Kasatkin 
4393bcced39SDmitry Kasatkin static int ima_calc_file_ahash(struct file *file, struct ima_digest_data *hash)
4403bcced39SDmitry Kasatkin {
4413bcced39SDmitry Kasatkin 	struct crypto_ahash *tfm;
4423bcced39SDmitry Kasatkin 	int rc;
4433bcced39SDmitry Kasatkin 
4443bcced39SDmitry Kasatkin 	tfm = ima_alloc_atfm(hash->algo);
4453bcced39SDmitry Kasatkin 	if (IS_ERR(tfm))
4463bcced39SDmitry Kasatkin 		return PTR_ERR(tfm);
4473bcced39SDmitry Kasatkin 
4483bcced39SDmitry Kasatkin 	rc = ima_calc_file_hash_atfm(file, hash, tfm);
4493bcced39SDmitry Kasatkin 
4503bcced39SDmitry Kasatkin 	ima_free_atfm(tfm);
4513bcced39SDmitry Kasatkin 
4523bcced39SDmitry Kasatkin 	return rc;
4533bcced39SDmitry Kasatkin }
4543bcced39SDmitry Kasatkin 
/*
 * Compute @file's digest with the synchronous hash API, reading the
 * file one page at a time.  Returns 0 on success with the digest in
 * @hash, a negative errno otherwise.
 */
static int ima_calc_file_hash_tfm(struct file *file,
				  struct ima_digest_data *hash,
				  struct crypto_shash *tfm)
{
	loff_t i_size, offset = 0;
	char *rbuf;
	int rc;
	SHASH_DESC_ON_STACK(shash, tfm);

	shash->tfm = tfm;

	hash->length = crypto_shash_digestsize(tfm);

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	i_size = i_size_read(file_inode(file));

	/* Empty file: finalize the digest of zero bytes. */
	if (i_size == 0)
		goto out;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf)
		return -ENOMEM;

	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = integrity_kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0) {	/* unexpected EOF */
			rc = -EINVAL;
			break;
		}
		offset += rbuf_len;

		rc = crypto_shash_update(shash, rbuf, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
out:
	/* Finalize only if every read and update succeeded. */
	if (!rc)
		rc = crypto_shash_final(shash, hash->digest);
	return rc;
}
5053323eec9SMimi Zohar 
5063bcced39SDmitry Kasatkin static int ima_calc_file_shash(struct file *file, struct ima_digest_data *hash)
507c7c8bb23SDmitry Kasatkin {
508723326b9SDmitry Kasatkin 	struct crypto_shash *tfm;
509c7c8bb23SDmitry Kasatkin 	int rc;
510c7c8bb23SDmitry Kasatkin 
511723326b9SDmitry Kasatkin 	tfm = ima_alloc_tfm(hash->algo);
512723326b9SDmitry Kasatkin 	if (IS_ERR(tfm))
513723326b9SDmitry Kasatkin 		return PTR_ERR(tfm);
514c7c8bb23SDmitry Kasatkin 
515c7c8bb23SDmitry Kasatkin 	rc = ima_calc_file_hash_tfm(file, hash, tfm);
516c7c8bb23SDmitry Kasatkin 
517723326b9SDmitry Kasatkin 	ima_free_tfm(tfm);
518c7c8bb23SDmitry Kasatkin 
519c7c8bb23SDmitry Kasatkin 	return rc;
520c7c8bb23SDmitry Kasatkin }
521c7c8bb23SDmitry Kasatkin 
/*
 * ima_calc_file_hash - calculate file hash
 *
 * Asynchronous hash (ahash) allows using HW acceleration for calculating
 * a hash. ahash performance varies for different data sizes on different
 * crypto accelerators. shash performance might be better for smaller files.
 * The 'ima.ahash_minsize' module parameter allows specifying the best
 * minimum file size for using ahash on the system.
 *
 * If the ima.ahash_minsize parameter is not specified, this function uses
 * shash for the hash calculation.  If ahash fails, it falls back to using
 * shash.
 */
int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
{
	loff_t i_size;
	int rc;
	struct file *f = file;
	bool new_file_instance = false;

	/*
	 * For consistency, fail file's opened with the O_DIRECT flag on
	 * filesystems mounted with/without DAX option.
	 */
	if (file->f_flags & O_DIRECT) {
		hash->length = hash_digest_size[ima_hash_algo];
		hash->algo = ima_hash_algo;
		return -EINVAL;
	}

	/* Open a new file instance in O_RDONLY if we cannot read */
	if (!(file->f_mode & FMODE_READ)) {
		/* Strip write-oriented flags before reopening read-only. */
		int flags = file->f_flags & ~(O_WRONLY | O_APPEND |
				O_TRUNC | O_CREAT | O_NOCTTY | O_EXCL);
		flags |= O_RDONLY;
		f = dentry_open(&file->f_path, flags, file->f_cred);
		if (IS_ERR(f))
			return PTR_ERR(f);

		new_file_instance = true;
	}

	i_size = i_size_read(file_inode(f));

	/* Prefer ahash for large files when the admin opted in. */
	if (ima_ahash_minsize && i_size >= ima_ahash_minsize) {
		rc = ima_calc_file_ahash(f, hash);
		if (!rc)
			goto out;
	}

	/* shash path: default, and fallback when ahash failed. */
	rc = ima_calc_file_shash(f, hash);
out:
	if (new_file_instance)
		fput(f);
	return rc;
}
5783bcced39SDmitry Kasatkin 
/*
 * Calculate the hash of template data
 *
 * Hashes every field of @entry with the transform at @tfm_idx in
 * ima_algo_array, storing the result in entry->digests[tfm_idx].
 * For non-"ima" templates each field is prefixed by its length (in
 * little-endian when ima_canonical_fmt is set); the legacy "ima"
 * template instead pads the event name field to a fixed size.
 */
static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
					 struct ima_template_entry *entry,
					 int tfm_idx)
{
	SHASH_DESC_ON_STACK(shash, ima_algo_array[tfm_idx].tfm);
	struct ima_template_desc *td = entry->template_desc;
	int num_fields = entry->template_desc->num_fields;
	int rc, i;

	shash->tfm = ima_algo_array[tfm_idx].tfm;

	rc = crypto_shash_init(shash);
	if (rc != 0)
		return rc;

	for (i = 0; i < num_fields; i++) {
		u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
		u8 *data_to_hash = field_data[i].data;
		u32 datalen = field_data[i].len;
		/* Canonical format fixes the length prefix to little-endian. */
		u32 datalen_to_hash = !ima_canonical_fmt ?
				datalen : (__force u32)cpu_to_le32(datalen);

		if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
			/* Non-legacy template: hash the field length first. */
			rc = crypto_shash_update(shash,
						(const u8 *) &datalen_to_hash,
						sizeof(datalen_to_hash));
			if (rc)
				break;
		} else if (strcmp(td->fields[i]->field_id, "n") == 0) {
			/* Legacy "ima" template: zero-pad the event name. */
			memcpy(buffer, data_to_hash, datalen);
			data_to_hash = buffer;
			datalen = IMA_EVENT_NAME_LEN_MAX + 1;
		}
		rc = crypto_shash_update(shash, data_to_hash, datalen);
		if (rc)
			break;
	}

	if (!rc)
		rc = crypto_shash_final(shash, entry->digests[tfm_idx].digest);

	return rc;
}
625a71dc65dSRoberto Sassu 
/*
 * Calculate the template data digest for every allocated TPM bank (and
 * the extra slots).  SHA1 is computed first since banks whose algorithm
 * has no kernel transform reuse its digest, zero-padded to the bank
 * size.  Returns 0 on success, a negative errno otherwise.
 */
int ima_calc_field_array_hash(struct ima_field_data *field_data,
			      struct ima_template_entry *entry)
{
	u16 alg_id;
	int rc, i;

	rc = ima_calc_field_array_hash_tfm(field_data, entry, ima_sha1_idx);
	if (rc)
		return rc;

	entry->digests[ima_sha1_idx].alg_id = TPM_ALG_SHA1;

	for (i = 0; i < NR_BANKS(ima_tpm_chip) + ima_extra_slots; i++) {
		if (i == ima_sha1_idx)
			continue;

		/* Extra slots beyond the banks carry no TPM alg_id. */
		if (i < NR_BANKS(ima_tpm_chip)) {
			alg_id = ima_tpm_chip->allocated_banks[i].alg_id;
			entry->digests[i].alg_id = alg_id;
		}

		/* for unmapped TPM algorithms digest is still a padded SHA1 */
		if (!ima_algo_array[i].tfm) {
			memcpy(entry->digests[i].digest,
			       entry->digests[ima_sha1_idx].digest,
			       TPM_DIGEST_SIZE);
			continue;
		}

		rc = ima_calc_field_array_hash_tfm(field_data, entry, i);
		if (rc)
			return rc;
	}
	return rc;
}
661ea593993SDmitry Kasatkin 
/*
 * calc_buffer_ahash_atfm - hash a contiguous buffer with the async hash API
 * @buf:  buffer to hash
 * @len:  number of bytes in @buf
 * @hash: output; ->length is set to the tfm digest size, ->digest receives
 *        the result on success
 * @tfm:  pre-allocated ahash transform (caller owns and frees it)
 *
 * Each async step (init/update/final) may complete asynchronously; the
 * crypto_wait + ahash_wait() pairs block until the hardware/driver is done.
 * Returns 0 on success or a negative errno.
 */
static int calc_buffer_ahash_atfm(const void *buf, loff_t len,
				  struct ima_digest_data *hash,
				  struct crypto_ahash *tfm)
{
	struct ahash_request *req;
	struct scatterlist sg;
	struct crypto_wait wait;
	int rc, ahash_rc = 0;

	hash->length = crypto_ahash_digestsize(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* completion callback wakes the waiter for each async operation */
	crypto_init_wait(&wait);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				   CRYPTO_TFM_REQ_MAY_SLEEP,
				   crypto_req_done, &wait);

	rc = ahash_wait(crypto_ahash_init(req), &wait);
	if (rc)
		goto out;

	/* whole buffer is contiguous, so a single-entry scatterlist suffices */
	sg_init_one(&sg, buf, len);
	ahash_request_set_crypt(req, &sg, NULL, len);

	ahash_rc = crypto_ahash_update(req);

	/* wait for the update request to complete */
	rc = ahash_wait(ahash_rc, &wait);
	if (!rc) {
		ahash_request_set_crypt(req, NULL, hash->digest, 0);
		rc = ahash_wait(crypto_ahash_final(req), &wait);
	}
out:
	ahash_request_free(req);
	return rc;
}
70198304bcfSMimi Zohar 
70298304bcfSMimi Zohar static int calc_buffer_ahash(const void *buf, loff_t len,
70398304bcfSMimi Zohar 			     struct ima_digest_data *hash)
70498304bcfSMimi Zohar {
70598304bcfSMimi Zohar 	struct crypto_ahash *tfm;
70698304bcfSMimi Zohar 	int rc;
70798304bcfSMimi Zohar 
70898304bcfSMimi Zohar 	tfm = ima_alloc_atfm(hash->algo);
70998304bcfSMimi Zohar 	if (IS_ERR(tfm))
71098304bcfSMimi Zohar 		return PTR_ERR(tfm);
71198304bcfSMimi Zohar 
71298304bcfSMimi Zohar 	rc = calc_buffer_ahash_atfm(buf, len, hash, tfm);
71398304bcfSMimi Zohar 
71498304bcfSMimi Zohar 	ima_free_atfm(tfm);
71598304bcfSMimi Zohar 
71698304bcfSMimi Zohar 	return rc;
71798304bcfSMimi Zohar }
71898304bcfSMimi Zohar 
71911d7646dSDmitry Kasatkin static int calc_buffer_shash_tfm(const void *buf, loff_t size,
72011d7646dSDmitry Kasatkin 				struct ima_digest_data *hash,
72111d7646dSDmitry Kasatkin 				struct crypto_shash *tfm)
72211d7646dSDmitry Kasatkin {
72311d7646dSDmitry Kasatkin 	SHASH_DESC_ON_STACK(shash, tfm);
72411d7646dSDmitry Kasatkin 	unsigned int len;
72511d7646dSDmitry Kasatkin 	int rc;
72611d7646dSDmitry Kasatkin 
72711d7646dSDmitry Kasatkin 	shash->tfm = tfm;
72811d7646dSDmitry Kasatkin 
72911d7646dSDmitry Kasatkin 	hash->length = crypto_shash_digestsize(tfm);
73011d7646dSDmitry Kasatkin 
73111d7646dSDmitry Kasatkin 	rc = crypto_shash_init(shash);
73211d7646dSDmitry Kasatkin 	if (rc != 0)
73311d7646dSDmitry Kasatkin 		return rc;
73411d7646dSDmitry Kasatkin 
73511d7646dSDmitry Kasatkin 	while (size) {
73611d7646dSDmitry Kasatkin 		len = size < PAGE_SIZE ? size : PAGE_SIZE;
73711d7646dSDmitry Kasatkin 		rc = crypto_shash_update(shash, buf, len);
73811d7646dSDmitry Kasatkin 		if (rc)
73911d7646dSDmitry Kasatkin 			break;
74011d7646dSDmitry Kasatkin 		buf += len;
74111d7646dSDmitry Kasatkin 		size -= len;
74211d7646dSDmitry Kasatkin 	}
74311d7646dSDmitry Kasatkin 
74411d7646dSDmitry Kasatkin 	if (!rc)
74511d7646dSDmitry Kasatkin 		rc = crypto_shash_final(shash, hash->digest);
74611d7646dSDmitry Kasatkin 	return rc;
74711d7646dSDmitry Kasatkin }
74811d7646dSDmitry Kasatkin 
74998304bcfSMimi Zohar static int calc_buffer_shash(const void *buf, loff_t len,
75011d7646dSDmitry Kasatkin 			     struct ima_digest_data *hash)
75111d7646dSDmitry Kasatkin {
75211d7646dSDmitry Kasatkin 	struct crypto_shash *tfm;
75311d7646dSDmitry Kasatkin 	int rc;
75411d7646dSDmitry Kasatkin 
75511d7646dSDmitry Kasatkin 	tfm = ima_alloc_tfm(hash->algo);
75611d7646dSDmitry Kasatkin 	if (IS_ERR(tfm))
75711d7646dSDmitry Kasatkin 		return PTR_ERR(tfm);
75811d7646dSDmitry Kasatkin 
75911d7646dSDmitry Kasatkin 	rc = calc_buffer_shash_tfm(buf, len, hash, tfm);
76011d7646dSDmitry Kasatkin 
76111d7646dSDmitry Kasatkin 	ima_free_tfm(tfm);
76211d7646dSDmitry Kasatkin 	return rc;
76311d7646dSDmitry Kasatkin }
76411d7646dSDmitry Kasatkin 
76598304bcfSMimi Zohar int ima_calc_buffer_hash(const void *buf, loff_t len,
76698304bcfSMimi Zohar 			 struct ima_digest_data *hash)
76798304bcfSMimi Zohar {
76898304bcfSMimi Zohar 	int rc;
76998304bcfSMimi Zohar 
77098304bcfSMimi Zohar 	if (ima_ahash_minsize && len >= ima_ahash_minsize) {
77198304bcfSMimi Zohar 		rc = calc_buffer_ahash(buf, len, hash);
77298304bcfSMimi Zohar 		if (!rc)
77398304bcfSMimi Zohar 			return 0;
77498304bcfSMimi Zohar 	}
77598304bcfSMimi Zohar 
77698304bcfSMimi Zohar 	return calc_buffer_shash(buf, len, hash);
77798304bcfSMimi Zohar }
77898304bcfSMimi Zohar 
7798b8c704dSRoberto Sassu static void ima_pcrread(u32 idx, struct tpm_digest *d)
7803323eec9SMimi Zohar {
781ec403d8eSStefan Berger 	if (!ima_tpm_chip)
7823323eec9SMimi Zohar 		return;
7833323eec9SMimi Zohar 
784879b5892SRoberto Sassu 	if (tpm_pcr_read(ima_tpm_chip, idx, d) != 0)
78520ee451fSJoe Perches 		pr_err("Error Communicating to TPM chip\n");
7863323eec9SMimi Zohar }
7873323eec9SMimi Zohar 
7883323eec9SMimi Zohar /*
7896f1a1d10SRoberto Sassu  * The boot_aggregate is a cumulative hash over TPM registers 0 - 7.  With
7906f1a1d10SRoberto Sassu  * TPM 1.2 the boot_aggregate was based on reading the SHA1 PCRs, but with
7916f1a1d10SRoberto Sassu  * TPM 2.0 hash agility, TPM chips could support multiple TPM PCR banks,
7926f1a1d10SRoberto Sassu  * allowing firmware to configure and enable different banks.
7936f1a1d10SRoberto Sassu  *
7946f1a1d10SRoberto Sassu  * Knowing which TPM bank is read to calculate the boot_aggregate digest
7956f1a1d10SRoberto Sassu  * needs to be conveyed to a verifier.  For this reason, use the same
7966f1a1d10SRoberto Sassu  * hash algorithm for reading the TPM PCRs as for calculating the boot
7976f1a1d10SRoberto Sassu  * aggregate digest as stored in the measurement list.
7983323eec9SMimi Zohar  */
7996cc7c266SRoberto Sassu static int ima_calc_boot_aggregate_tfm(char *digest, u16 alg_id,
80009ef5435SDmitry Kasatkin 				       struct crypto_shash *tfm)
8013323eec9SMimi Zohar {
8026f1a1d10SRoberto Sassu 	struct tpm_digest d = { .alg_id = alg_id, .digest = {0} };
80395adc6b4STomas Winkler 	int rc;
80495adc6b4STomas Winkler 	u32 i;
805357aabedSBehan Webster 	SHASH_DESC_ON_STACK(shash, tfm);
8063323eec9SMimi Zohar 
807357aabedSBehan Webster 	shash->tfm = tfm;
80876bb28f6SDmitry Kasatkin 
8096f1a1d10SRoberto Sassu 	pr_devel("calculating the boot-aggregate based on TPM bank: %04x\n",
8106f1a1d10SRoberto Sassu 		 d.alg_id);
8116f1a1d10SRoberto Sassu 
812357aabedSBehan Webster 	rc = crypto_shash_init(shash);
8133323eec9SMimi Zohar 	if (rc != 0)
8143323eec9SMimi Zohar 		return rc;
8153323eec9SMimi Zohar 
81620c59ce0SMaurizio Drocco 	/* cumulative digest over TPM registers 0-7 */
8173323eec9SMimi Zohar 	for (i = TPM_PCR0; i < TPM_PCR8; i++) {
818879b5892SRoberto Sassu 		ima_pcrread(i, &d);
8193323eec9SMimi Zohar 		/* now accumulate with current aggregate */
8206f1a1d10SRoberto Sassu 		rc = crypto_shash_update(shash, d.digest,
8216f1a1d10SRoberto Sassu 					 crypto_shash_digestsize(tfm));
82260386b85SRoberto Sassu 		if (rc != 0)
82360386b85SRoberto Sassu 			return rc;
8243323eec9SMimi Zohar 	}
82520c59ce0SMaurizio Drocco 	/*
82620c59ce0SMaurizio Drocco 	 * Extend cumulative digest over TPM registers 8-9, which contain
82720c59ce0SMaurizio Drocco 	 * measurement for the kernel command line (reg. 8) and image (reg. 9)
82820c59ce0SMaurizio Drocco 	 * in a typical PCR allocation. Registers 8-9 are only included in
82920c59ce0SMaurizio Drocco 	 * non-SHA1 boot_aggregate digests to avoid ambiguity.
83020c59ce0SMaurizio Drocco 	 */
83120c59ce0SMaurizio Drocco 	if (alg_id != TPM_ALG_SHA1) {
83220c59ce0SMaurizio Drocco 		for (i = TPM_PCR8; i < TPM_PCR10; i++) {
83320c59ce0SMaurizio Drocco 			ima_pcrread(i, &d);
83420c59ce0SMaurizio Drocco 			rc = crypto_shash_update(shash, d.digest,
83520c59ce0SMaurizio Drocco 						crypto_shash_digestsize(tfm));
83620c59ce0SMaurizio Drocco 		}
83720c59ce0SMaurizio Drocco 	}
8383323eec9SMimi Zohar 	if (!rc)
839357aabedSBehan Webster 		crypto_shash_final(shash, digest);
8403323eec9SMimi Zohar 	return rc;
8413323eec9SMimi Zohar }
84209ef5435SDmitry Kasatkin 
/*
 * ima_calc_boot_aggregate - compute the boot_aggregate digest
 * @hash: in: ->algo is the preferred hash algorithm; out: ->algo, ->length
 *        and ->digest describe the digest actually produced
 *
 * Pick the TPM bank to base the aggregate on, in priority order: a bank
 * exactly matching the requested algorithm, else a SHA256 bank, else a
 * SHA1 bank.  Returns 0 (leaving hash->digest zeroed) when no suitable
 * bank exists, otherwise the result of hashing PCRs with the chosen
 * algorithm, or a negative errno.
 *
 * NOTE(review): ima_tpm_chip is dereferenced without a NULL check here —
 * presumably callers only invoke this once a chip is known to exist;
 * verify against the call sites.
 */
int ima_calc_boot_aggregate(struct ima_digest_data *hash)
{
	struct crypto_shash *tfm;
	u16 crypto_id, alg_id;
	int rc, i, bank_idx = -1;

	for (i = 0; i < ima_tpm_chip->nr_allocated_banks; i++) {
		crypto_id = ima_tpm_chip->allocated_banks[i].crypto_id;
		/* exact match for the requested algorithm wins outright */
		if (crypto_id == hash->algo) {
			bank_idx = i;
			break;
		}

		/* otherwise prefer SHA256 ... */
		if (crypto_id == HASH_ALGO_SHA256)
			bank_idx = i;

		/* ... and fall back to SHA1 only if nothing was found yet */
		if (bank_idx == -1 && crypto_id == HASH_ALGO_SHA1)
			bank_idx = i;
	}

	if (bank_idx == -1) {
		/* not an error: boot_aggregate stays all-zero */
		pr_err("No suitable TPM algorithm for boot aggregate\n");
		return 0;
	}

	/* report the algorithm actually used back to the caller */
	hash->algo = ima_tpm_chip->allocated_banks[bank_idx].crypto_id;

	tfm = ima_alloc_tfm(hash->algo);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	hash->length = crypto_shash_digestsize(tfm);
	alg_id = ima_tpm_chip->allocated_banks[bank_idx].alg_id;
	rc = ima_calc_boot_aggregate_tfm(hash->digest, alg_id, tfm);

	ima_free_tfm(tfm);

	return rc;
}
882