xref: /linux/drivers/crypto/caam/intern.h (revision 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * CAAM/SEC 4.x driver backend
 * Private/internal definitions between modules
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 * Copyright 2019, 2023 NXP
 */

#ifndef INTERN_H
#define INTERN_H

#include "ctrl.h"
#include <crypto/engine.h>

/* Currently comes from Kconfig param as a power of 2 (driver-required) */
#define JOBR_DEPTH (1 << CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE)

/*
 * Maximum size for crypto-engine software queue based on Job Ring
 * size (JOBR_DEPTH) and a THRESHOLD (reserved for the non-crypto-API
 * requests that are not passed through crypto-engine)
 */
#define THRESHOLD 15
#define CRYPTO_ENGINE_MAX_QLEN (JOBR_DEPTH - THRESHOLD)
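
/*
 * Worked example (assuming the default Kconfig setting of
 * CONFIG_CRYPTO_DEV_FSL_CAAM_RINGSIZE = 9): JOBR_DEPTH = 1 << 9 = 512
 * entries, so CRYPTO_ENGINE_MAX_QLEN = 512 - 15 = 497 requests can be
 * queued through crypto-engine while 15 slots remain reserved for jobs
 * submitted directly via caam_jr_enqueue().
 */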

/* Kconfig params for interrupt coalescing if selected (else zero) */
#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_INTC
#define JOBR_INTC JRCFG_ICEN
#define JOBR_INTC_TIME_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_TIME_THLD
#define JOBR_INTC_COUNT_THLD CONFIG_CRYPTO_DEV_FSL_CAAM_INTC_COUNT_THLD
#else
#define JOBR_INTC 0
#define JOBR_INTC_TIME_THLD 0
#define JOBR_INTC_COUNT_THLD 0
#endif
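
/*
 * These values are consumed when a Job Ring is initialised: the driver
 * enables coalescing and programs both thresholds into the ring's
 * rconfig_lo register, roughly as sketched below (shift macros are
 * defined in regs.h; this is an illustration, not verbatim jr.c code):
 *
 *	JOBR_INTC |
 *	(JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
 *	(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT)
 */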

/*
 * Storage for tracking each in-process entry moving across a ring
 * Each entry on an output ring needs one of these
 */
struct caam_jrentry_info {
	void (*callbk)(struct device *dev, u32 *desc, u32 status, void *arg);
	void *cbkarg;	/* Argument per ring entry */
	u32 *desc_addr_virt;	/* Stored virt addr for postprocessing */
	dma_addr_t desc_addr_dma;	/* Stored bus addr for done matching */
	u32 desc_size;	/* Stored size for postprocessing, header derived */
};
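
/*
 * Rough sketch (not verbatim jr.c code) of how caam_jr_enqueue() fills an
 * entry before writing the descriptor address to the input ring:
 *
 *	head_entry = &jrp->entinfo[head];
 *	head_entry->desc_addr_virt = desc;
 *	head_entry->desc_size = desc_size;
 *	head_entry->callbk = cbk;
 *	head_entry->cbkarg = areq;
 *	head_entry->desc_addr_dma = desc_dma;
 *
 * The dequeue path then matches a completed job's descriptor DMA address
 * against desc_addr_dma to locate the callback to invoke.
 */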

struct caam_jr_state {
	dma_addr_t inpbusaddr;
	dma_addr_t outbusaddr;
};

struct caam_jr_dequeue_params {
	struct device *dev;
	int enable_itr;
};

/* Private sub-storage for a single JobR */
struct caam_drv_private_jr {
	struct list_head	list_node;	/* Job Ring device list */
	struct device		*dev;
	int ridx;
	struct caam_job_ring __iomem *rregs;	/* JobR's register space */
	struct tasklet_struct irqtask;
	struct caam_jr_dequeue_params tasklet_params;
	int irq;			/* One per queue */
	bool hwrng;

	/* Number of scatterlist crypt transforms active on the JobR */
	atomic_t tfm_count ____cacheline_aligned;

	/* Job ring info */
	struct caam_jrentry_info *entinfo;	/* Alloc'ed 1 per ring entry */
	spinlock_t inplock ____cacheline_aligned; /* Input ring index lock */
	u32 inpring_avail;	/* Number of free entries in input ring */
	int head;			/* entinfo (s/w ring) head index */
	void *inpring;			/* Base of input ring, alloc
					 * DMA-safe */
	int out_ring_read_index;	/* Output index "tail" */
	int tail;			/* entinfo (s/w ring) tail index */
	void *outring;			/* Base of output ring, DMA-safe */
	struct crypto_engine *engine;

	struct caam_jr_state state;	/* State of the JR during PM */
};

struct caam_ctl_state {
	struct masterid deco_mid[16];
	struct masterid jr_mid[4];
	u32 mcr;
	u32 scfgr;
};

/*
 * Driver-private storage for a single CAAM block instance
 */
struct caam_drv_private {
	/* Physical-presence section */
	struct caam_ctrl __iomem *ctrl; /* controller region */
	struct caam_deco __iomem *deco; /* DECO/CCB views */
	struct caam_assurance __iomem *assure;
	struct caam_queue_if __iomem *qi; /* QI control region */
	struct caam_job_ring __iomem *jr[4];	/* JobR's register space */

	struct iommu_domain *domain;

	/*
	 * Detected geometry block. Filled in from device tree if powerpc,
	 * or from register-based version detection code
	 */
	u8 total_jobrs;		/* Total Job Rings in device */
	u8 qi_present;		/* Nonzero if QI present in device */
	u8 blob_present;	/* Nonzero if BLOB support present in device */
	u8 mc_en;		/* Nonzero if MC f/w is active */
	u8 optee_en;		/* Nonzero if OP-TEE f/w is active */
	u8 no_page0;		/* Nonzero if register page 0 is not controlled by Linux */
	bool pr_support;        /* RNG prediction resistance available */
	int secvio_irq;		/* Security violation interrupt number */
	int virt_en;		/* Virtualization enabled in CAAM */
	int era;		/* CAAM Era (internal HW revision) */

#define	RNG4_MAX_HANDLES 2
	/* RNG4 block */
	u32 rng4_sh_init;	/* This bitmap shows which of the State
				   Handles of the RNG4 block are initialized
				   by this driver */

	struct clk_bulk_data *clks;
	int num_clks;
	/*
	 * debugfs entries for developer view into driver/device
	 * variables at runtime.
	 */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ctl; /* controller dir */
	struct debugfs_blob_wrapper ctl_kek_wrap, ctl_tkek_wrap, ctl_tdsk_wrap;
#endif

	int caam_off_during_pm;		/* If the CAAM is reset after suspend */
	struct caam_ctl_state state;	/* State of the CTL during PM */
};
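
/*
 * The controller probe stores a pointer to this structure as the ctrl
 * device's driver data, so the rest of the driver typically reaches it
 * with a pattern along these lines (sketch, assuming the usual drvdata
 * convention):
 *
 *	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
 */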

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API

int caam_algapi_init(struct device *dev);
void caam_algapi_exit(void);

#else

static inline int caam_algapi_init(struct device *dev)
{
	return 0;
}

static inline void caam_algapi_exit(void)
{
}

#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API */

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API

int caam_algapi_hash_init(struct device *dev);
void caam_algapi_hash_exit(void);

#else

static inline int caam_algapi_hash_init(struct device *dev)
{
	return 0;
}

static inline void caam_algapi_hash_exit(void)
{
}

#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_AHASH_API */

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API

int caam_pkc_init(struct device *dev);
void caam_pkc_exit(void);

#else

static inline int caam_pkc_init(struct device *dev)
{
	return 0;
}

static inline void caam_pkc_exit(void)
{
}

#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PKC_API */

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API

int caam_rng_init(struct device *dev);
void caam_rng_exit(struct device *dev);

#else

static inline int caam_rng_init(struct device *dev)
{
	return 0;
}

static inline void caam_rng_exit(struct device *dev) {}

#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_RNG_API */

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API

int caam_prng_register(struct device *dev);
void caam_prng_unregister(void *data);

#else

static inline int caam_prng_register(struct device *dev)
{
	return 0;
}

static inline void caam_prng_unregister(void *data) {}
#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_PRNG_API */

#ifdef CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI

int caam_qi_algapi_init(struct device *dev);
void caam_qi_algapi_exit(void);

#else

static inline int caam_qi_algapi_init(struct device *dev)
{
	return 0;
}

static inline void caam_qi_algapi_exit(void)
{
}

#endif /* CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_QI */

static inline u64 caam_get_dma_mask(struct device *dev)
{
	struct device_node *nprop = dev->of_node;

	if (caam_ptr_sz != sizeof(u64))
		return DMA_BIT_MASK(32);

	if (caam_dpaa2)
		return DMA_BIT_MASK(49);

	if (of_device_is_compatible(nprop, "fsl,sec-v5.0-job-ring") ||
	    of_device_is_compatible(nprop, "fsl,sec-v5.0"))
		return DMA_BIT_MASK(40);

	return DMA_BIT_MASK(36);
}
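
/*
 * Typical use: the computed mask is handed straight to the DMA API during
 * probe, e.g. (sketch of the usual pattern):
 *
 *	ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
 *	if (ret)
 *		return ret;
 */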


#endif /* INTERN_H */