xref: /linux/drivers/crypto/hisilicon/sec2/sec.h (revision 44a8c96edd0ee9320a1ad87afc7b10f38e55d5ec)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 HiSilicon Limited. */
3 
4 #ifndef __HISI_SEC_V2_H
5 #define __HISI_SEC_V2_H
6 
7 #include <linux/hisi_acc_qm.h>
8 #include "sec_crypto.h"
9 
#define SEC_PBUF_SZ		512	/* bytes in the per-request linear pbuf (see struct sec_request_buf) */
#define SEC_MAX_MAC_LEN		64	/* max MAC/auth-tag bytes (sizes sec_aead_req.out_mac_buf) */
#define SEC_IV_SIZE		24	/* per-request inline IV buffer size in bytes */
#define SEC_SGE_NR_NUM		4	/* SGE slots carried by one struct sec_hw_sgl chunk */
#define SEC_SGL_ALIGN_SIZE	64	/* alignment required for struct sec_hw_sgl */
15 
/*
 * Algorithm resource per hardware SEC queue: pre-allocated DMA-capable
 * buffers for requests on the queue.  Each CPU pointer is paired with
 * the device (DMA) address of the same memory.
 */
struct sec_alg_res {
	u8 *pbuf;		/* linear packet-buffer slab */
	dma_addr_t pbuf_dma;	/* DMA address of @pbuf */
	u8 *c_ivin;		/* cipher input-IV buffer */
	dma_addr_t c_ivin_dma;	/* DMA address of @c_ivin */
	u8 *a_ivin;		/* auth input-IV buffer */
	dma_addr_t a_ivin_dma;	/* DMA address of @a_ivin */
	u8 *out_mac;		/* output MAC (auth tag) buffer */
	dma_addr_t out_mac_dma;	/* DMA address of @out_mac */
	u16 depth;		/* number of per-request slots (queue depth) — TODO confirm vs. allocator */
};
28 
/*
 * One hardware scatter-gather entry.  The __le32 fields indicate the
 * layout is read by the SEC device itself - do not reorder or repack.
 */
struct sec_hw_sge {
	dma_addr_t buf;		/* DMA address of the data segment */
	void *page_ctrl;
	__le32 len;		/* segment length in bytes */
	__le32 pad;
	__le32 pad0;
	__le32 pad1;
};
37 
/*
 * Hardware scatter-gather list chunk holding SEC_SGE_NR_NUM entries,
 * chained to the next chunk via @next (CPU) / @next_dma (device).
 * Little-endian fields and the explicit alignment indicate a
 * device-defined layout - do not reorder fields.
 */
struct sec_hw_sgl {
	dma_addr_t next_dma;		/* DMA address of the next chunk in the chain */
	__le16 entry_sum_in_chain;	/* valid SGEs across the whole chain (per field name) */
	__le16 entry_sum_in_sgl;	/* valid SGEs in this chunk */
	__le16 entry_length_in_sgl;	/* SGE slots provided by this chunk */
	__le16 pad0;
	__le64 pad1[5];
	struct sec_hw_sgl *next;	/* CPU pointer mirroring @next_dma */
	struct sec_hw_sge sge_entries[SEC_SGE_NR_NUM];
} __aligned(SEC_SGL_ALIGN_SIZE);
48 
/* Paired source/destination hardware SGLs for one request */
struct sec_src_dst_buf {
	struct sec_hw_sgl in;	/* input (source) SGL */
	struct sec_hw_sgl out;	/* output (destination) SGL */
};
53 
/*
 * Per-request data buffer: either a source/destination SGL pair, or -
 * for payloads that fit - a single SEC_PBUF_SZ linear buffer (the two
 * modes share storage via the anonymous union).
 */
struct sec_request_buf {
	union {
		struct sec_src_dst_buf data_buf;	/* scatter-gather mode */
		__u8 pbuf[SEC_PBUF_SZ];			/* linear (pbuf) mode */
	};
	dma_addr_t in_dma;	/* DMA address of the input data */
	dma_addr_t out_dma;	/* DMA address of the output data */
};
62 
/* Cipher request of SEC private */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_out;		/* output hardware SGL */
	dma_addr_t c_out_dma;			/* DMA address of @c_out */
	u8 *c_ivin;				/* cipher input IV (CPU address) */
	dma_addr_t c_ivin_dma;			/* DMA address of @c_ivin */
	struct skcipher_request *sk_req;	/* originating crypto-API request */
	u32 c_len;				/* cipher data length in bytes */
	bool encrypt;				/* true = encrypt, false = decrypt */
	__u8 c_ivin_buf[SEC_IV_SIZE];		/* inline IV storage; presumably backs @c_ivin — verify against sec_crypto.c */
};
74 
/* AEAD request of SEC private */
struct sec_aead_req {
	u8 *out_mac;				/* output MAC (auth tag) buffer, CPU address */
	dma_addr_t out_mac_dma;			/* DMA address of @out_mac */
	u8 *a_ivin;				/* auth input IV (CPU address) */
	dma_addr_t a_ivin_dma;			/* DMA address of @a_ivin */
	struct aead_request *aead_req;		/* originating crypto-API request */
	__u8 a_ivin_buf[SEC_IV_SIZE];		/* inline IV storage; presumably backs @a_ivin — verify against sec_crypto.c */
	__u8 out_mac_buf[SEC_MAX_MAC_LEN];	/* inline MAC storage; presumably backs @out_mac — verify */
};
84 
/* Backlog of requests waiting to be submitted to a queue */
struct sec_instance_backlog {
	struct list_head list;	/* pending requests; presumably linked via sec_req.list — confirm */
	spinlock_t lock;	/* protects @list */
};
89 
/* SEC request of Crypto */
struct sec_req {
	union {
		struct sec_sqe sec_sqe;		/* v2 hardware BD for this request */
		struct sec_sqe3 sec_sqe3;	/* v3 hardware BD for this request */
	};
	struct sec_ctx *ctx;		/* owning TFM context */
	struct sec_qp_ctx *qp_ctx;	/* queue context this request is bound to */

	/*
	 * Common parameter of the SEC request.
	 * (plain comment, not kernel-doc: this is an interior note)
	 */
	struct hisi_acc_hw_sgl *in;	/* input hardware SGL */
	dma_addr_t in_dma;		/* DMA address of @in */
	struct sec_cipher_req c_req;	/* cipher-specific request state */
	struct sec_aead_req aead_req;	/* AEAD-specific request state */
	struct crypto_async_request *base;	/* generic crypto request for completion */

	int err_type;	/* hardware/driver error classification for this BD */
	int req_id;	/* per-queue request slot id */
	u32 flag;	/* crypto-API request flags */

	bool use_pbuf;	/* true when the linear pbuf path is used instead of SGLs */

	struct list_head list;			/* linkage on the instance backlog */
	struct sec_instance_backlog *backlog;	/* backlog this request may wait on */
	struct sec_request_buf buf;		/* per-request data buffer (SGL pair or pbuf) */
};
118 
/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Transfer request data (e.g. the IV) into the queue
 *               resources before BD fill — TODO confirm against sec_crypto.c
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Call back for the request
 * @process: Main processing logic of Skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};
137 
/* SEC auth context */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;			/* DMA address of @a_key */
	u8 *a_key;				/* authentication key buffer */
	u8 a_key_len;				/* authentication key length in bytes */
	u8 a_alg;				/* auth algorithm selector for the BD — confirm encoding in sec_crypto.h */
	struct crypto_shash *hash_tfm;		/* software shash; presumably used for key hashing — confirm */
	struct crypto_aead *fallback_aead_tfm;	/* software AEAD fallback TFM */
};
147 
/* SEC cipher context which holds the cipher's related state */
struct sec_cipher_ctx {
	u8 *c_key;			/* cipher key buffer */
	dma_addr_t c_key_dma;		/* DMA address of @c_key */
	sector_t iv_offset;		/* IV offset; sector_t suggests disk/XTS-style IV generation — confirm */
	u32 c_gran_size;		/* cipher granularity size — confirm units against sec_crypto.c */
	u32 ivsize;			/* IV size in bytes for the selected algorithm */
	u8 c_mode;			/* cipher mode selector for the BD */
	u8 c_alg;			/* cipher algorithm selector for the BD */
	u8 c_key_len;			/* key length selector/length in bytes — confirm encoding */

	/* add software support */
	bool fallback;			/* true when requests must use the software fallback */
	struct crypto_sync_skcipher *fbtfm;	/* software skcipher fallback TFM */
};
163 
/* SEC queue context which holds one hardware queue's related state */
struct sec_qp_ctx {
	struct hisi_qp *qp;			/* underlying QM queue pair */
	struct sec_req **req_list;		/* in-flight requests indexed by req_id */
	struct idr req_idr;			/* allocator for request ids */
	struct sec_alg_res *res;		/* per-slot DMA buffer resources */
	struct sec_ctx *ctx;			/* owning TFM context */
	spinlock_t req_lock;			/* protects request submission state — confirm exact scope */
	spinlock_t id_lock;			/* protects @req_idr */
	struct hisi_acc_sgl_pool *c_in_pool;	/* SGL pool for input buffers */
	struct hisi_acc_sgl_pool *c_out_pool;	/* SGL pool for output buffers */
	struct sec_instance_backlog backlog;	/* requests waiting for queue space */
	u16 send_head;				/* next send position in the queue — confirm semantics */
};
178 
/* Crypto-API algorithm class a sec_ctx serves */
enum sec_alg_type {
	SEC_SKCIPHER,	/* symmetric cipher (skcipher) */
	SEC_AEAD	/* authenticated encryption (AEAD) */
};
183 
/* SEC Crypto TFM context which holds queue, cipher, etc. related state */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;	/* array of queue contexts used by this TFM */
	struct sec_dev *sec;		/* owning SEC device */
	const struct sec_req_op *req_op;	/* request operations (skcipher vs AEAD) */
	struct hisi_qp **qps;		/* QM queue pairs backing @qp_ctx */

	/* Half queues for encipher, and half for decipher */
	u32 hlf_q_num;

	/* Current cyclic index to select a queue for encipher */
	atomic_t enc_qcyclic;

	 /* Current cyclic index to select a queue for decipher */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;	/* SEC_SKCIPHER or SEC_AEAD */
	bool pbuf_supported;		/* hardware supports the linear pbuf path */
	struct sec_cipher_ctx c_ctx;	/* cipher-side context */
	struct sec_auth_ctx a_ctx;	/* auth-side context */
	u8 type_supported;		/* supported BD type(s) — confirm encoding in sec_crypto.c */
	struct device *dev;		/* device used for DMA mapping and logging */
};
207 
208 
/* Indexes of the debugfs files exposed in sec_debug.files[] */
enum sec_debug_file_index {
	SEC_CLEAR_ENABLE,	/* "clear enable" control file */
	SEC_DEBUG_FILE_NUM,	/* sentinel: number of debug files */
};
213 
/* One debugfs file instance */
struct sec_debug_file {
	enum sec_debug_file_index index;	/* which file this is */
	spinlock_t lock;			/* serializes access to the file's state */
	struct hisi_qm *qm;			/* QM the file reads/writes */
};
219 
/* Driver statistics counters (DFX) */
struct sec_dfx {
	atomic64_t send_cnt;		/* BDs submitted to hardware */
	atomic64_t recv_cnt;		/* completions received */
	atomic64_t send_busy_cnt;	/* submissions rejected because the queue was busy */
	atomic64_t recv_busy_cnt;	/* busy conditions seen on the receive path */
	atomic64_t err_bd_cnt;		/* BDs completed with a hardware error */
	atomic64_t invalid_req_cnt;	/* completions that matched no valid request */
	atomic64_t done_flag_cnt;	/* completions with an unexpected done flag — confirm */
};
229 
/* Aggregated debug state: statistics plus debugfs files */
struct sec_debug {
	struct sec_dfx dfx;	/* statistics counters */
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];	/* debugfs file instances */
};
234 
/* Per-device state; embeds the QM so container_of(qm) recovers it */
struct sec_dev {
	struct hisi_qm qm;	/* accelerator queue-management core (must stay first for container_of use — confirm) */
	struct sec_debug debug;	/* debug/statistics state */
	u32 ctx_q_num;		/* queues allocated per crypto context */
	bool iommu_used;	/* true when the device operates behind an IOMMU */
};
241 
/*
 * Device capability identifiers, presumably indexes into the SEC
 * capability register table read via the QM — confirm against the table
 * definition in sec_main.c.
 */
enum sec_cap_type {
	SEC_QM_NFE_MASK_CAP = 0x0,
	SEC_QM_RESET_MASK_CAP,
	SEC_QM_OOO_SHUTDOWN_MASK_CAP,
	SEC_QM_CE_MASK_CAP,
	SEC_NFE_MASK_CAP,
	SEC_RESET_MASK_CAP,
	SEC_OOO_SHUTDOWN_MASK_CAP,
	SEC_CE_MASK_CAP,
	SEC_CLUSTER_NUM_CAP,
	SEC_CORE_TYPE_NUM_CAP,
	SEC_CORE_NUM_CAP,
	SEC_CORES_PER_CLUSTER_NUM_CAP,
	SEC_CORE_ENABLE_BITMAP,
	SEC_DRV_ALG_BITMAP_LOW,
	SEC_DRV_ALG_BITMAP_HIGH,
	SEC_DEV_ALG_BITMAP_LOW,
	SEC_DEV_ALG_BITMAP_HIGH,
	SEC_CORE1_ALG_BITMAP_LOW,
	SEC_CORE1_ALG_BITMAP_HIGH,
	SEC_CORE2_ALG_BITMAP_LOW,
	SEC_CORE2_ALG_BITMAP_HIGH,
	SEC_CORE3_ALG_BITMAP_LOW,
	SEC_CORE3_ALG_BITMAP_HIGH,
	SEC_CORE4_ALG_BITMAP_LOW,
	SEC_CORE4_ALG_BITMAP_HIGH,
};
269 
/*
 * Row identifiers for the capability information table exposed through
 * debugfs — ordering is part of the table layout, do not reorder.
 * Confirm against the table definition in sec_main.c.
 */
enum sec_cap_table_type {
	QM_RAS_NFE_TYPE = 0x0,
	QM_RAS_NFE_RESET,
	QM_RAS_CE_TYPE,
	SEC_RAS_NFE_TYPE,
	SEC_RAS_NFE_RESET,
	SEC_RAS_CE_TYPE,
	SEC_CORE_INFO,
	SEC_CORE_EN,
	SEC_DRV_ALG_BITMAP_LOW_TB,
	SEC_DRV_ALG_BITMAP_HIGH_TB,
	SEC_ALG_BITMAP_LOW,
	SEC_ALG_BITMAP_HIGH,
	SEC_CORE1_BITMAP_LOW,
	SEC_CORE1_BITMAP_HIGH,
	SEC_CORE2_BITMAP_LOW,
	SEC_CORE2_BITMAP_HIGH,
	SEC_CORE3_BITMAP_LOW,
	SEC_CORE3_BITMAP_HIGH,
	SEC_CORE4_BITMAP_LOW,
	SEC_CORE4_BITMAP_HIGH,
};
292 
/* Release the QP array obtained from sec_create_qps() */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
/* Allocate the QPs this driver instance will use; NULL on failure — confirm */
struct hisi_qp **sec_create_qps(void);
/* Register/unregister the SEC algorithms with the crypto API */
int sec_register_to_crypto(struct hisi_qm *qm);
void sec_unregister_from_crypto(struct hisi_qm *qm);
/* Read a 64-bit algorithm bitmap from two 32-bit capability entries */
u64 sec_get_alg_bitmap(struct hisi_qm *qm, u32 high, u32 low);
298 #endif
299