xref: /linux/drivers/net/ovpn/crypto_aead.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
// SPDX-License-Identifier: GPL-2.0
/*  OpenVPN data channel offload
 *
 *  Copyright (C) 2020-2025 OpenVPN, Inc.
 *
 *  Author:	James Yonan <james@openvpn.net>
 *		Antonio Quartulli <antonio@openvpn.net>
 */

#include <crypto/aead.h>
#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/udp.h>

#include "ovpnpriv.h"
#include "main.h"
#include "io.h"
#include "pktid.h"
#include "crypto_aead.h"
#include "crypto.h"
#include "peer.h"
#include "proto.h"
#include "skb.h"

#define OVPN_AUTH_TAG_SIZE	16
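/* the AAD is the cleartext packet head that is authenticated but not
 * encrypted: the 4-byte opcode followed by the wire nonce (packet ID)
 */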
#define OVPN_AAD_SIZE		(OVPN_OPCODE_SIZE + OVPN_NONCE_WIRE_SIZE)

#define ALG_NAME_AES		"gcm(aes)"
#define ALG_NAME_CHACHAPOLY	"rfc7539(chacha20,poly1305)"

static int ovpn_aead_encap_overhead(const struct ovpn_crypto_key_slot *ks)
{
	return  OVPN_OPCODE_SIZE +			/* OP header size */
		sizeof(u32) +				/* Packet ID */
		crypto_aead_authsize(ks->encrypt);	/* Auth Tag */
}

int ovpn_aead_encrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
		      struct sk_buff *skb)
{
	const unsigned int tag_size = crypto_aead_authsize(ks->encrypt);
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	int nfrags, ret;
	u32 pktid, op;
	u8 *iv;

	ovpn_skb_cb(skb)->peer = peer;
	ovpn_skb_cb(skb)->ks = ks;

	/* Sample AEAD header format:
	 * 48000001 00000005 7e7046bd 444a7e28 cc6387b1 64a4d6c1 380275a...
	 * [ OP32 ] [seq # ] [             auth tag            ] [ payload ... ]
	 *          [4-byte
	 *          IV head]
	 */

	/* check that there's enough headroom in the skb for packet
	 * encapsulation
	 */
	if (unlikely(skb_cow_head(skb, OVPN_HEAD_ROOM)))
		return -ENOBUFS;

	/* get number of skb frags and ensure that packet data is writable */
	nfrags = skb_cow_data(skb, 0, &trailer);
	if (unlikely(nfrags < 0))
		return nfrags;

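	/* the sg table built below needs nfrags + 2 entries (AAD, payload
	 * frags, auth tag), so bail out if the skb carries too many frags
	 */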
	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
		return -ENOSPC;

	/* sg may be required by async crypto */
	ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
				       (nfrags + 2), GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->sg))
		return -ENOMEM;

	sg = ovpn_skb_cb(skb)->sg;

	/* sg table:
	 * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
	 * 1, 2, 3, ..., n: payload,
	 * n+1: auth_tag (len=tag_size)
	 */
	sg_init_table(sg, nfrags + 2);

	/* build scatterlist to encrypt packet payload */
	ret = skb_to_sgvec_nomark(skb, sg + 1, 0, skb->len);
	if (unlikely(ret < 0)) {
		netdev_err(peer->ovpn->dev,
			   "encrypt: cannot map skb to sg: %d\n", ret);
		return ret;
	}

	/* append auth_tag onto scatterlist */
	__skb_push(skb, tag_size);
	sg_set_buf(sg + ret + 1, skb->data, tag_size);

	/* obtain packet ID, which is used both as a first
	 * 4 bytes of nonce and last 4 bytes of associated data.
	 */
	ret = ovpn_pktid_xmit_next(&ks->pid_xmit, &pktid);
	if (unlikely(ret < 0))
		return ret;

	/* iv may be required by async crypto */
	ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->iv))
		return -ENOMEM;

	iv = ovpn_skb_cb(skb)->iv;

	/* concat 4 bytes packet id and 8 bytes nonce tail into 12 bytes
	 * nonce
	 */
	ovpn_pktid_aead_write(pktid, ks->nonce_tail_xmit, iv);

	/* make space for packet id and push it to the front */
	__skb_push(skb, OVPN_NONCE_WIRE_SIZE);
	memcpy(skb->data, iv, OVPN_NONCE_WIRE_SIZE);

	/* add packet op as head of additional data */
	op = ovpn_opcode_compose(OVPN_DATA_V2, ks->key_id, peer->id);
	__skb_push(skb, OVPN_OPCODE_SIZE);
	BUILD_BUG_ON(sizeof(op) != OVPN_OPCODE_SIZE);
	*((__force __be32 *)skb->data) = htonl(op);

	/* AEAD Additional data */
	sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);

	req = aead_request_alloc(ks->encrypt, GFP_ATOMIC);
	if (unlikely(!req))
		return -ENOMEM;

	ovpn_skb_cb(skb)->req = req;

	/* setup async crypto operation */
	aead_request_set_tfm(req, ks->encrypt);
	aead_request_set_callback(req, 0, ovpn_encrypt_post, skb);
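	/* cryptlen is the payload only: the opcode, wire nonce and auth tag
	 * pushed above are accounted for by ovpn_aead_encap_overhead()
	 */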
	aead_request_set_crypt(req, sg, sg,
			       skb->len - ovpn_aead_encap_overhead(ks), iv);
	aead_request_set_ad(req, OVPN_AAD_SIZE);

	/* encrypt it */
	return crypto_aead_encrypt(req);
}

int ovpn_aead_decrypt(struct ovpn_peer *peer, struct ovpn_crypto_key_slot *ks,
		      struct sk_buff *skb)
{
	const unsigned int tag_size = crypto_aead_authsize(ks->decrypt);
	int ret, payload_len, nfrags;
	unsigned int payload_offset;
	struct aead_request *req;
	struct sk_buff *trailer;
	struct scatterlist *sg;
	u8 *iv;

	payload_offset = OVPN_AAD_SIZE + tag_size;
	payload_len = skb->len - payload_offset;

	ovpn_skb_cb(skb)->payload_offset = payload_offset;
	ovpn_skb_cb(skb)->peer = peer;
	ovpn_skb_cb(skb)->ks = ks;

	/* sanity check on packet size, payload size must be >= 0 */
	if (unlikely(payload_len < 0))
		return -EINVAL;

	/* Prepare the skb data buffer to be accessed up until the auth tag.
	 * This is required because this area is directly mapped into the sg
	 * list.
	 */
	if (unlikely(!pskb_may_pull(skb, payload_offset)))
		return -ENODATA;

	/* get number of skb frags and ensure that packet data is writable */
	nfrags = skb_cow_data(skb, 0, &trailer);
	if (unlikely(nfrags < 0))
		return nfrags;

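	/* as on the transmit side, the sg table needs nfrags + 2 entries:
	 * AAD, payload frags and auth tag
	 */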
	if (unlikely(nfrags + 2 > (MAX_SKB_FRAGS + 2)))
		return -ENOSPC;

	/* sg may be required by async crypto */
	ovpn_skb_cb(skb)->sg = kmalloc(sizeof(*ovpn_skb_cb(skb)->sg) *
				       (nfrags + 2), GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->sg))
		return -ENOMEM;

	sg = ovpn_skb_cb(skb)->sg;

	/* sg table:
	 * 0: op, wire nonce (AD, len=OVPN_OPCODE_SIZE+OVPN_NONCE_WIRE_SIZE),
	 * 1, 2, 3, ..., n: payload,
	 * n+1: auth_tag (len=tag_size)
	 */
	sg_init_table(sg, nfrags + 2);

	/* packet op is head of additional data */
	sg_set_buf(sg, skb->data, OVPN_AAD_SIZE);

	/* build scatterlist to decrypt packet payload */
	ret = skb_to_sgvec_nomark(skb, sg + 1, payload_offset, payload_len);
	if (unlikely(ret < 0)) {
		netdev_err(peer->ovpn->dev,
			   "decrypt: cannot map skb to sg: %d\n", ret);
		return ret;
	}

	/* append auth_tag onto scatterlist */
	sg_set_buf(sg + ret + 1, skb->data + OVPN_AAD_SIZE, tag_size);

	/* iv may be required by async crypto */
	ovpn_skb_cb(skb)->iv = kmalloc(OVPN_NONCE_SIZE, GFP_ATOMIC);
	if (unlikely(!ovpn_skb_cb(skb)->iv))
		return -ENOMEM;

	iv = ovpn_skb_cb(skb)->iv;

	/* copy nonce into IV buffer */
	memcpy(iv, skb->data + OVPN_OPCODE_SIZE, OVPN_NONCE_WIRE_SIZE);
	memcpy(iv + OVPN_NONCE_WIRE_SIZE, ks->nonce_tail_recv,
	       OVPN_NONCE_TAIL_SIZE);

	req = aead_request_alloc(ks->decrypt, GFP_ATOMIC);
	if (unlikely(!req))
		return -ENOMEM;

	ovpn_skb_cb(skb)->req = req;

	/* setup async crypto operation */
	aead_request_set_tfm(req, ks->decrypt);
	aead_request_set_callback(req, 0, ovpn_decrypt_post, skb);
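	/* cryptlen covers the ciphertext plus the auth tag, which the AEAD
	 * core verifies as part of the decryption
	 */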
	aead_request_set_crypt(req, sg, sg, payload_len + tag_size, iv);

	aead_request_set_ad(req, OVPN_AAD_SIZE);

	/* decrypt it */
	return crypto_aead_decrypt(req);
}

/* Initialize a struct crypto_aead object */
static struct crypto_aead *ovpn_aead_init(const char *title,
					  const char *alg_name,
					  const unsigned char *key,
					  unsigned int keylen)
{
	struct crypto_aead *aead;
	int ret;

	aead = crypto_alloc_aead(alg_name, 0, 0);
	if (IS_ERR(aead)) {
		ret = PTR_ERR(aead);
		pr_err("%s crypto_alloc_aead failed, err=%d\n", title, ret);
		aead = NULL;
		goto error;
	}

	ret = crypto_aead_setkey(aead, key, keylen);
	if (ret) {
		pr_err("%s crypto_aead_setkey size=%u failed, err=%d\n", title,
		       keylen, ret);
		goto error;
	}

	ret = crypto_aead_setauthsize(aead, OVPN_AUTH_TAG_SIZE);
	if (ret) {
		pr_err("%s crypto_aead_setauthsize failed, err=%d\n", title,
		       ret);
		goto error;
	}

	/* basic AEAD assumption */
	if (crypto_aead_ivsize(aead) != OVPN_NONCE_SIZE) {
		pr_err("%s IV size must be %d\n", title, OVPN_NONCE_SIZE);
		ret = -EINVAL;
		goto error;
	}

	pr_debug("********* Cipher %s (%s)\n", alg_name, title);
	pr_debug("*** IV size=%u\n", crypto_aead_ivsize(aead));
	pr_debug("*** req size=%u\n", crypto_aead_reqsize(aead));
	pr_debug("*** block size=%u\n", crypto_aead_blocksize(aead));
	pr_debug("*** auth size=%u\n", crypto_aead_authsize(aead));
	pr_debug("*** alignmask=0x%x\n", crypto_aead_alignmask(aead));

	return aead;

error:
	crypto_free_aead(aead);
	return ERR_PTR(ret);
}

void ovpn_aead_crypto_key_slot_destroy(struct ovpn_crypto_key_slot *ks)
{
	if (!ks)
		return;

	crypto_free_aead(ks->encrypt);
	crypto_free_aead(ks->decrypt);
	kfree(ks);
}

struct ovpn_crypto_key_slot *
ovpn_aead_crypto_key_slot_new(const struct ovpn_key_config *kc)
{
	struct ovpn_crypto_key_slot *ks = NULL;
	const char *alg_name;
	int ret;

	/* validate crypto alg */
	switch (kc->cipher_alg) {
	case OVPN_CIPHER_ALG_AES_GCM:
		alg_name = ALG_NAME_AES;
		break;
	case OVPN_CIPHER_ALG_CHACHA20_POLY1305:
		alg_name = ALG_NAME_CHACHAPOLY;
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	if (kc->encrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE ||
	    kc->decrypt.nonce_tail_size != OVPN_NONCE_TAIL_SIZE)
		return ERR_PTR(-EINVAL);

	/* build the key slot */
	ks = kmalloc(sizeof(*ks), GFP_KERNEL);
	if (!ks)
		return ERR_PTR(-ENOMEM);

	ks->encrypt = NULL;
	ks->decrypt = NULL;
	kref_init(&ks->refcount);
	ks->key_id = kc->key_id;

	ks->encrypt = ovpn_aead_init("encrypt", alg_name,
				     kc->encrypt.cipher_key,
				     kc->encrypt.cipher_key_size);
	if (IS_ERR(ks->encrypt)) {
		ret = PTR_ERR(ks->encrypt);
		ks->encrypt = NULL;
		goto destroy_ks;
	}

	ks->decrypt = ovpn_aead_init("decrypt", alg_name,
				     kc->decrypt.cipher_key,
				     kc->decrypt.cipher_key_size);
	if (IS_ERR(ks->decrypt)) {
		ret = PTR_ERR(ks->decrypt);
		ks->decrypt = NULL;
		goto destroy_ks;
	}

	memcpy(ks->nonce_tail_xmit, kc->encrypt.nonce_tail,
	       OVPN_NONCE_TAIL_SIZE);
	memcpy(ks->nonce_tail_recv, kc->decrypt.nonce_tail,
	       OVPN_NONCE_TAIL_SIZE);

	/* init packet ID generation/validation */
	ovpn_pktid_xmit_init(&ks->pid_xmit);
	ovpn_pktid_recv_init(&ks->pid_recv);

	return ks;

destroy_ks:
	ovpn_aead_crypto_key_slot_destroy(ks);
	return ERR_PTR(ret);
}

enum ovpn_cipher_alg ovpn_aead_crypto_alg(struct ovpn_crypto_key_slot *ks)
{
	const char *alg_name;

	if (!ks->encrypt)
		return OVPN_CIPHER_ALG_NONE;

	alg_name = crypto_tfm_alg_name(crypto_aead_tfm(ks->encrypt));

	if (!strcmp(alg_name, ALG_NAME_AES))
		return OVPN_CIPHER_ALG_AES_GCM;
	else if (!strcmp(alg_name, ALG_NAME_CHACHAPOLY))
		return OVPN_CIPHER_ALG_CHACHA20_POLY1305;
	else
		return OVPN_CIPHER_ALG_NONE;
}