// SPDX-License-Identifier: GPL-2.0
/*  OpenVPN data channel offload
 *
 *  Copyright (C) 2019-2025 OpenVPN, Inc.
 *
 *  Author:	James Yonan <james@openvpn.net>
 *		Antonio Quartulli <antonio@openvpn.net>
 */

#include <crypto/aead.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro_cells.h>
#include <net/gso.h>
#include <net/ip.h>

#include "ovpnpriv.h"
#include "peer.h"
#include "io.h"
#include "bind.h"
#include "crypto.h"
#include "crypto_aead.h"
#include "netlink.h"
#include "proto.h"
#include "tcp.h"
#include "udp.h"
#include "skb.h"
#include "socket.h"

const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
	0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
	0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
};

/**
 * ovpn_is_keepalive - check if skb contains a keepalive message
 * @skb: packet to check
 *
 * Assumes that the first byte of skb->data is defined.
 *
 * Return: true if skb contains a keepalive message, false otherwise
 */
static bool ovpn_is_keepalive(struct sk_buff *skb)
{
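	/* compare the first byte before anything else: it is the cheapest
	 * way to let non-keepalive packets bail out early
	 */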
	if (*skb->data != ovpn_keepalive_message[0])
		return false;

	if (skb->len != OVPN_KEEPALIVE_SIZE)
		return false;

	if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
		return false;

	return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
}

/* Called after decrypt to write the IP packet to the device.
 * This function is expected to manage/free the skb.
 */
static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
{
	unsigned int pkt_len;
	int ret;

	/* we can't guarantee the packet wasn't corrupted before entering the
	 * VPN, therefore we give other layers a chance to check that
	 */
	skb->ip_summed = CHECKSUM_NONE;

	/* skb hash for transport packet no longer valid after decapsulation */
	skb_clear_hash(skb);

	/* post-decrypt scrub -- prepare to inject encapsulated packet onto the
	 * interface, based on __skb_tunnel_rx() in dst.h
	 */
	skb->dev = peer->ovpn->dev;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, true);

	/* network header reset in ovpn_decrypt_post() */
	skb_reset_transport_header(skb);
	skb_reset_inner_headers(skb);

	/* cause packet to be "received" by the interface */
	pkt_len = skb->len;
	ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
	if (likely(ret == NET_RX_SUCCESS)) {
		/* update RX stats with the size of decrypted packet */
		ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
		dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
	}
}

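/**
 * ovpn_decrypt_post - process a decrypted packet and deliver it locally
 * @data: the sk_buff that went through decryption
 * @ret: return code of the decrypt operation
 *
 * Called by ovpn_recv() with the result of ovpn_aead_decrypt(), or later
 * by the crypto callback once an asynchronous decryption has completed.
 * Verifies the packet ID, detects keepalive messages, performs reverse
 * path filtering and finally hands the packet over to the device.
 */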
void ovpn_decrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	unsigned int payload_offset = 0;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	__be16 proto;
	__be32 *pid;

	/* crypto is happening asynchronously. This function will be called
	 * again later by the crypto callback with a proper return code
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	payload_offset = ovpn_skb_cb(skb)->payload_offset;
	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->iv);
	kfree(ovpn_skb_cb(skb)->sg);
	aead_request_free(ovpn_skb_cb(skb)->req);

	if (unlikely(ret < 0))
		goto drop;

	/* PID sits after the op */
	pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
	ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
	if (unlikely(ret < 0)) {
		net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
				    netdev_name(peer->ovpn->dev), peer->id,
				    ret);
		goto drop;
	}

	/* keep track of last received authenticated packet for keepalive */
	WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
		/* check if this peer changed local or remote endpoint */
		ovpn_peer_endpoints_update(peer, skb);
	rcu_read_unlock();

	/* point to encapsulated IP packet */
	__skb_pull(skb, payload_offset);

	/* check if this is a valid data packet that has to be delivered to
	 * the ovpn interface
	 */
	skb_reset_network_header(skb);
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto)) {
		/* check if null packet */
		if (unlikely(!pskb_may_pull(skb, 1))) {
			net_info_ratelimited("%s: NULL packet received from peer %u\n",
					     netdev_name(peer->ovpn->dev),
					     peer->id);
			goto drop;
		}

		if (ovpn_is_keepalive(skb)) {
			net_dbg_ratelimited("%s: ping received from peer %u\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id);
			/* we drop the packet, but this is not a failure */
			consume_skb(skb);
			goto drop_nocount;
		}

		net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
				     netdev_name(peer->ovpn->dev), peer->id);
		goto drop;
	}
	skb->protocol = proto;

	/* perform Reverse Path Filtering (RPF) */
	if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
		if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ipv6_hdr(skb)->saddr);
		else
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ip_hdr(skb)->saddr);
		goto drop;
	}

	ovpn_netdev_write(peer, skb);
	/* skb is passed to upper layer - don't free it */
	skb = NULL;
drop:
	if (unlikely(skb))
		dev_dstats_rx_dropped(peer->ovpn->dev);
	kfree_skb(skb);
drop_nocount:
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
}

/* RX path entry point: decrypt packet and forward it to the device */
void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct ovpn_crypto_key_slot *ks;
	u8 key_id;

	ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);

	/* get the key slot matching the key ID in the received packet */
	key_id = ovpn_key_id_from_skb(skb);
	ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
	if (unlikely(!ks)) {
		net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
				     netdev_name(peer->ovpn->dev), peer->id,
				     key_id);
		dev_dstats_rx_dropped(peer->ovpn->dev);
		kfree_skb(skb);
		ovpn_peer_put(peer);
		return;
	}

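	/* the skb control buffer carries crypto state across the
	 * (potentially asynchronous) decrypt operation - start from a
	 * clean state
	 */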
	memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
	ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
}

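/**
 * ovpn_encrypt_post - transmit a packet after encryption
 * @data: the sk_buff that went through encryption
 * @ret: return code of the encrypt operation
 *
 * Called by ovpn_encrypt_one() with the result of ovpn_aead_encrypt(), or
 * later by the crypto callback once an asynchronous encryption has
 * completed. Hands the encrypted packet to the UDP or TCP transport and
 * releases the references to the peer and the key slot.
 */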
void ovpn_encrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	unsigned int orig_len;

	/* encryption is happening asynchronously. This function will be
	 * called later by the crypto callback with a proper return value
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->iv);
	kfree(ovpn_skb_cb(skb)->sg);
	aead_request_free(ovpn_skb_cb(skb)->req);

	if (unlikely(ret == -ERANGE)) {
		/* we ran out of IVs and we must kill the key as it can't be
		 * used anymore
		 */
		netdev_warn(peer->ovpn->dev,
			    "killing key %u for peer %u\n", ks->key_id,
			    peer->id);
		if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
			/* let userspace know so that a new key must be negotiated */
			ovpn_nl_key_swap_notify(peer, ks->key_id);

		goto err;
	}

	if (unlikely(ret < 0))
		goto err;

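	/* make sure the skb is not linked in any list before handing it
	 * down the stack
	 */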
	skb_mark_not_on_list(skb);
	orig_len = skb->len;

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (unlikely(!sock))
		goto err_unlock;

	switch (sock->sk->sk_protocol) {
	case IPPROTO_UDP:
		ovpn_udp_send_skb(peer, sock->sk, skb);
		break;
	case IPPROTO_TCP:
		ovpn_tcp_send_skb(peer, sock->sk, skb);
		break;
	default:
		/* no transport configured yet */
		goto err_unlock;
	}

	ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
	/* keep track of last sent packet for keepalive */
	WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
	/* skb passed down the stack - don't free it */
	skb = NULL;
err_unlock:
	rcu_read_unlock();
err:
	if (unlikely(skb))
		dev_dstats_tx_dropped(peer->ovpn->dev);
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
	kfree_skb(skb);
}

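/**
 * ovpn_encrypt_one - encrypt a single skb with the peer's primary key
 * @peer: the peer the packet is directed to
 * @skb: the packet to encrypt
 *
 * Return: true if the skb was handed over to the crypto layer (which will
 *	   invoke ovpn_encrypt_post() upon completion), false if no primary
 *	   key slot is available or the peer reference could not be taken;
 *	   in that case the caller must free the skb
 */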
static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct ovpn_crypto_key_slot *ks;

	/* get primary key to be used for encrypting data */
	ks = ovpn_crypto_key_slot_primary(&peer->crypto);
	if (unlikely(!ks))
		return false;

	/* take a reference to the peer because the crypto code may run async.
	 * ovpn_encrypt_post() will release it upon completion
	 */
	if (unlikely(!ovpn_peer_hold(peer))) {
		DEBUG_NET_WARN_ON_ONCE(1);
		ovpn_crypto_key_slot_put(ks);
		return false;
	}

	memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
	ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
	return true;
}

/* send skb to connected peer, if any; consumes the reference to peer */
static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
		      struct ovpn_peer *peer)
{
	struct sk_buff *curr, *next;

	/* this might be a GSO-segmented skb list: process each skb
	 * independently
	 */
	skb_list_walk_safe(skb, curr, next) {
		if (unlikely(!ovpn_encrypt_one(peer, curr))) {
			dev_dstats_tx_dropped(ovpn->dev);
			kfree_skb(curr);
		}
	}

	ovpn_peer_put(peer);
}

/* Send user data to the network */
netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ovpn_priv *ovpn = netdev_priv(dev);
	struct sk_buff *segments, *curr, *next;
	struct sk_buff_head skb_list;
	struct ovpn_peer *peer;
	__be16 proto;
	int ret;

	/* reset netfilter state */
	nf_reset_ct(skb);

	/* verify IP header size in network packet */
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto || skb->protocol != proto))
		goto drop;

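	/* GSO packets must be segmented before encryption, so that each
	 * resulting packet is encrypted and encapsulated on its own
	 */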
	if (skb_is_gso(skb)) {
		segments = skb_gso_segment(skb, 0);
		if (IS_ERR(segments)) {
			ret = PTR_ERR(segments);
			net_err_ratelimited("%s: cannot segment payload packet: %d\n",
					    netdev_name(dev), ret);
			goto drop;
		}

		consume_skb(skb);
		skb = segments;
	}

	/* from this moment on, "skb" might be a list */

	__skb_queue_head_init(&skb_list);
	skb_list_walk_safe(skb, curr, next) {
		skb_mark_not_on_list(curr);

		curr = skb_share_check(curr, GFP_ATOMIC);
		if (unlikely(!curr)) {
			net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
					    netdev_name(dev));
			dev_dstats_tx_dropped(ovpn->dev);
			continue;
		}

		__skb_queue_tail(&skb_list, curr);
	}
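	/* the queue is circular: terminate the last element so that the
	 * result can be walked as a plain NULL-terminated skb list
	 */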
	skb_list.prev->next = NULL;

	/* retrieve peer serving the destination IP of this packet */
	peer = ovpn_peer_get_by_dst(ovpn, skb);
	if (unlikely(!peer)) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
					    netdev_name(ovpn->dev),
					    &ip_hdr(skb)->daddr);
			break;
		case htons(ETH_P_IPV6):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI6c\n",
					    netdev_name(ovpn->dev),
					    &ipv6_hdr(skb)->daddr);
			break;
		}
		goto drop;
	}
	/* dst was needed for peer selection - it can now be dropped */
	skb_dst_drop(skb);

	ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len);
	ovpn_send(ovpn, skb_list.next, peer);

	return NETDEV_TX_OK;

drop:
	dev_dstats_tx_dropped(ovpn->dev);
	skb_tx_error(skb);
	kfree_skb_list(skb);
	return NETDEV_TX_OK;
}

/**
 * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
 * @peer: peer to send the message to
 * @data: message content
 * @len: message length
 *
 * Assumes that caller holds a reference to peer, which will be
 * passed to ovpn_send()
 */
void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
		       const unsigned int len)
{
	struct ovpn_priv *ovpn;
	struct sk_buff *skb;

	ovpn = peer->ovpn;
	if (unlikely(!ovpn)) {
		ovpn_peer_put(peer);
		return;
	}

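	/* allocate extra head- and tailroom, leaving room for the headers
	 * and crypto overhead added later during encryption
	 */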
	skb = alloc_skb(256 + len, GFP_ATOMIC);
	if (unlikely(!skb)) {
		ovpn_peer_put(peer);
		return;
	}

	skb_reserve(skb, 128);
	skb->priority = TC_PRIO_BESTEFFORT;
	__skb_put_data(skb, data, len);

	ovpn_send(ovpn, skb, peer);
}