// SPDX-License-Identifier: GPL-2.0
/* OpenVPN data channel offload
 *
 * Copyright (C) 2019-2025 OpenVPN, Inc.
 *
 * Author:	James Yonan <james@openvpn.net>
 *		Antonio Quartulli <antonio@openvpn.net>
 */

#include <crypto/aead.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/gro_cells.h>
#include <net/gso.h>
#include <net/ip.h>

#include "ovpnpriv.h"
#include "peer.h"
#include "io.h"
#include "bind.h"
#include "crypto.h"
#include "crypto_aead.h"
#include "netlink.h"
#include "proto.h"
#include "tcp.h"
#include "udp.h"
#include "skb.h"
#include "socket.h"

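/* well-known OpenVPN keepalive ("ping") magic payload, matched verbatim
 * by ovpn_is_keepalive()
 */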
const unsigned char ovpn_keepalive_message[OVPN_KEEPALIVE_SIZE] = {
	0x2a, 0x18, 0x7b, 0xf3, 0x64, 0x1e, 0xb4, 0xcb,
	0x07, 0xed, 0x2d, 0x0a, 0x98, 0x1f, 0xc7, 0x48
};

/**
 * ovpn_is_keepalive - check if skb contains a keepalive message
 * @skb: packet to check
 *
 * Assumes that the first byte of skb->data is defined.
 *
 * Return: true if skb contains a keepalive message, false otherwise
 */
static bool ovpn_is_keepalive(struct sk_buff *skb)
{
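	/* cheap first-byte comparison rejects most non-keepalive packets
	 * before the full length and pskb_may_pull() checks below
	 */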
	if (*skb->data != ovpn_keepalive_message[0])
		return false;

	if (skb->len != OVPN_KEEPALIVE_SIZE)
		return false;

	if (!pskb_may_pull(skb, OVPN_KEEPALIVE_SIZE))
		return false;

	return !memcmp(skb->data, ovpn_keepalive_message, OVPN_KEEPALIVE_SIZE);
}

/* Called after decrypt to write the IP packet to the device.
 * This method is expected to manage/free the skb.
 */
static void ovpn_netdev_write(struct ovpn_peer *peer, struct sk_buff *skb)
{
	unsigned int pkt_len;
	int ret;

	/* GSO state from the transport layer is not valid for the tunnel/data
	 * path. Reset all GSO fields to prevent any further GSO processing
	 * from entering an inconsistent state.
	 */
	skb_gso_reset(skb);

	/* we can't guarantee the packet wasn't corrupted before entering the
	 * VPN, therefore we give other layers a chance to check that
	 */
	skb->ip_summed = CHECKSUM_NONE;

	/* skb hash for transport packet no longer valid after decapsulation */
	skb_clear_hash(skb);

	/* post-decrypt scrub -- prepare to inject encapsulated packet onto the
	 * interface, based on __skb_tunnel_rx() in dst.h
	 */
	skb->dev = peer->ovpn->dev;
	skb_set_queue_mapping(skb, 0);
	skb_scrub_packet(skb, true);

	/* network header reset in ovpn_decrypt_post() */
	skb_reset_transport_header(skb);
	skb_reset_inner_headers(skb);

	/* cause the packet to be "received" by the interface */
	pkt_len = skb->len;
	ret = gro_cells_receive(&peer->ovpn->gro_cells, skb);
	if (likely(ret == NET_RX_SUCCESS)) {
		/* update RX stats with the size of decrypted packet */
		ovpn_peer_stats_increment_rx(&peer->vpn_stats, pkt_len);
		dev_dstats_rx_add(peer->ovpn->dev, pkt_len);
	}
}

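/**
 * ovpn_decrypt_post - handle the outcome of a decrypt operation
 * @data: the skb that was decrypted
 * @ret: result of the crypto operation, or -EINPROGRESS if still pending
 *
 * Called directly for synchronous crypto or as the AEAD completion
 * callback for async crypto. Consumes the skb as well as the peer and
 * key-slot references stored in the skb CB.
 */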
void ovpn_decrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	unsigned int payload_offset = 0;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	__be16 proto;
	__be32 *pid;

	/* crypto is happening asynchronously. This function will be called
	 * again later by the crypto callback with a proper return code
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	payload_offset = ovpn_skb_cb(skb)->payload_offset;
	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->iv);
	kfree(ovpn_skb_cb(skb)->sg);
	aead_request_free(ovpn_skb_cb(skb)->req);

	if (unlikely(ret < 0))
		goto drop;

	/* PID sits right after the opcode */
	pid = (__force __be32 *)(skb->data + OVPN_OPCODE_SIZE);
	ret = ovpn_pktid_recv(&ks->pid_recv, ntohl(*pid), 0);
	if (unlikely(ret < 0)) {
		net_err_ratelimited("%s: PKT ID RX error for peer %u: %d\n",
				    netdev_name(peer->ovpn->dev), peer->id,
				    ret);
		goto drop;
	}

	/* keep track of last received authenticated packet for keepalive */
	WRITE_ONCE(peer->last_recv, ktime_get_real_seconds());

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (sock && sock->sk->sk_protocol == IPPROTO_UDP)
		/* check if this peer changed local or remote endpoint */
		ovpn_peer_endpoints_update(peer, skb);
	rcu_read_unlock();

	/* point to encapsulated IP packet */
	__skb_pull(skb, payload_offset);

	/* check if this is a valid data packet that has to be delivered to
	 * the ovpn interface
	 */
	skb_reset_network_header(skb);
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto)) {
		/* check if null packet */
		if (unlikely(!pskb_may_pull(skb, 1))) {
			net_info_ratelimited("%s: NULL packet received from peer %u\n",
					     netdev_name(peer->ovpn->dev),
					     peer->id);
			goto drop;
		}

		if (ovpn_is_keepalive(skb)) {
			net_dbg_ratelimited("%s: ping received from peer %u\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id);
			/* we drop the packet, but this is not a failure */
			consume_skb(skb);
			goto drop_nocount;
		}

		net_info_ratelimited("%s: unsupported protocol received from peer %u\n",
				     netdev_name(peer->ovpn->dev), peer->id);
		goto drop;
	}
	skb->protocol = proto;

	/* perform Reverse Path Filtering (RPF) */
	if (unlikely(!ovpn_peer_check_by_src(peer->ovpn, skb, peer))) {
		if (skb->protocol == htons(ETH_P_IPV6))
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI6c\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ipv6_hdr(skb)->saddr);
		else
			net_dbg_ratelimited("%s: RPF dropped packet from peer %u, src: %pI4\n",
					    netdev_name(peer->ovpn->dev),
					    peer->id, &ip_hdr(skb)->saddr);
		goto drop;
	}

	ovpn_netdev_write(peer, skb);
	/* skb is passed to upper layer - don't free it */
	skb = NULL;
drop:
	if (unlikely(skb))
		dev_dstats_rx_dropped(peer->ovpn->dev);
	kfree_skb(skb);
drop_nocount:
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
}

/* RX path entry point: decrypt packet and forward it to the device */
void ovpn_recv(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct ovpn_crypto_key_slot *ks;
	u8 key_id;

	ovpn_peer_stats_increment_rx(&peer->link_stats, skb->len);

	/* get the key slot matching the key ID in the received packet */
	key_id = ovpn_key_id_from_skb(skb);
	ks = ovpn_crypto_key_id_to_slot(&peer->crypto, key_id);
	if (unlikely(!ks)) {
		net_info_ratelimited("%s: no available key for peer %u, key-id: %u\n",
				     netdev_name(peer->ovpn->dev), peer->id,
				     key_id);
		dev_dstats_rx_dropped(peer->ovpn->dev);
		kfree_skb(skb);
		ovpn_peer_put(peer);
		return;
	}

	memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
	ovpn_decrypt_post(skb, ovpn_aead_decrypt(peer, ks, skb));
}

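/**
 * ovpn_encrypt_post - handle the outcome of an encrypt operation
 * @data: the skb that was encrypted
 * @ret: result of the crypto operation, or -EINPROGRESS if still pending
 *
 * Called directly for synchronous crypto or as the AEAD completion
 * callback for async crypto. On success the skb is handed to the
 * configured transport (UDP or TCP); in all cases the peer and key-slot
 * references stored in the skb CB are released.
 */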
void ovpn_encrypt_post(void *data, int ret)
{
	struct ovpn_crypto_key_slot *ks;
	struct sk_buff *skb = data;
	struct ovpn_socket *sock;
	struct ovpn_peer *peer;
	unsigned int orig_len;

	/* encryption is happening asynchronously. This function will be
	 * called later by the crypto callback with a proper return value
	 */
	if (unlikely(ret == -EINPROGRESS))
		return;

	ks = ovpn_skb_cb(skb)->ks;
	peer = ovpn_skb_cb(skb)->peer;

	/* crypto is done, cleanup skb CB and its members */
	kfree(ovpn_skb_cb(skb)->iv);
	kfree(ovpn_skb_cb(skb)->sg);
	aead_request_free(ovpn_skb_cb(skb)->req);

	if (unlikely(ret == -ERANGE)) {
		/* we ran out of IVs and we must kill the key as it can't be
		 * used anymore
		 */
		netdev_warn(peer->ovpn->dev,
			    "killing key %u for peer %u\n", ks->key_id,
			    peer->id);
		if (ovpn_crypto_kill_key(&peer->crypto, ks->key_id))
			/* let userspace know that a new key must be negotiated */
			ovpn_nl_key_swap_notify(peer, ks->key_id);

		goto err;
	}

	if (unlikely(ret < 0))
		goto err;

	skb_mark_not_on_list(skb);
	orig_len = skb->len;

	rcu_read_lock();
	sock = rcu_dereference(peer->sock);
	if (unlikely(!sock))
		goto err_unlock;

	switch (sock->sk->sk_protocol) {
	case IPPROTO_UDP:
		ovpn_udp_send_skb(peer, sock->sk, skb);
		break;
	case IPPROTO_TCP:
		ovpn_tcp_send_skb(peer, sock->sk, skb);
		break;
	default:
		/* no transport configured yet */
		goto err_unlock;
	}

	ovpn_peer_stats_increment_tx(&peer->link_stats, orig_len);
	/* keep track of last sent packet for keepalive */
	WRITE_ONCE(peer->last_sent, ktime_get_real_seconds());
	/* skb passed down the stack - don't free it */
	skb = NULL;
err_unlock:
	rcu_read_unlock();
err:
	if (unlikely(skb))
		dev_dstats_tx_dropped(peer->ovpn->dev);
	if (likely(peer))
		ovpn_peer_put(peer);
	if (likely(ks))
		ovpn_crypto_key_slot_put(ks);
	kfree_skb(skb);
}

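/**
 * ovpn_encrypt_one - encrypt a single skb with the peer's primary key
 * @peer: the peer to encrypt the packet for
 * @skb: the packet to encrypt
 *
 * Return: true if the crypto path took ownership of the skb (it will be
 * released by ovpn_encrypt_post()), false otherwise, in which case the
 * caller must free the skb
 */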
static bool ovpn_encrypt_one(struct ovpn_peer *peer, struct sk_buff *skb)
{
	struct ovpn_crypto_key_slot *ks;

	/* get primary key to be used for encrypting data */
	ks = ovpn_crypto_key_slot_primary(&peer->crypto);
	if (unlikely(!ks))
		return false;

	/* take a reference to the peer because the crypto code may run async.
	 * ovpn_encrypt_post() will release it upon completion
	 */
	if (unlikely(!ovpn_peer_hold(peer))) {
		DEBUG_NET_WARN_ON_ONCE(1);
		ovpn_crypto_key_slot_put(ks);
		return false;
	}

	memset(ovpn_skb_cb(skb), 0, sizeof(struct ovpn_cb));
	ovpn_encrypt_post(skb, ovpn_aead_encrypt(peer, ks, skb));
	return true;
}

/* send the skb (possibly a GSO segment list) to the given peer and
 * release the peer reference
 */
static void ovpn_send(struct ovpn_priv *ovpn, struct sk_buff *skb,
		      struct ovpn_peer *peer)
{
	struct sk_buff *curr, *next;

	/* this might be a GSO-segmented skb list: process each skb
	 * independently
	 */
	skb_list_walk_safe(skb, curr, next) {
		if (unlikely(!ovpn_encrypt_one(peer, curr))) {
			dev_dstats_tx_dropped(ovpn->dev);
			kfree_skb(curr);
		}
	}

	ovpn_peer_put(peer);
}

/* Send user data to the network */
netdev_tx_t ovpn_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ovpn_priv *ovpn = netdev_priv(dev);
	struct sk_buff *segments, *curr, *next;
	struct sk_buff_head skb_list;
	struct ovpn_peer *peer;
	__be16 proto;
	int ret;

	/* reset netfilter state */
	nf_reset_ct(skb);

	/* verify IP header size in network packet */
	proto = ovpn_ip_check_protocol(skb);
	if (unlikely(!proto || skb->protocol != proto))
		goto drop;

	if (skb_is_gso(skb)) {
		segments = skb_gso_segment(skb, 0);
		if (IS_ERR(segments)) {
			ret = PTR_ERR(segments);
			net_err_ratelimited("%s: cannot segment payload packet: %d\n",
					    netdev_name(dev), ret);
			goto drop;
		}

		consume_skb(skb);
		skb = segments;
	}

	/* from this moment on, "skb" might be a list */

	__skb_queue_head_init(&skb_list);
	skb_list_walk_safe(skb, curr, next) {
		skb_mark_not_on_list(curr);

		curr = skb_share_check(curr, GFP_ATOMIC);
		if (unlikely(!curr)) {
			net_err_ratelimited("%s: skb_share_check failed for payload packet\n",
					    netdev_name(dev));
			dev_dstats_tx_dropped(ovpn->dev);
			continue;
		}

		__skb_queue_tail(&skb_list, curr);
	}
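	/* the queue head was only used to collect the segments: unlink it so
	 * that skb_list.next becomes a plain NULL-terminated skb list
	 */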
	skb_list.prev->next = NULL;

	/* retrieve peer serving the destination IP of this packet */
	peer = ovpn_peer_get_by_dst(ovpn, skb);
	if (unlikely(!peer)) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI4\n",
					    netdev_name(ovpn->dev),
					    &ip_hdr(skb)->daddr);
			break;
		case htons(ETH_P_IPV6):
			net_dbg_ratelimited("%s: no peer to send data to dst=%pI6c\n",
					    netdev_name(ovpn->dev),
					    &ipv6_hdr(skb)->daddr);
			break;
		}
		goto drop;
	}
	/* dst was needed for peer selection - it can now be dropped */
	skb_dst_drop(skb);

	ovpn_peer_stats_increment_tx(&peer->vpn_stats, skb->len);
	ovpn_send(ovpn, skb_list.next, peer);

	return NETDEV_TX_OK;

drop:
	dev_dstats_tx_dropped(ovpn->dev);
	skb_tx_error(skb);
	kfree_skb_list(skb);
	return NETDEV_TX_OK;
}

/**
 * ovpn_xmit_special - encrypt and transmit an out-of-band message to peer
 * @peer: peer to send the message to
 * @data: message content
 * @len: message length
 *
 * Assumes that caller holds a reference to peer, which will be
 * passed to (and released by) ovpn_send()
 */
void ovpn_xmit_special(struct ovpn_peer *peer, const void *data,
		       const unsigned int len)
{
	struct ovpn_priv *ovpn;
	struct sk_buff *skb;

	ovpn = peer->ovpn;
	if (unlikely(!ovpn)) {
		ovpn_peer_put(peer);
		return;
	}

	skb = alloc_skb(256 + len, GFP_ATOMIC);
	if (unlikely(!skb)) {
		ovpn_peer_put(peer);
		return;
	}

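	/* reserve headroom for the transport and OpenVPN encapsulation
	 * headers prepended during encryption; the remaining slack is
	 * available as tailroom (e.g. for the auth tag)
	 */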
	skb_reserve(skb, 128);
	skb->priority = TC_PRIO_BESTEFFORT;
	__skb_put_data(skb, data, len);

	ovpn_send(ovpn, skb, peer);
}
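
/* Usage sketch (illustrative only, not part of this file): transmitting
 * the keepalive message to a peer, e.g. from a keepalive worker. The
 * caller must hold a peer reference, which ovpn_xmit_special() hands to
 * ovpn_send() for release:
 *
 *	if (ovpn_peer_hold(peer))
 *		ovpn_xmit_special(peer, ovpn_keepalive_message,
 *				  sizeof(ovpn_keepalive_message));
 */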