/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __NET_UDP_TUNNEL_H
#define __NET_UDP_TUNNEL_H

#include <net/ip_tunnels.h>
#include <net/udp.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
#include <net/ipv6_stubs.h>
#endif

struct udp_port_cfg {
	u8			family;

	/* Used only for kernel-created sockets */
	union {
		struct in_addr		local_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		local_ip6;
#endif
	};

	union {
		struct in_addr		peer_ip;
#if IS_ENABLED(CONFIG_IPV6)
		struct in6_addr		peer_ip6;
#endif
	};

	__be16			local_udp_port;
	__be16			peer_udp_port;
	int			bind_ifindex;
	unsigned int		use_udp_checksums:1,
				use_udp6_tx_checksums:1,
				use_udp6_rx_checksums:1,
				ipv6_v6only:1;
};

int udp_sock_create4(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);

#if IS_ENABLED(CONFIG_IPV6)
int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
		     struct socket **sockp);
#else
static inline int udp_sock_create6(struct net *net, struct udp_port_cfg *cfg,
				   struct socket **sockp)
{
	return 0;
}
#endif

static inline int udp_sock_create(struct net *net,
				  struct udp_port_cfg *cfg,
				  struct socket **sockp)
{
	if (cfg->family == AF_INET)
		return udp_sock_create4(net, cfg, sockp);

	if (cfg->family == AF_INET6)
		return udp_sock_create6(net, cfg, sockp);

	return -EPFNOSUPPORT;
}
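
/* Example (illustrative sketch, not part of this API): creating a
 * kernel-owned IPv4 UDP socket for a tunnel. The "net" pointer, the
 * "sock" variable and the port number 4790 are placeholders chosen for
 * the example only.
 *
 *	struct udp_port_cfg udp_conf = { };
 *	struct socket *sock;
 *	int err;
 *
 *	udp_conf.family = AF_INET;
 *	udp_conf.local_ip.s_addr = htonl(INADDR_ANY);
 *	udp_conf.local_udp_port = htons(4790);
 *	udp_conf.use_udp_checksums = true;
 *
 *	err = udp_sock_create(net, &udp_conf, &sock);
 *	if (err < 0)
 *		return err;
 */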

typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef int (*udp_tunnel_encap_err_lookup_t)(struct sock *sk,
					     struct sk_buff *skb);
typedef void (*udp_tunnel_encap_err_rcv_t)(struct sock *sk,
					   struct sk_buff *skb, int err,
					   __be16 port, u32 info, u8 *payload);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
						    struct list_head *head,
						    struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
					 int nhoff);

struct udp_tunnel_sock_cfg {
	void *sk_user_data;	/* user data used by encap_rcv call back */
	/* Used for setting up udp_sock fields, see udp.h for details */
	__u8 encap_type;
	udp_tunnel_encap_rcv_t encap_rcv;
	udp_tunnel_encap_err_lookup_t encap_err_lookup;
	udp_tunnel_encap_err_rcv_t encap_err_rcv;
	udp_tunnel_encap_destroy_t encap_destroy;
	udp_tunnel_gro_receive_t gro_receive;
	udp_tunnel_gro_complete_t gro_complete;
};

/* Setup the given (UDP) sock to receive UDP encapsulated packets */
void setup_udp_tunnel_sock(struct net *net, struct socket *sock,
			   struct udp_tunnel_sock_cfg *sock_cfg);
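
/* Example (illustrative sketch): attaching encapsulation callbacks to a
 * socket created with udp_sock_create() above. my_encap_rcv() and
 * my_tunnel_priv are placeholders for driver-provided pieces; the meaning
 * of encap_type values is described in udp.h, and a nonzero value enables
 * the encap_rcv() path.
 *
 *	struct udp_tunnel_sock_cfg tunnel_cfg = { };
 *
 *	tunnel_cfg.sk_user_data = my_tunnel_priv;
 *	tunnel_cfg.encap_type = 1;
 *	tunnel_cfg.encap_rcv = my_encap_rcv;
 *
 *	setup_udp_tunnel_sock(net, sock, &tunnel_cfg);
 */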

/* -- List of parsable UDP tunnel types --
 *
 * Adding to this list will result in serious debate. The main issue is
 * that this list is essentially a list of workarounds for either poorly
 * designed tunnels, or poorly designed device offloads.
 *
 * The parsing supported via these types should really be used for Rx
 * traffic only as the network stack will have already inserted offsets for
 * the location of the headers in the skb. In addition any ports that are
 * pushed should be kept within the namespace without leaking to other
 * devices such as VFs or other ports on the same device.
 *
 * It is strongly encouraged to use CHECKSUM_COMPLETE for Rx to avoid the
 * need to use this for Rx checksum offload. It should not be necessary to
 * call this function to perform Tx offloads on outgoing traffic.
 */
enum udp_parsable_tunnel_type {
	UDP_TUNNEL_TYPE_VXLAN		= BIT(0), /* RFC 7348 */
	UDP_TUNNEL_TYPE_GENEVE		= BIT(1), /* draft-ietf-nvo3-geneve */
	UDP_TUNNEL_TYPE_VXLAN_GPE	= BIT(2), /* draft-ietf-nvo3-vxlan-gpe */
};

struct udp_tunnel_info {
	unsigned short type;
	sa_family_t sa_family;
	__be16 port;
	u8 hw_priv;
};

/* Notify network devices of offloadable types */
void udp_tunnel_push_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_drop_rx_port(struct net_device *dev, struct socket *sock,
			     unsigned short type);
void udp_tunnel_notify_add_rx_port(struct socket *sock, unsigned short type);
void udp_tunnel_notify_del_rx_port(struct socket *sock, unsigned short type);
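
/* Example (illustrative sketch): once the tunnel socket is set up, a tunnel
 * driver typically advertises its UDP port to capable devices so they can
 * enable RX-side offloads, and withdraws it on teardown. The tunnel type
 * below is just an example choice.
 *
 *	udp_tunnel_notify_add_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
 *	...
 *	udp_tunnel_notify_del_rx_port(sock, UDP_TUNNEL_TYPE_VXLAN);
 */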

/* Transmit the skb using UDP encapsulation. */
void udp_tunnel_xmit_skb(struct rtable *rt, struct sock *sk, struct sk_buff *skb,
			 __be32 src, __be32 dst, __u8 tos, __u8 ttl,
			 __be16 df, __be16 src_port, __be16 dst_port,
			 bool xnet, bool nocheck, u16 ipcb_flags);

void udp_tunnel6_xmit_skb(struct dst_entry *dst, struct sock *sk,
			  struct sk_buff *skb,
			  struct net_device *dev,
			  const struct in6_addr *saddr,
			  const struct in6_addr *daddr,
			  __u8 prio, __u8 ttl, __be32 label,
			  __be16 src_port, __be16 dst_port, bool nocheck,
			  u16 ip6cb_flags);

void udp_tunnel_sock_release(struct socket *sock);

struct rtable *udp_tunnel_dst_lookup(struct sk_buff *skb,
				     struct net_device *dev,
				     struct net *net, int oif,
				     __be32 *saddr,
				     const struct ip_tunnel_key *key,
				     __be16 sport, __be16 dport, u8 tos,
				     struct dst_cache *dst_cache);
struct dst_entry *udp_tunnel6_dst_lookup(struct sk_buff *skb,
					 struct net_device *dev,
					 struct net *net,
					 struct socket *sock, int oif,
					 struct in6_addr *saddr,
					 const struct ip_tunnel_key *key,
					 __be16 sport, __be16 dport, u8 dsfield,
					 struct dst_cache *dst_cache);
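
/* Example (illustrative sketch): a reduced IPv4 transmit path pairing the
 * route lookup helper with udp_tunnel_xmit_skb(). The variables skb, dev,
 * net, key, sport, dport, tos, ttl and sock are assumed to exist in the
 * caller, no dst_cache is used, and error handling is minimal.
 *
 *	struct rtable *rt;
 *	__be32 saddr;
 *
 *	rt = udp_tunnel_dst_lookup(skb, dev, net, 0, &saddr, key,
 *				   sport, dport, tos, NULL);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 *	udp_tunnel_xmit_skb(rt, sock->sk, skb, saddr, key->u.ipv4.dst,
 *			    tos, ttl, 0, sport, dport, false, false, 0);
 */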

struct metadata_dst *udp_tun_rx_dst(struct sk_buff *skb, unsigned short family,
				    const unsigned long *flags,
				    __be64 tunnel_id, int md_size);

#ifdef CONFIG_INET
static inline int udp_tunnel_handle_offloads(struct sk_buff *skb, bool udp_csum)
{
	int type = udp_csum ? SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;

	return iptunnel_handle_offloads(skb, type);
}
#endif

#if IS_ENABLED(CONFIG_NET_UDP_TUNNEL)
void udp_tunnel_update_gro_lookup(struct net *net, struct sock *sk, bool add);
void udp_tunnel_update_gro_rcv(struct sock *sk, bool add);
#else
static inline void udp_tunnel_update_gro_lookup(struct net *net,
						struct sock *sk, bool add) {}
static inline void udp_tunnel_update_gro_rcv(struct sock *sk, bool add) {}
#endif

static inline void udp_tunnel_cleanup_gro(struct sock *sk)
{
	udp_tunnel_update_gro_rcv(sk, false);
	udp_tunnel_update_gro_lookup(sock_net(sk), sk, false);
}

static inline void udp_tunnel_encap_enable(struct sock *sk)
{
	if (udp_test_and_set_bit(ENCAP_ENABLED, sk))
		return;

#if IS_ENABLED(CONFIG_IPV6)
	if (READ_ONCE(sk->sk_family) == PF_INET6)
		ipv6_stub->udpv6_encap_enable();
#endif
	udp_encap_enable();
}

#define UDP_TUNNEL_NIC_MAX_TABLES	4

enum udp_tunnel_nic_info_flags {
	/* Device only supports offloads when it's open, all ports
	 * will be removed before close and re-added after open.
	 */
	UDP_TUNNEL_NIC_INFO_OPEN_ONLY	= BIT(0),
	/* Device supports only IPv4 tunnels */
	UDP_TUNNEL_NIC_INFO_IPV4_ONLY	= BIT(1),
	/* Device has hard-coded the IANA VXLAN port (4789) as VXLAN.
	 * This port must not be counted towards n_entries of any table.
	 * Driver will not receive any callback associated with port 4789.
	 */
	UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN	= BIT(2),
};

struct udp_tunnel_nic;

#define UDP_TUNNEL_NIC_MAX_SHARING_DEVICES	(U16_MAX / 2)

struct udp_tunnel_nic_shared {
	struct udp_tunnel_nic *udp_tunnel_nic_info;

	struct list_head devices;
};

struct udp_tunnel_nic_shared_node {
	struct net_device *dev;
	struct list_head list;
};

/**
 * struct udp_tunnel_nic_info - driver UDP tunnel offload information
 * @set_port:	callback for adding a new port
 * @unset_port:	callback for removing a port
 * @sync_table:	callback for syncing the entire port table at once
 * @shared:	reference to device global state (optional)
 * @flags:	device flags from enum udp_tunnel_nic_info_flags
 * @tables:	UDP port tables this device has
 * @tables.n_entries:		number of entries in this table
 * @tables.tunnel_types:	types of tunnels this table accepts
 *
 * Drivers are expected to provide either @set_port and @unset_port callbacks
 * or the @sync_table callback. Callbacks are invoked with rtnl lock held.
 *
 * Devices which (misguidedly) share the UDP tunnel port table across multiple
 * netdevs should allocate an instance of struct udp_tunnel_nic_shared and
 * point @shared at it.
 * There must never be more than %UDP_TUNNEL_NIC_MAX_SHARING_DEVICES devices
 * sharing a table.
 *
 * Known limitations:
 *  - UDP tunnel port notifications are fundamentally best-effort -
 *    the driver is likely to see both skbs which use a UDP tunnel port yet
 *    are not tunneled, and tunneled skbs arriving on other ports -
 *    drivers should only use these ports for non-critical RX-side offloads,
 *    e.g. the checksum offload;
 *  - none of the devices care about the socket family at present, so we don't
 *    track it. Please extend this code if you care.
 */
struct udp_tunnel_nic_info {
	/* one-by-one */
	int (*set_port)(struct net_device *dev,
			unsigned int table, unsigned int entry,
			struct udp_tunnel_info *ti);
	int (*unset_port)(struct net_device *dev,
			  unsigned int table, unsigned int entry,
			  struct udp_tunnel_info *ti);

	/* all at once */
	int (*sync_table)(struct net_device *dev, unsigned int table);

	struct udp_tunnel_nic_shared *shared;

	unsigned int flags;

	struct udp_tunnel_nic_table_info {
		unsigned int n_entries;
		unsigned int tunnel_types;
	} tables[UDP_TUNNEL_NIC_MAX_TABLES];
};
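
/* Example (illustrative sketch): how a NIC driver might describe a single
 * 8-entry table accepting both VXLAN and GENEVE ports. The callback names
 * are placeholders for driver functions matching the prototypes above.
 *
 *	static const struct udp_tunnel_nic_info my_udp_tunnels = {
 *		.set_port	= my_udp_tunnel_set_port,
 *		.unset_port	= my_udp_tunnel_unset_port,
 *		.flags		= UDP_TUNNEL_NIC_INFO_OPEN_ONLY,
 *		.tables		= {
 *			{
 *				.n_entries	= 8,
 *				.tunnel_types	= UDP_TUNNEL_TYPE_VXLAN |
 *						  UDP_TUNNEL_TYPE_GENEVE,
 *			},
 *		},
 *	};
 *
 * The driver points dev->udp_tunnel_nic_info at this structure before
 * registering the netdev and then waits for the callbacks.
 */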

/* UDP tunnel module dependencies
 *
 * Tunnel drivers are expected to have a hard dependency on the udp_tunnel
 * module. NIC drivers are not, they just attach their
 * struct udp_tunnel_nic_info to the netdev and wait for callbacks to come.
 * Loading a tunnel driver will cause the udp_tunnel module to be loaded
 * and only then will all the required state structures be allocated.
 * Since we want a weak dependency from the drivers and the core to udp_tunnel
 * we call things through the following stubs.
 */
struct udp_tunnel_nic_ops {
	void (*get_port)(struct net_device *dev, unsigned int table,
			 unsigned int idx, struct udp_tunnel_info *ti);
	void (*set_port_priv)(struct net_device *dev, unsigned int table,
			      unsigned int idx, u8 priv);
	void (*add_port)(struct net_device *dev, struct udp_tunnel_info *ti);
	void (*del_port)(struct net_device *dev, struct udp_tunnel_info *ti);
	void (*reset_ntf)(struct net_device *dev);

	size_t (*dump_size)(struct net_device *dev, unsigned int table);
	int (*dump_write)(struct net_device *dev, unsigned int table,
			  struct sk_buff *skb);
	void (*assert_locked)(struct net_device *dev);
	void (*lock)(struct net_device *dev);
	void (*unlock)(struct net_device *dev);
};

#ifdef CONFIG_INET
extern const struct udp_tunnel_nic_ops *udp_tunnel_nic_ops;
#else
#define udp_tunnel_nic_ops	((struct udp_tunnel_nic_ops *)NULL)
#endif

static inline void
udp_tunnel_nic_get_port(struct net_device *dev, unsigned int table,
			unsigned int idx, struct udp_tunnel_info *ti)
{
	/* This helper is used from .sync_table, we indicate empty entries
	 * by zero'ed @ti. Drivers which need to know the details of a port
	 * when it gets deleted should use the .set_port / .unset_port
	 * callbacks.
	 * Zero out here, otherwise !CONFIG_INET causes uninitialized warnings.
	 */
	memset(ti, 0, sizeof(*ti));

	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->get_port(dev, table, idx, ti);
}
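
/* Example (illustrative sketch): a driver .sync_table callback that walks
 * its table with udp_tunnel_nic_get_port(); a zeroed @ti marks an empty
 * slot. MY_TABLE_ENTRIES and my_write_port() are placeholders for the
 * driver's table size and device-specific programming step.
 *
 *	static int my_udp_tunnel_sync_table(struct net_device *dev,
 *					    unsigned int table)
 *	{
 *		struct udp_tunnel_info ti;
 *		unsigned int i;
 *
 *		for (i = 0; i < MY_TABLE_ENTRIES; i++) {
 *			udp_tunnel_nic_get_port(dev, table, i, &ti);
 *			my_write_port(dev, table, i, &ti);
 *		}
 *		return 0;
 *	}
 */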

static inline void
udp_tunnel_nic_set_port_priv(struct net_device *dev, unsigned int table,
			     unsigned int idx, u8 priv)
{
	if (udp_tunnel_nic_ops) {
		udp_tunnel_nic_ops->assert_locked(dev);
		udp_tunnel_nic_ops->set_port_priv(dev, table, idx, priv);
	}
}

static inline void udp_tunnel_nic_assert_locked(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->assert_locked(dev);
}

static inline void udp_tunnel_nic_lock(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->lock(dev);
}

static inline void udp_tunnel_nic_unlock(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->unlock(dev);
}

static inline void
udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->add_port(dev, ti);
}

static inline void
udp_tunnel_nic_del_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->del_port(dev, ti);
}

/**
 * udp_tunnel_nic_reset_ntf() - device-originating reset notification
 * @dev: network interface device structure
 *
 * Called by the driver to inform the core that the entire UDP tunnel port
 * state has been lost, usually due to device reset. Core will assume device
 * forgot all the ports and issue .set_port and .sync_table callbacks as
 * necessary.
 *
 * This function must be called with rtnl lock held, and will issue all
 * the callbacks before returning.
 */
static inline void udp_tunnel_nic_reset_ntf(struct net_device *dev)
{
	if (udp_tunnel_nic_ops)
		udp_tunnel_nic_ops->reset_ntf(dev);
}

static inline size_t
udp_tunnel_nic_dump_size(struct net_device *dev, unsigned int table)
{
	size_t ret;

	if (!udp_tunnel_nic_ops)
		return 0;

	udp_tunnel_nic_ops->lock(dev);
	ret = udp_tunnel_nic_ops->dump_size(dev, table);
	udp_tunnel_nic_ops->unlock(dev);

	return ret;
}

static inline int
udp_tunnel_nic_dump_write(struct net_device *dev, unsigned int table,
			  struct sk_buff *skb)
{
	int ret;

	if (!udp_tunnel_nic_ops)
		return 0;

	udp_tunnel_nic_ops->lock(dev);
	ret = udp_tunnel_nic_ops->dump_write(dev, table, skb);
	udp_tunnel_nic_ops->unlock(dev);

	return ret;
}

static inline void udp_tunnel_get_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	udp_tunnel_nic_assert_locked(dev);
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_PUSH_INFO, dev);
}

static inline void udp_tunnel_drop_rx_info(struct net_device *dev)
{
	ASSERT_RTNL();
	if (!(dev->features & NETIF_F_RX_UDP_TUNNEL_PORT))
		return;
	udp_tunnel_nic_assert_locked(dev);
	call_netdevice_notifiers(NETDEV_UDP_TUNNEL_DROP_INFO, dev);
}

#endif