1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4 
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19 
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/inet_dscp.h>
23 #include <net/ip.h>
24 #include <net/route.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_fib.h>
27 #include <net/flow.h>
28 #include <net/gro_cells.h>
29 
30 #include <linux/interrupt.h>
31 
32 #ifdef CONFIG_XFRM_STATISTICS
33 #include <net/snmp.h>
34 #endif
35 
36 #define XFRM_PROTO_ESP		50
37 #define XFRM_PROTO_AH		51
38 #define XFRM_PROTO_COMP		108
39 #define XFRM_PROTO_IPIP		4
40 #define XFRM_PROTO_IPV6		41
41 #define XFRM_PROTO_IPTFS	IPPROTO_AGGFRAG
42 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
43 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
44 
45 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
46 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
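/*
 * For instance, XFRM_ALIGN4(10) evaluates to 12 and XFRM_ALIGN8(10) to 16;
 * lengths that are already aligned are returned unchanged.
 */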
47 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
48 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
49 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
50 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
51 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
52 	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
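/*
 * Example (illustrative; AF_INET and XFRM_PROTO_ESP are numeric macros, so
 * they stringify to their values): MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP)
 * expands to MODULE_ALIAS("xfrm-type-2-50"), which lets the xfrm core
 * demand-load the ESP module by alias when an IPv4 ESP state is created.
 */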
53 
54 #ifdef CONFIG_XFRM_STATISTICS
55 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
56 #define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
57 #else
58 #define XFRM_INC_STATS(net, field)	((void)(net))
59 #define XFRM_ADD_STATS(net, field, val) ((void)(net))
60 #endif
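/*
 * Usage sketch (illustrative; LINUX_MIB_XFRMINERROR is one of the xfrm MIB
 * counters): error paths typically bump a counter with
 *
 *	XFRM_INC_STATS(net, LINUX_MIB_XFRMINERROR);
 *
 * which degenerates to a void cast of "net" when CONFIG_XFRM_STATISTICS is off.
 */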
61 
62 
63 /* Organization of SPD aka "XFRM rules"
64    ------------------------------------
65 
66    Basic objects:
67    - policy rule, struct xfrm_policy (=SPD entry)
68    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
69    - instance of a transformer, struct xfrm_state (=SA)
70    - template to clone xfrm_state, struct xfrm_tmpl
71 
72    SPD is organized as a hash table (for policies that meet the minimum address
73    prefix length setting, net->xfrm.policy_hthresh).  Other policies are stored
74    in lists sorted into an rbtree ordered by destination and source address networks.
75    See net/xfrm/xfrm_policy.c for details.
76 
77    (To be compatible with existing pfkeyv2 implementations,
78    many rules with priority of 0x7fffffff are allowed to exist and
79    such rules are ordered in an unpredictable way, thanks to bsd folks.)
80 
81    If "action" is "block", then we prohibit the flow, otherwise:
82    if "xfrm_nr" is zero, the flow passes untransformed. Otherwise,
83    the policy entry has a list of up to XFRM_MAX_DEPTH transformations,
84    described by xfrm_tmpl templates. Each template is resolved
85    to a complete xfrm_state (see below) and we pack the bundle of
86    transformations into a dst_entry returned to the requester.
87 
88    dst -. xfrm  .-> xfrm_state #1
89     |---. child .-> dst -. xfrm .-> xfrm_state #2
90                      |---. child .-> dst -. xfrm .-> xfrm_state #3
91                                       |---. child .-> NULL
92 
93 
94    Resolution of xfrm_tmpl
95    -----------------------
96    Template contains:
97    1. ->mode		Mode: transport or tunnel
98    2. ->id.proto	Protocol: AH/ESP/IPCOMP
99    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
100       Q: allow to resolve security gateway?
101    4. ->id.spi          If not zero, static SPI.
102    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
103    6. ->algos		List of allowed algos. Plain bitmask now.
104       Q: ealgos, aalgos, calgos. What a mess...
105    7. ->share		Sharing mode.
106       Q: how to implement private sharing mode? To add struct sock* to
107       flow id?
108 
109   Given this template, we search the SAD for entries with the appropriate
110   mode/proto/algo that are permitted by the selector.
111   If no appropriate entry is found, one is requested from the key manager.
112 
113    PROBLEMS:
114    Q: How to find all the bundles referring to a physical path for
115       PMTU discovery? Seems, dst should contain list of all parents...
116       and enter to infinite locking hierarchy disaster.
117       No! It is easier, we will not search for them, let them find us.
118       We add genid to each dst plus pointer to genid of raw IP route,
119       pmtu disc will update pmtu on raw IP route and increase its genid.
120       dst_check() will see this for top level and trigger resyncing
121       metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
122  */
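/*
 * A minimal sketch (not part of this header) of walking the bundle pictured
 * above once a flow has been resolved; it relies only on dst->xfrm and
 * xfrm_dst_child(), both provided further down in this file:
 *
 *	static void walk_bundle(struct dst_entry *dst)
 *	{
 *		while (dst && dst->xfrm) {
 *			pr_debug("SA proto %u spi 0x%x\n",
 *				 dst->xfrm->id.proto, ntohl(dst->xfrm->id.spi));
 *			dst = xfrm_dst_child(dst);
 *		}
 *	}
 */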
123 
124 struct xfrm_state_walk {
125 	struct list_head	all;
126 	u8			state;
127 	u8			dying;
128 	u8			proto;
129 	u32			seq;
130 	struct xfrm_address_filter *filter;
131 };
132 
133 enum {
134 	XFRM_DEV_OFFLOAD_IN = 1,
135 	XFRM_DEV_OFFLOAD_OUT,
136 	XFRM_DEV_OFFLOAD_FWD,
137 };
138 
139 enum {
140 	XFRM_DEV_OFFLOAD_UNSPECIFIED,
141 	XFRM_DEV_OFFLOAD_CRYPTO,
142 	XFRM_DEV_OFFLOAD_PACKET,
143 };
144 
145 enum {
146 	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
147 };
148 
149 struct xfrm_dev_offload {
150 	struct net_device	*dev;
151 	netdevice_tracker	dev_tracker;
152 	struct net_device	*real_dev;
153 	unsigned long		offload_handle;
154 	u8			dir : 2;
155 	u8			type : 2;
156 	u8			flags : 2;
157 };
158 
159 struct xfrm_mode {
160 	u8 encap;
161 	u8 family;
162 	u8 flags;
163 };
164 
165 /* Flags for xfrm_mode. */
166 enum {
167 	XFRM_MODE_FLAG_TUNNEL = 1,
168 };
169 
170 enum xfrm_replay_mode {
171 	XFRM_REPLAY_MODE_LEGACY,
172 	XFRM_REPLAY_MODE_BMP,
173 	XFRM_REPLAY_MODE_ESN,
174 };
175 
176 /* Full description of state of transformer. */
177 struct xfrm_state {
178 	possible_net_t		xs_net;
179 	union {
180 		struct hlist_node	gclist;
181 		struct hlist_node	bydst;
182 	};
183 	union {
184 		struct hlist_node	dev_gclist;
185 		struct hlist_node	bysrc;
186 	};
187 	struct hlist_node	byspi;
188 	struct hlist_node	byseq;
189 	struct hlist_node	state_cache;
190 	struct hlist_node	state_cache_input;
191 
192 	refcount_t		refcnt;
193 	spinlock_t		lock;
194 
195 	u32			pcpu_num;
196 	struct xfrm_id		id;
197 	struct xfrm_selector	sel;
198 	struct xfrm_mark	mark;
199 	u32			if_id;
200 	u32			tfcpad;
201 
202 	u32			genid;
203 
204 	/* Key manager bits */
205 	struct xfrm_state_walk	km;
206 
207 	/* Parameters of this state. */
208 	struct {
209 		u32		reqid;
210 		u8		mode;
211 		u8		replay_window;
212 		u8		aalgo, ealgo, calgo;
213 		u8		flags;
214 		u16		family;
215 		xfrm_address_t	saddr;
216 		int		header_len;
217 		int		enc_hdr_len;
218 		int		trailer_len;
219 		u32		extra_flags;
220 		struct xfrm_mark	smark;
221 	} props;
222 
223 	struct xfrm_lifetime_cfg lft;
224 
225 	/* Data for transformer */
226 	struct xfrm_algo_auth	*aalg;
227 	struct xfrm_algo	*ealg;
228 	struct xfrm_algo	*calg;
229 	struct xfrm_algo_aead	*aead;
230 	const char		*geniv;
231 
232 	/* mapping change rate limiting */
233 	__be16 new_mapping_sport;
234 	u32 new_mapping;	/* seconds */
235 	u32 mapping_maxage;	/* seconds for input SA */
236 
237 	/* Data for encapsulator */
238 	struct xfrm_encap_tmpl	*encap;
239 
240 	/* NAT keepalive */
241 	u32			nat_keepalive_interval; /* seconds */
242 	time64_t		nat_keepalive_expiration;
243 
244 	/* Data for care-of address */
245 	xfrm_address_t	*coaddr;
246 
247 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
248 	struct xfrm_state	*tunnel;
249 
250 	/* If a tunnel, number of users + 1 */
251 	atomic_t		tunnel_users;
252 
253 	/* State for replay detection */
254 	struct xfrm_replay_state replay;
255 	struct xfrm_replay_state_esn *replay_esn;
256 
257 	/* Replay detection state at the time we sent the last notification */
258 	struct xfrm_replay_state preplay;
259 	struct xfrm_replay_state_esn *preplay_esn;
260 
261 	/* replay detection mode */
262 	enum xfrm_replay_mode    repl_mode;
263 	/* internal flag that only holds state for delayed aevent at the
264 	 * moment
265 	*/
266 	u32			xflags;
267 
268 	/* Replay detection notification settings */
269 	u32			replay_maxage;
270 	u32			replay_maxdiff;
271 
272 	/* Replay detection notification timer */
273 	struct timer_list	rtimer;
274 
275 	/* Statistics */
276 	struct xfrm_stats	stats;
277 
278 	struct xfrm_lifetime_cur curlft;
279 	struct hrtimer		mtimer;
280 
281 	struct xfrm_dev_offload xso;
282 
283 	/* used to fix curlft->add_time when changing date */
284 	long		saved_tmo;
285 
286 	/* Last used time */
287 	time64_t		lastused;
288 
289 	struct page_frag xfrag;
290 
291 	/* Reference to data common to all the instances of this
292 	 * transformer. */
293 	const struct xfrm_type	*type;
294 	struct xfrm_mode	inner_mode;
295 	struct xfrm_mode	inner_mode_iaf;
296 	struct xfrm_mode	outer_mode;
297 
298 	const struct xfrm_type_offload	*type_offload;
299 
300 	/* Security context */
301 	struct xfrm_sec_ctx	*security;
302 
303 	/* Private data of this transformer, format is opaque,
304 	 * interpreted by xfrm_type methods. */
305 	void			*data;
306 	u8			dir;
307 
308 	const struct xfrm_mode_cbs	*mode_cbs;
309 	void				*mode_data;
310 };
311 
312 static inline struct net *xs_net(struct xfrm_state *x)
313 {
314 	return read_pnet(&x->xs_net);
315 }
316 
317 /* xflags - make enum if more show up */
318 #define XFRM_TIME_DEFER	1
319 #define XFRM_SOFT_EXPIRE 2
320 
321 enum {
322 	XFRM_STATE_VOID,
323 	XFRM_STATE_ACQ,
324 	XFRM_STATE_VALID,
325 	XFRM_STATE_ERROR,
326 	XFRM_STATE_EXPIRED,
327 	XFRM_STATE_DEAD
328 };
329 
330 /* callback structure passed from either netlink or pfkey */
331 struct km_event {
332 	union {
333 		u32 hard;
334 		u32 proto;
335 		u32 byid;
336 		u32 aevent;
337 		u32 type;
338 	} data;
339 
340 	u32	seq;
341 	u32	portid;
342 	u32	event;
343 	struct net *net;
344 };
345 
346 struct xfrm_if_decode_session_result {
347 	struct net *net;
348 	u32 if_id;
349 };
350 
351 struct xfrm_if_cb {
352 	bool (*decode_session)(struct sk_buff *skb,
353 			       unsigned short family,
354 			       struct xfrm_if_decode_session_result *res);
355 };
356 
357 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
358 void xfrm_if_unregister_cb(void);
359 
360 struct xfrm_dst_lookup_params {
361 	struct net *net;
362 	dscp_t dscp;
363 	int oif;
364 	xfrm_address_t *saddr;
365 	xfrm_address_t *daddr;
366 	u32 mark;
367 	__u8 ipproto;
368 	union flowi_uli uli;
369 };
370 
371 struct net_device;
372 struct xfrm_type;
373 struct xfrm_dst;
374 struct xfrm_policy_afinfo {
375 	struct dst_ops		*dst_ops;
376 	struct dst_entry	*(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
377 	int			(*get_saddr)(xfrm_address_t *saddr,
378 					     const struct xfrm_dst_lookup_params *params);
379 	int			(*fill_dst)(struct xfrm_dst *xdst,
380 					    struct net_device *dev,
381 					    const struct flowi *fl);
382 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
383 };
384 
385 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
386 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
387 void km_policy_notify(struct xfrm_policy *xp, int dir,
388 		      const struct km_event *c);
389 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
390 
391 struct xfrm_tmpl;
392 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
393 	     struct xfrm_policy *pol);
394 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
395 int __xfrm_state_delete(struct xfrm_state *x);
396 
397 struct xfrm_state_afinfo {
398 	u8				family;
399 	u8				proto;
400 
401 	const struct xfrm_type_offload *type_offload_esp;
402 
403 	const struct xfrm_type		*type_esp;
404 	const struct xfrm_type		*type_ipip;
405 	const struct xfrm_type		*type_ipip6;
406 	const struct xfrm_type		*type_comp;
407 	const struct xfrm_type		*type_ah;
408 	const struct xfrm_type		*type_routing;
409 	const struct xfrm_type		*type_dstopts;
410 
411 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
412 	int			(*transport_finish)(struct sk_buff *skb,
413 						    int async);
414 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
415 };
416 
417 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
418 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
419 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
420 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
421 
422 struct xfrm_input_afinfo {
423 	u8			family;
424 	bool			is_ipip;
425 	int			(*callback)(struct sk_buff *skb, u8 protocol,
426 					    int err);
427 };
428 
429 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
430 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
431 
432 void xfrm_flush_gc(void);
433 void xfrm_state_delete_tunnel(struct xfrm_state *x);
434 
435 struct xfrm_type {
436 	struct module		*owner;
437 	u8			proto;
438 	u8			flags;
439 #define XFRM_TYPE_NON_FRAGMENT	1
440 #define XFRM_TYPE_REPLAY_PROT	2
441 #define XFRM_TYPE_LOCAL_COADDR	4
442 #define XFRM_TYPE_REMOTE_COADDR	8
443 
444 	int			(*init_state)(struct xfrm_state *x,
445 					      struct netlink_ext_ack *extack);
446 	void			(*destructor)(struct xfrm_state *);
447 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
448 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
449 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
450 					  const struct flowi *);
451 };
452 
453 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
454 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
455 
456 struct xfrm_type_offload {
457 	struct module	*owner;
458 	u8		proto;
459 	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
460 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
461 	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
462 };
463 
464 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
465 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
466 void xfrm_set_type_offload(struct xfrm_state *x);
467 static inline void xfrm_unset_type_offload(struct xfrm_state *x)
468 {
469 	if (!x->type_offload)
470 		return;
471 
472 	module_put(x->type_offload->owner);
473 	x->type_offload = NULL;
474 }
475 
476 /**
477  * struct xfrm_mode_cbs - XFRM mode callbacks
478  * @owner: module owner or NULL
479  * @init_state: Add/init mode specific state in `xfrm_state *x`
480  * @clone_state: Copy mode specific values from `orig` to new state `x`
481  * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
482  * @user_init: Process mode specific netlink attributes from user
483  * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
484  * @sa_len: Return space required to store mode specific netlink attributes
485  * @get_inner_mtu: Return avail payload space after removing encap overhead
486  * @input: Process received packet from SA using mode
487  * @output: Output given packet using mode
488  * @prepare_output: Add mode specific encapsulation to packet in skb. On return
489  *	`transport_header` should point at ESP header, `network_header` should
490  *	point at outer IP header and `mac_header` should point at the
491  *	protocol/nexthdr field of the outer IP.
492  *
493  * One should examine and understand the specific uses of these callbacks in
494  * xfrm for further detail on how and when these functions are called. RTSL.
495  */
496 struct xfrm_mode_cbs {
497 	struct module	*owner;
498 	int	(*init_state)(struct xfrm_state *x);
499 	int	(*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
500 	void	(*destroy_state)(struct xfrm_state *x);
501 	int	(*user_init)(struct net *net, struct xfrm_state *x,
502 			     struct nlattr **attrs,
503 			     struct netlink_ext_ack *extack);
504 	int	(*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
505 	unsigned int (*sa_len)(const struct xfrm_state *x);
506 	u32	(*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
507 	int	(*input)(struct xfrm_state *x, struct sk_buff *skb);
508 	int	(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
509 	int	(*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
510 };
511 
512 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
513 void xfrm_unregister_mode_cbs(u8 mode);
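/*
 * Registration sketch (the example_* callbacks are hypothetical; IPTFS is the
 * current in-tree user of this interface): a module implementing an
 * encapsulation mode fills in the callbacks it needs and registers them for
 * its mode value, dropping them again on module exit:
 *
 *	static const struct xfrm_mode_cbs example_mode_cbs = {
 *		.owner		= THIS_MODULE,
 *		.input		= example_mode_input,
 *		.output		= example_mode_output,
 *		.prepare_output	= example_mode_prepare_output,
 *	};
 *
 *	err = xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &example_mode_cbs);
 *	...
 *	xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);
 */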
514 
515 static inline int xfrm_af2proto(unsigned int family)
516 {
517 	switch(family) {
518 	case AF_INET:
519 		return IPPROTO_IPIP;
520 	case AF_INET6:
521 		return IPPROTO_IPV6;
522 	default:
523 		return 0;
524 	}
525 }
526 
527 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
528 {
529 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
530 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
531 		return &x->inner_mode;
532 	else
533 		return &x->inner_mode_iaf;
534 }
535 
536 struct xfrm_tmpl {
537 /* id in template is interpreted as:
538  * daddr - destination of tunnel, may be zero for transport mode.
539  * spi   - zero to acquire spi. Not zero if spi is static, then
540  *	   daddr must be fixed too.
541  * proto - AH/ESP/IPCOMP
542  */
543 	struct xfrm_id		id;
544 
545 /* Source address of tunnel. Ignored, if it is not a tunnel. */
546 	xfrm_address_t		saddr;
547 
548 	unsigned short		encap_family;
549 
550 	u32			reqid;
551 
552 /* Mode: transport, tunnel etc. */
553 	u8			mode;
554 
555 /* Sharing mode: unique, this session only, this user only etc. */
556 	u8			share;
557 
558 /* May skip this transformation if no SA is found */
559 	u8			optional;
560 
561 /* Skip aalgos/ealgos/calgos checks. */
562 	u8			allalgs;
563 
564 /* Bit mask of algos allowed for acquisition */
565 	u32			aalgos;
566 	u32			ealgos;
567 	u32			calgos;
568 };
569 
570 #define XFRM_MAX_DEPTH		6
571 #define XFRM_MAX_OFFLOAD_DEPTH	1
572 
573 struct xfrm_policy_walk_entry {
574 	struct list_head	all;
575 	u8			dead;
576 };
577 
578 struct xfrm_policy_walk {
579 	struct xfrm_policy_walk_entry walk;
580 	u8 type;
581 	u32 seq;
582 };
583 
584 struct xfrm_policy_queue {
585 	struct sk_buff_head	hold_queue;
586 	struct timer_list	hold_timer;
587 	unsigned long		timeout;
588 };
589 
590 /**
591  *	struct xfrm_policy - xfrm policy
592  *	@xp_net: network namespace the policy lives in
593  *	@bydst: hlist node for SPD hash table or rbtree list
594  *	@byidx: hlist node for index hash table
595  *	@state_cache_list: hlist head for policy cached xfrm states
596  *	@lock: serialize changes to policy structure members
597  *	@refcnt: reference count, freed once it reaches 0
598  *	@pos: kernel internal tie-breaker to determine age of policy
599  *	@timer: timer
600  *	@genid: generation, used to invalidate old policies
601  *	@priority: priority, set by userspace
602  *	@index:  policy index (autogenerated)
603  *	@if_id: virtual xfrm interface id
604  *	@mark: packet mark
605  *	@selector: selector
606  *	@lft: lifetime configuration data
607  *	@curlft: lifetime state
608  *	@walk: list head on pernet policy list
609  *	@polq: queue to hold packets while an acquire operation is in progress
610  *	@bydst_reinsert: policy tree node needs to be merged
611  *	@type: XFRM_POLICY_TYPE_MAIN or _SUB
612  *	@action: XFRM_POLICY_ALLOW or _BLOCK
613  *	@flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
614  *	@xfrm_nr: number of used templates in @xfrm_vec
615  *	@family: protocol family
616  *	@security: SELinux security label
617  *	@xfrm_vec: array of templates to resolve state
618  *	@rcu: rcu head, used to defer memory release
619  *	@xdo: hardware offload state
620  */
621 struct xfrm_policy {
622 	possible_net_t		xp_net;
623 	struct hlist_node	bydst;
624 	struct hlist_node	byidx;
625 
626 	struct hlist_head	state_cache_list;
627 
628 	/* This lock only affects elements except for entry. */
629 	rwlock_t		lock;
630 	refcount_t		refcnt;
631 	u32			pos;
632 	struct timer_list	timer;
633 
634 	atomic_t		genid;
635 	u32			priority;
636 	u32			index;
637 	u32			if_id;
638 	struct xfrm_mark	mark;
639 	struct xfrm_selector	selector;
640 	struct xfrm_lifetime_cfg lft;
641 	struct xfrm_lifetime_cur curlft;
642 	struct xfrm_policy_walk_entry walk;
643 	struct xfrm_policy_queue polq;
644 	bool                    bydst_reinsert;
645 	u8			type;
646 	u8			action;
647 	u8			flags;
648 	u8			xfrm_nr;
649 	u16			family;
650 	struct xfrm_sec_ctx	*security;
651 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
652 	struct rcu_head		rcu;
653 
654 	struct xfrm_dev_offload xdo;
655 };
656 
657 static inline struct net *xp_net(const struct xfrm_policy *xp)
658 {
659 	return read_pnet(&xp->xp_net);
660 }
661 
662 struct xfrm_kmaddress {
663 	xfrm_address_t          local;
664 	xfrm_address_t          remote;
665 	u32			reserved;
666 	u16			family;
667 };
668 
669 struct xfrm_migrate {
670 	xfrm_address_t		old_daddr;
671 	xfrm_address_t		old_saddr;
672 	xfrm_address_t		new_daddr;
673 	xfrm_address_t		new_saddr;
674 	u8			proto;
675 	u8			mode;
676 	u16			reserved;
677 	u32			reqid;
678 	u16			old_family;
679 	u16			new_family;
680 };
681 
682 #define XFRM_KM_TIMEOUT                30
683 /* what happened */
684 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
685 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
686 
687 /* default aevent timeout in units of 100ms */
688 #define XFRM_AE_ETIME			10
689 /* Async Event timer multiplier */
690 #define XFRM_AE_ETH_M			10
691 /* default seq threshold size */
692 #define XFRM_AE_SEQT_SIZE		2
693 
694 struct xfrm_mgr {
695 	struct list_head	list;
696 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
697 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
698 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
699 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
700 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
701 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
702 	int			(*migrate)(const struct xfrm_selector *sel,
703 					   u8 dir, u8 type,
704 					   const struct xfrm_migrate *m,
705 					   int num_bundles,
706 					   const struct xfrm_kmaddress *k,
707 					   const struct xfrm_encap_tmpl *encap);
708 	bool			(*is_alive)(const struct km_event *c);
709 };
710 
711 void xfrm_register_km(struct xfrm_mgr *km);
712 void xfrm_unregister_km(struct xfrm_mgr *km);
713 
714 struct xfrm_tunnel_skb_cb {
715 	union {
716 		struct inet_skb_parm h4;
717 		struct inet6_skb_parm h6;
718 	} header;
719 
720 	union {
721 		struct ip_tunnel *ip4;
722 		struct ip6_tnl *ip6;
723 	} tunnel;
724 };
725 
726 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
727 
728 /*
729  * This structure is used for the duration where packets are being
730  * transformed by IPsec.  As soon as the packet leaves IPsec the
731  * area beyond the generic IP part may be overwritten.
732  */
733 struct xfrm_skb_cb {
734 	struct xfrm_tunnel_skb_cb header;
735 
736         /* Sequence number for replay protection. */
737 	union {
738 		struct {
739 			__u32 low;
740 			__u32 hi;
741 		} output;
742 		struct {
743 			__be32 low;
744 			__be32 hi;
745 		} input;
746 	} seq;
747 };
748 
749 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
750 
751 /*
752  * This structure is used by the afinfo prepare_input/prepare_output functions
753  * to transmit header information to the mode input/output functions.
754  */
755 struct xfrm_mode_skb_cb {
756 	struct xfrm_tunnel_skb_cb header;
757 
758 	/* Copied from header for IPv4, always set to zero and DF for IPv6. */
759 	__be16 id;
760 	__be16 frag_off;
761 
762 	/* IP header length (excluding options or extension headers). */
763 	u8 ihl;
764 
765 	/* TOS for IPv4, class for IPv6. */
766 	u8 tos;
767 
768 	/* TTL for IPv4, hop limit for IPv6. */
769 	u8 ttl;
770 
771 	/* Protocol for IPv4, NH for IPv6. */
772 	u8 protocol;
773 
774 	/* Option length for IPv4, zero for IPv6. */
775 	u8 optlen;
776 
777 	/* Used by IPv6 only, zero for IPv4. */
778 	u8 flow_lbl[3];
779 };
780 
781 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
782 
783 /*
784  * This structure is used by the input processing to locate the SPI and
785  * related information.
786  */
787 struct xfrm_spi_skb_cb {
788 	struct xfrm_tunnel_skb_cb header;
789 
790 	unsigned int daddroff;
791 	unsigned int family;
792 	__be32 seq;
793 };
794 
795 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
796 
797 #ifdef CONFIG_AUDITSYSCALL
798 static inline struct audit_buffer *xfrm_audit_start(const char *op)
799 {
800 	struct audit_buffer *audit_buf = NULL;
801 
802 	if (audit_enabled == AUDIT_OFF)
803 		return NULL;
804 	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
805 				    AUDIT_MAC_IPSEC_EVENT);
806 	if (audit_buf == NULL)
807 		return NULL;
808 	audit_log_format(audit_buf, "op=%s", op);
809 	return audit_buf;
810 }
811 
812 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
813 					     struct audit_buffer *audit_buf)
814 {
815 	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
816 					    audit_get_loginuid(current) :
817 					    INVALID_UID);
818 	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
819 		AUDIT_SID_UNSET;
820 
821 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
822 	audit_log_task_context(audit_buf);
823 }
824 
825 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
826 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
827 			      bool task_valid);
828 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
829 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
830 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
831 				      struct sk_buff *skb);
832 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
833 			     __be32 net_seq);
834 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
835 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
836 			       __be32 net_seq);
837 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
838 			      u8 proto);
839 #else
840 
841 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
842 					 bool task_valid)
843 {
844 }
845 
846 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
847 					    bool task_valid)
848 {
849 }
850 
851 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
852 					bool task_valid)
853 {
854 }
855 
856 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
857 					   bool task_valid)
858 {
859 }
860 
861 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
862 					     struct sk_buff *skb)
863 {
864 }
865 
866 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
867 					   struct sk_buff *skb, __be32 net_seq)
868 {
869 }
870 
871 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
872 				      u16 family)
873 {
874 }
875 
876 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
877 				      __be32 net_spi, __be32 net_seq)
878 {
879 }
880 
881 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
882 				     struct sk_buff *skb, u8 proto)
883 {
884 }
885 #endif /* CONFIG_AUDITSYSCALL */
886 
887 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
888 {
889 	if (likely(policy != NULL))
890 		refcount_inc(&policy->refcnt);
891 }
892 
893 void xfrm_policy_destroy(struct xfrm_policy *policy);
894 
895 static inline void xfrm_pol_put(struct xfrm_policy *policy)
896 {
897 	if (refcount_dec_and_test(&policy->refcnt))
898 		xfrm_policy_destroy(policy);
899 }
900 
901 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
902 {
903 	int i;
904 	for (i = npols - 1; i >= 0; --i)
905 		xfrm_pol_put(pols[i]);
906 }
907 
908 void __xfrm_state_destroy(struct xfrm_state *, bool);
909 
910 static inline void __xfrm_state_put(struct xfrm_state *x)
911 {
912 	refcount_dec(&x->refcnt);
913 }
914 
915 static inline void xfrm_state_put(struct xfrm_state *x)
916 {
917 	if (refcount_dec_and_test(&x->refcnt))
918 		__xfrm_state_destroy(x, false);
919 }
920 
921 static inline void xfrm_state_put_sync(struct xfrm_state *x)
922 {
923 	if (refcount_dec_and_test(&x->refcnt))
924 		__xfrm_state_destroy(x, true);
925 }
926 
927 static inline void xfrm_state_hold(struct xfrm_state *x)
928 {
929 	refcount_inc(&x->refcnt);
930 }
931 
932 static inline bool addr_match(const void *token1, const void *token2,
933 			      unsigned int prefixlen)
934 {
935 	const __be32 *a1 = token1;
936 	const __be32 *a2 = token2;
937 	unsigned int pdw;
938 	unsigned int pbi;
939 
940 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
941 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
942 
943 	if (pdw)
944 		if (memcmp(a1, a2, pdw << 2))
945 			return false;
946 
947 	if (pbi) {
948 		__be32 mask;
949 
950 		mask = htonl((0xffffffff) << (32 - pbi));
951 
952 		if ((a1[pdw] ^ a2[pdw]) & mask)
953 			return false;
954 	}
955 
956 	return true;
957 }
958 
959 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
960 {
961 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
962 	if (sizeof(long) == 4 && prefixlen == 0)
963 		return true;
964 	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
965 }
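/*
 * Worked example: with prefixlen == 24 the mask above is htonl(0xffffff00), so
 *
 *	addr4_match(htonl(0xc0a80101), htonl(0xc0a801ff), 24)	-> true
 *	addr4_match(htonl(0xc0a80101), htonl(0xc0a801ff), 32)	-> false
 *
 * i.e. 192.168.1.1 and 192.168.1.255 share a /24 but differ as /32.
 */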
966 
967 static __inline__
968 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
969 {
970 	__be16 port;
971 	switch(fl->flowi_proto) {
972 	case IPPROTO_TCP:
973 	case IPPROTO_UDP:
974 	case IPPROTO_UDPLITE:
975 	case IPPROTO_SCTP:
976 		port = uli->ports.sport;
977 		break;
978 	case IPPROTO_ICMP:
979 	case IPPROTO_ICMPV6:
980 		port = htons(uli->icmpt.type);
981 		break;
982 	case IPPROTO_MH:
983 		port = htons(uli->mht.type);
984 		break;
985 	case IPPROTO_GRE:
986 		port = htons(ntohl(uli->gre_key) >> 16);
987 		break;
988 	default:
989 		port = 0;	/*XXX*/
990 	}
991 	return port;
992 }
993 
994 static __inline__
995 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
996 {
997 	__be16 port;
998 	switch(fl->flowi_proto) {
999 	case IPPROTO_TCP:
1000 	case IPPROTO_UDP:
1001 	case IPPROTO_UDPLITE:
1002 	case IPPROTO_SCTP:
1003 		port = uli->ports.dport;
1004 		break;
1005 	case IPPROTO_ICMP:
1006 	case IPPROTO_ICMPV6:
1007 		port = htons(uli->icmpt.code);
1008 		break;
1009 	case IPPROTO_GRE:
1010 		port = htons(ntohl(uli->gre_key) & 0xffff);
1011 		break;
1012 	default:
1013 		port = 0;	/*XXX*/
1014 	}
1015 	return port;
1016 }
1017 
1018 bool xfrm_selector_match(const struct xfrm_selector *sel,
1019 			 const struct flowi *fl, unsigned short family);
1020 
1021 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1022 /*	If neither has a context --> match
1023  * 	Otherwise, both must have a context and the sids, doi, alg must match
1024  */
1025 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
1026 {
1027 	return ((!s1 && !s2) ||
1028 		(s1 && s2 &&
1029 		 (s1->ctx_sid == s2->ctx_sid) &&
1030 		 (s1->ctx_doi == s2->ctx_doi) &&
1031 		 (s1->ctx_alg == s2->ctx_alg)));
1032 }
1033 #else
1034 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
1035 {
1036 	return true;
1037 }
1038 #endif
1039 
1040 /* A struct encoding a bundle of transformations to apply to some set of flows.
1041  *
1042  * xdst->child points to the next element of the bundle.
1043  * dst->xfrm   points to an instance of a transformer.
1044  *
1045  * Due to unfortunate limitations of the current routing cache, which we
1046  * have no time to fix, it mirrors struct rtable and is bound to the same
1047  * routing key, including saddr,daddr. However, we can have many bundles
1048  * differing by session id. All the bundles grow from a parent
1049  * policy rule.
1050  */
1051 struct xfrm_dst {
1052 	union {
1053 		struct dst_entry	dst;
1054 		struct rtable		rt;
1055 		struct rt6_info		rt6;
1056 	} u;
1057 	struct dst_entry *route;
1058 	struct dst_entry *child;
1059 	struct dst_entry *path;
1060 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1061 	int num_pols, num_xfrms;
1062 	u32 xfrm_genid;
1063 	u32 policy_genid;
1064 	u32 route_mtu_cached;
1065 	u32 child_mtu_cached;
1066 	u32 route_cookie;
1067 	u32 path_cookie;
1068 };
1069 
1070 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1071 {
1072 #ifdef CONFIG_XFRM
1073 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1074 		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
1075 
1076 		return xdst->path;
1077 	}
1078 #endif
1079 	return (struct dst_entry *) dst;
1080 }
1081 
1082 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1083 {
1084 #ifdef CONFIG_XFRM
1085 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1086 		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1087 		return xdst->child;
1088 	}
1089 #endif
1090 	return NULL;
1091 }
1092 
1093 #ifdef CONFIG_XFRM
1094 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1095 {
1096 	xdst->child = child;
1097 }
1098 
1099 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1100 {
1101 	xfrm_pols_put(xdst->pols, xdst->num_pols);
1102 	dst_release(xdst->route);
1103 	if (likely(xdst->u.dst.xfrm))
1104 		xfrm_state_put(xdst->u.dst.xfrm);
1105 }
1106 #endif
1107 
1108 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1109 
1110 struct xfrm_if_parms {
1111 	int link;		/* ifindex of underlying L2 interface */
1112 	u32 if_id;		/* interface identifier */
1113 	bool collect_md;
1114 };
1115 
1116 struct xfrm_if {
1117 	struct xfrm_if __rcu *next;	/* next interface in list */
1118 	struct net_device *dev;		/* virtual device associated with interface */
1119 	struct net *net;		/* netns for packet i/o */
1120 	struct xfrm_if_parms p;		/* interface parms */
1121 
1122 	struct gro_cells gro_cells;
1123 };
1124 
1125 struct xfrm_offload {
1126 	/* Output sequence number for replay protection on offloading. */
1127 	struct {
1128 		__u32 low;
1129 		__u32 hi;
1130 	} seq;
1131 
1132 	__u32			flags;
1133 #define	SA_DELETE_REQ		1
1134 #define	CRYPTO_DONE		2
1135 #define	CRYPTO_NEXT_DONE	4
1136 #define	CRYPTO_FALLBACK		8
1137 #define	XFRM_GSO_SEGMENT	16
1138 #define	XFRM_GRO		32
1139 /* 64 is free */
1140 #define	XFRM_DEV_RESUME		128
1141 #define	XFRM_XMIT		256
1142 
1143 	__u32			status;
1144 #define CRYPTO_SUCCESS				1
1145 #define CRYPTO_GENERIC_ERROR			2
1146 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1147 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1148 #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1149 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1150 #define CRYPTO_INVALID_PACKET_SYNTAX		64
1151 #define CRYPTO_INVALID_PROTOCOL			128
1152 
1153 	/* Used to keep whole l2 header for transport mode GRO */
1154 	__u32			orig_mac_len;
1155 
1156 	__u8			proto;
1157 	__u8			inner_ipproto;
1158 };
1159 
1160 struct sec_path {
1161 	int			len;
1162 	int			olen;
1163 	int			verified_cnt;
1164 
1165 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1166 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1167 };
1168 
1169 struct sec_path *secpath_set(struct sk_buff *skb);
1170 
1171 static inline void
1172 secpath_reset(struct sk_buff *skb)
1173 {
1174 #ifdef CONFIG_XFRM
1175 	skb_ext_del(skb, SKB_EXT_SEC_PATH);
1176 #endif
1177 }
1178 
1179 static inline int
1180 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1181 {
1182 	switch (family) {
1183 	case AF_INET:
1184 		return addr->a4 == 0;
1185 	case AF_INET6:
1186 		return ipv6_addr_any(&addr->in6);
1187 	}
1188 	return 0;
1189 }
1190 
1191 static inline int
1192 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1193 {
1194 	return	(tmpl->saddr.a4 &&
1195 		 tmpl->saddr.a4 != x->props.saddr.a4);
1196 }
1197 
1198 static inline int
1199 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1200 {
1201 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1202 		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1203 }
1204 
1205 static inline int
1206 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1207 {
1208 	switch (family) {
1209 	case AF_INET:
1210 		return __xfrm4_state_addr_cmp(tmpl, x);
1211 	case AF_INET6:
1212 		return __xfrm6_state_addr_cmp(tmpl, x);
1213 	}
1214 	return !0;
1215 }
1216 
1217 #ifdef CONFIG_XFRM
1218 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1219 {
1220 	struct sec_path *sp = skb_sec_path(skb);
1221 
1222 	return sp->xvec[sp->len - 1];
1223 }
1224 #endif
1225 
1226 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1227 {
1228 #ifdef CONFIG_XFRM
1229 	struct sec_path *sp = skb_sec_path(skb);
1230 
1231 	if (!sp || !sp->olen || sp->len != sp->olen)
1232 		return NULL;
1233 
1234 	return &sp->ovec[sp->olen - 1];
1235 #else
1236 	return NULL;
1237 #endif
1238 }
1239 
1240 #ifdef CONFIG_XFRM
1241 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1242 			unsigned short family);
1243 
1244 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1245 					 int dir)
1246 {
1247 	if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1248 		return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1249 
1250 	return false;
1251 }
1252 
1253 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1254 					     int dir, unsigned short family)
1255 {
1256 	if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1257 		/* same dst may be used for traffic originating from
1258 		 * devices with different policy settings.
1259 		 */
1260 		return IPCB(skb)->flags & IPSKB_NOPOLICY;
1261 	}
1262 	return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1263 }
1264 
1265 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1266 				       struct sk_buff *skb,
1267 				       unsigned int family, int reverse)
1268 {
1269 	struct net *net = dev_net(skb->dev);
1270 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1271 	struct xfrm_offload *xo = xfrm_offload(skb);
1272 	struct xfrm_state *x;
1273 
1274 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1275 		return __xfrm_policy_check(sk, ndir, skb, family);
1276 
1277 	if (xo) {
1278 		x = xfrm_input_state(skb);
1279 		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
1280 			bool check = (xo->flags & CRYPTO_DONE) &&
1281 				     (xo->status & CRYPTO_SUCCESS);
1282 
1283 			/* The packets here are plain ones and the secpath was
1284 			 * needed to indicate that hardware already handled
1285 			 * them and there is no need to do anything in addition.
1286 			 *
1287 			 * Consume secpath which was set by drivers.
1288 			 */
1289 			secpath_reset(skb);
1290 			return check;
1291 		}
1292 	}
1293 
1294 	return __xfrm_check_nopolicy(net, skb, dir) ||
1295 	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
1296 	       __xfrm_policy_check(sk, ndir, skb, family);
1297 }
1298 
1299 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1300 {
1301 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1302 }
1303 
1304 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1305 {
1306 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1307 }
1308 
1309 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1310 {
1311 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1312 }
1313 
1314 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1315 					     struct sk_buff *skb)
1316 {
1317 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1318 }
1319 
1320 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1321 					     struct sk_buff *skb)
1322 {
1323 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1324 }
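/*
 * Typical receive-path use (sketch, mirroring how IPv4 protocol handlers call
 * these helpers): drop the packet when no policy admits it.
 *
 *	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
 *		goto drop;
 */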
1325 
1326 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1327 			  unsigned int family, int reverse);
1328 
1329 static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1330 				      unsigned int family)
1331 {
1332 	return __xfrm_decode_session(net, skb, fl, family, 0);
1333 }
1334 
1335 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1336 					      struct flowi *fl,
1337 					      unsigned int family)
1338 {
1339 	return __xfrm_decode_session(net, skb, fl, family, 1);
1340 }
1341 
1342 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1343 
1344 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1345 {
1346 	struct net *net = dev_net(skb->dev);
1347 
1348 	if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
1349 	    net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
1350 		return true;
1351 
1352 	return (skb_dst(skb)->flags & DST_NOXFRM) ||
1353 	       __xfrm_route_forward(skb, family);
1354 }
1355 
1356 static inline int xfrm4_route_forward(struct sk_buff *skb)
1357 {
1358 	return xfrm_route_forward(skb, AF_INET);
1359 }
1360 
1361 static inline int xfrm6_route_forward(struct sk_buff *skb)
1362 {
1363 	return xfrm_route_forward(skb, AF_INET6);
1364 }
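/*
 * Forwarding-path sketch (similar to what the IPv4 forwarding code does):
 * a forwarded packet must be re-checked against outbound policies before
 * it is transmitted.
 *
 *	if (!xfrm4_route_forward(skb))
 *		goto drop;
 */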
1365 
1366 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1367 
1368 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1369 {
1370 	if (!sk_fullsock(osk))
1371 		return 0;
1372 	sk->sk_policy[0] = NULL;
1373 	sk->sk_policy[1] = NULL;
1374 	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1375 		return __xfrm_sk_clone_policy(sk, osk);
1376 	return 0;
1377 }
1378 
1379 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1380 
1381 static inline void xfrm_sk_free_policy(struct sock *sk)
1382 {
1383 	struct xfrm_policy *pol;
1384 
1385 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1386 	if (unlikely(pol != NULL)) {
1387 		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1388 		sk->sk_policy[0] = NULL;
1389 	}
1390 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1391 	if (unlikely(pol != NULL)) {
1392 		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1393 		sk->sk_policy[1] = NULL;
1394 	}
1395 }
1396 
1397 #else
1398 
1399 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1400 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1401 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1402 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1403 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1404 {
1405 	return 1;
1406 }
1407 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1408 {
1409 	return 1;
1410 }
1411 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1412 {
1413 	return 1;
1414 }
1415 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1416 					      struct flowi *fl,
1417 					      unsigned int family)
1418 {
1419 	return -ENOSYS;
1420 }
1421 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1422 					     struct sk_buff *skb)
1423 {
1424 	return 1;
1425 }
1426 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1427 					     struct sk_buff *skb)
1428 {
1429 	return 1;
1430 }
1431 #endif
1432 
1433 static __inline__
1434 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1435 {
1436 	switch (family){
1437 	case AF_INET:
1438 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1439 	case AF_INET6:
1440 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1441 	}
1442 	return NULL;
1443 }
1444 
1445 static __inline__
1446 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1447 {
1448 	switch (family){
1449 	case AF_INET:
1450 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1451 	case AF_INET6:
1452 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1453 	}
1454 	return NULL;
1455 }
1456 
1457 static __inline__
1458 void xfrm_flowi_addr_get(const struct flowi *fl,
1459 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1460 			 unsigned short family)
1461 {
1462 	switch(family) {
1463 	case AF_INET:
1464 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1465 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1466 		break;
1467 	case AF_INET6:
1468 		saddr->in6 = fl->u.ip6.saddr;
1469 		daddr->in6 = fl->u.ip6.daddr;
1470 		break;
1471 	}
1472 }
1473 
1474 static __inline__ int
1475 __xfrm4_state_addr_check(const struct xfrm_state *x,
1476 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1477 {
1478 	if (daddr->a4 == x->id.daddr.a4 &&
1479 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1480 		return 1;
1481 	return 0;
1482 }
1483 
1484 static __inline__ int
1485 __xfrm6_state_addr_check(const struct xfrm_state *x,
1486 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1487 {
1488 	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1489 	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1490 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1491 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1492 		return 1;
1493 	return 0;
1494 }
1495 
1496 static __inline__ int
1497 xfrm_state_addr_check(const struct xfrm_state *x,
1498 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1499 		      unsigned short family)
1500 {
1501 	switch (family) {
1502 	case AF_INET:
1503 		return __xfrm4_state_addr_check(x, daddr, saddr);
1504 	case AF_INET6:
1505 		return __xfrm6_state_addr_check(x, daddr, saddr);
1506 	}
1507 	return 0;
1508 }
1509 
1510 static __inline__ int
1511 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1512 			   unsigned short family)
1513 {
1514 	switch (family) {
1515 	case AF_INET:
1516 		return __xfrm4_state_addr_check(x,
1517 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1518 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1519 	case AF_INET6:
1520 		return __xfrm6_state_addr_check(x,
1521 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1522 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1523 	}
1524 	return 0;
1525 }
1526 
1527 static inline int xfrm_state_kern(const struct xfrm_state *x)
1528 {
1529 	return atomic_read(&x->tunnel_users);
1530 }
1531 
1532 static inline bool xfrm_id_proto_valid(u8 proto)
1533 {
1534 	switch (proto) {
1535 	case IPPROTO_AH:
1536 	case IPPROTO_ESP:
1537 	case IPPROTO_COMP:
1538 #if IS_ENABLED(CONFIG_IPV6)
1539 	case IPPROTO_ROUTING:
1540 	case IPPROTO_DSTOPTS:
1541 #endif
1542 		return true;
1543 	default:
1544 		return false;
1545 	}
1546 }
1547 
1548 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
1549 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1550 {
1551 	return (!userproto || proto == userproto ||
1552 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1553 						  proto == IPPROTO_ESP ||
1554 						  proto == IPPROTO_COMP)));
1555 }
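/*
 * For example, xfrm_id_proto_match(IPPROTO_ESP, IPSEC_PROTO_ANY) is true,
 * xfrm_id_proto_match(IPPROTO_ROUTING, IPSEC_PROTO_ANY) is false, and a
 * userproto of 0 matches any protocol.
 */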
1556 
1557 /*
1558  * xfrm algorithm information
1559  */
1560 struct xfrm_algo_aead_info {
1561 	char *geniv;
1562 	u16 icv_truncbits;
1563 };
1564 
1565 struct xfrm_algo_auth_info {
1566 	u16 icv_truncbits;
1567 	u16 icv_fullbits;
1568 };
1569 
1570 struct xfrm_algo_encr_info {
1571 	char *geniv;
1572 	u16 blockbits;
1573 	u16 defkeybits;
1574 };
1575 
1576 struct xfrm_algo_comp_info {
1577 	u16 threshold;
1578 };
1579 
1580 struct xfrm_algo_desc {
1581 	char *name;
1582 	char *compat;
1583 	u8 available:1;
1584 	u8 pfkey_supported:1;
1585 	union {
1586 		struct xfrm_algo_aead_info aead;
1587 		struct xfrm_algo_auth_info auth;
1588 		struct xfrm_algo_encr_info encr;
1589 		struct xfrm_algo_comp_info comp;
1590 	} uinfo;
1591 	struct sadb_alg desc;
1592 };
1593 
1594 /* XFRM protocol handlers.  */
1595 struct xfrm4_protocol {
1596 	int (*handler)(struct sk_buff *skb);
1597 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1598 			     int encap_type);
1599 	int (*cb_handler)(struct sk_buff *skb, int err);
1600 	int (*err_handler)(struct sk_buff *skb, u32 info);
1601 
1602 	struct xfrm4_protocol __rcu *next;
1603 	int priority;
1604 };
1605 
1606 struct xfrm6_protocol {
1607 	int (*handler)(struct sk_buff *skb);
1608 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1609 			     int encap_type);
1610 	int (*cb_handler)(struct sk_buff *skb, int err);
1611 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1612 			   u8 type, u8 code, int offset, __be32 info);
1613 
1614 	struct xfrm6_protocol __rcu *next;
1615 	int priority;
1616 };
1617 
1618 /* XFRM tunnel handlers.  */
1619 struct xfrm_tunnel {
1620 	int (*handler)(struct sk_buff *skb);
1621 	int (*cb_handler)(struct sk_buff *skb, int err);
1622 	int (*err_handler)(struct sk_buff *skb, u32 info);
1623 
1624 	struct xfrm_tunnel __rcu *next;
1625 	int priority;
1626 };
1627 
1628 struct xfrm6_tunnel {
1629 	int (*handler)(struct sk_buff *skb);
1630 	int (*cb_handler)(struct sk_buff *skb, int err);
1631 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1632 			   u8 type, u8 code, int offset, __be32 info);
1633 	struct xfrm6_tunnel __rcu *next;
1634 	int priority;
1635 };
1636 
1637 void xfrm_init(void);
1638 void xfrm4_init(void);
1639 int xfrm_state_init(struct net *net);
1640 void xfrm_state_fini(struct net *net);
1641 void xfrm4_state_init(void);
1642 void xfrm4_protocol_init(void);
1643 #ifdef CONFIG_XFRM
1644 int xfrm6_init(void);
1645 void xfrm6_fini(void);
1646 int xfrm6_state_init(void);
1647 void xfrm6_state_fini(void);
1648 int xfrm6_protocol_init(void);
1649 void xfrm6_protocol_fini(void);
1650 #else
1651 static inline int xfrm6_init(void)
1652 {
1653 	return 0;
1654 }
1655 static inline void xfrm6_fini(void)
1656 {
1657 	;
1658 }
1659 #endif
1660 
1661 #ifdef CONFIG_XFRM_STATISTICS
1662 int xfrm_proc_init(struct net *net);
1663 void xfrm_proc_fini(struct net *net);
1664 #endif
1665 
1666 int xfrm_sysctl_init(struct net *net);
1667 #ifdef CONFIG_SYSCTL
1668 void xfrm_sysctl_fini(struct net *net);
1669 #else
1670 static inline void xfrm_sysctl_fini(struct net *net)
1671 {
1672 }
1673 #endif
1674 
1675 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1676 			  struct xfrm_address_filter *filter);
1677 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1678 		    int (*func)(struct xfrm_state *, int, void*), void *);
1679 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
struct xfrm_state *xfrm_state_alloc(struct net *net);
void xfrm_state_free(struct xfrm_state *x);
struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
				   const xfrm_address_t *saddr,
				   const struct flowi *fl,
				   struct xfrm_tmpl *tmpl,
				   struct xfrm_policy *pol, int *err,
				   unsigned short family, u32 if_id);
struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
				       xfrm_address_t *daddr,
				       xfrm_address_t *saddr,
				       unsigned short family,
				       u8 mode, u8 proto, u32 reqid);
struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
					      unsigned short family);
int xfrm_state_check_expire(struct xfrm_state *x);
void xfrm_state_update_stats(struct net *net);
#ifdef CONFIG_XFRM_OFFLOAD
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xdo = &x->xso;
	struct net_device *dev = READ_ONCE(xdo->dev);

	if (dev && dev->xfrmdev_ops &&
	    dev->xfrmdev_ops->xdo_dev_state_update_stats)
		dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
}
#else
static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
#endif
void xfrm_state_insert(struct xfrm_state *x);
int xfrm_state_add(struct xfrm_state *x);
int xfrm_state_update(struct xfrm_state *x);
struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
				     const xfrm_address_t *daddr, __be32 spi,
				     u8 proto, unsigned short family);
struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
					   const xfrm_address_t *daddr,
					   __be32 spi, u8 proto,
					   unsigned short family);
struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
					    const xfrm_address_t *daddr,
					    const xfrm_address_t *saddr,
					    u8 proto,
					    unsigned short family);
#ifdef CONFIG_XFRM_SUB_POLICY
void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
		    unsigned short family);
void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
		     unsigned short family);
#else
static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
				  int n, unsigned short family)
{
}

static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
				   int n, unsigned short family)
{
}
#endif

struct xfrmk_sadinfo {
	u32 sadhcnt; /* current hash bkts */
	u32 sadhmcnt; /* max allowed hash bkts */
	u32 sadcnt; /* current running count */
};

struct xfrmk_spdinfo {
	u32 incnt;
	u32 outcnt;
	u32 fwdcnt;
	u32 inscnt;
	u32 outscnt;
	u32 fwdscnt;
	u32 spdhcnt;
	u32 spdhmcnt;
};

struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
			  bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
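/*
 * xfrm_sad_getinfo() and xfrm_spd_getinfo() only snapshot counters into the
 * caller-provided struct.  In struct xfrmk_spdinfo, incnt/outcnt/fwdcnt are
 * the per-direction policy counts, the *scnt fields count per-socket
 * policies, and spdhcnt/spdhmcnt are the current and maximum policy hash
 * bucket counts.  Sketch:
 *
 *	struct xfrmk_sadinfo si;
 *
 *	xfrm_sad_getinfo(net, &si);
 *	pr_debug("SAD: %u entries, %u/%u hash buckets\n",
 *		 si.sadcnt, si.sadhcnt, si.sadhmcnt);
 */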
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
			 int (*finish)(struct net *, struct sock *,
				       struct sk_buff *));
int xfrm_trans_queue(struct sk_buff *skb,
		     int (*finish)(struct net *, struct sock *,
				   struct sk_buff *));
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
int xfrm_output(struct sock *sk, struct sk_buff *skb);
int xfrm4_tunnel_check_size(struct sk_buff *skb);
#if IS_ENABLED(CONFIG_IPV6)
int xfrm6_tunnel_check_size(struct sk_buff *skb);
#else
static inline int xfrm6_tunnel_check_size(struct sk_buff *skb)
{
	return -EMSGSIZE;
}
#endif

#if IS_ENABLED(CONFIG_NET_PKTGEN)
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
#endif

void xfrm_local_error(struct sk_buff *skb, int mtu);
int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
		    int encap_type);
int xfrm4_transport_finish(struct sk_buff *skb, int async);
int xfrm4_rcv(struct sk_buff *skb);

static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	return xfrm_input(skb, nexthdr, spi, 0);
}

int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
		  struct ip6_tnl *t);
int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
		    int encap_type);
int xfrm6_transport_finish(struct sk_buff *skb, int async);
int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
int xfrm6_rcv(struct sk_buff *skb);
int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
		     xfrm_address_t *saddr, u8 proto);
void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
__be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
__be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);

#ifdef CONFIG_XFRM
void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
					struct sk_buff *skb);
struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
					struct sk_buff *skb);
int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
		     int optlen);
#else
static inline int xfrm_user_policy(struct sock *sk, int optname,
				   sockptr_t optval, int optlen)
{
	return -ENOPROTOOPT;
}
#endif

struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);

struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);

void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
		     int (*func)(struct xfrm_policy *, int, int, void*),
		     void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
					  const struct xfrm_mark *mark,
					  u32 if_id, u8 type, int dir,
					  struct xfrm_selector *sel,
					  struct xfrm_sec_ctx *ctx, int delete,
					  int *err);
struct xfrm_policy *xfrm_policy_byid(struct net *net,
				     const struct xfrm_mark *mark, u32 if_id,
				     u8 type, int dir, u32 id, int delete,
				     int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
		   struct netlink_ext_ack *extack);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
				 u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
				 const xfrm_address_t *daddr,
				 const xfrm_address_t *saddr, int create,
				 unsigned short family);
int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);

#ifdef CONFIG_XFRM_MIGRATE
int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
	       const struct xfrm_migrate *m, int num_bundles,
	       const struct xfrm_kmaddress *k,
	       const struct xfrm_encap_tmpl *encap);
struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
						u32 if_id);
struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
				      struct xfrm_migrate *m,
				      struct xfrm_encap_tmpl *encap);
int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_bundles,
		 struct xfrm_kmaddress *k, struct net *net,
		 struct xfrm_encap_tmpl *encap, u32 if_id,
		 struct netlink_ext_ack *extack);
#endif

int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
	      xfrm_address_t *addr);

void xfrm_input_init(void);
int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);

void xfrm_probe_algs(void);
int xfrm_count_pfkey_auth_supported(void);
int xfrm_count_pfkey_enc_supported(void);
struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
					    int probe);

static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
				    const xfrm_address_t *b)
{
	return ipv6_addr_equal((const struct in6_addr *)a,
			       (const struct in6_addr *)b);
}

static inline bool xfrm_addr_equal(const xfrm_address_t *a,
				   const xfrm_address_t *b,
				   sa_family_t family)
{
	switch (family) {
	default:
	case AF_INET:
		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
	case AF_INET6:
		return xfrm6_addr_equal(a, b);
	}
}

static inline int xfrm_policy_id2dir(u32 index)
{
	return index & 7;
}

#ifdef CONFIG_XFRM
void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
void xfrm_replay_notify(struct xfrm_state *x, int event);
int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);

static inline int xfrm_aevent_is_on(struct net *net)
{
	struct sock *nlsk;
	int ret = 0;

	rcu_read_lock();
	nlsk = rcu_dereference(net->xfrm.nlsk);
	if (nlsk)
		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
	rcu_read_unlock();
	return ret;
}

static inline int xfrm_acquire_is_on(struct net *net)
{
	struct sock *nlsk;
	int ret = 0;

	rcu_read_lock();
	nlsk = rcu_dereference(net->xfrm.nlsk);
	if (nlsk)
		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
	rcu_read_unlock();

	return ret;
}
#endif

static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
{
	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
}

static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
{
	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
}
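/*
 * The *_len() helpers above size the variable-length key payloads carried in
 * the algorithm structs: alg_key_len is in bits, so (bits + 7) / 8 rounds up
 * to whole bytes and is added to the fixed struct size.  For example, a
 * 160-bit HMAC-SHA1 key gives
 * xfrm_alg_auth_len() == sizeof(struct xfrm_algo_auth) + 20, and the ESN
 * helper adds bmp_len 32-bit words for the replay window bitmap.
 */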

#ifdef CONFIG_XFRM_MIGRATE
static inline int xfrm_replay_clone(struct xfrm_state *x,
				     struct xfrm_state *orig)
{
	x->replay_esn = kmemdup(orig->replay_esn,
				xfrm_replay_state_esn_len(orig->replay_esn),
				GFP_KERNEL);
	if (!x->replay_esn)
		return -ENOMEM;
	x->preplay_esn = kmemdup(orig->preplay_esn,
				 xfrm_replay_state_esn_len(orig->preplay_esn),
				 GFP_KERNEL);
	if (!x->preplay_esn)
		return -ENOMEM;

	return 0;
}

static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
{
	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
}

static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
}

static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
{
	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
}

static inline void xfrm_states_put(struct xfrm_state **states, int n)
{
	int i;
	for (i = 0; i < n; i++)
		xfrm_state_put(*(states + i));
}

static inline void xfrm_states_delete(struct xfrm_state **states, int n)
{
	int i;
	for (i = 0; i < n; i++)
		xfrm_state_delete(*(states + i));
}
#endif

void __init xfrm_dev_init(void);

#ifdef CONFIG_XFRM_OFFLOAD
void xfrm_dev_resume(struct sk_buff *skb);
void xfrm_dev_backlog(struct softnet_data *sd);
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo,
		       struct netlink_ext_ack *extack);
int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
			struct xfrm_user_offload *xuo, u8 dir,
			struct netlink_ext_ack *extack);
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
void xfrm_dev_state_delete(struct xfrm_state *x);
void xfrm_dev_state_free(struct xfrm_state *x);

static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
	struct xfrm_dev_offload *xso = &x->xso;
	struct net_device *dev = READ_ONCE(xso->dev);

	if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
		dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
}

static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
	struct xfrm_state *x = dst->xfrm;
	struct xfrm_dst *xdst;

	if (!x || !x->type_offload)
		return false;

	xdst = (struct xfrm_dst *) dst;
	if (!x->xso.offload_handle && !xdst->child->xfrm)
		return true;
	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
	    !xdst->child->xfrm)
		return true;

	return false;
}

static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
	struct xfrm_dev_offload *xdo = &x->xdo;
	struct net_device *dev = xdo->dev;

	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
		dev->xfrmdev_ops->xdo_dev_policy_delete(x);
}

static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
	struct xfrm_dev_offload *xdo = &x->xdo;
	struct net_device *dev = xdo->dev;

	if (dev && dev->xfrmdev_ops) {
		if (dev->xfrmdev_ops->xdo_dev_policy_free)
			dev->xfrmdev_ops->xdo_dev_policy_free(x);
		xdo->dev = NULL;
		netdev_put(dev, &xdo->dev_tracker);
	}
}
#else
static inline void xfrm_dev_resume(struct sk_buff *skb)
{
}

static inline void xfrm_dev_backlog(struct softnet_data *sd)
{
}

static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	return skb;
}

static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void xfrm_dev_state_delete(struct xfrm_state *x)
{
}

static inline void xfrm_dev_state_free(struct xfrm_state *x)
{
}

static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
				      struct xfrm_user_offload *xuo, u8 dir,
				      struct netlink_ext_ack *extack)
{
	return 0;
}

static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
{
}

static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
{
}

static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return false;
}

static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
{
}

static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
{
	return false;
}
#endif

static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
{
	if (attrs[XFRMA_MARK])
		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
	else
		m->v = m->m = 0;

	return m->v & m->m;
}

static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
{
	int ret = 0;

	if (m->m | m->v)
		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
	return ret;
}

static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
{
	struct xfrm_mark *m = &x->props.smark;

	return (m->v & m->m) | (mark & ~m->m);
}
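/*
 * xfrm_smark_get() merges the state's output mark into the packet mark: the
 * bits covered by the state's mask come from the state, everything else is
 * preserved from the original mark.  Worked example: with smark.v = 0x0a,
 * smark.m = 0xff and an incoming mark of 0x12345678, the result is
 * (0x0a & 0xff) | (0x12345678 & ~0xff) == 0x1234560a.
 */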

static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
{
	int ret = 0;

	if (if_id)
		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
	return ret;
}

static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
				    unsigned int family)
{
	bool tunnel = false;

	switch(family) {
	case AF_INET:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
			tunnel = true;
		break;
	case AF_INET6:
		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
			tunnel = true;
		break;
	}
	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
		return -EINVAL;

	return 0;
}

extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];

struct xfrm_translator {
	/* Allocate frag_list and put compat translation there */
	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);

	/* Allocate nlmsg with 64-bit translation of the received 32-bit message */
	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
			int maxtype, const struct nla_policy *policy,
			struct netlink_ext_ack *extack);

	/* Translate 32-bit user_policy from sockptr */
	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);

	struct module *owner;
};

#if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
extern int xfrm_register_translator(struct xfrm_translator *xtr);
extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
extern struct xfrm_translator *xfrm_get_translator(void);
extern void xfrm_put_translator(struct xfrm_translator *xtr);
#else
static inline struct xfrm_translator *xfrm_get_translator(void)
{
	return NULL;
}
static inline void xfrm_put_translator(struct xfrm_translator *xtr)
{
}
#endif
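/*
 * The translator is provided by net/xfrm/xfrm_compat.c; consumers take a
 * module reference with xfrm_get_translator() and drop it again with
 * xfrm_put_translator().  Registration, roughly (hypothetical example_*
 * callbacks):
 *
 *	static struct xfrm_translator example_xtr = {
 *		.owner			   = THIS_MODULE,
 *		.alloc_compat		   = example_alloc_compat,
 *		.rcv_msg_compat		   = example_rcv_msg_compat,
 *		.xlate_user_policy_sockptr = example_xlate_user_policy,
 *	};
 *
 *	err = xfrm_register_translator(&example_xtr);
 *	...
 *	xfrm_unregister_translator(&example_xtr);
 */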

#if IS_ENABLED(CONFIG_IPV6)
static inline bool xfrm6_local_dontfrag(const struct sock *sk)
{
	int proto;

	if (!sk || sk->sk_family != AF_INET6)
		return false;

	proto = sk->sk_protocol;
	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
		return inet6_test_bit(DONTFRAG, sk);

	return false;
}
#endif

#if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
    (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))

extern struct metadata_dst __percpu *xfrm_bpf_md_dst;

int register_xfrm_interface_bpf(void);

#else

static inline int register_xfrm_interface_bpf(void)
{
	return 0;
}

#endif

#if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
int register_xfrm_state_bpf(void);
#else
static inline int register_xfrm_state_bpf(void)
{
	return 0;
}
#endif

int xfrm_nat_keepalive_init(unsigned short family);
void xfrm_nat_keepalive_fini(unsigned short family);
int xfrm_nat_keepalive_net_init(struct net *net);
int xfrm_nat_keepalive_net_fini(struct net *net);
void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);

#endif	/* _NET_XFRM_H */