1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _NET_XFRM_H
3 #define _NET_XFRM_H
4 
5 #include <linux/compiler.h>
6 #include <linux/xfrm.h>
7 #include <linux/spinlock.h>
8 #include <linux/list.h>
9 #include <linux/skbuff.h>
10 #include <linux/socket.h>
11 #include <linux/pfkeyv2.h>
12 #include <linux/ipsec.h>
13 #include <linux/in6.h>
14 #include <linux/mutex.h>
15 #include <linux/audit.h>
16 #include <linux/slab.h>
17 #include <linux/refcount.h>
18 #include <linux/sockptr.h>
19 
20 #include <net/sock.h>
21 #include <net/dst.h>
22 #include <net/inet_dscp.h>
23 #include <net/ip.h>
24 #include <net/route.h>
25 #include <net/ipv6.h>
26 #include <net/ip6_fib.h>
27 #include <net/flow.h>
28 #include <net/gro_cells.h>
29 
30 #include <linux/interrupt.h>
31 
32 #ifdef CONFIG_XFRM_STATISTICS
33 #include <net/snmp.h>
34 #endif
35 
36 #define XFRM_PROTO_ESP		50
37 #define XFRM_PROTO_AH		51
38 #define XFRM_PROTO_COMP		108
39 #define XFRM_PROTO_IPIP		4
40 #define XFRM_PROTO_IPV6		41
41 #define XFRM_PROTO_IPTFS	IPPROTO_AGGFRAG
42 #define XFRM_PROTO_ROUTING	IPPROTO_ROUTING
43 #define XFRM_PROTO_DSTOPTS	IPPROTO_DSTOPTS
44 
45 #define XFRM_ALIGN4(len)	(((len) + 3) & ~3)
46 #define XFRM_ALIGN8(len)	(((len) + 7) & ~7)
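/* Worked example (illustrative): these helpers round a length up to the next
 * multiple of 4 or 8, e.g. when sizing attribute payloads:
 *
 *	XFRM_ALIGN4(5)  == (5 + 3)  & ~3 == 8
 *	XFRM_ALIGN8(13) == (13 + 7) & ~7 == 16
 *	XFRM_ALIGN8(16) == 16		// already-aligned lengths are unchanged
 */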
47 #define MODULE_ALIAS_XFRM_MODE(family, encap) \
48 	MODULE_ALIAS("xfrm-mode-" __stringify(family) "-" __stringify(encap))
49 #define MODULE_ALIAS_XFRM_TYPE(family, proto) \
50 	MODULE_ALIAS("xfrm-type-" __stringify(family) "-" __stringify(proto))
51 #define MODULE_ALIAS_XFRM_OFFLOAD_TYPE(family, proto) \
52 	MODULE_ALIAS("xfrm-offload-" __stringify(family) "-" __stringify(proto))
53 
54 #ifdef CONFIG_XFRM_STATISTICS
55 #define XFRM_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
56 #define XFRM_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.xfrm_statistics, field, val)
57 #else
58 #define XFRM_INC_STATS(net, field)	((void)(net))
59 #define XFRM_ADD_STATS(net, field, val) ((void)(net))
60 #endif
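/* Usage sketch (assumes one of the LINUX_MIB_XFRM* counters from
 * <linux/snmp.h>, e.g. LINUX_MIB_XFRMINNOSTATES); when CONFIG_XFRM_STATISTICS
 * is disabled the macros still evaluate 'net' but drop the counter update:
 *
 *	XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
 *	XFRM_ADD_STATS(net, LINUX_MIB_XFRMFWDHDRERROR, 2);
 */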
61 
62 
63 /* Organization of SPD aka "XFRM rules"
64    ------------------------------------
65 
66    Basic objects:
67    - policy rule, struct xfrm_policy (=SPD entry)
68    - bundle of transformations, struct dst_entry == struct xfrm_dst (=SA bundle)
69    - instance of a transformer, struct xfrm_state (=SA)
70    - template to clone xfrm_state, struct xfrm_tmpl
71 
72    The SPD is organized as a hash table (for policies that meet the minimum address
73    prefix length setting, net->xfrm.policy_hthresh).  Other policies are stored in
74    lists, sorted into an rbtree ordered by destination and source address networks.
75    See net/xfrm/xfrm_policy.c for details.
76 
77    (To be compatible with existing pfkeyv2 implementations,
78    many rules with priority of 0x7fffffff are allowed to exist and
79    such rules are ordered in an unpredictable way, thanks to bsd folks.)
80 
81    If "action" is "block", then we prohibit the flow, otherwise:
82    if "xfrm_nr" is zero, the flow passes untransformed. Otherwise, the
83    policy entry has a list of up to XFRM_MAX_DEPTH transformations,
84    described by xfrm_tmpl templates. Each template is resolved
85    to a complete xfrm_state (see below) and we pack the bundle of
86    transformations into a dst_entry returned to the requester.
87 
88    dst -. xfrm  .-> xfrm_state #1
89     |---. child .-> dst -. xfrm .-> xfrm_state #2
90                      |---. child .-> dst -. xfrm .-> xfrm_state #3
91                                       |---. child .-> NULL
92 
93 
94    Resolution of xfrm_tmpl
95    -----------------------
96    Template contains:
97    1. ->mode		Mode: transport or tunnel
98    2. ->id.proto	Protocol: AH/ESP/IPCOMP
99    3. ->id.daddr	Remote tunnel endpoint, ignored for transport mode.
100       Q: allow to resolve security gateway?
101    4. ->id.spi          If not zero, static SPI.
102    5. ->saddr		Local tunnel endpoint, ignored for transport mode.
103    6. ->algos		List of allowed algos. Plain bitmask now.
104       Q: ealgos, aalgos, calgos. What a mess...
105    7. ->share		Sharing mode.
106       Q: how to implement private sharing mode? To add struct sock* to
107       flow id?
108 
109    Given this template, we search the SAD for entries with a matching
110    mode/proto/algo that are permitted by the selector.
111    If no appropriate entry is found, one is requested from the key manager.
112 
113    PROBLEMS:
114    Q: How to find all the bundles referring to a physical path for
115       PMTU discovery? It seems dst would have to contain a list of all its
116       parents... and we would descend into an infinite locking hierarchy disaster.
117       No! It is easier, we will not search for them, let them find us.
118       We add genid to each dst plus pointer to genid of raw IP route,
119       pmtu disc will update pmtu on raw IP route and increase its genid.
120       dst_check() will see this for top level and trigger resyncing
121       metrics. Plus, it will be made via sk->sk_dst_cache. Solved.
122  */
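/* Illustrative sketch only (locking and error handling omitted): resolving a
 * policy's templates into states roughly takes the shape below, using
 * xfrm_state_find() as declared later in this header; pol, fl, family and
 * if_id come from the lookup context.  The real logic lives in
 * net/xfrm/xfrm_policy.c and handles transport-mode addresses differently.
 *
 *	struct xfrm_state *xv[XFRM_MAX_DEPTH];
 *	int i, err;
 *
 *	for (i = 0; i < pol->xfrm_nr; i++) {
 *		struct xfrm_tmpl *tmpl = &pol->xfrm_vec[i];
 *
 *		xv[i] = xfrm_state_find(&tmpl->id.daddr, &tmpl->saddr, fl,
 *					tmpl, pol, &err, family, if_id);
 *		if (!xv[i])
 *			return err;	// e.g. -EAGAIN: wait for the key manager
 *	}
 */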
123 
124 struct xfrm_state_walk {
125 	struct list_head	all;
126 	u8			state;
127 	u8			dying;
128 	u8			proto;
129 	u32			seq;
130 	struct xfrm_address_filter *filter;
131 };
132 
133 enum {
134 	XFRM_DEV_OFFLOAD_IN = 1,
135 	XFRM_DEV_OFFLOAD_OUT,
136 	XFRM_DEV_OFFLOAD_FWD,
137 };
138 
139 enum {
140 	XFRM_DEV_OFFLOAD_UNSPECIFIED,
141 	XFRM_DEV_OFFLOAD_CRYPTO,
142 	XFRM_DEV_OFFLOAD_PACKET,
143 };
144 
145 enum {
146 	XFRM_DEV_OFFLOAD_FLAG_ACQ = 1,
147 };
148 
149 struct xfrm_dev_offload {
150 	/* The device for this offload.
151 	 * Device drivers should not use this directly, as that will prevent
152	 * them from working with a bonding device. Instead, the device passed
153 	 * to the add/delete callbacks should be used.
154 	 */
155 	struct net_device	*dev;
156 	netdevice_tracker	dev_tracker;
157 	/* This is a private pointer used by the bonding driver (and eventually
158 	 * should be moved there). Device drivers should not use it.
159 	 * Protected by xfrm_state.lock AND bond.ipsec_lock in most cases,
160 	 * except in the .xdo_dev_state_del() flow, where only xfrm_state.lock
161 	 * is held.
162 	 */
163 	struct net_device	*real_dev;
164 	unsigned long		offload_handle;
165 	u8			dir : 2;
166 	u8			type : 2;
167 	u8			flags : 2;
168 };
169 
170 struct xfrm_mode {
171 	u8 encap;
172 	u8 family;
173 	u8 flags;
174 };
175 
176 /* Flags for xfrm_mode. */
177 enum {
178 	XFRM_MODE_FLAG_TUNNEL = 1,
179 };
180 
181 enum xfrm_replay_mode {
182 	XFRM_REPLAY_MODE_LEGACY,
183 	XFRM_REPLAY_MODE_BMP,
184 	XFRM_REPLAY_MODE_ESN,
185 };
186 
187 /* Full description of state of transformer. */
188 struct xfrm_state {
189 	possible_net_t		xs_net;
190 	union {
191 		struct hlist_node	gclist;
192 		struct hlist_node	bydst;
193 	};
194 	union {
195 		struct hlist_node	dev_gclist;
196 		struct hlist_node	bysrc;
197 	};
198 	struct hlist_node	byspi;
199 	struct hlist_node	byseq;
200 	struct hlist_node	state_cache;
201 	struct hlist_node	state_cache_input;
202 
203 	refcount_t		refcnt;
204 	spinlock_t		lock;
205 
206 	u32			pcpu_num;
207 	struct xfrm_id		id;
208 	struct xfrm_selector	sel;
209 	struct xfrm_mark	mark;
210 	u32			if_id;
211 	u32			tfcpad;
212 
213 	u32			genid;
214 
215 	/* Key manager bits */
216 	struct xfrm_state_walk	km;
217 
218 	/* Parameters of this state. */
219 	struct {
220 		u32		reqid;
221 		u8		mode;
222 		u8		replay_window;
223 		u8		aalgo, ealgo, calgo;
224 		u8		flags;
225 		u16		family;
226 		xfrm_address_t	saddr;
227 		int		header_len;
228 		int		enc_hdr_len;
229 		int		trailer_len;
230 		u32		extra_flags;
231 		struct xfrm_mark	smark;
232 	} props;
233 
234 	struct xfrm_lifetime_cfg lft;
235 
236 	/* Data for transformer */
237 	struct xfrm_algo_auth	*aalg;
238 	struct xfrm_algo	*ealg;
239 	struct xfrm_algo	*calg;
240 	struct xfrm_algo_aead	*aead;
241 	const char		*geniv;
242 
243 	/* mapping change rate limiting */
244 	__be16 new_mapping_sport;
245 	u32 new_mapping;	/* seconds */
246 	u32 mapping_maxage;	/* seconds for input SA */
247 
248 	/* Data for encapsulator */
249 	struct xfrm_encap_tmpl	*encap;
250 
251 	/* NAT keepalive */
252 	u32			nat_keepalive_interval; /* seconds */
253 	time64_t		nat_keepalive_expiration;
254 
255 	/* Data for care-of address */
256 	xfrm_address_t	*coaddr;
257 
258 	/* IPComp needs an IPIP tunnel for handling uncompressed packets */
259 	struct xfrm_state	*tunnel;
260 
261 	/* If a tunnel, number of users + 1 */
262 	atomic_t		tunnel_users;
263 
264 	/* State for replay detection */
265 	struct xfrm_replay_state replay;
266 	struct xfrm_replay_state_esn *replay_esn;
267 
268 	/* Replay detection state at the time we sent the last notification */
269 	struct xfrm_replay_state preplay;
270 	struct xfrm_replay_state_esn *preplay_esn;
271 
272 	/* replay detection mode */
273 	enum xfrm_replay_mode    repl_mode;
274	/* Internal flags; at the moment these only hold state for a delayed
275	 * aevent.
276	 */
277 	u32			xflags;
278 
279 	/* Replay detection notification settings */
280 	u32			replay_maxage;
281 	u32			replay_maxdiff;
282 
283 	/* Replay detection notification timer */
284 	struct timer_list	rtimer;
285 
286 	/* Statistics */
287 	struct xfrm_stats	stats;
288 
289 	struct xfrm_lifetime_cur curlft;
290 	struct hrtimer		mtimer;
291 
292 	struct xfrm_dev_offload xso;
293 
294 	/* used to fix curlft->add_time when changing date */
295 	long		saved_tmo;
296 
297 	/* Last used time */
298 	time64_t		lastused;
299 
300 	struct page_frag xfrag;
301 
302 	/* Reference to data common to all the instances of this
303 	 * transformer. */
304 	const struct xfrm_type	*type;
305 	struct xfrm_mode	inner_mode;
306 	struct xfrm_mode	inner_mode_iaf;
307 	struct xfrm_mode	outer_mode;
308 
309 	const struct xfrm_type_offload	*type_offload;
310 
311 	/* Security context */
312 	struct xfrm_sec_ctx	*security;
313 
314 	/* Private data of this transformer, format is opaque,
315 	 * interpreted by xfrm_type methods. */
316 	void			*data;
317 	u8			dir;
318 
319 	const struct xfrm_mode_cbs	*mode_cbs;
320 	void				*mode_data;
321 };
322 
323 static inline struct net *xs_net(struct xfrm_state *x)
324 {
325 	return read_pnet(&x->xs_net);
326 }
327 
328 /* xflags - make enum if more show up */
329 #define XFRM_TIME_DEFER	1
330 #define XFRM_SOFT_EXPIRE 2
331 
332 enum {
333 	XFRM_STATE_VOID,
334 	XFRM_STATE_ACQ,
335 	XFRM_STATE_VALID,
336 	XFRM_STATE_ERROR,
337 	XFRM_STATE_EXPIRED,
338 	XFRM_STATE_DEAD
339 };
340 
341 /* callback structure passed from either netlink or pfkey */
342 struct km_event {
343 	union {
344 		u32 hard;
345 		u32 proto;
346 		u32 byid;
347 		u32 aevent;
348 		u32 type;
349 	} data;
350 
351 	u32	seq;
352 	u32	portid;
353 	u32	event;
354 	struct net *net;
355 };
356 
357 struct xfrm_if_decode_session_result {
358 	struct net *net;
359 	u32 if_id;
360 };
361 
362 struct xfrm_if_cb {
363 	bool (*decode_session)(struct sk_buff *skb,
364 			       unsigned short family,
365 			       struct xfrm_if_decode_session_result *res);
366 };
367 
368 void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
369 void xfrm_if_unregister_cb(void);
370 
371 struct xfrm_dst_lookup_params {
372 	struct net *net;
373 	dscp_t dscp;
374 	int oif;
375 	xfrm_address_t *saddr;
376 	xfrm_address_t *daddr;
377 	u32 mark;
378 	__u8 ipproto;
379 	union flowi_uli uli;
380 };
381 
382 struct net_device;
383 struct xfrm_type;
384 struct xfrm_dst;
385 struct xfrm_policy_afinfo {
386 	struct dst_ops		*dst_ops;
387 	struct dst_entry	*(*dst_lookup)(const struct xfrm_dst_lookup_params *params);
388 	int			(*get_saddr)(xfrm_address_t *saddr,
389 					     const struct xfrm_dst_lookup_params *params);
390 	int			(*fill_dst)(struct xfrm_dst *xdst,
391 					    struct net_device *dev,
392 					    const struct flowi *fl);
393 	struct dst_entry	*(*blackhole_route)(struct net *net, struct dst_entry *orig);
394 };
395 
396 int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int family);
397 void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
398 void km_policy_notify(struct xfrm_policy *xp, int dir,
399 		      const struct km_event *c);
400 void km_state_notify(struct xfrm_state *x, const struct km_event *c);
401 
402 struct xfrm_tmpl;
403 int km_query(struct xfrm_state *x, struct xfrm_tmpl *t,
404 	     struct xfrm_policy *pol);
405 void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
406 int __xfrm_state_delete(struct xfrm_state *x);
407 
408 struct xfrm_state_afinfo {
409 	u8				family;
410 	u8				proto;
411 
412 	const struct xfrm_type_offload *type_offload_esp;
413 
414 	const struct xfrm_type		*type_esp;
415 	const struct xfrm_type		*type_ipip;
416 	const struct xfrm_type		*type_ipip6;
417 	const struct xfrm_type		*type_comp;
418 	const struct xfrm_type		*type_ah;
419 	const struct xfrm_type		*type_routing;
420 	const struct xfrm_type		*type_dstopts;
421 
422 	int			(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
423 	int			(*transport_finish)(struct sk_buff *skb,
424 						    int async);
425 	void			(*local_error)(struct sk_buff *skb, u32 mtu);
426 };
427 
428 int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
429 int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
430 struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
431 struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
432 
433 struct xfrm_input_afinfo {
434 	u8			family;
435 	bool			is_ipip;
436 	int			(*callback)(struct sk_buff *skb, u8 protocol,
437 					    int err);
438 };
439 
440 int xfrm_input_register_afinfo(const struct xfrm_input_afinfo *afinfo);
441 int xfrm_input_unregister_afinfo(const struct xfrm_input_afinfo *afinfo);
442 
443 void xfrm_flush_gc(void);
444 void xfrm_state_delete_tunnel(struct xfrm_state *x);
445 
446 struct xfrm_type {
447 	struct module		*owner;
448 	u8			proto;
449 	u8			flags;
450 #define XFRM_TYPE_NON_FRAGMENT	1
451 #define XFRM_TYPE_REPLAY_PROT	2
452 #define XFRM_TYPE_LOCAL_COADDR	4
453 #define XFRM_TYPE_REMOTE_COADDR	8
454 
455 	int			(*init_state)(struct xfrm_state *x,
456 					      struct netlink_ext_ack *extack);
457 	void			(*destructor)(struct xfrm_state *);
458 	int			(*input)(struct xfrm_state *, struct sk_buff *skb);
459 	int			(*output)(struct xfrm_state *, struct sk_buff *pskb);
460 	int			(*reject)(struct xfrm_state *, struct sk_buff *,
461 					  const struct flowi *);
462 };
463 
464 int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
465 void xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
466 
467 struct xfrm_type_offload {
468 	struct module	*owner;
469 	u8		proto;
470 	void		(*encap)(struct xfrm_state *, struct sk_buff *pskb);
471 	int		(*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
472 	int		(*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
473 };
474 
475 int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
476 void xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
477 void xfrm_set_type_offload(struct xfrm_state *x);
478 static inline void xfrm_unset_type_offload(struct xfrm_state *x)
479 {
480 	if (!x->type_offload)
481 		return;
482 
483 	module_put(x->type_offload->owner);
484 	x->type_offload = NULL;
485 }
486 
487 /**
488  * struct xfrm_mode_cbs - XFRM mode callbacks
489  * @owner: module owner or NULL
490  * @init_state: Add/init mode specific state in `xfrm_state *x`
491  * @clone_state: Copy mode specific values from `orig` to new state `x`
492  * @destroy_state: Cleanup mode specific state from `xfrm_state *x`
493  * @user_init: Process mode specific netlink attributes from user
494  * @copy_to_user: Add netlink attributes to `attrs` based on state in `x`
495  * @sa_len: Return space required to store mode specific netlink attributes
496  * @get_inner_mtu: Return available payload space after removing encap overhead
497  * @input: Process received packet from SA using mode
498  * @output: Output given packet using mode
499  * @prepare_output: Add mode specific encapsulation to packet in skb. On return
500  *	`transport_header` should point at ESP header, `network_header` should
501  *	point at the outer IP header and `mac_header` should point at the
502  *	protocol/nexthdr field of the outer IP header.
503  *
504  * One should examine and understand the specific uses of these callbacks in
505  * xfrm for further detail on how and when these functions are called. RTSL.
506  */
507 struct xfrm_mode_cbs {
508 	struct module	*owner;
509 	int	(*init_state)(struct xfrm_state *x);
510 	int	(*clone_state)(struct xfrm_state *x, struct xfrm_state *orig);
511 	void	(*destroy_state)(struct xfrm_state *x);
512 	int	(*user_init)(struct net *net, struct xfrm_state *x,
513 			     struct nlattr **attrs,
514 			     struct netlink_ext_ack *extack);
515 	int	(*copy_to_user)(struct xfrm_state *x, struct sk_buff *skb);
516 	unsigned int (*sa_len)(const struct xfrm_state *x);
517 	u32	(*get_inner_mtu)(struct xfrm_state *x, int outer_mtu);
518 	int	(*input)(struct xfrm_state *x, struct sk_buff *skb);
519 	int	(*output)(struct net *net, struct sock *sk, struct sk_buff *skb);
520 	int	(*prepare_output)(struct xfrm_state *x, struct sk_buff *skb);
521 };
522 
523 int xfrm_register_mode_cbs(u8 mode, const struct xfrm_mode_cbs *mode_cbs);
524 void xfrm_unregister_mode_cbs(u8 mode);
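/* Registration sketch (illustrative): a mode module fills in the callbacks it
 * implements and registers them for its mode number.  The "iptfs_*" names
 * below are placeholders; XFRM_MODE_IPTFS is assumed to be a valid mode
 * constant on kernels that define XFRM_PROTO_IPTFS above.
 *
 *	static const struct xfrm_mode_cbs iptfs_mode_cbs = {
 *		.owner		= THIS_MODULE,
 *		.init_state	= iptfs_init_state,
 *		.destroy_state	= iptfs_destroy_state,
 *		.input		= iptfs_input,
 *		.output		= iptfs_output,
 *		.prepare_output	= iptfs_prepare_output,
 *	};
 *
 *	err = xfrm_register_mode_cbs(XFRM_MODE_IPTFS, &iptfs_mode_cbs);
 *	...
 *	xfrm_unregister_mode_cbs(XFRM_MODE_IPTFS);	// on module unload
 */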
525 
526 static inline int xfrm_af2proto(unsigned int family)
527 {
528 	switch(family) {
529 	case AF_INET:
530 		return IPPROTO_IPIP;
531 	case AF_INET6:
532 		return IPPROTO_IPV6;
533 	default:
534 		return 0;
535 	}
536 }
537 
538 static inline const struct xfrm_mode *xfrm_ip2inner_mode(struct xfrm_state *x, int ipproto)
539 {
540 	if ((ipproto == IPPROTO_IPIP && x->props.family == AF_INET) ||
541 	    (ipproto == IPPROTO_IPV6 && x->props.family == AF_INET6))
542 		return &x->inner_mode;
543 	else
544 		return &x->inner_mode_iaf;
545 }
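/* Worked example for the helpers above:
 *
 *	// x->props.family == AF_INET
 *	xfrm_af2proto(AF_INET);			// IPPROTO_IPIP (4)
 *	xfrm_ip2inner_mode(x, IPPROTO_IPIP);	// &x->inner_mode
 *	xfrm_ip2inner_mode(x, IPPROTO_IPV6);	// &x->inner_mode_iaf (IPv6-in-IPv4)
 */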
546 
547 struct xfrm_tmpl {
548 /* id in template is interpreted as:
549  * daddr - destination of tunnel, may be zero for transport mode.
550  * spi   - zero to acquire spi. Not zero if spi is static, then
551  *	   daddr must be fixed too.
552  * proto - AH/ESP/IPCOMP
553  */
554 	struct xfrm_id		id;
555 
556 /* Source address of tunnel. Ignored if it is not a tunnel. */
557 	xfrm_address_t		saddr;
558 
559 	unsigned short		encap_family;
560 
561 	u32			reqid;
562 
563 /* Mode: transport, tunnel etc. */
564 	u8			mode;
565 
566 /* Sharing mode: unique, this session only, this user only etc. */
567 	u8			share;
568 
569 /* May skip this transformation if no SA is found */
570 	u8			optional;
571 
572 /* Skip aalgos/ealgos/calgos checks. */
573 	u8			allalgs;
574 
575 /* Bit mask of algos allowed for acquisition */
576 	u32			aalgos;
577 	u32			ealgos;
578 	u32			calgos;
579 };
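/* Illustrative initializer for the struct above: tunnel-mode ESP towards a
 * fixed peer, SPI left zero so it is acquired dynamically (the address is
 * from the 192.0.2.0/24 documentation range):
 *
 *	struct xfrm_tmpl t = {
 *		.id.proto	= IPPROTO_ESP,
 *		.id.daddr.a4	= cpu_to_be32(0xc0000201),	// 192.0.2.1
 *		.encap_family	= AF_INET,
 *		.mode		= XFRM_MODE_TUNNEL,
 *		.reqid		= 1,
 *	};
 */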
580 
581 #define XFRM_MAX_DEPTH		6
582 #define XFRM_MAX_OFFLOAD_DEPTH	1
583 
584 struct xfrm_policy_walk_entry {
585 	struct list_head	all;
586 	u8			dead;
587 };
588 
589 struct xfrm_policy_walk {
590 	struct xfrm_policy_walk_entry walk;
591 	u8 type;
592 	u32 seq;
593 };
594 
595 struct xfrm_policy_queue {
596 	struct sk_buff_head	hold_queue;
597 	struct timer_list	hold_timer;
598 	unsigned long		timeout;
599 };
600 
601 /**
602  *	struct xfrm_policy - xfrm policy
603  *	@xp_net: network namespace the policy lives in
604  *	@bydst: hlist node for SPD hash table or rbtree list
605  *	@byidx: hlist node for index hash table
606  *	@state_cache_list: hlist head for policy cached xfrm states
607  *	@lock: serialize changes to policy structure members
608  *	@refcnt: reference count, freed once it reaches 0
609  *	@pos: kernel internal tie-breaker to determine age of policy
610  *	@timer: policy lifetime expiration timer
611  *	@genid: generation, used to invalidate old policies
612  *	@priority: priority, set by userspace
613  *	@index:  policy index (autogenerated)
614  *	@if_id: virtual xfrm interface id
615  *	@mark: packet mark
616  *	@selector: selector
617  *	@lft: lifetime configuration data
618  *	@curlft: lifetime state
619  *	@walk: list head on pernet policy list
620  *	@polq: queue to hold packets while an acquire operation is in progress
621  *	@bydst_reinsert: policy tree node needs to be merged
622  *	@type: XFRM_POLICY_TYPE_MAIN or _SUB
623  *	@action: XFRM_POLICY_ALLOW or _BLOCK
624  *	@flags: XFRM_POLICY_LOCALOK, XFRM_POLICY_ICMP
625  *	@xfrm_nr: number of used templates in @xfrm_vec
626  *	@family: protocol family
627  *	@security: SELinux security label
628  *	@xfrm_vec: array of templates to resolve state
629  *	@rcu: rcu head, used to defer memory release
630  *	@xdo: hardware offload state
631  */
632 struct xfrm_policy {
633 	possible_net_t		xp_net;
634 	struct hlist_node	bydst;
635 	struct hlist_node	byidx;
636 
637 	struct hlist_head	state_cache_list;
638 
639	/* This lock protects all members except for the list entry. */
640 	rwlock_t		lock;
641 	refcount_t		refcnt;
642 	u32			pos;
643 	struct timer_list	timer;
644 
645 	atomic_t		genid;
646 	u32			priority;
647 	u32			index;
648 	u32			if_id;
649 	struct xfrm_mark	mark;
650 	struct xfrm_selector	selector;
651 	struct xfrm_lifetime_cfg lft;
652 	struct xfrm_lifetime_cur curlft;
653 	struct xfrm_policy_walk_entry walk;
654 	struct xfrm_policy_queue polq;
655 	bool                    bydst_reinsert;
656 	u8			type;
657 	u8			action;
658 	u8			flags;
659 	u8			xfrm_nr;
660 	u16			family;
661 	struct xfrm_sec_ctx	*security;
662 	struct xfrm_tmpl       	xfrm_vec[XFRM_MAX_DEPTH];
663 	struct rcu_head		rcu;
664 
665 	struct xfrm_dev_offload xdo;
666 };
667 
668 static inline struct net *xp_net(const struct xfrm_policy *xp)
669 {
670 	return read_pnet(&xp->xp_net);
671 }
672 
673 struct xfrm_kmaddress {
674 	xfrm_address_t          local;
675 	xfrm_address_t          remote;
676 	u32			reserved;
677 	u16			family;
678 };
679 
680 struct xfrm_migrate {
681 	xfrm_address_t		old_daddr;
682 	xfrm_address_t		old_saddr;
683 	xfrm_address_t		new_daddr;
684 	xfrm_address_t		new_saddr;
685 	u8			proto;
686 	u8			mode;
687 	u16			reserved;
688 	u32			reqid;
689 	u16			old_family;
690 	u16			new_family;
691 };
692 
693 #define XFRM_KM_TIMEOUT                30
694 /* what happened */
695 #define XFRM_REPLAY_UPDATE	XFRM_AE_CR
696 #define XFRM_REPLAY_TIMEOUT	XFRM_AE_CE
697 
698 /* default aevent timeout in units of 100ms */
699 #define XFRM_AE_ETIME			10
700 /* Async Event timer multiplier */
701 #define XFRM_AE_ETH_M			10
702 /* default seq threshold size */
703 #define XFRM_AE_SEQT_SIZE		2
704 
705 struct xfrm_mgr {
706 	struct list_head	list;
707 	int			(*notify)(struct xfrm_state *x, const struct km_event *c);
708 	int			(*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
709 	struct xfrm_policy	*(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
710 	int			(*new_mapping)(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
711 	int			(*notify_policy)(struct xfrm_policy *x, int dir, const struct km_event *c);
712 	int			(*report)(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address_t *addr);
713 	int			(*migrate)(const struct xfrm_selector *sel,
714 					   u8 dir, u8 type,
715 					   const struct xfrm_migrate *m,
716 					   int num_bundles,
717 					   const struct xfrm_kmaddress *k,
718 					   const struct xfrm_encap_tmpl *encap);
719 	bool			(*is_alive)(const struct km_event *c);
720 };
721 
722 void xfrm_register_km(struct xfrm_mgr *km);
723 void xfrm_unregister_km(struct xfrm_mgr *km);
724 
725 struct xfrm_tunnel_skb_cb {
726 	union {
727 		struct inet_skb_parm h4;
728 		struct inet6_skb_parm h6;
729 	} header;
730 
731 	union {
732 		struct ip_tunnel *ip4;
733 		struct ip6_tnl *ip6;
734 	} tunnel;
735 };
736 
737 #define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
738 
739 /*
740  * This structure is used for the duration where packets are being
741  * transformed by IPsec.  As soon as the packet leaves IPsec the
742  * area beyond the generic IP part may be overwritten.
743  */
744 struct xfrm_skb_cb {
745 	struct xfrm_tunnel_skb_cb header;
746 
747         /* Sequence number for replay protection. */
748 	union {
749 		struct {
750 			__u32 low;
751 			__u32 hi;
752 		} output;
753 		struct {
754 			__be32 low;
755 			__be32 hi;
756 		} input;
757 	} seq;
758 };
759 
760 #define XFRM_SKB_CB(__skb) ((struct xfrm_skb_cb *)&((__skb)->cb[0]))
761 
762 /*
763  * This structure is used by the afinfo prepare_input/prepare_output functions
764  * to transmit header information to the mode input/output functions.
765  */
766 struct xfrm_mode_skb_cb {
767 	struct xfrm_tunnel_skb_cb header;
768 
769	/* Copied from the header for IPv4; for IPv6 these are set to zero and DF respectively. */
770 	__be16 id;
771 	__be16 frag_off;
772 
773 	/* IP header length (excluding options or extension headers). */
774 	u8 ihl;
775 
776 	/* TOS for IPv4, class for IPv6. */
777 	u8 tos;
778 
779	/* TTL for IPv4, hop limit for IPv6. */
780 	u8 ttl;
781 
782 	/* Protocol for IPv4, NH for IPv6. */
783 	u8 protocol;
784 
785 	/* Option length for IPv4, zero for IPv6. */
786 	u8 optlen;
787 
788 	/* Used by IPv6 only, zero for IPv4. */
789 	u8 flow_lbl[3];
790 };
791 
792 #define XFRM_MODE_SKB_CB(__skb) ((struct xfrm_mode_skb_cb *)&((__skb)->cb[0]))
793 
794 /*
795  * This structure is used by the input processing to locate the SPI and
796  * related information.
797  */
798 struct xfrm_spi_skb_cb {
799 	struct xfrm_tunnel_skb_cb header;
800 
801 	unsigned int daddroff;
802 	unsigned int family;
803 	__be32 seq;
804 };
805 
806 #define XFRM_SPI_SKB_CB(__skb) ((struct xfrm_spi_skb_cb *)&((__skb)->cb[0]))
807 
808 #ifdef CONFIG_AUDITSYSCALL
809 static inline struct audit_buffer *xfrm_audit_start(const char *op)
810 {
811 	struct audit_buffer *audit_buf = NULL;
812 
813 	if (audit_enabled == AUDIT_OFF)
814 		return NULL;
815 	audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
816 				    AUDIT_MAC_IPSEC_EVENT);
817 	if (audit_buf == NULL)
818 		return NULL;
819 	audit_log_format(audit_buf, "op=%s", op);
820 	return audit_buf;
821 }
822 
823 static inline void xfrm_audit_helper_usrinfo(bool task_valid,
824 					     struct audit_buffer *audit_buf)
825 {
826 	const unsigned int auid = from_kuid(&init_user_ns, task_valid ?
827 					    audit_get_loginuid(current) :
828 					    INVALID_UID);
829 	const unsigned int ses = task_valid ? audit_get_sessionid(current) :
830 		AUDIT_SID_UNSET;
831 
832 	audit_log_format(audit_buf, " auid=%u ses=%u", auid, ses);
833 	audit_log_task_context(audit_buf);
834 }
835 
836 void xfrm_audit_policy_add(struct xfrm_policy *xp, int result, bool task_valid);
837 void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
838 			      bool task_valid);
839 void xfrm_audit_state_add(struct xfrm_state *x, int result, bool task_valid);
840 void xfrm_audit_state_delete(struct xfrm_state *x, int result, bool task_valid);
841 void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
842 				      struct sk_buff *skb);
843 void xfrm_audit_state_replay(struct xfrm_state *x, struct sk_buff *skb,
844 			     __be32 net_seq);
845 void xfrm_audit_state_notfound_simple(struct sk_buff *skb, u16 family);
846 void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family, __be32 net_spi,
847 			       __be32 net_seq);
848 void xfrm_audit_state_icvfail(struct xfrm_state *x, struct sk_buff *skb,
849 			      u8 proto);
850 #else
851 
852 static inline void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
853 					 bool task_valid)
854 {
855 }
856 
857 static inline void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
858 					    bool task_valid)
859 {
860 }
861 
862 static inline void xfrm_audit_state_add(struct xfrm_state *x, int result,
863 					bool task_valid)
864 {
865 }
866 
867 static inline void xfrm_audit_state_delete(struct xfrm_state *x, int result,
868 					   bool task_valid)
869 {
870 }
871 
872 static inline void xfrm_audit_state_replay_overflow(struct xfrm_state *x,
873 					     struct sk_buff *skb)
874 {
875 }
876 
877 static inline void xfrm_audit_state_replay(struct xfrm_state *x,
878 					   struct sk_buff *skb, __be32 net_seq)
879 {
880 }
881 
882 static inline void xfrm_audit_state_notfound_simple(struct sk_buff *skb,
883 				      u16 family)
884 {
885 }
886 
887 static inline void xfrm_audit_state_notfound(struct sk_buff *skb, u16 family,
888 				      __be32 net_spi, __be32 net_seq)
889 {
890 }
891 
892 static inline void xfrm_audit_state_icvfail(struct xfrm_state *x,
893 				     struct sk_buff *skb, u8 proto)
894 {
895 }
896 #endif /* CONFIG_AUDITSYSCALL */
897 
898 static inline void xfrm_pol_hold(struct xfrm_policy *policy)
899 {
900 	if (likely(policy != NULL))
901 		refcount_inc(&policy->refcnt);
902 }
903 
904 void xfrm_policy_destroy(struct xfrm_policy *policy);
905 
906 static inline void xfrm_pol_put(struct xfrm_policy *policy)
907 {
908 	if (refcount_dec_and_test(&policy->refcnt))
909 		xfrm_policy_destroy(policy);
910 }
911 
912 static inline void xfrm_pols_put(struct xfrm_policy **pols, int npols)
913 {
914 	int i;
915 	for (i = npols - 1; i >= 0; --i)
916 		xfrm_pol_put(pols[i]);
917 }
918 
919 void __xfrm_state_destroy(struct xfrm_state *, bool);
920 
921 static inline void __xfrm_state_put(struct xfrm_state *x)
922 {
923 	refcount_dec(&x->refcnt);
924 }
925 
926 static inline void xfrm_state_put(struct xfrm_state *x)
927 {
928 	if (refcount_dec_and_test(&x->refcnt))
929 		__xfrm_state_destroy(x, false);
930 }
931 
932 static inline void xfrm_state_put_sync(struct xfrm_state *x)
933 {
934 	if (refcount_dec_and_test(&x->refcnt))
935 		__xfrm_state_destroy(x, true);
936 }
937 
938 static inline void xfrm_state_hold(struct xfrm_state *x)
939 {
940 	refcount_inc(&x->refcnt);
941 }
942 
943 static inline bool addr_match(const void *token1, const void *token2,
944 			      unsigned int prefixlen)
945 {
946 	const __be32 *a1 = token1;
947 	const __be32 *a2 = token2;
948 	unsigned int pdw;
949 	unsigned int pbi;
950 
951 	pdw = prefixlen >> 5;	  /* num of whole u32 in prefix */
952 	pbi = prefixlen &  0x1f;  /* num of bits in incomplete u32 in prefix */
953 
954 	if (pdw)
955 		if (memcmp(a1, a2, pdw << 2))
956 			return false;
957 
958 	if (pbi) {
959 		__be32 mask;
960 
961 		mask = htonl((0xffffffff) << (32 - pbi));
962 
963 		if ((a1[pdw] ^ a2[pdw]) & mask)
964 			return false;
965 	}
966 
967 	return true;
968 }
969 
970 static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
971 {
972 	/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
973 	if (sizeof(long) == 4 && prefixlen == 0)
974 		return true;
975 	return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
976 }
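/* Worked example: with prefixlen 24, addr4_match() compares only the upper
 * 24 bits, so 192.0.2.1 and 192.0.2.200 match while 192.0.2.1 and
 * 198.51.100.1 do not:
 *
 *	addr4_match(cpu_to_be32(0xc0000201), cpu_to_be32(0xc00002c8), 24);  // true
 *	addr4_match(cpu_to_be32(0xc0000201), cpu_to_be32(0xc6336401), 24);  // false
 */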
977 
978 static __inline__
979 __be16 xfrm_flowi_sport(const struct flowi *fl, const union flowi_uli *uli)
980 {
981 	__be16 port;
982 	switch(fl->flowi_proto) {
983 	case IPPROTO_TCP:
984 	case IPPROTO_UDP:
985 	case IPPROTO_UDPLITE:
986 	case IPPROTO_SCTP:
987 		port = uli->ports.sport;
988 		break;
989 	case IPPROTO_ICMP:
990 	case IPPROTO_ICMPV6:
991 		port = htons(uli->icmpt.type);
992 		break;
993 	case IPPROTO_MH:
994 		port = htons(uli->mht.type);
995 		break;
996 	case IPPROTO_GRE:
997 		port = htons(ntohl(uli->gre_key) >> 16);
998 		break;
999 	default:
1000 		port = 0;	/*XXX*/
1001 	}
1002 	return port;
1003 }
1004 
1005 static __inline__
1006 __be16 xfrm_flowi_dport(const struct flowi *fl, const union flowi_uli *uli)
1007 {
1008 	__be16 port;
1009 	switch(fl->flowi_proto) {
1010 	case IPPROTO_TCP:
1011 	case IPPROTO_UDP:
1012 	case IPPROTO_UDPLITE:
1013 	case IPPROTO_SCTP:
1014 		port = uli->ports.dport;
1015 		break;
1016 	case IPPROTO_ICMP:
1017 	case IPPROTO_ICMPV6:
1018 		port = htons(uli->icmpt.code);
1019 		break;
1020 	case IPPROTO_GRE:
1021 		port = htons(ntohl(uli->gre_key) & 0xffff);
1022 		break;
1023 	default:
1024 		port = 0;	/*XXX*/
1025 	}
1026 	return port;
1027 }
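/* Example: for IPPROTO_GRE the 32-bit key is split across the two "port"
 * selectors; for ICMP the type/code pair is used the same way:
 *
 *	uli->gre_key == htonl(0x12345678)
 *		xfrm_flowi_sport(fl, uli) == htons(0x1234)
 *		xfrm_flowi_dport(fl, uli) == htons(0x5678)
 */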
1028 
1029 bool xfrm_selector_match(const struct xfrm_selector *sel,
1030 			 const struct flowi *fl, unsigned short family);
1031 
1032 #ifdef CONFIG_SECURITY_NETWORK_XFRM
1033 /*	If neither has a context --> match
1034  * 	Otherwise, both must have a context and the sids, doi, alg must match
1035  */
1036 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
1037 {
1038 	return ((!s1 && !s2) ||
1039 		(s1 && s2 &&
1040 		 (s1->ctx_sid == s2->ctx_sid) &&
1041 		 (s1->ctx_doi == s2->ctx_doi) &&
1042 		 (s1->ctx_alg == s2->ctx_alg)));
1043 }
1044 #else
1045 static inline bool xfrm_sec_ctx_match(struct xfrm_sec_ctx *s1, struct xfrm_sec_ctx *s2)
1046 {
1047 	return true;
1048 }
1049 #endif
1050 
1051 /* A struct encoding a bundle of transformations to apply to some set of flows.
1052  *
1053  * xdst->child points to the next element of the bundle.
1054  * dst->xfrm points to an instance of a transformer.
1055  *
1056  * Due to unfortunate limitations of the current routing cache, which we
1057  * have no time to fix, it mirrors struct rtable and is bound to the same
1058  * routing key, including saddr and daddr. However, we can have many
1059  * bundles differing by session id. All the bundles grow from a parent
1060  * policy rule.
1061  */
1062 struct xfrm_dst {
1063 	union {
1064 		struct dst_entry	dst;
1065 		struct rtable		rt;
1066 		struct rt6_info		rt6;
1067 	} u;
1068 	struct dst_entry *route;
1069 	struct dst_entry *child;
1070 	struct dst_entry *path;
1071 	struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX];
1072 	int num_pols, num_xfrms;
1073 	u32 xfrm_genid;
1074 	u32 policy_genid;
1075 	u32 route_mtu_cached;
1076 	u32 child_mtu_cached;
1077 	u32 route_cookie;
1078 	u32 path_cookie;
1079 };
1080 
1081 static inline struct dst_entry *xfrm_dst_path(const struct dst_entry *dst)
1082 {
1083 #ifdef CONFIG_XFRM
1084 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1085 		const struct xfrm_dst *xdst = (const struct xfrm_dst *) dst;
1086 
1087 		return xdst->path;
1088 	}
1089 #endif
1090 	return (struct dst_entry *) dst;
1091 }
1092 
1093 static inline struct dst_entry *xfrm_dst_child(const struct dst_entry *dst)
1094 {
1095 #ifdef CONFIG_XFRM
1096 	if (dst->xfrm || (dst->flags & DST_XFRM_QUEUE)) {
1097 		struct xfrm_dst *xdst = (struct xfrm_dst *) dst;
1098 		return xdst->child;
1099 	}
1100 #endif
1101 	return NULL;
1102 }
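/* Illustrative walk of a bundle (CONFIG_XFRM), matching the diagram near the
 * top of this file: each level's dst->xfrm is the state applied there, and
 * xfrm_dst_child() steps towards the plain route at the bottom.  walk_bundle()
 * is a hypothetical helper, not part of this API.
 *
 *	static void walk_bundle(struct dst_entry *dst)
 *	{
 *		while (dst) {
 *			if (dst->xfrm)
 *				pr_debug("spi 0x%x\n",
 *					 be32_to_cpu(dst->xfrm->id.spi));
 *			dst = xfrm_dst_child(dst);
 *		}
 *	}
 */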
1103 
1104 #ifdef CONFIG_XFRM
1105 static inline void xfrm_dst_set_child(struct xfrm_dst *xdst, struct dst_entry *child)
1106 {
1107 	xdst->child = child;
1108 }
1109 
1110 static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
1111 {
1112 	xfrm_pols_put(xdst->pols, xdst->num_pols);
1113 	dst_release(xdst->route);
1114 	if (likely(xdst->u.dst.xfrm))
1115 		xfrm_state_put(xdst->u.dst.xfrm);
1116 }
1117 #endif
1118 
1119 void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
1120 
1121 struct xfrm_if_parms {
1122 	int link;		/* ifindex of underlying L2 interface */
1123 	u32 if_id;		/* interface identifier */
1124 	bool collect_md;
1125 };
1126 
1127 struct xfrm_if {
1128 	struct xfrm_if __rcu *next;	/* next interface in list */
1129 	struct net_device *dev;		/* virtual device associated with interface */
1130 	struct net *net;		/* netns for packet i/o */
1131 	struct xfrm_if_parms p;		/* interface parms */
1132 
1133 	struct gro_cells gro_cells;
1134 };
1135 
1136 struct xfrm_offload {
1137 	/* Output sequence number for replay protection on offloading. */
1138 	struct {
1139 		__u32 low;
1140 		__u32 hi;
1141 	} seq;
1142 
1143 	__u32			flags;
1144 #define	SA_DELETE_REQ		1
1145 #define	CRYPTO_DONE		2
1146 #define	CRYPTO_NEXT_DONE	4
1147 #define	CRYPTO_FALLBACK		8
1148 #define	XFRM_GSO_SEGMENT	16
1149 #define	XFRM_GRO		32
1150 /* 64 is free */
1151 #define	XFRM_DEV_RESUME		128
1152 #define	XFRM_XMIT		256
1153 
1154 	__u32			status;
1155 #define CRYPTO_SUCCESS				1
1156 #define CRYPTO_GENERIC_ERROR			2
1157 #define CRYPTO_TRANSPORT_AH_AUTH_FAILED		4
1158 #define CRYPTO_TRANSPORT_ESP_AUTH_FAILED	8
1159 #define CRYPTO_TUNNEL_AH_AUTH_FAILED		16
1160 #define CRYPTO_TUNNEL_ESP_AUTH_FAILED		32
1161 #define CRYPTO_INVALID_PACKET_SYNTAX		64
1162 #define CRYPTO_INVALID_PROTOCOL			128
1163 
1164 	/* Used to keep whole l2 header for transport mode GRO */
1165 	__u32			orig_mac_len;
1166 
1167 	__u8			proto;
1168 	__u8			inner_ipproto;
1169 };
1170 
1171 struct sec_path {
1172 	int			len;
1173 	int			olen;
1174 	int			verified_cnt;
1175 
1176 	struct xfrm_state	*xvec[XFRM_MAX_DEPTH];
1177 	struct xfrm_offload	ovec[XFRM_MAX_OFFLOAD_DEPTH];
1178 };
1179 
1180 struct sec_path *secpath_set(struct sk_buff *skb);
1181 
1182 static inline void
1183 secpath_reset(struct sk_buff *skb)
1184 {
1185 #ifdef CONFIG_XFRM
1186 	skb_ext_del(skb, SKB_EXT_SEC_PATH);
1187 #endif
1188 }
1189 
1190 static inline int
1191 xfrm_addr_any(const xfrm_address_t *addr, unsigned short family)
1192 {
1193 	switch (family) {
1194 	case AF_INET:
1195 		return addr->a4 == 0;
1196 	case AF_INET6:
1197 		return ipv6_addr_any(&addr->in6);
1198 	}
1199 	return 0;
1200 }
1201 
1202 static inline int
1203 __xfrm4_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1204 {
1205 	return	(tmpl->saddr.a4 &&
1206 		 tmpl->saddr.a4 != x->props.saddr.a4);
1207 }
1208 
1209 static inline int
1210 __xfrm6_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x)
1211 {
1212 	return	(!ipv6_addr_any((struct in6_addr*)&tmpl->saddr) &&
1213 		 !ipv6_addr_equal((struct in6_addr *)&tmpl->saddr, (struct in6_addr*)&x->props.saddr));
1214 }
1215 
1216 static inline int
1217 xfrm_state_addr_cmp(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, unsigned short family)
1218 {
1219 	switch (family) {
1220 	case AF_INET:
1221 		return __xfrm4_state_addr_cmp(tmpl, x);
1222 	case AF_INET6:
1223 		return __xfrm6_state_addr_cmp(tmpl, x);
1224 	}
1225 	return !0;
1226 }
1227 
1228 #ifdef CONFIG_XFRM
1229 static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1230 {
1231 	struct sec_path *sp = skb_sec_path(skb);
1232 
1233 	return sp->xvec[sp->len - 1];
1234 }
1235 #endif
1236 
1237 static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
1238 {
1239 #ifdef CONFIG_XFRM
1240 	struct sec_path *sp = skb_sec_path(skb);
1241 
1242 	if (!sp || !sp->olen || sp->len != sp->olen)
1243 		return NULL;
1244 
1245 	return &sp->ovec[sp->olen - 1];
1246 #else
1247 	return NULL;
1248 #endif
1249 }
1250 
1251 #ifdef CONFIG_XFRM
1252 int __xfrm_policy_check(struct sock *, int dir, struct sk_buff *skb,
1253 			unsigned short family);
1254 
1255 static inline bool __xfrm_check_nopolicy(struct net *net, struct sk_buff *skb,
1256 					 int dir)
1257 {
1258 	if (!net->xfrm.policy_count[dir] && !secpath_exists(skb))
1259 		return net->xfrm.policy_default[dir] == XFRM_USERPOLICY_ACCEPT;
1260 
1261 	return false;
1262 }
1263 
1264 static inline bool __xfrm_check_dev_nopolicy(struct sk_buff *skb,
1265 					     int dir, unsigned short family)
1266 {
1267 	if (dir != XFRM_POLICY_OUT && family == AF_INET) {
1268 		/* same dst may be used for traffic originating from
1269 		 * devices with different policy settings.
1270 		 */
1271 		return IPCB(skb)->flags & IPSKB_NOPOLICY;
1272 	}
1273 	return skb_dst(skb) && (skb_dst(skb)->flags & DST_NOPOLICY);
1274 }
1275 
1276 static inline int __xfrm_policy_check2(struct sock *sk, int dir,
1277 				       struct sk_buff *skb,
1278 				       unsigned int family, int reverse)
1279 {
1280 	struct net *net = dev_net(skb->dev);
1281 	int ndir = dir | (reverse ? XFRM_POLICY_MASK + 1 : 0);
1282 	struct xfrm_offload *xo = xfrm_offload(skb);
1283 	struct xfrm_state *x;
1284 
1285 	if (sk && sk->sk_policy[XFRM_POLICY_IN])
1286 		return __xfrm_policy_check(sk, ndir, skb, family);
1287 
1288 	if (xo) {
1289 		x = xfrm_input_state(skb);
1290 		if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET) {
1291 			bool check = (xo->flags & CRYPTO_DONE) &&
1292 				     (xo->status & CRYPTO_SUCCESS);
1293 
1294			/* The packets here are plain ones and the secpath was
1295			 * only needed to indicate that the hardware already
1296			 * handled them, so there is nothing more to do.
1297			 *
1298			 * Consume the secpath that was set by the drivers.
1299 			 */
1300 			secpath_reset(skb);
1301 			return check;
1302 		}
1303 	}
1304 
1305 	return __xfrm_check_nopolicy(net, skb, dir) ||
1306 	       __xfrm_check_dev_nopolicy(skb, dir, family) ||
1307 	       __xfrm_policy_check(sk, ndir, skb, family);
1308 }
1309 
1310 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1311 {
1312 	return __xfrm_policy_check2(sk, dir, skb, family, 0);
1313 }
1314 
1315 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1316 {
1317 	return xfrm_policy_check(sk, dir, skb, AF_INET);
1318 }
1319 
1320 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1321 {
1322 	return xfrm_policy_check(sk, dir, skb, AF_INET6);
1323 }
1324 
1325 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1326 					     struct sk_buff *skb)
1327 {
1328 	return __xfrm_policy_check2(sk, dir, skb, AF_INET, 1);
1329 }
1330 
1331 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1332 					     struct sk_buff *skb)
1333 {
1334 	return __xfrm_policy_check2(sk, dir, skb, AF_INET6, 1);
1335 }
1336 
1337 int __xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1338 			  unsigned int family, int reverse);
1339 
1340 static inline int xfrm_decode_session(struct net *net, struct sk_buff *skb, struct flowi *fl,
1341 				      unsigned int family)
1342 {
1343 	return __xfrm_decode_session(net, skb, fl, family, 0);
1344 }
1345 
1346 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1347 					      struct flowi *fl,
1348 					      unsigned int family)
1349 {
1350 	return __xfrm_decode_session(net, skb, fl, family, 1);
1351 }
1352 
1353 int __xfrm_route_forward(struct sk_buff *skb, unsigned short family);
1354 
1355 static inline int xfrm_route_forward(struct sk_buff *skb, unsigned short family)
1356 {
1357 	struct net *net = dev_net(skb->dev);
1358 
1359 	if (!net->xfrm.policy_count[XFRM_POLICY_OUT] &&
1360 	    net->xfrm.policy_default[XFRM_POLICY_OUT] == XFRM_USERPOLICY_ACCEPT)
1361 		return true;
1362 
1363 	return (skb_dst(skb)->flags & DST_NOXFRM) ||
1364 	       __xfrm_route_forward(skb, family);
1365 }
1366 
1367 static inline int xfrm4_route_forward(struct sk_buff *skb)
1368 {
1369 	return xfrm_route_forward(skb, AF_INET);
1370 }
1371 
1372 static inline int xfrm6_route_forward(struct sk_buff *skb)
1373 {
1374 	return xfrm_route_forward(skb, AF_INET6);
1375 }
1376 
1377 int __xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk);
1378 
1379 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk)
1380 {
1381 	if (!sk_fullsock(osk))
1382 		return 0;
1383 	sk->sk_policy[0] = NULL;
1384 	sk->sk_policy[1] = NULL;
1385 	if (unlikely(osk->sk_policy[0] || osk->sk_policy[1]))
1386 		return __xfrm_sk_clone_policy(sk, osk);
1387 	return 0;
1388 }
1389 
1390 int xfrm_policy_delete(struct xfrm_policy *pol, int dir);
1391 
1392 static inline void xfrm_sk_free_policy(struct sock *sk)
1393 {
1394 	struct xfrm_policy *pol;
1395 
1396 	pol = rcu_dereference_protected(sk->sk_policy[0], 1);
1397 	if (unlikely(pol != NULL)) {
1398 		xfrm_policy_delete(pol, XFRM_POLICY_MAX);
1399 		sk->sk_policy[0] = NULL;
1400 	}
1401 	pol = rcu_dereference_protected(sk->sk_policy[1], 1);
1402 	if (unlikely(pol != NULL)) {
1403 		xfrm_policy_delete(pol, XFRM_POLICY_MAX+1);
1404 		sk->sk_policy[1] = NULL;
1405 	}
1406 }
1407 
1408 #else
1409 
1410 static inline void xfrm_sk_free_policy(struct sock *sk) {}
1411 static inline int xfrm_sk_clone_policy(struct sock *sk, const struct sock *osk) { return 0; }
1412 static inline int xfrm6_route_forward(struct sk_buff *skb) { return 1; }
1413 static inline int xfrm4_route_forward(struct sk_buff *skb) { return 1; }
1414 static inline int xfrm6_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1415 {
1416 	return 1;
1417 }
1418 static inline int xfrm4_policy_check(struct sock *sk, int dir, struct sk_buff *skb)
1419 {
1420 	return 1;
1421 }
1422 static inline int xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, unsigned short family)
1423 {
1424 	return 1;
1425 }
1426 static inline int xfrm_decode_session_reverse(struct net *net, struct sk_buff *skb,
1427 					      struct flowi *fl,
1428 					      unsigned int family)
1429 {
1430 	return -ENOSYS;
1431 }
1432 static inline int xfrm4_policy_check_reverse(struct sock *sk, int dir,
1433 					     struct sk_buff *skb)
1434 {
1435 	return 1;
1436 }
1437 static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
1438 					     struct sk_buff *skb)
1439 {
1440 	return 1;
1441 }
1442 #endif
1443 
1444 static __inline__
1445 xfrm_address_t *xfrm_flowi_daddr(const struct flowi *fl, unsigned short family)
1446 {
1447 	switch (family){
1448 	case AF_INET:
1449 		return (xfrm_address_t *)&fl->u.ip4.daddr;
1450 	case AF_INET6:
1451 		return (xfrm_address_t *)&fl->u.ip6.daddr;
1452 	}
1453 	return NULL;
1454 }
1455 
1456 static __inline__
1457 xfrm_address_t *xfrm_flowi_saddr(const struct flowi *fl, unsigned short family)
1458 {
1459 	switch (family){
1460 	case AF_INET:
1461 		return (xfrm_address_t *)&fl->u.ip4.saddr;
1462 	case AF_INET6:
1463 		return (xfrm_address_t *)&fl->u.ip6.saddr;
1464 	}
1465 	return NULL;
1466 }
1467 
1468 static __inline__
1469 void xfrm_flowi_addr_get(const struct flowi *fl,
1470 			 xfrm_address_t *saddr, xfrm_address_t *daddr,
1471 			 unsigned short family)
1472 {
1473 	switch(family) {
1474 	case AF_INET:
1475 		memcpy(&saddr->a4, &fl->u.ip4.saddr, sizeof(saddr->a4));
1476 		memcpy(&daddr->a4, &fl->u.ip4.daddr, sizeof(daddr->a4));
1477 		break;
1478 	case AF_INET6:
1479 		saddr->in6 = fl->u.ip6.saddr;
1480 		daddr->in6 = fl->u.ip6.daddr;
1481 		break;
1482 	}
1483 }
1484 
1485 static __inline__ int
1486 __xfrm4_state_addr_check(const struct xfrm_state *x,
1487 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1488 {
1489 	if (daddr->a4 == x->id.daddr.a4 &&
1490 	    (saddr->a4 == x->props.saddr.a4 || !saddr->a4 || !x->props.saddr.a4))
1491 		return 1;
1492 	return 0;
1493 }
1494 
1495 static __inline__ int
1496 __xfrm6_state_addr_check(const struct xfrm_state *x,
1497 			 const xfrm_address_t *daddr, const xfrm_address_t *saddr)
1498 {
1499 	if (ipv6_addr_equal((struct in6_addr *)daddr, (struct in6_addr *)&x->id.daddr) &&
1500 	    (ipv6_addr_equal((struct in6_addr *)saddr, (struct in6_addr *)&x->props.saddr) ||
1501 	     ipv6_addr_any((struct in6_addr *)saddr) ||
1502 	     ipv6_addr_any((struct in6_addr *)&x->props.saddr)))
1503 		return 1;
1504 	return 0;
1505 }
1506 
1507 static __inline__ int
1508 xfrm_state_addr_check(const struct xfrm_state *x,
1509 		      const xfrm_address_t *daddr, const xfrm_address_t *saddr,
1510 		      unsigned short family)
1511 {
1512 	switch (family) {
1513 	case AF_INET:
1514 		return __xfrm4_state_addr_check(x, daddr, saddr);
1515 	case AF_INET6:
1516 		return __xfrm6_state_addr_check(x, daddr, saddr);
1517 	}
1518 	return 0;
1519 }
1520 
1521 static __inline__ int
1522 xfrm_state_addr_flow_check(const struct xfrm_state *x, const struct flowi *fl,
1523 			   unsigned short family)
1524 {
1525 	switch (family) {
1526 	case AF_INET:
1527 		return __xfrm4_state_addr_check(x,
1528 						(const xfrm_address_t *)&fl->u.ip4.daddr,
1529 						(const xfrm_address_t *)&fl->u.ip4.saddr);
1530 	case AF_INET6:
1531 		return __xfrm6_state_addr_check(x,
1532 						(const xfrm_address_t *)&fl->u.ip6.daddr,
1533 						(const xfrm_address_t *)&fl->u.ip6.saddr);
1534 	}
1535 	return 0;
1536 }
1537 
1538 static inline int xfrm_state_kern(const struct xfrm_state *x)
1539 {
1540 	return atomic_read(&x->tunnel_users);
1541 }
1542 
1543 static inline bool xfrm_id_proto_valid(u8 proto)
1544 {
1545 	switch (proto) {
1546 	case IPPROTO_AH:
1547 	case IPPROTO_ESP:
1548 	case IPPROTO_COMP:
1549 #if IS_ENABLED(CONFIG_IPV6)
1550 	case IPPROTO_ROUTING:
1551 	case IPPROTO_DSTOPTS:
1552 #endif
1553 		return true;
1554 	default:
1555 		return false;
1556 	}
1557 }
1558 
1559 /* IPSEC_PROTO_ANY only matches 3 IPsec protocols, 0 could match all. */
1560 static inline int xfrm_id_proto_match(u8 proto, u8 userproto)
1561 {
1562 	return (!userproto || proto == userproto ||
1563 		(userproto == IPSEC_PROTO_ANY && (proto == IPPROTO_AH ||
1564 						  proto == IPPROTO_ESP ||
1565 						  proto == IPPROTO_COMP)));
1566 }
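/* Examples: xfrm_id_proto_match(IPPROTO_ESP, IPSEC_PROTO_ANY) and
 * xfrm_id_proto_match(IPPROTO_ESP, 0) are both true, while
 * xfrm_id_proto_match(IPPROTO_ROUTING, IPSEC_PROTO_ANY) is false:
 * IPSEC_PROTO_ANY covers only AH/ESP/IPCOMP, whereas 0 is a full wildcard.
 */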
1567 
1568 /*
1569  * xfrm algorithm information
1570  */
1571 struct xfrm_algo_aead_info {
1572 	char *geniv;
1573 	u16 icv_truncbits;
1574 };
1575 
1576 struct xfrm_algo_auth_info {
1577 	u16 icv_truncbits;
1578 	u16 icv_fullbits;
1579 };
1580 
1581 struct xfrm_algo_encr_info {
1582 	char *geniv;
1583 	u16 blockbits;
1584 	u16 defkeybits;
1585 };
1586 
1587 struct xfrm_algo_comp_info {
1588 	u16 threshold;
1589 };
1590 
1591 struct xfrm_algo_desc {
1592 	char *name;
1593 	char *compat;
1594 	u8 available:1;
1595 	u8 pfkey_supported:1;
1596 	union {
1597 		struct xfrm_algo_aead_info aead;
1598 		struct xfrm_algo_auth_info auth;
1599 		struct xfrm_algo_encr_info encr;
1600 		struct xfrm_algo_comp_info comp;
1601 	} uinfo;
1602 	struct sadb_alg desc;
1603 };
1604 
1605 /* XFRM protocol handlers.  */
1606 struct xfrm4_protocol {
1607 	int (*handler)(struct sk_buff *skb);
1608 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1609 			     int encap_type);
1610 	int (*cb_handler)(struct sk_buff *skb, int err);
1611 	int (*err_handler)(struct sk_buff *skb, u32 info);
1612 
1613 	struct xfrm4_protocol __rcu *next;
1614 	int priority;
1615 };
1616 
1617 struct xfrm6_protocol {
1618 	int (*handler)(struct sk_buff *skb);
1619 	int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
1620 			     int encap_type);
1621 	int (*cb_handler)(struct sk_buff *skb, int err);
1622 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1623 			   u8 type, u8 code, int offset, __be32 info);
1624 
1625 	struct xfrm6_protocol __rcu *next;
1626 	int priority;
1627 };
1628 
1629 /* XFRM tunnel handlers.  */
1630 struct xfrm_tunnel {
1631 	int (*handler)(struct sk_buff *skb);
1632 	int (*cb_handler)(struct sk_buff *skb, int err);
1633 	int (*err_handler)(struct sk_buff *skb, u32 info);
1634 
1635 	struct xfrm_tunnel __rcu *next;
1636 	int priority;
1637 };
1638 
1639 struct xfrm6_tunnel {
1640 	int (*handler)(struct sk_buff *skb);
1641 	int (*cb_handler)(struct sk_buff *skb, int err);
1642 	int (*err_handler)(struct sk_buff *skb, struct inet6_skb_parm *opt,
1643 			   u8 type, u8 code, int offset, __be32 info);
1644 	struct xfrm6_tunnel __rcu *next;
1645 	int priority;
1646 };
1647 
1648 void xfrm_init(void);
1649 void xfrm4_init(void);
1650 int xfrm_state_init(struct net *net);
1651 void xfrm_state_fini(struct net *net);
1652 void xfrm4_state_init(void);
1653 void xfrm4_protocol_init(void);
1654 #ifdef CONFIG_XFRM
1655 int xfrm6_init(void);
1656 void xfrm6_fini(void);
1657 int xfrm6_state_init(void);
1658 void xfrm6_state_fini(void);
1659 int xfrm6_protocol_init(void);
1660 void xfrm6_protocol_fini(void);
1661 #else
1662 static inline int xfrm6_init(void)
1663 {
1664 	return 0;
1665 }
1666 static inline void xfrm6_fini(void)
1667 {
1668 	;
1669 }
1670 #endif
1671 
1672 #ifdef CONFIG_XFRM_STATISTICS
1673 int xfrm_proc_init(struct net *net);
1674 void xfrm_proc_fini(struct net *net);
1675 #endif
1676 
1677 int xfrm_sysctl_init(struct net *net);
1678 #ifdef CONFIG_SYSCTL
1679 void xfrm_sysctl_fini(struct net *net);
1680 #else
1681 static inline void xfrm_sysctl_fini(struct net *net)
1682 {
1683 }
1684 #endif
1685 
1686 void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
1687 			  struct xfrm_address_filter *filter);
1688 int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
1689 		    int (*func)(struct xfrm_state *, int, void*), void *);
1690 void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
1691 struct xfrm_state *xfrm_state_alloc(struct net *net);
1692 void xfrm_state_free(struct xfrm_state *x);
1693 struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
1694 				   const xfrm_address_t *saddr,
1695 				   const struct flowi *fl,
1696 				   struct xfrm_tmpl *tmpl,
1697 				   struct xfrm_policy *pol, int *err,
1698 				   unsigned short family, u32 if_id);
1699 struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
1700 				       xfrm_address_t *daddr,
1701 				       xfrm_address_t *saddr,
1702 				       unsigned short family,
1703 				       u8 mode, u8 proto, u32 reqid);
1704 struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
1705 					      unsigned short family);
1706 int xfrm_state_check_expire(struct xfrm_state *x);
1707 void xfrm_state_update_stats(struct net *net);
1708 #ifdef CONFIG_XFRM_OFFLOAD
1709 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x)
1710 {
1711 	struct xfrm_dev_offload *xdo = &x->xso;
1712 	struct net_device *dev = READ_ONCE(xdo->dev);
1713 
1714 	if (dev && dev->xfrmdev_ops &&
1715 	    dev->xfrmdev_ops->xdo_dev_state_update_stats)
1716 		dev->xfrmdev_ops->xdo_dev_state_update_stats(x);
1718 }
1719 #else
1720 static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) {}
1721 #endif
1722 void xfrm_state_insert(struct xfrm_state *x);
1723 int xfrm_state_add(struct xfrm_state *x);
1724 int xfrm_state_update(struct xfrm_state *x);
1725 struct xfrm_state *xfrm_state_lookup(struct net *net, u32 mark,
1726 				     const xfrm_address_t *daddr, __be32 spi,
1727 				     u8 proto, unsigned short family);
1728 struct xfrm_state *xfrm_input_state_lookup(struct net *net, u32 mark,
1729 					   const xfrm_address_t *daddr,
1730 					   __be32 spi, u8 proto,
1731 					   unsigned short family);
1732 struct xfrm_state *xfrm_state_lookup_byaddr(struct net *net, u32 mark,
1733 					    const xfrm_address_t *daddr,
1734 					    const xfrm_address_t *saddr,
1735 					    u8 proto,
1736 					    unsigned short family);
1737 #ifdef CONFIG_XFRM_SUB_POLICY
1738 void xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
1739 		    unsigned short family);
1740 void xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
1741 		     unsigned short family);
1742 #else
1743 static inline void xfrm_tmpl_sort(struct xfrm_tmpl **d, struct xfrm_tmpl **s,
1744 				  int n, unsigned short family)
1745 {
1746 }
1747 
1748 static inline void xfrm_state_sort(struct xfrm_state **d, struct xfrm_state **s,
1749 				   int n, unsigned short family)
1750 {
1751 }
1752 #endif
1753 
1754 struct xfrmk_sadinfo {
1755 	u32 sadhcnt; /* current hash bkts */
1756 	u32 sadhmcnt; /* max allowed hash bkts */
1757 	u32 sadcnt; /* current running count */
1758 };
1759 
1760 struct xfrmk_spdinfo {
1761 	u32 incnt;
1762 	u32 outcnt;
1763 	u32 fwdcnt;
1764 	u32 inscnt;
1765 	u32 outscnt;
1766 	u32 fwdscnt;
1767 	u32 spdhcnt;
1768 	u32 spdhmcnt;
1769 };
1770 
1771 struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq, u32 pcpu_num);
1772 int xfrm_state_delete(struct xfrm_state *x);
1773 int xfrm_state_flush(struct net *net, u8 proto, bool task_valid, bool sync);
1774 int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
1775 int xfrm_dev_policy_flush(struct net *net, struct net_device *dev,
1776 			  bool task_valid);
1777 void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
1778 void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
1779 u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
1780 int xfrm_init_replay(struct xfrm_state *x, struct netlink_ext_ack *extack);
1781 u32 xfrm_state_mtu(struct xfrm_state *x, int mtu);
1782 int __xfrm_init_state(struct xfrm_state *x, struct netlink_ext_ack *extack);
1783 int xfrm_init_state(struct xfrm_state *x);
1784 int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
1785 int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
1786 int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
1787 			 int (*finish)(struct net *, struct sock *,
1788 				       struct sk_buff *));
1789 int xfrm_trans_queue(struct sk_buff *skb,
1790 		     int (*finish)(struct net *, struct sock *,
1791 				   struct sk_buff *));
1792 int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err);
1793 int xfrm_output(struct sock *sk, struct sk_buff *skb);
1794 int xfrm4_tunnel_check_size(struct sk_buff *skb);
1795 #if IS_ENABLED(CONFIG_IPV6)
1796 int xfrm6_tunnel_check_size(struct sk_buff *skb);
1797 #else
1798 static inline int xfrm6_tunnel_check_size(struct sk_buff *skb)
1799 {
1800 	return -EMSGSIZE;
1801 }
1802 #endif
1803 
1804 #if IS_ENABLED(CONFIG_NET_PKTGEN)
1805 int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb);
1806 #endif
1807 
1808 void xfrm_local_error(struct sk_buff *skb, int mtu);
1809 int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1810 		    int encap_type);
1811 int xfrm4_transport_finish(struct sk_buff *skb, int async);
1812 int xfrm4_rcv(struct sk_buff *skb);
1813 
1814 static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
1815 {
1816 	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
1817 	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
1818 	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
1819 	return xfrm_input(skb, nexthdr, spi, 0);
1820 }
1821 
1822 int xfrm4_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1823 int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
1824 int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
1825 int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
1826 int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
1827 void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
1828 int xfrm6_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi,
1829 		  struct ip6_tnl *t);
1830 int xfrm6_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
1831 		    int encap_type);
1832 int xfrm6_transport_finish(struct sk_buff *skb, int async);
1833 int xfrm6_rcv_tnl(struct sk_buff *skb, struct ip6_tnl *t);
1834 int xfrm6_rcv(struct sk_buff *skb);
1835 int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr,
1836 		     xfrm_address_t *saddr, u8 proto);
1837 void xfrm6_local_error(struct sk_buff *skb, u32 mtu);
1838 int xfrm6_protocol_register(struct xfrm6_protocol *handler, unsigned char protocol);
1839 int xfrm6_protocol_deregister(struct xfrm6_protocol *handler, unsigned char protocol);
1840 int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family);
1841 int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family);
1842 __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr);
1843 __be32 xfrm6_tunnel_spi_lookup(struct net *net, const xfrm_address_t *saddr);
1844 int xfrm6_output(struct net *net, struct sock *sk, struct sk_buff *skb);
1845 
1846 #ifdef CONFIG_XFRM
1847 void xfrm6_local_rxpmtu(struct sk_buff *skb, u32 mtu);
1848 int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1849 int xfrm6_udp_encap_rcv(struct sock *sk, struct sk_buff *skb);
1850 struct sk_buff *xfrm4_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1851 					struct sk_buff *skb);
1852 struct sk_buff *xfrm6_gro_udp_encap_rcv(struct sock *sk, struct list_head *head,
1853 					struct sk_buff *skb);
1854 int xfrm_user_policy(struct sock *sk, int optname, sockptr_t optval,
1855 		     int optlen);
1856 #else
1857 static inline int xfrm_user_policy(struct sock *sk, int optname,
1858 				   sockptr_t optval, int optlen)
1859 {
	return -ENOPROTOOPT;
1861 }
1862 #endif
1863 
1864 struct dst_entry *__xfrm_dst_lookup(int family, const struct xfrm_dst_lookup_params *params);
1865 
1866 struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
1867 
1868 void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
1869 int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
1870 		     int (*func)(struct xfrm_policy *, int, int, void*),
1871 		     void *);
1872 void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
1873 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
1874 struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net,
1875 					  const struct xfrm_mark *mark,
1876 					  u32 if_id, u8 type, int dir,
1877 					  struct xfrm_selector *sel,
1878 					  struct xfrm_sec_ctx *ctx, int delete,
1879 					  int *err);
1880 struct xfrm_policy *xfrm_policy_byid(struct net *net,
1881 				     const struct xfrm_mark *mark, u32 if_id,
1882 				     u8 type, int dir, u32 id, int delete,
1883 				     int *err);
1884 int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
1885 void xfrm_policy_hash_rebuild(struct net *net);
1886 u32 xfrm_get_acqseq(void);
1887 int verify_spi_info(u8 proto, u32 min, u32 max, struct netlink_ext_ack *extack);
1888 int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi,
1889 		   struct netlink_ext_ack *extack);
1890 struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
1891 				 u8 mode, u32 reqid, u32 if_id, u32 pcpu_num, u8 proto,
1892 				 const xfrm_address_t *daddr,
1893 				 const xfrm_address_t *saddr, int create,
1894 				 unsigned short family);
1895 int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol);
1896 
1897 #ifdef CONFIG_XFRM_MIGRATE
1898 int km_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1899 	       const struct xfrm_migrate *m, int num_bundles,
1900 	       const struct xfrm_kmaddress *k,
1901 	       const struct xfrm_encap_tmpl *encap);
1902 struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net,
1903 						u32 if_id);
1904 struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
1905 				      struct xfrm_migrate *m,
1906 				      struct xfrm_encap_tmpl *encap,
1907 				      struct net *net,
1908 				      struct xfrm_user_offload *xuo,
1909 				      struct netlink_ext_ack *extack);
1910 int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
1911 		 struct xfrm_migrate *m, int num_bundles,
1912 		 struct xfrm_kmaddress *k, struct net *net,
1913 		 struct xfrm_encap_tmpl *encap, u32 if_id,
1914 		 struct netlink_ext_ack *extack,
1915 		 struct xfrm_user_offload *xuo);
1916 #endif
1917 
1918 int km_new_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr, __be16 sport);
1919 void km_policy_expired(struct xfrm_policy *pol, int dir, int hard, u32 portid);
1920 int km_report(struct net *net, u8 proto, struct xfrm_selector *sel,
1921 	      xfrm_address_t *addr);
1922 
1923 void xfrm_input_init(void);
1924 int xfrm_parse_spi(struct sk_buff *skb, u8 nexthdr, __be32 *spi, __be32 *seq);
1925 
1926 void xfrm_probe_algs(void);
1927 int xfrm_count_pfkey_auth_supported(void);
1928 int xfrm_count_pfkey_enc_supported(void);
1929 struct xfrm_algo_desc *xfrm_aalg_get_byidx(unsigned int idx);
1930 struct xfrm_algo_desc *xfrm_ealg_get_byidx(unsigned int idx);
1931 struct xfrm_algo_desc *xfrm_aalg_get_byid(int alg_id);
1932 struct xfrm_algo_desc *xfrm_ealg_get_byid(int alg_id);
1933 struct xfrm_algo_desc *xfrm_calg_get_byid(int alg_id);
1934 struct xfrm_algo_desc *xfrm_aalg_get_byname(const char *name, int probe);
1935 struct xfrm_algo_desc *xfrm_ealg_get_byname(const char *name, int probe);
1936 struct xfrm_algo_desc *xfrm_calg_get_byname(const char *name, int probe);
1937 struct xfrm_algo_desc *xfrm_aead_get_byname(const char *name, int icv_len,
1938 					    int probe);
1939 
1940 static inline bool xfrm6_addr_equal(const xfrm_address_t *a,
1941 				    const xfrm_address_t *b)
1942 {
1943 	return ipv6_addr_equal((const struct in6_addr *)a,
1944 			       (const struct in6_addr *)b);
1945 }
1946 
1947 static inline bool xfrm_addr_equal(const xfrm_address_t *a,
1948 				   const xfrm_address_t *b,
1949 				   sa_family_t family)
1950 {
1951 	switch (family) {
1952 	default:
1953 	case AF_INET:
1954 		return ((__force u32)a->a4 ^ (__force u32)b->a4) == 0;
1955 	case AF_INET6:
1956 		return xfrm6_addr_equal(a, b);
1957 	}
1958 }
1959 
1960 static inline int xfrm_policy_id2dir(u32 index)
1961 {
1962 	return index & 7;
1963 }
1964 
1965 #ifdef CONFIG_XFRM
1966 void xfrm_replay_advance(struct xfrm_state *x, __be32 net_seq);
1967 int xfrm_replay_check(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1968 void xfrm_replay_notify(struct xfrm_state *x, int event);
1969 int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb);
1970 int xfrm_replay_recheck(struct xfrm_state *x, struct sk_buff *skb, __be32 net_seq);
1971 
1972 static inline int xfrm_aevent_is_on(struct net *net)
1973 {
1974 	struct sock *nlsk;
1975 	int ret = 0;
1976 
1977 	rcu_read_lock();
1978 	nlsk = rcu_dereference(net->xfrm.nlsk);
1979 	if (nlsk)
1980 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_AEVENTS);
1981 	rcu_read_unlock();
1982 	return ret;
1983 }
1984 
1985 static inline int xfrm_acquire_is_on(struct net *net)
1986 {
1987 	struct sock *nlsk;
1988 	int ret = 0;
1989 
1990 	rcu_read_lock();
1991 	nlsk = rcu_dereference(net->xfrm.nlsk);
1992 	if (nlsk)
1993 		ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
1994 	rcu_read_unlock();
1995 
1996 	return ret;
1997 }
1998 #endif
1999 
2000 static inline unsigned int aead_len(struct xfrm_algo_aead *alg)
2001 {
2002 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
2003 }
2004 
2005 static inline unsigned int xfrm_alg_len(const struct xfrm_algo *alg)
2006 {
2007 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
2008 }
2009 
2010 static inline unsigned int xfrm_alg_auth_len(const struct xfrm_algo_auth *alg)
2011 {
2012 	return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
2013 }
2014 
2015 static inline unsigned int xfrm_replay_state_esn_len(struct xfrm_replay_state_esn *replay_esn)
2016 {
2017 	return sizeof(*replay_esn) + replay_esn->bmp_len * sizeof(__u32);
2018 }
2019 
2020 #ifdef CONFIG_XFRM_MIGRATE
2021 static inline int xfrm_replay_clone(struct xfrm_state *x,
2022 				     struct xfrm_state *orig)
2023 {
2024 
2025 	x->replay_esn = kmemdup(orig->replay_esn,
2026 				xfrm_replay_state_esn_len(orig->replay_esn),
2027 				GFP_KERNEL);
2028 	if (!x->replay_esn)
2029 		return -ENOMEM;
2030 	x->preplay_esn = kmemdup(orig->preplay_esn,
2031 				 xfrm_replay_state_esn_len(orig->preplay_esn),
2032 				 GFP_KERNEL);
2033 	if (!x->preplay_esn)
2034 		return -ENOMEM;
2035 
2036 	return 0;
2037 }
2038 
2039 static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
2040 {
2041 	return kmemdup(orig, aead_len(orig), GFP_KERNEL);
2042 }
2043 
2044 
2045 static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
2046 {
2047 	return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
2048 }
2049 
2050 static inline struct xfrm_algo_auth *xfrm_algo_auth_clone(struct xfrm_algo_auth *orig)
2051 {
2052 	return kmemdup(orig, xfrm_alg_auth_len(orig), GFP_KERNEL);
2053 }
2054 
2055 static inline void xfrm_states_put(struct xfrm_state **states, int n)
2056 {
2057 	int i;
2058 	for (i = 0; i < n; i++)
2059 		xfrm_state_put(*(states + i));
2060 }
2061 
2062 static inline void xfrm_states_delete(struct xfrm_state **states, int n)
2063 {
2064 	int i;
2065 	for (i = 0; i < n; i++)
2066 		xfrm_state_delete(*(states + i));
2067 }
2068 #endif
2069 
2070 void __init xfrm_dev_init(void);
2071 
2072 #ifdef CONFIG_XFRM_OFFLOAD
2073 void xfrm_dev_resume(struct sk_buff *skb);
2074 void xfrm_dev_backlog(struct softnet_data *sd);
2075 struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again);
2076 int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
2077 		       struct xfrm_user_offload *xuo,
2078 		       struct netlink_ext_ack *extack);
2079 int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2080 			struct xfrm_user_offload *xuo, u8 dir,
2081 			struct netlink_ext_ack *extack);
2082 bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
2083 void xfrm_dev_state_delete(struct xfrm_state *x);
2084 void xfrm_dev_state_free(struct xfrm_state *x);
2085 
2086 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2087 {
2088 	struct xfrm_dev_offload *xso = &x->xso;
2089 	struct net_device *dev = READ_ONCE(xso->dev);
2090 
2091 	if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn)
2092 		dev->xfrmdev_ops->xdo_dev_state_advance_esn(x);
2093 }
2094 
2095 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2096 {
2097 	struct xfrm_state *x = dst->xfrm;
2098 	struct xfrm_dst *xdst;
2099 
2100 	if (!x || !x->type_offload)
2101 		return false;
2102 
2103 	xdst = (struct xfrm_dst *) dst;
2104 	if (!x->xso.offload_handle && !xdst->child->xfrm)
2105 		return true;
2106 	if (x->xso.offload_handle && (x->xso.dev == xfrm_dst_path(dst)->dev) &&
2107 	    !xdst->child->xfrm)
2108 		return true;
2109 
2110 	return false;
2111 }
2112 
2113 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2114 {
2115 	struct xfrm_dev_offload *xdo = &x->xdo;
2116 	struct net_device *dev = xdo->dev;
2117 
2118 	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_policy_delete)
2119 		dev->xfrmdev_ops->xdo_dev_policy_delete(x);
2120 }
2121 
2122 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2123 {
2124 	struct xfrm_dev_offload *xdo = &x->xdo;
2125 	struct net_device *dev = xdo->dev;
2126 
2127 	if (dev && dev->xfrmdev_ops) {
2128 		if (dev->xfrmdev_ops->xdo_dev_policy_free)
2129 			dev->xfrmdev_ops->xdo_dev_policy_free(x);
2130 		xdo->dev = NULL;
2131 		netdev_put(dev, &xdo->dev_tracker);
2132 	}
2133 }
2134 #else
2135 static inline void xfrm_dev_resume(struct sk_buff *skb)
2136 {
2137 }
2138 
2139 static inline void xfrm_dev_backlog(struct softnet_data *sd)
2140 {
2141 }
2142 
2143 static inline struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
2144 {
2145 	return skb;
2146 }
2147 
2148 static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo, struct netlink_ext_ack *extack)
2149 {
2150 	return 0;
2151 }
2152 
2153 static inline void xfrm_dev_state_delete(struct xfrm_state *x)
2154 {
2155 }
2156 
2157 static inline void xfrm_dev_state_free(struct xfrm_state *x)
2158 {
2159 }
2160 
2161 static inline int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp,
2162 				      struct xfrm_user_offload *xuo, u8 dir,
2163 				      struct netlink_ext_ack *extack)
2164 {
2165 	return 0;
2166 }
2167 
2168 static inline void xfrm_dev_policy_delete(struct xfrm_policy *x)
2169 {
2170 }
2171 
2172 static inline void xfrm_dev_policy_free(struct xfrm_policy *x)
2173 {
2174 }
2175 
2176 static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
2177 {
2178 	return false;
2179 }
2180 
2181 static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x)
2182 {
2183 }
2184 
2185 static inline bool xfrm_dst_offload_ok(struct dst_entry *dst)
2186 {
2187 	return false;
2188 }
2189 #endif
2190 
2191 static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
2192 {
2193 	if (attrs[XFRMA_MARK])
2194 		memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
2195 	else
2196 		m->v = m->m = 0;
2197 
2198 	return m->v & m->m;
2199 }
2200 
2201 static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
2202 {
2203 	int ret = 0;
2204 
2205 	if (m->m | m->v)
2206 		ret = nla_put(skb, XFRMA_MARK, sizeof(struct xfrm_mark), m);
2207 	return ret;
2208 }
2209 
2210 static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
2211 {
2212 	struct xfrm_mark *m = &x->props.smark;
2213 
2214 	return (m->v & m->m) | (mark & ~m->m);
2215 }
2216 
2217 static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
2218 {
2219 	int ret = 0;
2220 
2221 	if (if_id)
2222 		ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
2223 	return ret;
2224 }
2225 
2226 static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
2227 				    unsigned int family)
2228 {
2229 	bool tunnel = false;
2230 
	switch (family) {
2232 	case AF_INET:
2233 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
2234 			tunnel = true;
2235 		break;
2236 	case AF_INET6:
2237 		if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
2238 			tunnel = true;
2239 		break;
2240 	}
2241 	if (tunnel && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL))
2242 		return -EINVAL;
2243 
2244 	return 0;
2245 }
2246 
2247 extern const int xfrm_msg_min[XFRM_NR_MSGTYPES];
2248 extern const struct nla_policy xfrma_policy[XFRMA_MAX+1];
2249 
2250 struct xfrm_translator {
2251 	/* Allocate frag_list and put compat translation there */
2252 	int (*alloc_compat)(struct sk_buff *skb, const struct nlmsghdr *src);
2253 
	/* Allocate nlmsg with 64-bit translation of received 32-bit message */
2255 	struct nlmsghdr *(*rcv_msg_compat)(const struct nlmsghdr *nlh,
2256 			int maxtype, const struct nla_policy *policy,
2257 			struct netlink_ext_ack *extack);
2258 
2259 	/* Translate 32-bit user_policy from sockptr */
2260 	int (*xlate_user_policy_sockptr)(u8 **pdata32, int optlen);
2261 
2262 	struct module *owner;
2263 };
2264 
2265 #if IS_ENABLED(CONFIG_XFRM_USER_COMPAT)
2266 extern int xfrm_register_translator(struct xfrm_translator *xtr);
2267 extern int xfrm_unregister_translator(struct xfrm_translator *xtr);
2268 extern struct xfrm_translator *xfrm_get_translator(void);
2269 extern void xfrm_put_translator(struct xfrm_translator *xtr);
2270 #else
2271 static inline struct xfrm_translator *xfrm_get_translator(void)
2272 {
2273 	return NULL;
2274 }
2275 static inline void xfrm_put_translator(struct xfrm_translator *xtr)
2276 {
2277 }
2278 #endif
2279 
2280 #if IS_ENABLED(CONFIG_IPV6)
2281 static inline bool xfrm6_local_dontfrag(const struct sock *sk)
2282 {
2283 	int proto;
2284 
2285 	if (!sk || sk->sk_family != AF_INET6)
2286 		return false;
2287 
2288 	proto = sk->sk_protocol;
2289 	if (proto == IPPROTO_UDP || proto == IPPROTO_RAW)
2290 		return inet6_test_bit(DONTFRAG, sk);
2291 
2292 	return false;
2293 }
2294 #endif
2295 
2296 #if (IS_BUILTIN(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \
2297     (IS_MODULE(CONFIG_XFRM_INTERFACE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES))
2298 
2299 extern struct metadata_dst __percpu *xfrm_bpf_md_dst;
2300 
2301 int register_xfrm_interface_bpf(void);
2302 
2303 #else
2304 
2305 static inline int register_xfrm_interface_bpf(void)
2306 {
2307 	return 0;
2308 }
2309 
2310 #endif
2311 
2312 #if IS_ENABLED(CONFIG_DEBUG_INFO_BTF)
2313 int register_xfrm_state_bpf(void);
2314 #else
2315 static inline int register_xfrm_state_bpf(void)
2316 {
2317 	return 0;
2318 }
2319 #endif
2320 
2321 int xfrm_nat_keepalive_init(unsigned short family);
2322 void xfrm_nat_keepalive_fini(unsigned short family);
2323 int xfrm_nat_keepalive_net_init(struct net *net);
2324 int xfrm_nat_keepalive_net_fini(struct net *net);
2325 void xfrm_nat_keepalive_state_updated(struct xfrm_state *x);
2326 
2327 #endif	/* _NET_XFRM_H */
2328