xref: /linux/net/ipv6/route.c (revision a0b0f6c7d7f29f1ade9ec59699d02e3b153ee8e4)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *	Linux INET6 implementation
4  *	FIB front-end.
5  *
6  *	Authors:
7  *	Pedro Roque		<roque@di.fc.ul.pt>
8  */
9 
10 /*	Changes:
11  *
12  *	YOSHIFUJI Hideaki @USAGI
13  *		reworked default router selection.
14  *		- respect outgoing interface
15  *		- select from (probably) reachable routers (i.e.
16  *		routers in REACHABLE, STALE, DELAY or PROBE states).
17  *		- always select the same router if it is (probably)
18  *		reachable.  otherwise, round-robin the list.
19  *	Ville Nuorvala
20  *		Fixed routing subtrees.
21  */
22 
23 #define pr_fmt(fmt) "IPv6: " fmt
24 
25 #include <linux/capability.h>
26 #include <linux/errno.h>
27 #include <linux/export.h>
28 #include <linux/types.h>
29 #include <linux/times.h>
30 #include <linux/socket.h>
31 #include <linux/sockios.h>
32 #include <linux/net.h>
33 #include <linux/route.h>
34 #include <linux/netdevice.h>
35 #include <linux/in6.h>
36 #include <linux/mroute6.h>
37 #include <linux/init.h>
38 #include <linux/if_arp.h>
39 #include <linux/proc_fs.h>
40 #include <linux/seq_file.h>
41 #include <linux/nsproxy.h>
42 #include <linux/slab.h>
43 #include <linux/jhash.h>
44 #include <linux/siphash.h>
45 #include <net/net_namespace.h>
46 #include <net/snmp.h>
47 #include <net/ipv6.h>
48 #include <net/ip6_fib.h>
49 #include <net/ip6_route.h>
50 #include <net/ndisc.h>
51 #include <net/addrconf.h>
52 #include <net/tcp.h>
53 #include <linux/rtnetlink.h>
54 #include <net/dst.h>
55 #include <net/dst_metadata.h>
56 #include <net/xfrm.h>
57 #include <net/netevent.h>
58 #include <net/netlink.h>
59 #include <net/rtnh.h>
60 #include <net/lwtunnel.h>
61 #include <net/ip_tunnels.h>
62 #include <net/l3mdev.h>
63 #include <net/ip.h>
64 #include <linux/uaccess.h>
65 #include <linux/btf_ids.h>
66 
67 #ifdef CONFIG_SYSCTL
68 #include <linux/sysctl.h>
69 #endif
70 
71 static int ip6_rt_type_to_error(u8 fib6_type);
72 
73 #define CREATE_TRACE_POINTS
74 #include <trace/events/fib6.h>
75 EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
76 #undef CREATE_TRACE_POINTS
77 
78 enum rt6_nud_state {
79 	RT6_NUD_FAIL_HARD = -3,
80 	RT6_NUD_FAIL_PROBE = -2,
81 	RT6_NUD_FAIL_DO_RR = -1,
82 	RT6_NUD_SUCCEED = 1
83 };
84 
85 INDIRECT_CALLABLE_SCOPE
86 struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
87 static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
88 INDIRECT_CALLABLE_SCOPE
89 unsigned int		ip6_mtu(const struct dst_entry *dst);
90 static void		ip6_negative_advice(struct sock *sk,
91 					    struct dst_entry *dst);
92 static void		ip6_dst_destroy(struct dst_entry *);
93 static void		ip6_dst_ifdown(struct dst_entry *,
94 				       struct net_device *dev);
95 static void		 ip6_dst_gc(struct dst_ops *ops);
96 
97 static int		ip6_pkt_discard(struct sk_buff *skb);
98 static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
99 static int		ip6_pkt_prohibit(struct sk_buff *skb);
100 static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
101 static void		ip6_link_failure(struct sk_buff *skb);
102 static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
103 					   struct sk_buff *skb, u32 mtu,
104 					   bool confirm_neigh);
105 static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
106 					struct sk_buff *skb);
107 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
108 			   int strict);
109 static size_t rt6_nlmsg_size(struct fib6_info *f6i);
110 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
111 			 struct fib6_info *rt, struct dst_entry *dst,
112 			 struct in6_addr *dest, struct in6_addr *src,
113 			 int iif, int type, u32 portid, u32 seq,
114 			 unsigned int flags);
115 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
116 					   const struct in6_addr *daddr,
117 					   const struct in6_addr *saddr);
118 
119 #ifdef CONFIG_IPV6_ROUTE_INFO
120 static struct fib6_info *rt6_add_route_info(struct net *net,
121 					   const struct in6_addr *prefix, int prefixlen,
122 					   const struct in6_addr *gwaddr,
123 					   struct net_device *dev,
124 					   unsigned int pref);
125 static struct fib6_info *rt6_get_route_info(struct net *net,
126 					   const struct in6_addr *prefix, int prefixlen,
127 					   const struct in6_addr *gwaddr,
128 					   struct net_device *dev);
129 #endif
130 
131 struct uncached_list {
132 	spinlock_t		lock;
133 	struct list_head	head;
134 };
135 
136 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
137 
138 void rt6_uncached_list_add(struct rt6_info *rt)
139 {
140 	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
141 
142 	rt->dst.rt_uncached_list = ul;
143 
144 	spin_lock_bh(&ul->lock);
145 	list_add_tail(&rt->dst.rt_uncached, &ul->head);
146 	spin_unlock_bh(&ul->lock);
147 }
148 
149 void rt6_uncached_list_del(struct rt6_info *rt)
150 {
151 	struct uncached_list *ul = rt->dst.rt_uncached_list;
152 
153 	if (ul) {
154 		spin_lock_bh(&ul->lock);
155 		list_del_init(&rt->dst.rt_uncached);
156 		spin_unlock_bh(&ul->lock);
157 	}
158 }
159 
160 static void rt6_uncached_list_flush_dev(struct net_device *dev)
161 {
162 	int cpu;
163 
164 	for_each_possible_cpu(cpu) {
165 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
166 		struct rt6_info *rt, *safe;
167 
168 		if (list_empty(&ul->head))
169 			continue;
170 
171 		spin_lock_bh(&ul->lock);
172 		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
173 			struct inet6_dev *rt_idev = rt->rt6i_idev;
174 			struct net_device *rt_dev = rt->dst.dev;
175 			bool handled = false;
176 
177 			if (rt_idev && rt_idev->dev == dev) {
178 				rt->rt6i_idev = in6_dev_get(blackhole_netdev);
179 				in6_dev_put(rt_idev);
180 				handled = true;
181 			}
182 
183 			if (rt_dev == dev) {
184 				rt->dst.dev = blackhole_netdev;
185 				netdev_ref_replace(rt_dev, blackhole_netdev,
186 						   &rt->dst.dev_tracker,
187 						   GFP_ATOMIC);
188 				handled = true;
189 			}
190 			if (handled)
191 				list_del_init(&rt->dst.rt_uncached);
192 		}
193 		spin_unlock_bh(&ul->lock);
194 	}
195 }
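
/*
 * Note on rt6_uncached_list_flush_dev() above (a summary sketch): rather
 * than freeing dsts that still hold external references when their device
 * unregisters, both rt6i_idev and dst.dev are re-pointed at the
 * always-present blackhole_netdev, so device teardown does not have to
 * wait for every cached dst to be released.
 */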
196 
197 static inline const void *choose_neigh_daddr(const struct in6_addr *p,
198 					     struct sk_buff *skb,
199 					     const void *daddr)
200 {
201 	if (!ipv6_addr_any(p))
202 		return (const void *) p;
203 	else if (skb)
204 		return &ipv6_hdr(skb)->daddr;
205 	return daddr;
206 }
207 
208 struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
209 				   struct net_device *dev,
210 				   struct sk_buff *skb,
211 				   const void *daddr)
212 {
213 	struct neighbour *n;
214 
215 	daddr = choose_neigh_daddr(gw, skb, daddr);
216 	n = __ipv6_neigh_lookup(dev, daddr);
217 	if (n)
218 		return n;
219 
220 	n = neigh_create(&nd_tbl, daddr, dev);
221 	return IS_ERR(n) ? NULL : n;
222 }
223 
224 static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
225 					      struct sk_buff *skb,
226 					      const void *daddr)
227 {
228 	const struct rt6_info *rt = dst_rt6_info(dst);
229 
230 	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
231 				dst_dev(dst), skb, daddr);
232 }
233 
234 static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
235 {
236 	const struct rt6_info *rt = dst_rt6_info(dst);
237 	struct net_device *dev = dst_dev(dst);
238 
239 	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
240 	if (!daddr)
241 		return;
242 	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
243 		return;
244 	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
245 		return;
246 	__ipv6_confirm_neigh(dev, daddr);
247 }
248 
249 static struct dst_ops ip6_dst_ops_template = {
250 	.family			=	AF_INET6,
251 	.gc			=	ip6_dst_gc,
252 	.gc_thresh		=	1024,
253 	.check			=	ip6_dst_check,
254 	.default_advmss		=	ip6_default_advmss,
255 	.mtu			=	ip6_mtu,
256 	.cow_metrics		=	dst_cow_metrics_generic,
257 	.destroy		=	ip6_dst_destroy,
258 	.ifdown			=	ip6_dst_ifdown,
259 	.negative_advice	=	ip6_negative_advice,
260 	.link_failure		=	ip6_link_failure,
261 	.update_pmtu		=	ip6_rt_update_pmtu,
262 	.redirect		=	rt6_do_redirect,
263 	.local_out		=	__ip6_local_out,
264 	.neigh_lookup		=	ip6_dst_neigh_lookup,
265 	.confirm_neigh		=	ip6_confirm_neigh,
266 };
267 
268 static struct dst_ops ip6_dst_blackhole_ops = {
269 	.family			= AF_INET6,
270 	.default_advmss		= ip6_default_advmss,
271 	.neigh_lookup		= ip6_dst_neigh_lookup,
272 	.check			= ip6_dst_check,
273 	.destroy		= ip6_dst_destroy,
274 	.cow_metrics		= dst_cow_metrics_generic,
275 	.update_pmtu		= dst_blackhole_update_pmtu,
276 	.redirect		= dst_blackhole_redirect,
277 	.mtu			= dst_blackhole_mtu,
278 };
279 
280 static const u32 ip6_template_metrics[RTAX_MAX] = {
281 	[RTAX_HOPLIMIT - 1] = 0,
282 };
283 
284 static const struct fib6_info fib6_null_entry_template = {
285 	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
286 	.fib6_protocol  = RTPROT_KERNEL,
287 	.fib6_metric	= ~(u32)0,
288 	.fib6_ref	= REFCOUNT_INIT(1),
289 	.fib6_type	= RTN_UNREACHABLE,
290 	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
291 };
292 
293 static const struct rt6_info ip6_null_entry_template = {
294 	.dst = {
295 		.__rcuref	= RCUREF_INIT(1),
296 		.__use		= 1,
297 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
298 		.error		= -ENETUNREACH,
299 		.input		= ip6_pkt_discard,
300 		.output		= ip6_pkt_discard_out,
301 	},
302 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
303 };
304 
305 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
306 
307 static const struct rt6_info ip6_prohibit_entry_template = {
308 	.dst = {
309 		.__rcuref	= RCUREF_INIT(1),
310 		.__use		= 1,
311 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
312 		.error		= -EACCES,
313 		.input		= ip6_pkt_prohibit,
314 		.output		= ip6_pkt_prohibit_out,
315 	},
316 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
317 };
318 
319 static const struct rt6_info ip6_blk_hole_entry_template = {
320 	.dst = {
321 		.__rcuref	= RCUREF_INIT(1),
322 		.__use		= 1,
323 		.obsolete	= DST_OBSOLETE_FORCE_CHK,
324 		.error		= -EINVAL,
325 		.input		= dst_discard,
326 		.output		= dst_discard_out,
327 	},
328 	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
329 };
330 
331 #endif
332 
333 static void rt6_info_init(struct rt6_info *rt)
334 {
335 	memset_after(rt, 0, dst);
336 }
337 
338 /* allocate dst with ip6_dst_ops */
339 struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
340 			       int flags)
341 {
342 	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
343 					DST_OBSOLETE_FORCE_CHK, flags);
344 
345 	if (rt) {
346 		rt6_info_init(rt);
347 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
348 	}
349 
350 	return rt;
351 }
352 EXPORT_SYMBOL(ip6_dst_alloc);
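
/*
 * Usage sketch for ip6_dst_alloc() (illustrative only; "net" and "dev"
 * are assumed to be valid and held by the caller):
 *
 *	struct rt6_info *rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
 *
 *	if (!rt)
 *		return -ENOMEM;
 *	...
 *	dst_release(&rt->dst);
 */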
353 
354 static void ip6_dst_destroy(struct dst_entry *dst)
355 {
356 	struct rt6_info *rt = dst_rt6_info(dst);
357 	struct fib6_info *from;
358 	struct inet6_dev *idev;
359 
360 	ip_dst_metrics_put(dst);
361 	rt6_uncached_list_del(rt);
362 
363 	idev = rt->rt6i_idev;
364 	if (idev) {
365 		rt->rt6i_idev = NULL;
366 		in6_dev_put(idev);
367 	}
368 
369 	from = unrcu_pointer(xchg(&rt->from, NULL));
370 	fib6_info_release(from);
371 }
372 
373 static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
374 {
375 	struct rt6_info *rt = dst_rt6_info(dst);
376 	struct inet6_dev *idev = rt->rt6i_idev;
377 	struct fib6_info *from;
378 
379 	if (idev && idev->dev != blackhole_netdev) {
380 		struct inet6_dev *blackhole_idev = in6_dev_get(blackhole_netdev);
381 
382 		if (blackhole_idev) {
383 			rt->rt6i_idev = blackhole_idev;
384 			in6_dev_put(idev);
385 		}
386 	}
387 	from = unrcu_pointer(xchg(&rt->from, NULL));
388 	fib6_info_release(from);
389 }
390 
391 static bool __rt6_check_expired(const struct rt6_info *rt)
392 {
393 	if (rt->rt6i_flags & RTF_EXPIRES)
394 		return time_after(jiffies, READ_ONCE(rt->dst.expires));
395 	return false;
396 }
397 
398 static bool rt6_check_expired(const struct rt6_info *rt)
399 {
400 	struct fib6_info *from;
401 
402 	from = rcu_dereference(rt->from);
403 
404 	if (rt->rt6i_flags & RTF_EXPIRES) {
405 		if (time_after(jiffies, READ_ONCE(rt->dst.expires)))
406 			return true;
407 	} else if (from) {
408 		return READ_ONCE(rt->dst.obsolete) != DST_OBSOLETE_FORCE_CHK ||
409 			fib6_check_expired(from);
410 	}
411 	return false;
412 }
413 
414 static struct fib6_info *
415 rt6_multipath_first_sibling_rcu(const struct fib6_info *rt)
416 {
417 	struct fib6_info *iter;
418 	struct fib6_node *fn;
419 
420 	fn = rcu_dereference(rt->fib6_node);
421 	if (!fn)
422 		goto out;
423 	iter = rcu_dereference(fn->leaf);
424 	if (!iter)
425 		goto out;
426 
427 	while (iter) {
428 		if (iter->fib6_metric == rt->fib6_metric &&
429 		    rt6_qualify_for_ecmp(iter))
430 			return iter;
431 		iter = rcu_dereference(iter->fib6_next);
432 	}
433 
434 out:
435 	return NULL;
436 }
437 
438 void fib6_select_path(const struct net *net, struct fib6_result *res,
439 		      struct flowi6 *fl6, int oif, bool have_oif_match,
440 		      const struct sk_buff *skb, int strict)
441 {
442 	struct fib6_info *first, *match = res->f6i;
443 	struct fib6_info *sibling;
444 	int hash;
445 
446 	if (!match->nh && (!match->fib6_nsiblings || have_oif_match))
447 		goto out;
448 
449 	if (match->nh && have_oif_match && res->nh)
450 		return;
451 
452 	if (skb)
453 		IP6CB(skb)->flags |= IP6SKB_MULTIPATH;
454 
455 	/* We might have already computed the hash for ICMPv6 errors. In that
456 	 * case it will always be non-zero. Otherwise now is the time to do it.
457 	 */
458 	if (!fl6->mp_hash &&
459 	    (!match->nh || nexthop_is_multipath(match->nh)))
460 		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
461 
462 	if (unlikely(match->nh)) {
463 		nexthop_path_fib6_result(res, fl6->mp_hash);
464 		return;
465 	}
466 
467 	first = rt6_multipath_first_sibling_rcu(match);
468 	if (!first)
469 		goto out;
470 
471 	hash = fl6->mp_hash;
472 	if (hash <= atomic_read(&first->fib6_nh->fib_nh_upper_bound)) {
473 		if (rt6_score_route(first->fib6_nh, first->fib6_flags, oif,
474 				    strict) >= 0)
475 			match = first;
476 		goto out;
477 	}
478 
479 	list_for_each_entry_rcu(sibling, &first->fib6_siblings,
480 				fib6_siblings) {
481 		const struct fib6_nh *nh = sibling->fib6_nh;
482 		int nh_upper_bound;
483 
484 		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
485 		if (hash > nh_upper_bound)
486 			continue;
487 		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
488 			break;
489 		match = sibling;
490 		break;
491 	}
492 
493 out:
494 	res->f6i = match;
495 	res->nh = match->fib6_nh;
496 }
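
/*
 * Worked example for fib6_select_path() with hypothetical weights: three
 * siblings weighted 1/1/2 get cumulative upper bounds covering roughly
 * 25%/25%/50% of the hash space, e.g.
 *
 *	nh0: fib_nh_upper_bound ~= 0x1fffffff	(mp_hash <= bound -> nh0)
 *	nh1: fib_nh_upper_bound ~= 0x3fffffff
 *	nh2: fib_nh_upper_bound ~= 0x7fffffff
 *
 * The siblings are scanned in order and the first one whose bound covers
 * fl6->mp_hash wins; a candidate that rt6_score_route() scores negative
 * is rejected and the original match is kept.
 */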
497 
498 /*
499  *	Route lookup. rcu_read_lock() should be held.
500  */
501 
502 static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
503 			       const struct in6_addr *saddr, int oif, int flags)
504 {
505 	const struct net_device *dev;
506 
507 	if (nh->fib_nh_flags & RTNH_F_DEAD)
508 		return false;
509 
510 	dev = nh->fib_nh_dev;
511 	if (oif) {
512 		if (dev->ifindex == oif)
513 			return true;
514 	} else {
515 		if (ipv6_chk_addr(net, saddr, dev,
516 				  flags & RT6_LOOKUP_F_IFACE))
517 			return true;
518 	}
519 
520 	return false;
521 }
522 
523 struct fib6_nh_dm_arg {
524 	struct net		*net;
525 	const struct in6_addr	*saddr;
526 	int			oif;
527 	int			flags;
528 	struct fib6_nh		*nh;
529 };
530 
531 static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
532 {
533 	struct fib6_nh_dm_arg *arg = _arg;
534 
535 	arg->nh = nh;
536 	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
537 				  arg->flags);
538 }
539 
540 /* returns fib6_nh from nexthop or NULL */
541 static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
542 					struct fib6_result *res,
543 					const struct in6_addr *saddr,
544 					int oif, int flags)
545 {
546 	struct fib6_nh_dm_arg arg = {
547 		.net   = net,
548 		.saddr = saddr,
549 		.oif   = oif,
550 		.flags = flags,
551 	};
552 
553 	if (nexthop_is_blackhole(nh))
554 		return NULL;
555 
556 	if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
557 		return arg.nh;
558 
559 	return NULL;
560 }
561 
562 static void rt6_device_match(struct net *net, struct fib6_result *res,
563 			     const struct in6_addr *saddr, int oif, int flags)
564 {
565 	struct fib6_info *f6i = res->f6i;
566 	struct fib6_info *spf6i;
567 	struct fib6_nh *nh;
568 
569 	if (!oif && ipv6_addr_any(saddr)) {
570 		if (unlikely(f6i->nh)) {
571 			nh = nexthop_fib6_nh(f6i->nh);
572 			if (nexthop_is_blackhole(f6i->nh))
573 				goto out_blackhole;
574 		} else {
575 			nh = f6i->fib6_nh;
576 		}
577 		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
578 			goto out;
579 	}
580 
581 	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
582 		bool matched = false;
583 
584 		if (unlikely(spf6i->nh)) {
585 			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
586 					      oif, flags);
587 			if (nh)
588 				matched = true;
589 		} else {
590 			nh = spf6i->fib6_nh;
591 			if (__rt6_device_match(net, nh, saddr, oif, flags))
592 				matched = true;
593 		}
594 		if (matched) {
595 			res->f6i = spf6i;
596 			goto out;
597 		}
598 	}
599 
600 	if (oif && flags & RT6_LOOKUP_F_IFACE) {
601 		res->f6i = net->ipv6.fib6_null_entry;
602 		nh = res->f6i->fib6_nh;
603 		goto out;
604 	}
605 
606 	if (unlikely(f6i->nh)) {
607 		nh = nexthop_fib6_nh(f6i->nh);
608 		if (nexthop_is_blackhole(f6i->nh))
609 			goto out_blackhole;
610 	} else {
611 		nh = f6i->fib6_nh;
612 	}
613 
614 	if (nh->fib_nh_flags & RTNH_F_DEAD) {
615 		res->f6i = net->ipv6.fib6_null_entry;
616 		nh = res->f6i->fib6_nh;
617 	}
618 out:
619 	res->nh = nh;
620 	res->fib6_type = res->f6i->fib6_type;
621 	res->fib6_flags = res->f6i->fib6_flags;
622 	return;
623 
624 out_blackhole:
625 	res->fib6_flags |= RTF_REJECT;
626 	res->fib6_type = RTN_BLACKHOLE;
627 	res->nh = nh;
628 }
629 
630 #ifdef CONFIG_IPV6_ROUTER_PREF
631 struct __rt6_probe_work {
632 	struct work_struct work;
633 	struct in6_addr target;
634 	struct net_device *dev;
635 	netdevice_tracker dev_tracker;
636 };
637 
638 static void rt6_probe_deferred(struct work_struct *w)
639 {
640 	struct in6_addr mcaddr;
641 	struct __rt6_probe_work *work =
642 		container_of(w, struct __rt6_probe_work, work);
643 
644 	addrconf_addr_solict_mult(&work->target, &mcaddr);
645 	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
646 	netdev_put(work->dev, &work->dev_tracker);
647 	kfree(work);
648 }
649 
650 static void rt6_probe(struct fib6_nh *fib6_nh)
651 {
652 	struct __rt6_probe_work *work = NULL;
653 	const struct in6_addr *nh_gw;
654 	unsigned long last_probe;
655 	struct neighbour *neigh;
656 	struct net_device *dev;
657 	struct inet6_dev *idev;
658 
659 	/*
660 	 * Okay, this does not seem to be appropriate
661 	 * for now; however, we need to check whether it
662 	 * really is so, aka Router Reachability Probing.
663 	 *
664 	 * Router Reachability Probe MUST be rate-limited
665 	 * to no more than one per minute.
666 	 */
667 	if (!fib6_nh->fib_nh_gw_family)
668 		return;
669 
670 	nh_gw = &fib6_nh->fib_nh_gw6;
671 	dev = fib6_nh->fib_nh_dev;
672 	rcu_read_lock();
673 	last_probe = READ_ONCE(fib6_nh->last_probe);
674 	idev = __in6_dev_get(dev);
675 	if (!idev)
676 		goto out;
677 	neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
678 	if (neigh) {
679 		if (READ_ONCE(neigh->nud_state) & NUD_VALID)
680 			goto out;
681 
682 		write_lock_bh(&neigh->lock);
683 		if (!(neigh->nud_state & NUD_VALID) &&
684 		    time_after(jiffies,
685 			       neigh->updated +
686 			       READ_ONCE(idev->cnf.rtr_probe_interval))) {
687 			work = kmalloc_obj(*work, GFP_ATOMIC);
688 			if (work)
689 				__neigh_set_probe_once(neigh);
690 		}
691 		write_unlock_bh(&neigh->lock);
692 	} else if (time_after(jiffies, last_probe +
693 				       READ_ONCE(idev->cnf.rtr_probe_interval))) {
694 		work = kmalloc_obj(*work, GFP_ATOMIC);
695 	}
696 
697 	if (!work || cmpxchg(&fib6_nh->last_probe,
698 			     last_probe, jiffies) != last_probe) {
699 		kfree(work);
700 	} else {
701 		INIT_WORK(&work->work, rt6_probe_deferred);
702 		work->target = *nh_gw;
703 		netdev_hold(dev, &work->dev_tracker, GFP_ATOMIC);
704 		work->dev = dev;
705 		schedule_work(&work->work);
706 	}
707 
708 out:
709 	rcu_read_unlock();
710 }
711 #else
712 static inline void rt6_probe(struct fib6_nh *fib6_nh)
713 {
714 }
715 #endif
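
/*
 * The rate limiting in rt6_probe() hinges on cmpxchg(): when several CPUs
 * race past the interval check, only the CPU that successfully advances
 * fib6_nh->last_probe schedules the probe work; the losers see the
 * cmpxchg() fail and free their work item. The same pattern in miniature
 * (hypothetical "obj"):
 *
 *	old = READ_ONCE(obj->stamp);
 *	if (time_after(jiffies, old + interval) &&
 *	    cmpxchg(&obj->stamp, old, jiffies) == old)
 *		schedule_work(&obj->work);	// exactly one winner
 */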
716 
717 /*
718  * Default Router Selection (RFC 2461 6.3.6)
719  */
720 static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
721 {
722 	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
723 	struct neighbour *neigh;
724 
725 	rcu_read_lock();
726 	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
727 					  &fib6_nh->fib_nh_gw6);
728 	if (neigh) {
729 		u8 nud_state = READ_ONCE(neigh->nud_state);
730 
731 		if (nud_state & NUD_VALID)
732 			ret = RT6_NUD_SUCCEED;
733 #ifdef CONFIG_IPV6_ROUTER_PREF
734 		else if (!(nud_state & NUD_FAILED))
735 			ret = RT6_NUD_SUCCEED;
736 		else
737 			ret = RT6_NUD_FAIL_PROBE;
738 #endif
739 	} else {
740 		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
741 		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
742 	}
743 	rcu_read_unlock();
744 
745 	return ret;
746 }
747 
748 static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
749 			   int strict)
750 {
751 	int m = 0;
752 
753 	if (!oif || nh->fib_nh_dev->ifindex == oif)
754 		m = 2;
755 
756 	if (!m && (strict & RT6_LOOKUP_F_IFACE))
757 		return RT6_NUD_FAIL_HARD;
758 #ifdef CONFIG_IPV6_ROUTER_PREF
759 	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
760 #endif
761 	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
762 	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
763 		int n = rt6_check_neigh(nh);
764 		if (n < 0)
765 			return n;
766 	}
767 	return m;
768 }
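
/*
 * Scoring sketch for rt6_score_route(): an oif match contributes 2, and
 * with CONFIG_IPV6_ROUTER_PREF the decoded router preference is OR-ed in
 * shifted left by 2, i.e.
 *
 *	m = 2 | (IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2);
 *
 * A reachability failure from rt6_check_neigh() short-circuits this to a
 * negative RT6_NUD_* value so find_match() can skip the route or trigger
 * round-robin.
 */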
769 
770 static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
771 		       int oif, int strict, int *mpri, bool *do_rr)
772 {
773 	bool match_do_rr = false;
774 	bool rc = false;
775 	int m;
776 
777 	if (nh->fib_nh_flags & RTNH_F_DEAD)
778 		goto out;
779 
780 	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
781 	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
782 	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
783 		goto out;
784 
785 	m = rt6_score_route(nh, fib6_flags, oif, strict);
786 	if (m == RT6_NUD_FAIL_DO_RR) {
787 		match_do_rr = true;
788 		m = 0; /* lowest valid score */
789 	} else if (m == RT6_NUD_FAIL_HARD) {
790 		goto out;
791 	}
792 
793 	if (strict & RT6_LOOKUP_F_REACHABLE)
794 		rt6_probe(nh);
795 
796 	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
797 	if (m > *mpri) {
798 		*do_rr = match_do_rr;
799 		*mpri = m;
800 		rc = true;
801 	}
802 out:
803 	return rc;
804 }
805 
806 struct fib6_nh_frl_arg {
807 	u32		flags;
808 	int		oif;
809 	int		strict;
810 	int		*mpri;
811 	bool		*do_rr;
812 	struct fib6_nh	*nh;
813 };
814 
815 static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
816 {
817 	struct fib6_nh_frl_arg *arg = _arg;
818 
819 	arg->nh = nh;
820 	return find_match(nh, arg->flags, arg->oif, arg->strict,
821 			  arg->mpri, arg->do_rr);
822 }
823 
824 static void __find_rr_leaf(struct fib6_info *f6i_start,
825 			   struct fib6_info *nomatch, u32 metric,
826 			   struct fib6_result *res, struct fib6_info **cont,
827 			   int oif, int strict, bool *do_rr, int *mpri)
828 {
829 	struct fib6_info *f6i;
830 
831 	for (f6i = f6i_start;
832 	     f6i && f6i != nomatch;
833 	     f6i = rcu_dereference(f6i->fib6_next)) {
834 		bool matched = false;
835 		struct fib6_nh *nh;
836 
837 		if (cont && f6i->fib6_metric != metric) {
838 			*cont = f6i;
839 			return;
840 		}
841 
842 		if (fib6_check_expired(f6i))
843 			continue;
844 
845 		if (unlikely(f6i->nh)) {
846 			struct fib6_nh_frl_arg arg = {
847 				.flags  = f6i->fib6_flags,
848 				.oif    = oif,
849 				.strict = strict,
850 				.mpri   = mpri,
851 				.do_rr  = do_rr
852 			};
853 
854 			if (nexthop_is_blackhole(f6i->nh)) {
855 				res->fib6_flags = RTF_REJECT;
856 				res->fib6_type = RTN_BLACKHOLE;
857 				res->f6i = f6i;
858 				res->nh = nexthop_fib6_nh(f6i->nh);
859 				return;
860 			}
861 			if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
862 						     &arg)) {
863 				matched = true;
864 				nh = arg.nh;
865 			}
866 		} else {
867 			nh = f6i->fib6_nh;
868 			if (find_match(nh, f6i->fib6_flags, oif, strict,
869 				       mpri, do_rr))
870 				matched = true;
871 		}
872 		if (matched) {
873 			res->f6i = f6i;
874 			res->nh = nh;
875 			res->fib6_flags = f6i->fib6_flags;
876 			res->fib6_type = f6i->fib6_type;
877 		}
878 	}
879 }
880 
881 static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
882 			 struct fib6_info *rr_head, int oif, int strict,
883 			 bool *do_rr, struct fib6_result *res)
884 {
885 	u32 metric = rr_head->fib6_metric;
886 	struct fib6_info *cont = NULL;
887 	int mpri = -1;
888 
889 	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
890 		       oif, strict, do_rr, &mpri);
891 
892 	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
893 		       oif, strict, do_rr, &mpri);
894 
895 	if (res->f6i || !cont)
896 		return;
897 
898 	__find_rr_leaf(cont, NULL, metric, res, NULL,
899 		       oif, strict, do_rr, &mpri);
900 }
901 
902 static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
903 		       struct fib6_result *res, int strict)
904 {
905 	struct fib6_info *leaf = rcu_dereference(fn->leaf);
906 	struct fib6_info *rt0;
907 	bool do_rr = false;
908 	int key_plen;
909 
910 	/* make sure this function or its helpers set f6i */
911 	res->f6i = NULL;
912 
913 	if (!leaf || leaf == net->ipv6.fib6_null_entry)
914 		goto out;
915 
916 	rt0 = rcu_dereference(fn->rr_ptr);
917 	if (!rt0)
918 		rt0 = leaf;
919 
920 	/* Double check to make sure fn is not an intermediate node
921 	 * and fn->leaf does not point to its child's leaf
922 	 * (This might happen if all routes under fn are deleted from
923 	 * the tree and fib6_repair_tree() is called on the node.)
924 	 */
925 	key_plen = rt0->fib6_dst.plen;
926 #ifdef CONFIG_IPV6_SUBTREES
927 	if (rt0->fib6_src.plen)
928 		key_plen = rt0->fib6_src.plen;
929 #endif
930 	if (fn->fn_bit != key_plen)
931 		goto out;
932 
933 	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
934 	if (do_rr) {
935 		struct fib6_info *next = rcu_dereference(rt0->fib6_next);
936 
937 		/* no entries matched; do round-robin */
938 		if (!next || next->fib6_metric != rt0->fib6_metric)
939 			next = leaf;
940 
941 		if (next != rt0) {
942 			spin_lock_bh(&leaf->fib6_table->tb6_lock);
943 			/* make sure next is not being deleted from the tree */
944 			if (next->fib6_node)
945 				rcu_assign_pointer(fn->rr_ptr, next);
946 			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
947 		}
948 	}
949 
950 out:
951 	if (!res->f6i) {
952 		res->f6i = net->ipv6.fib6_null_entry;
953 		res->nh = res->f6i->fib6_nh;
954 		res->fib6_flags = res->f6i->fib6_flags;
955 		res->fib6_type = res->f6i->fib6_type;
956 	}
957 }
958 
959 static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
960 {
961 	return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
962 	       res->nh->fib_nh_gw_family;
963 }
964 
965 #ifdef CONFIG_IPV6_ROUTE_INFO
966 int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
967 		  const struct in6_addr *gwaddr)
968 {
969 	struct net *net = dev_net(dev);
970 	struct route_info *rinfo = (struct route_info *) opt;
971 	struct in6_addr prefix_buf, *prefix;
972 	struct fib6_table *table;
973 	unsigned int pref;
974 	unsigned long lifetime;
975 	struct fib6_info *rt;
976 
977 	if (len < sizeof(struct route_info)) {
978 		return -EINVAL;
979 	}
980 
981 	/* Sanity check for prefix_len and length */
982 	if (rinfo->length > 3) {
983 		return -EINVAL;
984 	} else if (rinfo->prefix_len > 128) {
985 		return -EINVAL;
986 	} else if (rinfo->prefix_len > 64) {
987 		if (rinfo->length < 2) {
988 			return -EINVAL;
989 		}
990 	} else if (rinfo->prefix_len > 0) {
991 		if (rinfo->length < 1) {
992 			return -EINVAL;
993 		}
994 	}
995 
996 	pref = rinfo->route_pref;
997 	if (pref == ICMPV6_ROUTER_PREF_INVALID)
998 		return -EINVAL;
999 
1000 	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);
1001 
1002 	if (rinfo->length == 3)
1003 		prefix = (struct in6_addr *)rinfo->prefix;
1004 	else {
1005 		/* this function is safe */
1006 		ipv6_addr_prefix(&prefix_buf,
1007 				 (struct in6_addr *)rinfo->prefix,
1008 				 rinfo->prefix_len);
1009 		prefix = &prefix_buf;
1010 	}
1011 
1012 	if (rinfo->prefix_len == 0)
1013 		rt = rt6_get_dflt_router(net, gwaddr, dev);
1014 	else
1015 		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
1016 					gwaddr, dev);
1017 
1018 	if (rt && !lifetime) {
1019 		ip6_del_rt(net, rt, false);
1020 		rt = NULL;
1021 	}
1022 
1023 	if (!rt && lifetime)
1024 		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
1025 					dev, pref);
1026 	else if (rt)
1027 		rt->fib6_flags = RTF_ROUTEINFO |
1028 				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);
1029 
1030 	if (rt) {
1031 		table = rt->fib6_table;
1032 		spin_lock_bh(&table->tb6_lock);
1033 
1034 		if (!addrconf_finite_timeout(lifetime)) {
1035 			fib6_clean_expires(rt);
1036 			fib6_may_remove_gc_list(net, rt);
1037 		} else {
1038 			fib6_set_expires(rt, jiffies + HZ * lifetime);
1039 			fib6_add_gc_list(rt);
1040 		}
1041 
1042 		spin_unlock_bh(&table->tb6_lock);
1043 
1044 		fib6_info_release(rt);
1045 	}
1046 	return 0;
1047 }
1048 #endif
1049 
1050 /*
1051  *	Misc support functions
1052  */
1053 
1054 /* called with rcu_lock held */
1055 static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
1056 {
1057 	struct net_device *dev = res->nh->fib_nh_dev;
1058 
1059 	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
1060 		/* for copies of local routes, dst->dev needs to be the
1061 		 * device if it is a master device, the master device if
1062 		 * device is enslaved, and the loopback as the default
1063 		 */
1064 		if (netif_is_l3_slave(dev) &&
1065 		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
1066 			dev = l3mdev_master_dev_rcu(dev) ? :
1067 			      dev_net(dev)->loopback_dev;
1068 		else if (!netif_is_l3_master(dev))
1069 			dev = dev_net(dev)->loopback_dev;
1070 		/* last case is netif_is_l3_master(dev) is true in which
1071 		 * case we want dev returned to be dev
1072 		 */
1073 	}
1074 
1075 	return dev;
1076 }
1077 
1078 static const int fib6_prop[RTN_MAX + 1] = {
1079 	[RTN_UNSPEC]	= 0,
1080 	[RTN_UNICAST]	= 0,
1081 	[RTN_LOCAL]	= 0,
1082 	[RTN_BROADCAST]	= 0,
1083 	[RTN_ANYCAST]	= 0,
1084 	[RTN_MULTICAST]	= 0,
1085 	[RTN_BLACKHOLE]	= -EINVAL,
1086 	[RTN_UNREACHABLE] = -EHOSTUNREACH,
1087 	[RTN_PROHIBIT]	= -EACCES,
1088 	[RTN_THROW]	= -EAGAIN,
1089 	[RTN_NAT]	= -EINVAL,
1090 	[RTN_XRESOLVE]	= -EINVAL,
1091 };
1092 
1093 static int ip6_rt_type_to_error(u8 fib6_type)
1094 {
1095 	return fib6_prop[fib6_type];
1096 }
1097 
1098 static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
1099 {
1100 	unsigned short flags = 0;
1101 
1102 	if (rt->dst_nocount)
1103 		flags |= DST_NOCOUNT;
1104 	if (rt->dst_nopolicy)
1105 		flags |= DST_NOPOLICY;
1106 
1107 	return flags;
1108 }
1109 
1110 static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
1111 {
1112 	rt->dst.error = ip6_rt_type_to_error(fib6_type);
1113 
1114 	switch (fib6_type) {
1115 	case RTN_BLACKHOLE:
1116 		rt->dst.output = dst_discard_out;
1117 		rt->dst.input = dst_discard;
1118 		break;
1119 	case RTN_PROHIBIT:
1120 		rt->dst.output = ip6_pkt_prohibit_out;
1121 		rt->dst.input = ip6_pkt_prohibit;
1122 		break;
1123 	case RTN_THROW:
1124 	case RTN_UNREACHABLE:
1125 	default:
1126 		rt->dst.output = ip6_pkt_discard_out;
1127 		rt->dst.input = ip6_pkt_discard;
1128 		break;
1129 	}
1130 }
1131 
1132 static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
1133 {
1134 	struct fib6_info *f6i = res->f6i;
1135 
1136 	if (res->fib6_flags & RTF_REJECT) {
1137 		ip6_rt_init_dst_reject(rt, res->fib6_type);
1138 		return;
1139 	}
1140 
1141 	rt->dst.error = 0;
1142 	rt->dst.output = ip6_output;
1143 
1144 	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
1145 		rt->dst.input = ip6_input;
1146 	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
1147 		rt->dst.input = ip6_mc_input;
1148 		rt->dst.output = ip6_mr_output;
1149 	} else {
1150 		rt->dst.input = ip6_forward;
1151 	}
1152 
1153 	if (res->nh->fib_nh_lws) {
1154 		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
1155 		lwtunnel_set_redirect(&rt->dst);
1156 	}
1157 
1158 	rt->dst.lastuse = jiffies;
1159 }
1160 
1161 /* Caller must already hold reference to @from */
1162 static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
1163 {
1164 	rt->rt6i_flags &= ~RTF_EXPIRES;
1165 	rcu_assign_pointer(rt->from, from);
1166 	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
1167 }
1168 
1169 /* Caller must already hold reference to f6i in result */
1170 static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
1171 {
1172 	const struct fib6_nh *nh = res->nh;
1173 	const struct net_device *dev = nh->fib_nh_dev;
1174 	struct fib6_info *f6i = res->f6i;
1175 
1176 	ip6_rt_init_dst(rt, res);
1177 
1178 	rt->rt6i_dst = f6i->fib6_dst;
1179 	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
1180 	rt->rt6i_flags = res->fib6_flags;
1181 	if (nh->fib_nh_gw_family) {
1182 		rt->rt6i_gateway = nh->fib_nh_gw6;
1183 		rt->rt6i_flags |= RTF_GATEWAY;
1184 	}
1185 	rt6_set_from(rt, f6i);
1186 #ifdef CONFIG_IPV6_SUBTREES
1187 	rt->rt6i_src = f6i->fib6_src;
1188 #endif
1189 }
1190 
1191 static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1192 					struct in6_addr *saddr)
1193 {
1194 	struct fib6_node *pn, *sn;
1195 	while (1) {
1196 		if (fn->fn_flags & RTN_TL_ROOT)
1197 			return NULL;
1198 		pn = rcu_dereference(fn->parent);
1199 		sn = FIB6_SUBTREE(pn);
1200 		if (sn && sn != fn)
1201 			fn = fib6_node_lookup(sn, NULL, saddr);
1202 		else
1203 			fn = pn;
1204 		if (fn->fn_flags & RTN_RTINFO)
1205 			return fn;
1206 	}
1207 }
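
/*
 * Backtracking sketch: fib6_backtrack() climbs from the node where the
 * longest-prefix walk ended toward the root; at each parent it re-runs
 * the lookup inside the parent's source-routed subtree (FIB6_SUBTREE)
 * when one exists, and stops at the first node that actually carries
 * routes (RTN_RTINFO) or returns NULL at the tree root (RTN_TL_ROOT).
 */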
1208 
1209 static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
1210 {
1211 	struct rt6_info *rt = *prt;
1212 
1213 	if (dst_hold_safe(&rt->dst))
1214 		return true;
1215 	if (net) {
1216 		rt = net->ipv6.ip6_null_entry;
1217 		dst_hold(&rt->dst);
1218 	} else {
1219 		rt = NULL;
1220 	}
1221 	*prt = rt;
1222 	return false;
1223 }
1224 
1225 /* called with rcu_lock held */
1226 static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
1227 {
1228 	struct net_device *dev = res->nh->fib_nh_dev;
1229 	struct fib6_info *f6i = res->f6i;
1230 	unsigned short flags;
1231 	struct rt6_info *nrt;
1232 
1233 	if (!fib6_info_hold_safe(f6i))
1234 		goto fallback;
1235 
1236 	flags = fib6_info_dst_flags(f6i);
1237 	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
1238 	if (!nrt) {
1239 		fib6_info_release(f6i);
1240 		goto fallback;
1241 	}
1242 
1243 	ip6_rt_copy_init(nrt, res);
1244 	return nrt;
1245 
1246 fallback:
1247 	nrt = dev_net(dev)->ipv6.ip6_null_entry;
1248 	dst_hold(&nrt->dst);
1249 	return nrt;
1250 }
1251 
1252 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_lookup(struct net *net,
1253 					     struct fib6_table *table,
1254 					     struct flowi6 *fl6,
1255 					     const struct sk_buff *skb,
1256 					     int flags)
1257 {
1258 	struct fib6_result res = {};
1259 	struct fib6_node *fn;
1260 	struct rt6_info *rt;
1261 
1262 	rcu_read_lock();
1263 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
1264 restart:
1265 	res.f6i = rcu_dereference(fn->leaf);
1266 	if (!res.f6i)
1267 		res.f6i = net->ipv6.fib6_null_entry;
1268 	else
1269 		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1270 				 flags);
1271 
1272 	if (res.f6i == net->ipv6.fib6_null_entry) {
1273 		fn = fib6_backtrack(fn, &fl6->saddr);
1274 		if (fn)
1275 			goto restart;
1276 
1277 		rt = net->ipv6.ip6_null_entry;
1278 		dst_hold(&rt->dst);
1279 		goto out;
1280 	} else if (res.fib6_flags & RTF_REJECT) {
1281 		goto do_create;
1282 	}
1283 
1284 	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1285 			 fl6->flowi6_oif != 0, skb, flags);
1286 
1287 	/* Search through exception table */
1288 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
1289 	if (rt) {
1290 		if (ip6_hold_safe(net, &rt))
1291 			dst_use_noref(&rt->dst, jiffies);
1292 	} else {
1293 do_create:
1294 		rt = ip6_create_rt_rcu(&res);
1295 	}
1296 
1297 out:
1298 	trace_fib6_table_lookup(net, &res, table, fl6);
1299 
1300 	rcu_read_unlock();
1301 
1302 	return rt;
1303 }
1304 
1305 struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
1306 				   const struct sk_buff *skb, int flags)
1307 {
1308 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
1309 }
1310 EXPORT_SYMBOL_GPL(ip6_route_lookup);
1311 
1312 struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
1313 			    const struct in6_addr *saddr, int oif,
1314 			    const struct sk_buff *skb, int strict)
1315 {
1316 	struct flowi6 fl6 = {
1317 		.flowi6_oif = oif,
1318 		.daddr = *daddr,
1319 	};
1320 	struct dst_entry *dst;
1321 	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
1322 
1323 	if (saddr) {
1324 		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
1325 		flags |= RT6_LOOKUP_F_HAS_SADDR;
1326 	}
1327 
1328 	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
1329 	if (dst->error == 0)
1330 		return dst_rt6_info(dst);
1331 
1332 	dst_release(dst);
1333 
1334 	return NULL;
1335 }
1336 EXPORT_SYMBOL(rt6_lookup);
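
/*
 * Usage sketch for rt6_lookup() (illustrative; "net" and "daddr" are
 * assumed valid):
 *
 *	struct rt6_info *rt;
 *
 *	rt = rt6_lookup(net, daddr, NULL, 0, NULL, 0);
 *	if (rt) {
 *		// ... use rt->dst ...
 *		ip6_rt_put(rt);
 *	}
 */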
1337 
1338 /* ip6_ins_rt is called with table->tb6_lock free (not held).
1339  * It takes a new route entry; if the addition fails for any reason,
1340  * the route is released.
1341  * Caller must hold dst before calling it.
1342  */
1343 
1344 static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
1345 			struct netlink_ext_ack *extack)
1346 {
1347 	int err;
1348 	struct fib6_table *table;
1349 
1350 	table = rt->fib6_table;
1351 	spin_lock_bh(&table->tb6_lock);
1352 	err = fib6_add(&table->tb6_root, rt, info, extack);
1353 	spin_unlock_bh(&table->tb6_lock);
1354 
1355 	return err;
1356 }
1357 
1358 int ip6_ins_rt(struct net *net, struct fib6_info *rt)
1359 {
1360 	struct nl_info info = {	.nl_net = net, };
1361 
1362 	return __ip6_ins_rt(rt, &info, NULL);
1363 }
1364 
1365 static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
1366 					   const struct in6_addr *daddr,
1367 					   const struct in6_addr *saddr)
1368 {
1369 	struct fib6_info *f6i = res->f6i;
1370 	struct net_device *dev;
1371 	struct rt6_info *rt;
1372 
1373 	/*
1374 	 *	Clone the route.
1375 	 */
1376 
1377 	if (!fib6_info_hold_safe(f6i))
1378 		return NULL;
1379 
1380 	dev = ip6_rt_get_dev_rcu(res);
1381 	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
1382 	if (!rt) {
1383 		fib6_info_release(f6i);
1384 		return NULL;
1385 	}
1386 
1387 	ip6_rt_copy_init(rt, res);
1388 	rt->rt6i_flags |= RTF_CACHE;
1389 	rt->rt6i_dst.addr = *daddr;
1390 	rt->rt6i_dst.plen = 128;
1391 
1392 	if (!rt6_is_gw_or_nonexthop(res)) {
1393 		if (f6i->fib6_dst.plen != 128 &&
1394 		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
1395 			rt->rt6i_flags |= RTF_ANYCAST;
1396 #ifdef CONFIG_IPV6_SUBTREES
1397 		if (rt->rt6i_src.plen && saddr) {
1398 			rt->rt6i_src.addr = *saddr;
1399 			rt->rt6i_src.plen = 128;
1400 		}
1401 #endif
1402 	}
1403 
1404 	return rt;
1405 }
1406 
1407 static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
1408 {
1409 	struct fib6_info *f6i = res->f6i;
1410 	unsigned short flags = fib6_info_dst_flags(f6i);
1411 	struct net_device *dev;
1412 	struct rt6_info *pcpu_rt;
1413 
1414 	if (!fib6_info_hold_safe(f6i))
1415 		return NULL;
1416 
1417 	rcu_read_lock();
1418 	dev = ip6_rt_get_dev_rcu(res);
1419 	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags | DST_NOCOUNT);
1420 	rcu_read_unlock();
1421 	if (!pcpu_rt) {
1422 		fib6_info_release(f6i);
1423 		return NULL;
1424 	}
1425 	ip6_rt_copy_init(pcpu_rt, res);
1426 	pcpu_rt->rt6i_flags |= RTF_PCPU;
1427 
1428 	if (f6i->nh)
1429 		pcpu_rt->sernum = rt_genid_ipv6(dev_net(dev));
1430 
1431 	return pcpu_rt;
1432 }
1433 
1434 static bool rt6_is_valid(const struct rt6_info *rt6)
1435 {
1436 	return rt6->sernum == rt_genid_ipv6(dev_net(rt6->dst.dev));
1437 }
1438 
1439 /* It should be called with rcu_read_lock() acquired */
1440 static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
1441 {
1442 	struct rt6_info *pcpu_rt;
1443 
1444 	pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
1445 
1446 	if (pcpu_rt && pcpu_rt->sernum && !rt6_is_valid(pcpu_rt)) {
1447 		struct rt6_info *prev, **p;
1448 
1449 		p = this_cpu_ptr(res->nh->rt6i_pcpu);
1450 		/* Paired with READ_ONCE() in __fib6_drop_pcpu_from() */
1451 		prev = xchg(p, NULL);
1452 		if (prev) {
1453 			dst_dev_put(&prev->dst);
1454 			dst_release(&prev->dst);
1455 		}
1456 
1457 		pcpu_rt = NULL;
1458 	}
1459 
1460 	return pcpu_rt;
1461 }
1462 
1463 static struct rt6_info *rt6_make_pcpu_route(struct net *net,
1464 					    const struct fib6_result *res)
1465 {
1466 	struct rt6_info *pcpu_rt, *prev, **p;
1467 
1468 	pcpu_rt = ip6_rt_pcpu_alloc(res);
1469 	if (!pcpu_rt)
1470 		return NULL;
1471 
1472 	p = this_cpu_ptr(res->nh->rt6i_pcpu);
1473 	prev = cmpxchg(p, NULL, pcpu_rt);
1474 	if (unlikely(prev)) {
1475 		/*
1476 		 * Another task on this CPU already installed a pcpu_rt.
1477 		 * This can happen on PREEMPT_RT where preemption is possible.
1478 		 * Free our allocation and return the existing one.
1479 		 */
1480 		WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT_RT));
1481 
1482 		dst_dev_put(&pcpu_rt->dst);
1483 		dst_release(&pcpu_rt->dst);
1484 		return prev;
1485 	}
1486 
1487 	if (res->f6i->fib6_destroying) {
1488 		struct fib6_info *from;
1489 
1490 		from = unrcu_pointer(xchg(&pcpu_rt->from, NULL));
1491 		fib6_info_release(from);
1492 	}
1493 
1494 	return pcpu_rt;
1495 }
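
/*
 * The per-cpu dst cache protocol in the two functions above, in short:
 * the reader (rt6_get_pcpu_route) only this_cpu_read()s its slot and
 * drops entries whose generation id no longer matches; the writer
 * installs with cmpxchg(p, NULL, pcpu_rt) so that a concurrent installer
 * on the same CPU (possible under PREEMPT_RT) is detected and the losing
 * allocation is freed instead of leaking.
 */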
1496 
1497 /* exception hash table implementation
1498  */
1499 static DEFINE_SPINLOCK(rt6_exception_lock);
1500 
1501 /* Remove rt6_ex from hash table and free the memory
1502  * Caller must hold rt6_exception_lock
1503  */
1504 static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
1505 				 struct rt6_exception *rt6_ex)
1506 {
1507 	struct net *net;
1508 
1509 	if (!bucket || !rt6_ex)
1510 		return;
1511 
1512 	net = dev_net(rt6_ex->rt6i->dst.dev);
1513 	net->ipv6.rt6_stats->fib_rt_cache--;
1514 
1515 	/* completely purge the exception so the held resources can be released:
1516 	 * some [sk] cache may keep the dst around for an unlimited time
1517 	 */
1518 	dst_dev_put(&rt6_ex->rt6i->dst);
1519 
1520 	hlist_del_rcu(&rt6_ex->hlist);
1521 	dst_release(&rt6_ex->rt6i->dst);
1522 	kfree_rcu(rt6_ex, rcu);
1523 	WARN_ON_ONCE(!bucket->depth);
1524 	bucket->depth--;
1525 }
1526 
1527 /* Remove oldest rt6_ex in bucket and free the memory
1528  * Caller must hold rt6_exception_lock
1529  */
1530 static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1531 {
1532 	struct rt6_exception *rt6_ex, *oldest = NULL;
1533 
1534 	if (!bucket)
1535 		return;
1536 
1537 	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1538 		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1539 			oldest = rt6_ex;
1540 	}
1541 	rt6_remove_exception(bucket, oldest);
1542 }
1543 
1544 static u32 rt6_exception_hash(const struct in6_addr *dst,
1545 			      const struct in6_addr *src)
1546 {
1547 	static siphash_aligned_key_t rt6_exception_key;
1548 	struct {
1549 		struct in6_addr dst;
1550 		struct in6_addr src;
1551 	} __aligned(SIPHASH_ALIGNMENT) combined = {
1552 		.dst = *dst,
1553 	};
1554 	u64 val;
1555 
1556 	net_get_random_once(&rt6_exception_key, sizeof(rt6_exception_key));
1557 
1558 #ifdef CONFIG_IPV6_SUBTREES
1559 	if (src)
1560 		combined.src = *src;
1561 #endif
1562 	val = siphash(&combined, sizeof(combined), &rt6_exception_key);
1563 
1564 	return hash_64(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1565 }
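
/*
 * Bucket selection sketch for rt6_exception_hash(): the (dst, src) pair
 * is run through a keyed siphash and folded down to a bucket index, so
 * an off-path attacker cannot predict which chain a flow lands in:
 *
 *	hval   = rt6_exception_hash(daddr, saddr);
 *	bucket = nh->rt6i_exception_bucket + hval;	// as in __rt6_find_exception_rcu()
 */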
1566 
1567 /* Helper function to find the cached rt in the hash table
1568  * and update bucket pointer to point to the bucket for this
1569  * (daddr, saddr) pair
1570  * Caller must hold rt6_exception_lock
1571  */
1572 static struct rt6_exception *
1573 __rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1574 			      const struct in6_addr *daddr,
1575 			      const struct in6_addr *saddr)
1576 {
1577 	struct rt6_exception *rt6_ex;
1578 	u32 hval;
1579 
1580 	if (!(*bucket) || !daddr)
1581 		return NULL;
1582 
1583 	hval = rt6_exception_hash(daddr, saddr);
1584 	*bucket += hval;
1585 
1586 	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1587 		struct rt6_info *rt6 = rt6_ex->rt6i;
1588 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1589 
1590 #ifdef CONFIG_IPV6_SUBTREES
1591 		if (matched && saddr)
1592 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1593 #endif
1594 		if (matched)
1595 			return rt6_ex;
1596 	}
1597 	return NULL;
1598 }
1599 
1600 /* Helper function to find the cached rt in the hash table
1601  * and update bucket pointer to point to the bucket for this
1602  * (daddr, saddr) pair
1603  * Caller must hold rcu_read_lock()
1604  */
1605 static struct rt6_exception *
1606 __rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1607 			 const struct in6_addr *daddr,
1608 			 const struct in6_addr *saddr)
1609 {
1610 	struct rt6_exception *rt6_ex;
1611 	u32 hval;
1612 
1613 	WARN_ON_ONCE(!rcu_read_lock_held());
1614 
1615 	if (!(*bucket) || !daddr)
1616 		return NULL;
1617 
1618 	hval = rt6_exception_hash(daddr, saddr);
1619 	*bucket += hval;
1620 
1621 	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1622 		struct rt6_info *rt6 = rt6_ex->rt6i;
1623 		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1624 
1625 #ifdef CONFIG_IPV6_SUBTREES
1626 		if (matched && saddr)
1627 			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1628 #endif
1629 		if (matched)
1630 			return rt6_ex;
1631 	}
1632 	return NULL;
1633 }
1634 
1635 static unsigned int fib6_mtu(const struct fib6_result *res)
1636 {
1637 	const struct fib6_nh *nh = res->nh;
1638 	unsigned int mtu;
1639 
1640 	if (res->f6i->fib6_pmtu) {
1641 		mtu = res->f6i->fib6_pmtu;
1642 	} else {
1643 		struct net_device *dev = nh->fib_nh_dev;
1644 		struct inet6_dev *idev;
1645 
1646 		rcu_read_lock();
1647 		idev = __in6_dev_get(dev);
1648 		mtu = READ_ONCE(idev->cnf.mtu6);
1649 		rcu_read_unlock();
1650 	}
1651 
1652 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1653 
1654 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
1655 }
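
/*
 * Worked example for fib6_mtu() (hypothetical numbers): with a device
 * mtu6 of 1500, no fib6_pmtu override, and an lwtunnel encap needing 8
 * bytes of headroom, the result is
 *
 *	min(1500, IP6_MAX_MTU) - 8 = 1492
 *
 * A nonzero fib6_pmtu replaces the device value before the clamp.
 */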
1656 
1657 #define FIB6_EXCEPTION_BUCKET_FLUSHED  0x1UL
1658 
1659 /* used when the flushed bit is not relevant, only access to the bucket
1660  * (ie., all bucket users except rt6_insert_exception);
1661  *
1662  * called under rcu lock; sometimes called with rt6_exception_lock held
1663  */
1664 static
1665 struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
1666 						       spinlock_t *lock)
1667 {
1668 	struct rt6_exception_bucket *bucket;
1669 
1670 	if (lock)
1671 		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1672 						   lockdep_is_held(lock));
1673 	else
1674 		bucket = rcu_dereference(nh->rt6i_exception_bucket);
1675 
1676 	/* remove bucket flushed bit if set */
1677 	if (bucket) {
1678 		unsigned long p = (unsigned long)bucket;
1679 
1680 		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
1681 		bucket = (struct rt6_exception_bucket *)p;
1682 	}
1683 
1684 	return bucket;
1685 }
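
/*
 * The "flushed" state rides in bit 0 of the rt6i_exception_bucket
 * pointer; kzalloc'ed buckets are at least word-aligned, so that bit is
 * otherwise always zero. The masking used by the helpers here:
 *
 *	p |=  FIB6_EXCEPTION_BUCKET_FLUSHED;	// mark bucket flushed
 *	p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;	// recover the real pointer
 */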
1686 
1687 static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1688 {
1689 	unsigned long p = (unsigned long)bucket;
1690 
1691 	return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1692 }
1693 
1694 /* called with rt6_exception_lock held */
1695 static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
1696 					      spinlock_t *lock)
1697 {
1698 	struct rt6_exception_bucket *bucket;
1699 	unsigned long p;
1700 
1701 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1702 					   lockdep_is_held(lock));
1703 
1704 	p = (unsigned long)bucket;
1705 	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
1706 	bucket = (struct rt6_exception_bucket *)p;
1707 	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1708 }
1709 
1710 static int rt6_insert_exception(struct rt6_info *nrt,
1711 				const struct fib6_result *res)
1712 {
1713 	struct net *net = dev_net(nrt->dst.dev);
1714 	struct rt6_exception_bucket *bucket;
1715 	struct fib6_info *f6i = res->f6i;
1716 	struct in6_addr *src_key = NULL;
1717 	struct rt6_exception *rt6_ex;
1718 	struct fib6_nh *nh = res->nh;
1719 	int max_depth;
1720 	int err = 0;
1721 
1722 	spin_lock_bh(&rt6_exception_lock);
1723 
1724 	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
1725 					  lockdep_is_held(&rt6_exception_lock));
1726 	if (!bucket) {
1727 		bucket = kzalloc_objs(*bucket, FIB6_EXCEPTION_BUCKET_SIZE,
1728 				      GFP_ATOMIC);
1729 		if (!bucket) {
1730 			err = -ENOMEM;
1731 			goto out;
1732 		}
1733 		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
1734 	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
1735 		err = -EINVAL;
1736 		goto out;
1737 	}
1738 
1739 #ifdef CONFIG_IPV6_SUBTREES
1740 	/* fib6_src.plen != 0 indicates f6i is in subtree
1741 	 * and exception table is indexed by a hash of
1742 	 * both fib6_dst and fib6_src.
1743 	 * Otherwise, the exception table is indexed by
1744 	 * a hash of only fib6_dst.
1745 	 */
1746 	if (f6i->fib6_src.plen)
1747 		src_key = &nrt->rt6i_src.addr;
1748 #endif
1749 	/* rt6_mtu_change() might lower mtu on f6i.
1750 	 * Only insert this exception route if its mtu
1751 	 * is less than f6i's mtu value.
1752 	 */
1753 	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
1754 		err = -EINVAL;
1755 		goto out;
1756 	}
1757 
1758 	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
1759 					       src_key);
1760 	if (rt6_ex)
1761 		rt6_remove_exception(bucket, rt6_ex);
1762 
1763 	rt6_ex = kzalloc_obj(*rt6_ex, GFP_ATOMIC);
1764 	if (!rt6_ex) {
1765 		err = -ENOMEM;
1766 		goto out;
1767 	}
1768 	rt6_ex->rt6i = nrt;
1769 	rt6_ex->stamp = jiffies;
1770 	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
1771 	bucket->depth++;
1772 	net->ipv6.rt6_stats->fib_rt_cache++;
1773 
1774 	/* Randomize max depth to avoid some side-channel attacks. */
1775 	max_depth = FIB6_MAX_DEPTH + get_random_u32_below(FIB6_MAX_DEPTH);
1776 	while (bucket->depth > max_depth)
1777 		rt6_exception_remove_oldest(bucket);
1778 
1779 out:
1780 	spin_unlock_bh(&rt6_exception_lock);
1781 
1782 	/* Update fn->fn_sernum to invalidate all cached dst */
1783 	if (!err) {
1784 		spin_lock_bh(&f6i->fib6_table->tb6_lock);
1785 		fib6_update_sernum(net, f6i);
1786 		fib6_add_gc_list(f6i);
1787 		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
1788 		fib6_force_start_gc(net);
1789 	}
1790 
1791 	return err;
1792 }
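
/*
 * Note on the eviction threshold above: allowing FIB6_MAX_DEPTH plus a
 * random 0..FIB6_MAX_DEPTH-1 extra entries means an attacker who can
 * force exception insertions cannot infer exact bucket occupancy from
 * when evictions begin, which is the side channel the comment refers to.
 */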
1793 
1794 static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
1795 {
1796 	struct rt6_exception_bucket *bucket;
1797 	struct rt6_exception *rt6_ex;
1798 	struct hlist_node *tmp;
1799 	int i;
1800 
1801 	spin_lock_bh(&rt6_exception_lock);
1802 
1803 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1804 	if (!bucket)
1805 		goto out;
1806 
1807 	/* Prevent rt6_insert_exception() from recreating the bucket list */
1808 	if (!from)
1809 		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);
1810 
1811 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1812 		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
1813 			if (!from ||
1814 			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
1815 				rt6_remove_exception(bucket, rt6_ex);
1816 		}
1817 		WARN_ON_ONCE(!from && bucket->depth);
1818 		bucket++;
1819 	}
1820 out:
1821 	spin_unlock_bh(&rt6_exception_lock);
1822 }
1823 
1824 static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
1825 {
1826 	struct fib6_info *f6i = arg;
1827 
1828 	fib6_nh_flush_exceptions(nh, f6i);
1829 
1830 	return 0;
1831 }
1832 
1833 void rt6_flush_exceptions(struct fib6_info *f6i)
1834 {
1835 	if (f6i->nh) {
1836 		rcu_read_lock();
1837 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions, f6i);
1838 		rcu_read_unlock();
1839 	} else {
1840 		fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
1841 	}
1842 }
1843 
1844 /* Find the cached rt in the hash table inside the passed-in rt.
1845  * The caller must hold rcu_read_lock().
1846  */
1847 static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
1848 					   const struct in6_addr *daddr,
1849 					   const struct in6_addr *saddr)
1850 {
1851 	const struct in6_addr *src_key = NULL;
1852 	struct rt6_exception_bucket *bucket;
1853 	struct rt6_exception *rt6_ex;
1854 	struct rt6_info *ret = NULL;
1855 
1856 #ifdef CONFIG_IPV6_SUBTREES
1857 	/* fib6_src.plen != 0 indicates f6i is in subtree
1858 	 * and exception table is indexed by a hash of
1859 	 * both fib6_dst and fib6_src.
1860 	 * However, the src addr used to create the hash
1861 	 * might not be exactly the passed in saddr which
1862 	 * is a /128 addr from the flow.
1863 	 * So we need to use f6i->fib6_src to redo lookup
1864 	 * if the passed in saddr does not find anything.
1865 	 * (See the logic in ip6_rt_cache_alloc() on how
1866 	 * rt->rt6i_src is updated.)
1867 	 */
1868 	if (res->f6i->fib6_src.plen)
1869 		src_key = saddr;
1870 find_ex:
1871 #endif
1872 	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
1873 	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1874 
1875 	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1876 		ret = rt6_ex->rt6i;
1877 
1878 #ifdef CONFIG_IPV6_SUBTREES
1879 	/* Use fib6_src as src_key and redo lookup */
1880 	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
1881 		src_key = &res->f6i->fib6_src.addr;
1882 		goto find_ex;
1883 	}
1884 #endif
1885 
1886 	return ret;
1887 }
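
/* A standalone sketch of the two-pass lookup above: the flow's /128
 * source address is tried as the key first, then the route's fib6_src
 * prefix address, mirroring the "goto find_ex" retry.  lookup() and
 * the string-keyed table are hypothetical stand-ins for
 * __rt6_find_exception_rcu() and the real hash buckets.
 */
#if 0
#include <stdio.h>
#include <string.h>

struct entry { const char *dst, *src; };

static struct entry table[] = {
	{ "2001:db8::1", "fd00::" },	/* stored under the fib6_src prefix */
};

static struct entry *lookup(const char *dst, const char *src)
{
	for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (!strcmp(table[i].dst, dst) && !strcmp(table[i].src, src))
			return &table[i];
	return NULL;
}

int main(void)
{
	struct entry *e = lookup("2001:db8::1", "fd00::42");	/* /128: miss */

	if (!e)					/* redo with fib6_src */
		e = lookup("2001:db8::1", "fd00::");
	printf("%s\n", e ? "hit on second pass" : "miss");
	return 0;
}
#endif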
1888 
1889 /* Remove the passed-in cached rt from the hash table that contains it */
1890 static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
1891 				    const struct rt6_info *rt)
1892 {
1893 	const struct in6_addr *src_key = NULL;
1894 	struct rt6_exception_bucket *bucket;
1895 	struct rt6_exception *rt6_ex;
1896 	int err;
1897 
1898 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
1899 		return -ENOENT;
1900 
1901 	spin_lock_bh(&rt6_exception_lock);
1902 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
1903 
1904 #ifdef CONFIG_IPV6_SUBTREES
1905 	/* rt6i_src.plen != 0 indicates 'from' is in subtree
1906 	 * and exception table is indexed by a hash of
1907 	 * both rt6i_dst and rt6i_src.
1908 	 * Otherwise, the exception table is indexed by
1909 	 * a hash of only rt6i_dst.
1910 	 */
1911 	if (plen)
1912 		src_key = &rt->rt6i_src.addr;
1913 #endif
1914 	rt6_ex = __rt6_find_exception_spinlock(&bucket,
1915 					       &rt->rt6i_dst.addr,
1916 					       src_key);
1917 	if (rt6_ex) {
1918 		rt6_remove_exception(bucket, rt6_ex);
1919 		err = 0;
1920 	} else {
1921 		err = -ENOENT;
1922 	}
1923 
1924 	spin_unlock_bh(&rt6_exception_lock);
1925 	return err;
1926 }
1927 
1928 struct fib6_nh_excptn_arg {
1929 	struct rt6_info	*rt;
1930 	int		plen;
1931 };
1932 
1933 static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1934 {
1935 	struct fib6_nh_excptn_arg *arg = _arg;
1936 	int err;
1937 
1938 	err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1939 	if (err == 0)
1940 		return 1;
1941 
1942 	return 0;
1943 }
1944 
1945 static int rt6_remove_exception_rt(struct rt6_info *rt)
1946 {
1947 	struct fib6_info *from;
1948 
1949 	from = rcu_dereference(rt->from);
1950 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
1951 		return -EINVAL;
1952 
1953 	if (from->nh) {
1954 		struct fib6_nh_excptn_arg arg = {
1955 			.rt = rt,
1956 			.plen = from->fib6_src.plen
1957 		};
1958 		int rc;
1959 
1960 		/* rc = 1 means an entry was found */
1961 		rc = nexthop_for_each_fib6_nh(from->nh,
1962 					      rt6_nh_remove_exception_rt,
1963 					      &arg);
1964 		return rc ? 0 : -ENOENT;
1965 	}
1966 
1967 	return fib6_nh_remove_exception(from->fib6_nh,
1968 					from->fib6_src.plen, rt);
1969 }
1970 
1971 /* Find rt6_ex which contains the passed in rt cache and
1972  * refresh its stamp
1973  */
1974 static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
1975 				     const struct rt6_info *rt)
1976 {
1977 	const struct in6_addr *src_key = NULL;
1978 	struct rt6_exception_bucket *bucket;
1979 	struct rt6_exception *rt6_ex;
1980 
1981 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
1982 #ifdef CONFIG_IPV6_SUBTREES
1983 	/* rt6i_src.plen != 0 indicates 'from' is in subtree
1984 	 * and exception table is indexed by a hash of
1985 	 * both rt6i_dst and rt6i_src.
1986 	 * Otherwise, the exception table is indexed by
1987 	 * a hash of only rt6i_dst.
1988 	 */
1989 	if (plen)
1990 		src_key = &rt->rt6i_src.addr;
1991 #endif
1992 	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
1993 	if (rt6_ex)
1994 		rt6_ex->stamp = jiffies;
1995 }
1996 
1997 struct fib6_nh_match_arg {
1998 	const struct net_device *dev;
1999 	const struct in6_addr	*gw;
2000 	struct fib6_nh		*match;
2001 };
2002 
2003 /* determine if fib6_nh has the given device and gateway */
2004 static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
2005 {
2006 	struct fib6_nh_match_arg *arg = _arg;
2007 
2008 	if (arg->dev != nh->fib_nh_dev ||
2009 	    (arg->gw && !nh->fib_nh_gw_family) ||
2010 	    (!arg->gw && nh->fib_nh_gw_family) ||
2011 	    (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
2012 		return 0;
2013 
2014 	arg->match = nh;
2015 
2016 	/* found a match, break the loop */
2017 	return 1;
2018 }
2019 
2020 static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
2021 {
2022 	struct fib6_info *from;
2023 	struct fib6_nh *fib6_nh;
2024 
2025 	rcu_read_lock();
2026 
2027 	from = rcu_dereference(rt->from);
2028 	if (!from || !(rt->rt6i_flags & RTF_CACHE))
2029 		goto unlock;
2030 
2031 	if (from->nh) {
2032 		struct fib6_nh_match_arg arg = {
2033 			.dev = rt->dst.dev,
2034 			.gw = &rt->rt6i_gateway,
2035 		};
2036 
2037 		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);
2038 
2039 		if (!arg.match)
2040 			goto unlock;
2041 		fib6_nh = arg.match;
2042 	} else {
2043 		fib6_nh = from->fib6_nh;
2044 	}
2045 	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
2046 unlock:
2047 	rcu_read_unlock();
2048 }
2049 
2050 static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
2051 					 struct rt6_info *rt, int mtu)
2052 {
2053 	u32 dmtu = dst6_mtu(&rt->dst);
2054 
2055 	/* If the new MTU is lower than the route PMTU, this new MTU will be the
2056 	 * lowest MTU in the path: always allow updating the route PMTU to
2057 	 * reflect PMTU decreases.
2058 	 *
2059 	 * If the new MTU is higher, and the route PMTU is equal to the local
2060 	 * MTU, this means the old MTU is the lowest in the path, so allow
2061 	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
2062 	 * handle this.
2063 	 */
2064 
2065 	if (dmtu >= mtu)
2066 		return true;
2067 
2068 	if (dmtu == idev->cnf.mtu6)
2069 		return true;
2070 
2071 	return false;
2072 }
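
/* A worked userspace example of the predicate above, assuming a device
 * MTU (idev->cnf.mtu6) of 1500: any decrease is allowed, and an
 * increase is allowed only when the route PMTU equals the local MTU,
 * i.e. when this link itself was the path bottleneck.  Illustrative
 * only.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

static bool change_allowed(unsigned int route_pmtu, unsigned int dev_mtu,
			   unsigned int new_mtu)
{
	return route_pmtu >= new_mtu || route_pmtu == dev_mtu;
}

int main(void)
{
	printf("%d\n", change_allowed(1400, 1500, 1500));	/* 0: refused */
	printf("%d\n", change_allowed(1500, 1500, 9000));	/* 1: we were the bottleneck */
	printf("%d\n", change_allowed(1500, 1500, 1280));	/* 1: decreases always pass */
	return 0;
}
#endif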
2073 
2074 static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
2075 				       const struct fib6_nh *nh, int mtu)
2076 {
2077 	struct rt6_exception_bucket *bucket;
2078 	struct rt6_exception *rt6_ex;
2079 	int i;
2080 
2081 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2082 	if (!bucket)
2083 		return;
2084 
2085 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2086 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
2087 			struct rt6_info *entry = rt6_ex->rt6i;
2088 
2089 			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
2090 			 * route), the metrics of its rt->from have already
2091 			 * been updated.
2092 			 */
2093 			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
2094 			    rt6_mtu_change_route_allowed(idev, entry, mtu))
2095 				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
2096 		}
2097 		bucket++;
2098 	}
2099 }
2100 
2101 #define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)
2102 
2103 static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
2104 					    const struct in6_addr *gateway)
2105 {
2106 	struct rt6_exception_bucket *bucket;
2107 	struct rt6_exception *rt6_ex;
2108 	struct hlist_node *tmp;
2109 	int i;
2110 
2111 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2112 		return;
2113 
2114 	spin_lock_bh(&rt6_exception_lock);
2115 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2116 	if (bucket) {
2117 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2118 			hlist_for_each_entry_safe(rt6_ex, tmp,
2119 						  &bucket->chain, hlist) {
2120 				struct rt6_info *entry = rt6_ex->rt6i;
2121 
2122 				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
2123 				    RTF_CACHE_GATEWAY &&
2124 				    ipv6_addr_equal(gateway,
2125 						    &entry->rt6i_gateway)) {
2126 					rt6_remove_exception(bucket, rt6_ex);
2127 				}
2128 			}
2129 			bucket++;
2130 		}
2131 	}
2132 
2133 	spin_unlock_bh(&rt6_exception_lock);
2134 }
2135 
2136 static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
2137 				      struct rt6_exception *rt6_ex,
2138 				      struct fib6_gc_args *gc_args,
2139 				      unsigned long now)
2140 {
2141 	struct rt6_info *rt = rt6_ex->rt6i;
2142 
2143 	/* We prune and obsolete aged-out and non-gateway exceptions even
2144 	 * if others still hold references to them, so that such references
2145 	 * can be dropped on the next dst_check().
2146 	 * RTF_EXPIRES exceptions - e.g. pmtu-generated ones - are pruned
2147 	 * when expired, independently of their aging, as per RFC 8201 section 4.
2148 	 */
2149 	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
2150 		if (time_after_eq(now, READ_ONCE(rt->dst.lastuse) +
2151 				       gc_args->timeout)) {
2152 			pr_debug("aging clone %p\n", rt);
2153 			rt6_remove_exception(bucket, rt6_ex);
2154 			return;
2155 		}
2156 	} else if (time_after(jiffies, READ_ONCE(rt->dst.expires))) {
2157 		pr_debug("purging expired route %p\n", rt);
2158 		rt6_remove_exception(bucket, rt6_ex);
2159 		return;
2160 	}
2161 
2162 	if (rt->rt6i_flags & RTF_GATEWAY) {
2163 		struct neighbour *neigh;
2164 
2165 		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
2166 
2167 		if (!(neigh && (neigh->flags & NTF_ROUTER))) {
2168 			pr_debug("purging route %p via non-router but gateway\n",
2169 				 rt);
2170 			rt6_remove_exception(bucket, rt6_ex);
2171 			return;
2172 		}
2173 	}
2174 
2175 	gc_args->more++;
2176 }
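
/* time_after()/time_after_eq() above stay correct across jiffies
 * wraparound because they compare via signed subtraction rather than
 * raw magnitude.  A standalone sketch, assuming 32-bit tick counters:
 */
#if 0
#include <stdio.h>

static int ticks_after(unsigned int a, unsigned int b)	/* is a after b? */
{
	return (int)(b - a) < 0;
}

int main(void)
{
	unsigned int before_wrap = 0xfffffff0u;
	unsigned int after_wrap = 0x10u;	/* counter wrapped past zero */

	printf("%d\n", ticks_after(after_wrap, before_wrap));	/* 1: correct */
	printf("%d\n", after_wrap > before_wrap);		/* 0: naive compare fails */
	return 0;
}
#endif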
2177 
2178 static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
2179 				   struct fib6_gc_args *gc_args,
2180 				   unsigned long now)
2181 {
2182 	struct rt6_exception_bucket *bucket;
2183 	struct rt6_exception *rt6_ex;
2184 	struct hlist_node *tmp;
2185 	int i;
2186 
2187 	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
2188 		return;
2189 
2190 	rcu_read_lock_bh();
2191 	spin_lock(&rt6_exception_lock);
2192 	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
2193 	if (bucket) {
2194 		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
2195 			hlist_for_each_entry_safe(rt6_ex, tmp,
2196 						  &bucket->chain, hlist) {
2197 				rt6_age_examine_exception(bucket, rt6_ex,
2198 							  gc_args, now);
2199 			}
2200 			bucket++;
2201 		}
2202 	}
2203 	spin_unlock(&rt6_exception_lock);
2204 	rcu_read_unlock_bh();
2205 }
2206 
2207 struct fib6_nh_age_excptn_arg {
2208 	struct fib6_gc_args	*gc_args;
2209 	unsigned long		now;
2210 };
2211 
2212 static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2213 {
2214 	struct fib6_nh_age_excptn_arg *arg = _arg;
2215 
2216 	fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2217 	return 0;
2218 }
2219 
2220 void rt6_age_exceptions(struct fib6_info *f6i,
2221 			struct fib6_gc_args *gc_args,
2222 			unsigned long now)
2223 {
2224 	if (f6i->nh) {
2225 		struct fib6_nh_age_excptn_arg arg = {
2226 			.gc_args = gc_args,
2227 			.now = now
2228 		};
2229 
2230 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
2231 					 &arg);
2232 	} else {
2233 		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
2234 	}
2235 }
2236 
2237 /* must be called with rcu lock held */
2238 int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
2239 		      struct flowi6 *fl6, struct fib6_result *res, int strict)
2240 {
2241 	struct fib6_node *fn, *saved_fn;
2242 
2243 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
2244 	saved_fn = fn;
2245 
2246 redo_rt6_select:
2247 	rt6_select(net, fn, oif, res, strict);
2248 	if (res->f6i == net->ipv6.fib6_null_entry) {
2249 		fn = fib6_backtrack(fn, &fl6->saddr);
2250 		if (fn)
2251 			goto redo_rt6_select;
2252 		else if (strict & RT6_LOOKUP_F_REACHABLE) {
2253 			/* also consider unreachable route */
2254 			strict &= ~RT6_LOOKUP_F_REACHABLE;
2255 			fn = saved_fn;
2256 			goto redo_rt6_select;
2257 		}
2258 	}
2259 
2260 	trace_fib6_table_lookup(net, res, table, fl6);
2261 
2262 	return 0;
2263 }
2264 
2265 struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
2266 			       int oif, struct flowi6 *fl6,
2267 			       const struct sk_buff *skb, int flags)
2268 {
2269 	struct fib6_result res = {};
2270 	struct rt6_info *rt = NULL;
2271 	int strict = 0;
2272 
2273 	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
2274 		     !rcu_read_lock_held());
2275 
2276 	strict |= flags & RT6_LOOKUP_F_IFACE;
2277 	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
2278 	if (READ_ONCE(net->ipv6.devconf_all->forwarding) == 0)
2279 		strict |= RT6_LOOKUP_F_REACHABLE;
2280 
2281 	rcu_read_lock();
2282 
2283 	fib6_table_lookup(net, table, oif, fl6, &res, strict);
2284 	if (res.f6i == net->ipv6.fib6_null_entry)
2285 		goto out;
2286 
2287 	fib6_select_path(net, &res, fl6, oif, false, skb, strict);
2288 
2289 	/* Search through exception table */
2290 	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
2291 	if (rt) {
2292 		goto out;
2293 	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
2294 			    !res.nh->fib_nh_gw_family)) {
2295 		/* Create a RTF_CACHE clone which will not be
2296 		 * owned by the fib6 tree.  It is for the special case where
2297 		 * the daddr in the skb during the neighbor look-up is different
2298 		 * from the fl6->daddr used to look up the route here.
2299 		 */
2300 		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);
2301 
2302 		if (rt) {
2303 			/* 1 refcnt is taken during ip6_rt_cache_alloc().
2304 			 * As rt6_uncached_list_add() does not consume refcnt,
2305 			 * this refcnt is always returned to the caller even
2306 			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
2307 			 */
2308 			rt6_uncached_list_add(rt);
2309 			rcu_read_unlock();
2310 
2311 			return rt;
2312 		}
2313 	} else {
2314 		/* Get a percpu copy */
2315 		local_bh_disable();
2316 		rt = rt6_get_pcpu_route(&res);
2317 
2318 		if (!rt)
2319 			rt = rt6_make_pcpu_route(net, &res);
2320 
2321 		local_bh_enable();
2322 	}
2323 out:
2324 	if (!rt)
2325 		rt = net->ipv6.ip6_null_entry;
2326 	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
2327 		ip6_hold_safe(net, &rt);
2328 	rcu_read_unlock();
2329 
2330 	return rt;
2331 }
2332 EXPORT_SYMBOL_GPL(ip6_pol_route);
2333 
2334 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_input(struct net *net,
2335 					    struct fib6_table *table,
2336 					    struct flowi6 *fl6,
2337 					    const struct sk_buff *skb,
2338 					    int flags)
2339 {
2340 	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
2341 }
2342 
2343 struct dst_entry *ip6_route_input_lookup(struct net *net,
2344 					 struct net_device *dev,
2345 					 struct flowi6 *fl6,
2346 					 const struct sk_buff *skb,
2347 					 int flags)
2348 {
2349 	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2350 		flags |= RT6_LOOKUP_F_IFACE;
2351 
2352 	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
2353 }
2354 EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
2355 
2356 static void ip6_multipath_l3_keys(const struct sk_buff *skb,
2357 				  struct flow_keys *keys,
2358 				  struct flow_keys *flkeys)
2359 {
2360 	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
2361 	const struct ipv6hdr *key_iph = outer_iph;
2362 	struct flow_keys *_flkeys = flkeys;
2363 	const struct ipv6hdr *inner_iph;
2364 	const struct icmp6hdr *icmph;
2365 	struct ipv6hdr _inner_iph;
2366 	struct icmp6hdr _icmph;
2367 
2368 	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
2369 		goto out;
2370 
2371 	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
2372 				   sizeof(_icmph), &_icmph);
2373 	if (!icmph)
2374 		goto out;
2375 
2376 	if (!icmpv6_is_err(icmph->icmp6_type))
2377 		goto out;
2378 
2379 	inner_iph = skb_header_pointer(skb,
2380 				       skb_transport_offset(skb) + sizeof(*icmph),
2381 				       sizeof(_inner_iph), &_inner_iph);
2382 	if (!inner_iph)
2383 		goto out;
2384 
2385 	key_iph = inner_iph;
2386 	_flkeys = NULL;
2387 out:
2388 	if (_flkeys) {
2389 		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
2390 		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
2391 		keys->tags.flow_label = _flkeys->tags.flow_label;
2392 		keys->basic.ip_proto = _flkeys->basic.ip_proto;
2393 	} else {
2394 		keys->addrs.v6addrs.src = key_iph->saddr;
2395 		keys->addrs.v6addrs.dst = key_iph->daddr;
2396 		keys->tags.flow_label = ip6_flowlabel(key_iph);
2397 		keys->basic.ip_proto = key_iph->nexthdr;
2398 	}
2399 }
2400 
2401 static u32 rt6_multipath_custom_hash_outer(const struct net *net,
2402 					   const struct sk_buff *skb,
2403 					   bool *p_has_inner)
2404 {
2405 	u32 hash_fields = ip6_multipath_hash_fields(net);
2406 	struct flow_keys keys, hash_keys;
2407 
2408 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2409 		return 0;
2410 
2411 	memset(&hash_keys, 0, sizeof(hash_keys));
2412 	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);
2413 
2414 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2415 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2416 		hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2417 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2418 		hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2419 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2420 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
2421 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2422 		hash_keys.tags.flow_label = keys.tags.flow_label;
2423 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
2424 		hash_keys.ports.src = keys.ports.src;
2425 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2426 		hash_keys.ports.dst = keys.ports.dst;
2427 
2428 	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
2429 	return fib_multipath_hash_from_keys(net, &hash_keys);
2430 }
2431 
2432 static u32 rt6_multipath_custom_hash_inner(const struct net *net,
2433 					   const struct sk_buff *skb,
2434 					   bool has_inner)
2435 {
2436 	u32 hash_fields = ip6_multipath_hash_fields(net);
2437 	struct flow_keys keys, hash_keys;
2438 
2439 	/* We assume the packet carries an encapsulation, but if none was
2440 	 * encountered during dissection of the outer flow, then there is no
2441 	 * point in calling the flow dissector again.
2442 	 */
2443 	if (!has_inner)
2444 		return 0;
2445 
2446 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
2447 		return 0;
2448 
2449 	memset(&hash_keys, 0, sizeof(hash_keys));
2450 	skb_flow_dissect_flow_keys(skb, &keys, 0);
2451 
2452 	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
2453 		return 0;
2454 
2455 	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2456 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2457 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2458 			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
2459 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2460 			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
2461 	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2462 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2463 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
2464 			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
2465 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
2466 			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
2467 		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
2468 			hash_keys.tags.flow_label = keys.tags.flow_label;
2469 	}
2470 
2471 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
2472 		hash_keys.basic.ip_proto = keys.basic.ip_proto;
2473 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
2474 		hash_keys.ports.src = keys.ports.src;
2475 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
2476 		hash_keys.ports.dst = keys.ports.dst;
2477 
2478 	return fib_multipath_hash_from_keys(net, &hash_keys);
2479 }
2480 
2481 static u32 rt6_multipath_custom_hash_skb(const struct net *net,
2482 					 const struct sk_buff *skb)
2483 {
2484 	u32 mhash, mhash_inner;
2485 	bool has_inner = true;
2486 
2487 	mhash = rt6_multipath_custom_hash_outer(net, skb, &has_inner);
2488 	mhash_inner = rt6_multipath_custom_hash_inner(net, skb, has_inner);
2489 
2490 	return jhash_2words(mhash, mhash_inner, 0);
2491 }
2492 
2493 static u32 rt6_multipath_custom_hash_fl6(const struct net *net,
2494 					 const struct flowi6 *fl6)
2495 {
2496 	u32 hash_fields = ip6_multipath_hash_fields(net);
2497 	struct flow_keys hash_keys;
2498 
2499 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
2500 		return 0;
2501 
2502 	memset(&hash_keys, 0, sizeof(hash_keys));
2503 	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2504 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
2505 		hash_keys.addrs.v6addrs.src = fl6->saddr;
2506 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
2507 		hash_keys.addrs.v6addrs.dst = fl6->daddr;
2508 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
2509 		hash_keys.basic.ip_proto = fl6->flowi6_proto;
2510 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_FLOWLABEL)
2511 		hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2512 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT) {
2513 		if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2514 			hash_keys.ports.src = (__force __be16)get_random_u16();
2515 		else
2516 			hash_keys.ports.src = fl6->fl6_sport;
2517 	}
2518 	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
2519 		hash_keys.ports.dst = fl6->fl6_dport;
2520 
2521 	return fib_multipath_hash_from_keys(net, &hash_keys);
2522 }
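
/* A sketch of how the FIB_MULTIPATH_HASH_FIELD_* bits above gate which
 * flow fields feed the hash.  The mask comes from the
 * net.ipv6.fib_multipath_hash_fields sysctl when the hash policy is 3;
 * the bit values and the XOR "hash" below are stand-ins, not the real
 * flow-keys machinery.
 */
#if 0
#include <stdio.h>

#define HASH_SRC_IP	0x01	/* stand-ins for FIB_MULTIPATH_HASH_FIELD_* */
#define HASH_DST_IP	0x02
#define HASH_SRC_PORT	0x10

int main(void)
{
	unsigned int fields = HASH_SRC_IP | HASH_DST_IP;
	unsigned int key = 0;

	if (fields & HASH_SRC_IP)
		key ^= 0x20010db8;	/* pretend source address word */
	if (fields & HASH_DST_IP)
		key ^= 0x20010dc8;	/* pretend destination address word */
	if (fields & HASH_SRC_PORT)
		key ^= 443;		/* skipped: bit not set in fields */

	printf("key=%#x\n", key);
	return 0;
}
#endif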
2523 
2524 /* if skb is set it will be used and fl6 can be NULL */
2525 u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
2526 		       const struct sk_buff *skb, struct flow_keys *flkeys)
2527 {
2528 	struct flow_keys hash_keys;
2529 	u32 mhash = 0;
2530 
2531 	switch (ip6_multipath_hash_policy(net)) {
2532 	case 0:
2533 		memset(&hash_keys, 0, sizeof(hash_keys));
2534 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2535 		if (skb) {
2536 			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2537 		} else {
2538 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2539 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2540 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2541 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2542 		}
2543 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2544 		break;
2545 	case 1:
2546 		if (skb) {
2547 			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2548 			struct flow_keys keys;
2549 
2550 			/* short-circuit if we already have L4 hash present */
2551 			if (skb->l4_hash)
2552 				return skb_get_hash_raw(skb) >> 1;
2553 
2554 			memset(&hash_keys, 0, sizeof(hash_keys));
2555 
2556 			if (!flkeys) {
2557 				skb_flow_dissect_flow_keys(skb, &keys, flag);
2558 				flkeys = &keys;
2559 			}
2560 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2561 			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2562 			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2563 			hash_keys.ports.src = flkeys->ports.src;
2564 			hash_keys.ports.dst = flkeys->ports.dst;
2565 			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2566 		} else {
2567 			memset(&hash_keys, 0, sizeof(hash_keys));
2568 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2569 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2570 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2571 			if (fl6->flowi6_flags & FLOWI_FLAG_ANY_SPORT)
2572 				hash_keys.ports.src = (__force __be16)get_random_u16();
2573 			else
2574 				hash_keys.ports.src = fl6->fl6_sport;
2575 			hash_keys.ports.dst = fl6->fl6_dport;
2576 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2577 		}
2578 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2579 		break;
2580 	case 2:
2581 		memset(&hash_keys, 0, sizeof(hash_keys));
2582 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2583 		if (skb) {
2584 			struct flow_keys keys;
2585 
2586 			if (!flkeys) {
2587 				skb_flow_dissect_flow_keys(skb, &keys, 0);
2588 				flkeys = &keys;
2589 			}
2590 
2591 			/* Inner can be v4 or v6 */
2592 			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2593 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
2594 				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
2595 				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
2596 			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2597 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2598 				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2599 				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2600 				hash_keys.tags.flow_label = flkeys->tags.flow_label;
2601 				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2602 			} else {
2603 				/* Same as case 0 */
2604 				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2605 				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2606 			}
2607 		} else {
2608 			/* Same as case 0 */
2609 			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2610 			hash_keys.addrs.v6addrs.src = fl6->saddr;
2611 			hash_keys.addrs.v6addrs.dst = fl6->daddr;
2612 			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
2613 			hash_keys.basic.ip_proto = fl6->flowi6_proto;
2614 		}
2615 		mhash = fib_multipath_hash_from_keys(net, &hash_keys);
2616 		break;
2617 	case 3:
2618 		if (skb)
2619 			mhash = rt6_multipath_custom_hash_skb(net, skb);
2620 		else
2621 			mhash = rt6_multipath_custom_hash_fl6(net, fl6);
2622 		break;
2623 	}
2624 
2625 	return mhash >> 1;
2626 }
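
/* The policy selected above comes from the
 * net.ipv6.fib_multipath_hash_policy sysctl: 0 hashes on outer L3
 * fields (falling back to the inner packet of ICMPv6 errors), 1 on the
 * L4 five-tuple, 2 on the inner L3 header when an encapsulation is
 * present, and 3 on the custom field set selected via
 * net.ipv6.fib_multipath_hash_fields.
 */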
2627 
2628 /* Called with rcu held */
2629 void ip6_route_input(struct sk_buff *skb)
2630 {
2631 	const struct ipv6hdr *iph = ipv6_hdr(skb);
2632 	struct net *net = dev_net(skb->dev);
2633 	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
2634 	struct ip_tunnel_info *tun_info;
2635 	struct flowi6 fl6 = {
2636 		.flowi6_iif = skb->dev->ifindex,
2637 		.daddr = iph->daddr,
2638 		.saddr = iph->saddr,
2639 		.flowlabel = ip6_flowinfo(iph),
2640 		.flowi6_mark = skb->mark,
2641 		.flowi6_proto = iph->nexthdr,
2642 	};
2643 	struct flow_keys *flkeys = NULL, _flkeys;
2644 
2645 	tun_info = skb_tunnel_info(skb);
2646 	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2647 		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
2648 
2649 	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
2650 		flkeys = &_flkeys;
2651 
2652 	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
2653 		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
2654 	skb_dst_drop(skb);
2655 	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
2656 						      &fl6, skb, flags));
2657 }
2658 EXPORT_SYMBOL_GPL(ip6_route_input);
2659 
2660 INDIRECT_CALLABLE_SCOPE struct rt6_info *ip6_pol_route_output(struct net *net,
2661 					     struct fib6_table *table,
2662 					     struct flowi6 *fl6,
2663 					     const struct sk_buff *skb,
2664 					     int flags)
2665 {
2666 	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
2667 }
2668 
2669 static struct dst_entry *ip6_route_output_flags_noref(struct net *net,
2670 						      const struct sock *sk,
2671 						      struct flowi6 *fl6,
2672 						      int flags)
2673 {
2674 	bool any_src;
2675 
2676 	if (ipv6_addr_type(&fl6->daddr) &
2677 	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
2678 		struct dst_entry *dst;
2679 
2680 		/* This function does not take refcnt on the dst */
2681 		dst = l3mdev_link_scope_lookup(net, fl6);
2682 		if (dst)
2683 			return dst;
2684 	}
2685 
2686 	fl6->flowi6_iif = LOOPBACK_IFINDEX;
2687 
2688 	flags |= RT6_LOOKUP_F_DST_NOREF;
2689 	any_src = ipv6_addr_any(&fl6->saddr);
2690 	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
2691 	    (fl6->flowi6_oif && any_src))
2692 		flags |= RT6_LOOKUP_F_IFACE;
2693 
2694 	if (!any_src)
2695 		flags |= RT6_LOOKUP_F_HAS_SADDR;
2696 	else if (sk)
2697 		flags |= rt6_srcprefs2flags(READ_ONCE(inet6_sk(sk)->srcprefs));
2698 
2699 	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
2700 }
2701 
2702 struct dst_entry *ip6_route_output_flags(struct net *net,
2703 					 const struct sock *sk,
2704 					 struct flowi6 *fl6,
2705 					 int flags)
2706 {
2707 	struct dst_entry *dst;
2708 	struct rt6_info *rt6;
2709 
2710 	rcu_read_lock();
2711 	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
2712 	rt6 = dst_rt6_info(dst);
2713 	/* For dst cached in uncached_list, refcnt is already taken. */
2714 	if (list_empty(&rt6->dst.rt_uncached) && !dst_hold_safe(dst)) {
2715 		dst = &net->ipv6.ip6_null_entry->dst;
2716 		dst_hold(dst);
2717 	}
2718 	rcu_read_unlock();
2719 
2720 	return dst;
2721 }
2722 EXPORT_SYMBOL_GPL(ip6_route_output_flags);
2723 
2724 struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2725 {
2726 	struct rt6_info *rt, *ort = dst_rt6_info(dst_orig);
2727 	struct net_device *loopback_dev = net->loopback_dev;
2728 	struct dst_entry *new = NULL;
2729 
2730 	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev,
2731 		       DST_OBSOLETE_DEAD, 0);
2732 	if (rt) {
2733 		rt6_info_init(rt);
2734 		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
2735 
2736 		new = &rt->dst;
2737 		new->__use = 1;
2738 		new->input = dst_discard;
2739 		new->output = dst_discard_out;
2740 
2741 		dst_copy_metrics(new, &ort->dst);
2742 
2743 		rt->rt6i_idev = in6_dev_get(loopback_dev);
2744 		rt->rt6i_gateway = ort->rt6i_gateway;
2745 		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
2746 
2747 		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
2748 #ifdef CONFIG_IPV6_SUBTREES
2749 		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
2750 #endif
2751 	}
2752 
2753 	dst_release(dst_orig);
2754 	return new ? new : ERR_PTR(-ENOMEM);
2755 }
2756 
2757 /*
2758  *	Destination cache support functions
2759  */
2760 
2761 static bool fib6_check(struct fib6_info *f6i, u32 cookie)
2762 {
2763 	u32 rt_cookie = 0;
2764 
2765 	if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
2766 		return false;
2767 
2768 	if (fib6_check_expired(f6i))
2769 		return false;
2770 
2771 	return true;
2772 }
2773 
2774 static struct dst_entry *rt6_check(struct rt6_info *rt,
2775 				   struct fib6_info *from,
2776 				   u32 cookie)
2777 {
2778 	u32 rt_cookie = 0;
2779 
2780 	if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
2781 	    rt_cookie != cookie)
2782 		return NULL;
2783 
2784 	if (rt6_check_expired(rt))
2785 		return NULL;
2786 
2787 	return &rt->dst;
2788 }
2789 
2790 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2791 					    struct fib6_info *from,
2792 					    u32 cookie)
2793 {
2794 	if (!__rt6_check_expired(rt) &&
2795 	    READ_ONCE(rt->dst.obsolete) == DST_OBSOLETE_FORCE_CHK &&
2796 	    fib6_check(from, cookie))
2797 		return &rt->dst;
2798 	return NULL;
2799 }
2800 
2801 INDIRECT_CALLABLE_SCOPE struct dst_entry *ip6_dst_check(struct dst_entry *dst,
2802 							u32 cookie)
2803 {
2804 	struct dst_entry *dst_ret;
2805 	struct fib6_info *from;
2806 	struct rt6_info *rt;
2807 
2808 	rt = dst_rt6_info(dst);
2809 
2810 	if (rt->sernum)
2811 		return rt6_is_valid(rt) ? dst : NULL;
2812 
2813 	rcu_read_lock();
2814 
2815 	/* All IPv6 dsts are created with ->obsolete set to the value
2816 	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
2817 	 * into this function always.
2818 	 */
2819 
2820 	from = rcu_dereference(rt->from);
2821 
2822 	if (from && (rt->rt6i_flags & RTF_PCPU ||
2823 	    unlikely(!list_empty(&rt->dst.rt_uncached))))
2824 		dst_ret = rt6_dst_from_check(rt, from, cookie);
2825 	else
2826 		dst_ret = rt6_check(rt, from, cookie);
2827 
2828 	rcu_read_unlock();
2829 
2830 	return dst_ret;
2831 }
2832 EXPORT_INDIRECT_CALLABLE(ip6_dst_check);
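
/* A standalone sketch of the cookie scheme ip6_dst_check() implements:
 * a cached dst records the tree's generation number (the fn_sernum
 * cookie) at creation, and any tree change bumps that number, so a
 * stale cookie fails validation and forces a fresh route lookup.  The
 * names below are stand-ins, not kernel API.
 */
#if 0
#include <stdio.h>

static unsigned int tree_gen = 1;	/* fn->fn_sernum analogue */

struct cached_route { unsigned int cookie; };

static int still_valid(const struct cached_route *rt)
{
	return rt->cookie == tree_gen;	/* rt6_check() analogue */
}

int main(void)
{
	struct cached_route rt = { .cookie = tree_gen };

	printf("%d\n", still_valid(&rt));	/* 1: tree unchanged */
	tree_gen++;				/* a route was added/removed */
	printf("%d\n", still_valid(&rt));	/* 0: caller must re-route */
	return 0;
}
#endif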
2833 
2834 static void ip6_negative_advice(struct sock *sk,
2835 				struct dst_entry *dst)
2836 {
2837 	struct rt6_info *rt = dst_rt6_info(dst);
2838 
2839 	if (rt->rt6i_flags & RTF_CACHE) {
2840 		rcu_read_lock();
2841 		if (rt6_check_expired(rt)) {
2842 			/* rt/dst cannot be destroyed yet,
2843 			 * because of rcu_read_lock()
2844 			 */
2845 			sk_dst_reset(sk);
2846 			rt6_remove_exception_rt(rt);
2847 		}
2848 		rcu_read_unlock();
2849 		return;
2850 	}
2851 	sk_dst_reset(sk);
2852 }
2853 
2854 static void ip6_link_failure(struct sk_buff *skb)
2855 {
2856 	struct rt6_info *rt;
2857 
2858 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
2859 
2860 	rt = dst_rt6_info(skb_dst(skb));
2861 	if (rt) {
2862 		rcu_read_lock();
2863 		if (rt->rt6i_flags & RTF_CACHE) {
2864 			rt6_remove_exception_rt(rt);
2865 		} else {
2866 			struct fib6_info *from;
2867 			struct fib6_node *fn;
2868 
2869 			from = rcu_dereference(rt->from);
2870 			if (from) {
2871 				fn = rcu_dereference(from->fib6_node);
2872 				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
2873 					WRITE_ONCE(fn->fn_sernum, -1);
2874 			}
2875 		}
2876 		rcu_read_unlock();
2877 	}
2878 }
2879 
2880 static void rt6_update_expires(struct rt6_info *rt0, int timeout)
2881 {
2882 	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
2883 		struct fib6_info *from;
2884 
2885 		rcu_read_lock();
2886 		from = rcu_dereference(rt0->from);
2887 		if (from)
2888 			WRITE_ONCE(rt0->dst.expires, from->expires);
2889 		rcu_read_unlock();
2890 	}
2891 
2892 	dst_set_expires(&rt0->dst, timeout);
2893 	rt0->rt6i_flags |= RTF_EXPIRES;
2894 }
2895 
2896 static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2897 {
2898 	struct net *net = dev_net(rt->dst.dev);
2899 
2900 	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
2901 	rt->rt6i_flags |= RTF_MODIFIED;
2902 	rt6_update_expires(rt, READ_ONCE(net->ipv6.sysctl.ip6_rt_mtu_expires));
2903 }
2904 
2905 static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2906 {
2907 	return !(rt->rt6i_flags & RTF_CACHE) &&
2908 		(rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
2909 }
2910 
2911 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
2912 				 const struct ipv6hdr *iph, u32 mtu,
2913 				 bool confirm_neigh)
2914 {
2915 	const struct in6_addr *daddr, *saddr;
2916 	struct rt6_info *rt6 = dst_rt6_info(dst);
2917 
2918 	/* Note: do *NOT* check dst_metric_locked(dst, RTAX_MTU).
2919 	 * IPv6 pmtu discovery isn't optional, so 'mtu lock' cannot disable it.
2920 	 * [see also comment in rt6_mtu_change_route()]
2921 	 */
2922 
2923 	if (iph) {
2924 		daddr = &iph->daddr;
2925 		saddr = &iph->saddr;
2926 	} else if (sk) {
2927 		daddr = &sk->sk_v6_daddr;
2928 		saddr = &inet6_sk(sk)->saddr;
2929 	} else {
2930 		daddr = NULL;
2931 		saddr = NULL;
2932 	}
2933 
2934 	if (confirm_neigh)
2935 		dst_confirm_neigh(dst, daddr);
2936 
2937 	if (mtu < IPV6_MIN_MTU)
2938 		return;
2939 	if (mtu >= dst6_mtu(dst))
2940 		return;
2941 
2942 	if (!rt6_cache_allowed_for_pmtu(rt6)) {
2943 		rt6_do_update_pmtu(rt6, mtu);
2944 		/* update rt6_ex->stamp for cache */
2945 		if (rt6->rt6i_flags & RTF_CACHE)
2946 			rt6_update_exception_stamp_rt(rt6);
2947 	} else if (daddr) {
2948 		struct fib6_result res = {};
2949 		struct rt6_info *nrt6;
2950 
2951 		rcu_read_lock();
2952 		res.f6i = rcu_dereference(rt6->from);
2953 		if (!res.f6i)
2954 			goto out_unlock;
2955 
2956 		res.fib6_flags = res.f6i->fib6_flags;
2957 		res.fib6_type = res.f6i->fib6_type;
2958 
2959 		if (res.f6i->nh) {
2960 			struct fib6_nh_match_arg arg = {
2961 				.dev = dst_dev_rcu(dst),
2962 				.gw = &rt6->rt6i_gateway,
2963 			};
2964 
2965 			nexthop_for_each_fib6_nh(res.f6i->nh,
2966 						 fib6_nh_find_match, &arg);
2967 
2968 			/* The fib6_info uses a nexthop that has no fib6_nh
2969 			 * matching dst->dev + gw.  Should be impossible.
2970 			 */
2971 			if (!arg.match)
2972 				goto out_unlock;
2973 
2974 			res.nh = arg.match;
2975 		} else {
2976 			res.nh = res.f6i->fib6_nh;
2977 		}
2978 
2979 		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
2980 		if (nrt6) {
2981 			rt6_do_update_pmtu(nrt6, mtu);
2982 			if (rt6_insert_exception(nrt6, &res))
2983 				dst_release_immediate(&nrt6->dst);
2984 		}
2985 out_unlock:
2986 		rcu_read_unlock();
2987 	}
2988 }
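
/* A worked example of the two early exits in __ip6_rt_update_pmtu()
 * above: a reported MTU below IPV6_MIN_MTU (1280) is ignored, as is
 * any report that would not shrink the current path MTU.  Userspace
 * sketch, illustrative only.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define MIN_MTU 1280	/* IPV6_MIN_MTU */

static bool accept_pmtu(unsigned int cur_mtu, unsigned int reported)
{
	return reported >= MIN_MTU && reported < cur_mtu;
}

int main(void)
{
	printf("%d\n", accept_pmtu(1500, 1400));	/* 1: genuine shrink */
	printf("%d\n", accept_pmtu(1500, 1000));	/* 0: below IPv6 minimum */
	printf("%d\n", accept_pmtu(1400, 1500));	/* 0: would grow the PMTU */
	return 0;
}
#endif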
2989 
2990 static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2991 			       struct sk_buff *skb, u32 mtu,
2992 			       bool confirm_neigh)
2993 {
2994 	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2995 			     confirm_neigh);
2996 }
2997 
2998 void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
2999 		     int oif, u32 mark, kuid_t uid)
3000 {
3001 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3002 	struct dst_entry *dst;
3003 	struct flowi6 fl6 = {
3004 		.flowi6_oif = oif,
3005 		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
3006 		.daddr = iph->daddr,
3007 		.saddr = iph->saddr,
3008 		.flowlabel = ip6_flowinfo(iph),
3009 		.flowi6_uid = uid,
3010 	};
3011 
3012 	dst = ip6_route_output(net, NULL, &fl6);
3013 	if (!dst->error)
3014 		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
3015 	dst_release(dst);
3016 }
3017 EXPORT_SYMBOL_GPL(ip6_update_pmtu);
3018 
3019 void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
3020 {
3021 	int oif = sk->sk_bound_dev_if;
3022 	struct dst_entry *dst;
3023 
3024 	if (!oif && skb->dev)
3025 		oif = l3mdev_master_ifindex(skb->dev);
3026 
3027 	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, READ_ONCE(sk->sk_mark),
3028 			sk_uid(sk));
3029 
3030 	dst = __sk_dst_get(sk);
3031 	if (!dst || !READ_ONCE(dst->obsolete) ||
3032 	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
3033 		return;
3034 
3035 	bh_lock_sock(sk);
3036 	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
3037 		ip6_datagram_dst_update(sk, false);
3038 	bh_unlock_sock(sk);
3039 }
3040 EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
3041 
3042 void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
3043 			   const struct flowi6 *fl6)
3044 {
3045 #ifdef CONFIG_IPV6_SUBTREES
3046 	struct ipv6_pinfo *np = inet6_sk(sk);
3047 #endif
3048 
3049 	ip6_dst_store(sk, dst,
3050 		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr),
3051 #ifdef CONFIG_IPV6_SUBTREES
3052 		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
3053 		      true :
3054 #endif
3055 		      false);
3056 }
3057 
3058 static bool ip6_redirect_nh_match(const struct fib6_result *res,
3059 				  struct flowi6 *fl6,
3060 				  const struct in6_addr *gw,
3061 				  struct rt6_info **ret)
3062 {
3063 	const struct fib6_nh *nh = res->nh;
3064 
3065 	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
3066 	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
3067 		return false;
3068 
3069 	/* rt_cache's gateway might be different from its 'parent'
3070 	 * in the case of an ip redirect.
3071 	 * So we keep searching in the exception table if the gateway
3072 	 * is different.
3073 	 */
3074 	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
3075 		struct rt6_info *rt_cache;
3076 
3077 		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
3078 		if (rt_cache &&
3079 		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
3080 			*ret = rt_cache;
3081 			return true;
3082 		}
3083 		return false;
3084 	}
3085 	return true;
3086 }
3087 
3088 struct fib6_nh_rd_arg {
3089 	struct fib6_result	*res;
3090 	struct flowi6		*fl6;
3091 	const struct in6_addr	*gw;
3092 	struct rt6_info		**ret;
3093 };
3094 
3095 static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
3096 {
3097 	struct fib6_nh_rd_arg *arg = _arg;
3098 
3099 	arg->res->nh = nh;
3100 	return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
3101 }
3102 
3103 /* Handle redirects */
3104 struct ip6rd_flowi {
3105 	struct flowi6 fl6;
3106 	struct in6_addr gateway;
3107 };
3108 
3109 INDIRECT_CALLABLE_SCOPE struct rt6_info *__ip6_route_redirect(struct net *net,
3110 					     struct fib6_table *table,
3111 					     struct flowi6 *fl6,
3112 					     const struct sk_buff *skb,
3113 					     int flags)
3114 {
3115 	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
3116 	struct rt6_info *ret = NULL;
3117 	struct fib6_result res = {};
3118 	struct fib6_nh_rd_arg arg = {
3119 		.res = &res,
3120 		.fl6 = fl6,
3121 		.gw  = &rdfl->gateway,
3122 		.ret = &ret
3123 	};
3124 	struct fib6_info *rt;
3125 	struct fib6_node *fn;
3126 
3127 	/* Get the "current" route for this destination and
3128 	 * check if the redirect has come from the appropriate router.
3129 	 *
3130 	 * RFC 4861 specifies that redirects should only be
3131 	 * accepted if they come from the nexthop to the target.
3132 	 * Due to the way the routes are chosen, this notion
3133 	 * is a bit fuzzy and one might need to check all possible
3134 	 * routes.
3135 	 */
3136 
3137 	rcu_read_lock();
3138 	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
3139 restart:
3140 	for_each_fib6_node_rt_rcu(fn) {
3141 		res.f6i = rt;
3142 		if (fib6_check_expired(rt))
3143 			continue;
3144 		if (rt->fib6_flags & RTF_REJECT)
3145 			break;
3146 		if (unlikely(rt->nh)) {
3147 			if (nexthop_is_blackhole(rt->nh))
3148 				continue;
3149 			/* on match, res->nh is filled in and potentially ret */
3150 			if (nexthop_for_each_fib6_nh(rt->nh,
3151 						     fib6_nh_redirect_match,
3152 						     &arg))
3153 				goto out;
3154 		} else {
3155 			res.nh = rt->fib6_nh;
3156 			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
3157 						  &ret))
3158 				goto out;
3159 		}
3160 	}
3161 
3162 	if (!rt)
3163 		rt = net->ipv6.fib6_null_entry;
3164 	else if (rt->fib6_flags & RTF_REJECT) {
3165 		ret = net->ipv6.ip6_null_entry;
3166 		goto out;
3167 	}
3168 
3169 	if (rt == net->ipv6.fib6_null_entry) {
3170 		fn = fib6_backtrack(fn, &fl6->saddr);
3171 		if (fn)
3172 			goto restart;
3173 	}
3174 
3175 	res.f6i = rt;
3176 	res.nh = rt->fib6_nh;
3177 out:
3178 	if (ret) {
3179 		ip6_hold_safe(net, &ret);
3180 	} else {
3181 		res.fib6_flags = res.f6i->fib6_flags;
3182 		res.fib6_type = res.f6i->fib6_type;
3183 		ret = ip6_create_rt_rcu(&res);
3184 	}
3185 
3186 	rcu_read_unlock();
3187 
3188 	trace_fib6_table_lookup(net, &res, table, fl6);
3189 	return ret;
3190 }
3191 
3192 static struct dst_entry *ip6_route_redirect(struct net *net,
3193 					    const struct flowi6 *fl6,
3194 					    const struct sk_buff *skb,
3195 					    const struct in6_addr *gateway)
3196 {
3197 	int flags = RT6_LOOKUP_F_HAS_SADDR;
3198 	struct ip6rd_flowi rdfl;
3199 
3200 	rdfl.fl6 = *fl6;
3201 	rdfl.gateway = *gateway;
3202 
3203 	return fib6_rule_lookup(net, &rdfl.fl6, skb,
3204 				flags, __ip6_route_redirect);
3205 }
3206 
3207 void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
3208 		  kuid_t uid)
3209 {
3210 	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
3211 	struct dst_entry *dst;
3212 	struct flowi6 fl6 = {
3213 		.flowi6_iif = LOOPBACK_IFINDEX,
3214 		.flowi6_oif = oif,
3215 		.flowi6_mark = mark,
3216 		.daddr = iph->daddr,
3217 		.saddr = iph->saddr,
3218 		.flowlabel = ip6_flowinfo(iph),
3219 		.flowi6_uid = uid,
3220 	};
3221 
3222 	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
3223 	rt6_do_redirect(dst, NULL, skb);
3224 	dst_release(dst);
3225 }
3226 EXPORT_SYMBOL_GPL(ip6_redirect);
3227 
3228 void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
3229 {
3230 	const struct ipv6hdr *iph = ipv6_hdr(skb);
3231 	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
3232 	struct dst_entry *dst;
3233 	struct flowi6 fl6 = {
3234 		.flowi6_iif = LOOPBACK_IFINDEX,
3235 		.flowi6_oif = oif,
3236 		.daddr = msg->dest,
3237 		.saddr = iph->daddr,
3238 		.flowi6_uid = sock_net_uid(net, NULL),
3239 	};
3240 
3241 	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
3242 	rt6_do_redirect(dst, NULL, skb);
3243 	dst_release(dst);
3244 }
3245 
3246 void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3247 {
3248 	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
3249 		     READ_ONCE(sk->sk_mark), sk_uid(sk));
3250 }
3251 EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3252 
3253 static unsigned int ip6_default_advmss(const struct dst_entry *dst)
3254 {
3255 	unsigned int mtu = dst6_mtu(dst);
3256 	struct net *net;
3257 
3258 	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
3259 
3260 	rcu_read_lock();
3261 
3262 	net = dst_dev_net_rcu(dst);
3263 	mtu = max_t(unsigned int, mtu,
3264 		    READ_ONCE(net->ipv6.sysctl.ip6_rt_min_advmss));
3265 
3266 	rcu_read_unlock();
3267 
3268 	/*
3269 	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
3270 	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
3271 	 * IPV6_MAXPLEN is also valid and means: "any MSS,
3272 	 * rely only on pmtu discovery"
3273 	 */
3274 	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
3275 		mtu = IPV6_MAXPLEN;
3276 	return mtu;
3277 }
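
/* Worked numbers for the computation above, assuming a 1500-byte link
 * MTU and an ip6_rt_min_advmss below the result: the advertised MSS is
 * 1500 - 40 (IPv6 header) - 20 (TCP header) = 1440.  Userspace sketch.
 */
#if 0
#include <stdio.h>

#define IPV6_HDR	40
#define TCP_HDR		20
#define MAXPLEN		65535	/* IPV6_MAXPLEN */

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int advmss = mtu - (IPV6_HDR + TCP_HDR);

	if (advmss > MAXPLEN - TCP_HDR)
		advmss = MAXPLEN;	/* "any MSS, rely on PMTU discovery" */
	printf("advmss=%u\n", advmss);	/* 1440 */
	return 0;
}
#endif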
3278 
3279 INDIRECT_CALLABLE_SCOPE unsigned int ip6_mtu(const struct dst_entry *dst)
3280 {
3281 	return ip6_dst_mtu_maybe_forward(dst, false);
3282 }
3283 EXPORT_INDIRECT_CALLABLE(ip6_mtu);
3284 
3285 /* MTU selection:
3286  * 1. mtu on route is locked - use it
3287  * 2. mtu from nexthop exception
3288  * 3. mtu from egress device
3289  *
3290  * based on ip6_dst_mtu_forward and exception logic of
3291  * rt6_find_cached_rt; called with rcu_read_lock
3292  */
3293 u32 ip6_mtu_from_fib6(const struct fib6_result *res,
3294 		      const struct in6_addr *daddr,
3295 		      const struct in6_addr *saddr)
3296 {
3297 	const struct fib6_nh *nh = res->nh;
3298 	struct fib6_info *f6i = res->f6i;
3299 	struct inet6_dev *idev;
3300 	struct rt6_info *rt;
3301 	u32 mtu = 0;
3302 
3303 	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
3304 		mtu = f6i->fib6_pmtu;
3305 		if (mtu)
3306 			goto out;
3307 	}
3308 
3309 	rt = rt6_find_cached_rt(res, daddr, saddr);
3310 	if (unlikely(rt)) {
3311 		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
3312 	} else {
3313 		struct net_device *dev = nh->fib_nh_dev;
3314 
3315 		mtu = IPV6_MIN_MTU;
3316 		idev = __in6_dev_get(dev);
3317 		if (idev)
3318 			mtu = max_t(u32, mtu, READ_ONCE(idev->cnf.mtu6));
3319 	}
3320 
3321 	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
3322 out:
3323 	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
3324 }
3325 
3326 struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
3327 				  struct flowi6 *fl6)
3328 {
3329 	struct dst_entry *dst;
3330 	struct rt6_info *rt;
3331 	struct inet6_dev *idev = in6_dev_get(dev);
3332 	struct net *net = dev_net(dev);
3333 
3334 	if (unlikely(!idev))
3335 		return ERR_PTR(-ENODEV);
3336 
3337 	rt = ip6_dst_alloc(net, dev, 0);
3338 	if (unlikely(!rt)) {
3339 		in6_dev_put(idev);
3340 		dst = ERR_PTR(-ENOMEM);
3341 		goto out;
3342 	}
3343 
3344 	rt->dst.input = ip6_input;
3345 	rt->dst.output  = ip6_output;
3346 	rt->rt6i_gateway  = fl6->daddr;
3347 	rt->rt6i_dst.addr = fl6->daddr;
3348 	rt->rt6i_dst.plen = 128;
3349 	rt->rt6i_idev     = idev;
3350 	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);
3351 
3352 	/* Add this dst into uncached_list so that rt6_disable_ip() can
3353 	 * do proper release of the net_device
3354 	 */
3355 	rt6_uncached_list_add(rt);
3356 
3357 	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
3358 
3359 out:
3360 	return dst;
3361 }
3362 
3363 static void ip6_dst_gc(struct dst_ops *ops)
3364 {
3365 	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
3366 	int rt_min_interval = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_min_interval);
3367 	int rt_elasticity = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_elasticity);
3368 	int rt_gc_timeout = READ_ONCE(net->ipv6.sysctl.ip6_rt_gc_timeout);
3369 	unsigned long rt_last_gc = READ_ONCE(net->ipv6.ip6_rt_last_gc);
3370 	unsigned int val;
3371 	int entries;
3372 
3373 	if (time_after(rt_last_gc + rt_min_interval, jiffies))
3374 		goto out;
3375 
3376 	fib6_run_gc(atomic_inc_return(&net->ipv6.ip6_rt_gc_expire), net, true);
3377 	entries = dst_entries_get_slow(ops);
3378 	if (entries < ops->gc_thresh)
3379 		atomic_set(&net->ipv6.ip6_rt_gc_expire, rt_gc_timeout >> 1);
3380 out:
3381 	val = atomic_read(&net->ipv6.ip6_rt_gc_expire);
3382 	atomic_set(&net->ipv6.ip6_rt_gc_expire, val - (val >> rt_elasticity));
3383 }
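
/* The final line of ip6_dst_gc() decays the expire value by
 * val >> elasticity on every run.  With the default
 * ip6_rt_gc_elasticity of 9, a value of 4096 loses 8 per pass at
 * first, decaying proportionally as it shrinks.  Userspace sketch:
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int val = 4096, elasticity = 9;

	for (int i = 0; i < 5; i++) {
		val -= val >> elasticity;	/* same decay step as above */
		printf("pass %d: val=%u\n", i, val);
	}
	return 0;
}
#endif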
3384 
3385 static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
3386 			       const struct in6_addr *gw_addr, u32 tbid,
3387 			       int flags, struct fib6_result *res)
3388 {
3389 	struct flowi6 fl6 = {
3390 		.flowi6_oif = cfg->fc_ifindex,
3391 		.daddr = *gw_addr,
3392 		.saddr = cfg->fc_prefsrc,
3393 	};
3394 	struct fib6_table *table;
3395 	int err;
3396 
3397 	table = fib6_get_table(net, tbid);
3398 	if (!table)
3399 		return -EINVAL;
3400 
3401 	if (!ipv6_addr_any(&cfg->fc_prefsrc))
3402 		flags |= RT6_LOOKUP_F_HAS_SADDR;
3403 
3404 	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
3405 
3406 	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
3407 	if (!err && res->f6i != net->ipv6.fib6_null_entry)
3408 		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
3409 				 cfg->fc_ifindex != 0, NULL, flags);
3410 
3411 	return err;
3412 }
3413 
3414 static int ip6_route_check_nh_onlink(struct net *net,
3415 				     struct fib6_config *cfg,
3416 				     const struct net_device *dev,
3417 				     struct netlink_ext_ack *extack)
3418 {
3419 	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
3420 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3421 	struct fib6_result res = {};
3422 	int err;
3423 
3424 	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
3425 	if (!err && !(res.fib6_flags & RTF_REJECT) &&
3426 	    res.fib6_type != RTN_UNICAST) {
3427 		NL_SET_ERR_MSG(extack, "Nexthop has invalid gateway");
3428 		err = -EINVAL;
3429 	}
3430 
3431 	return err;
3432 }
3433 
3434 static int ip6_route_check_nh(struct net *net,
3435 			      struct fib6_config *cfg,
3436 			      struct net_device **_dev,
3437 			      netdevice_tracker *dev_tracker,
3438 			      struct inet6_dev **idev)
3439 {
3440 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3441 	struct net_device *dev = _dev ? *_dev : NULL;
3442 	int flags = RT6_LOOKUP_F_IFACE;
3443 	struct fib6_result res = {};
3444 	int err = -EHOSTUNREACH;
3445 
3446 	if (cfg->fc_table) {
3447 		err = ip6_nh_lookup_table(net, cfg, gw_addr,
3448 					  cfg->fc_table, flags, &res);
3449 		/* gw_addr can not require a gateway or resolve to a reject
3450 		 * route. If a device is given, it must match the result.
3451 		 */
3452 		if (err || res.fib6_flags & RTF_REJECT ||
3453 		    res.nh->fib_nh_gw_family ||
3454 		    (dev && dev != res.nh->fib_nh_dev))
3455 			err = -EHOSTUNREACH;
3456 	}
3457 
3458 	if (err < 0) {
3459 		struct flowi6 fl6 = {
3460 			.flowi6_oif = cfg->fc_ifindex,
3461 			.daddr = *gw_addr,
3462 		};
3463 
3464 		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
3465 		if (err || res.fib6_flags & RTF_REJECT ||
3466 		    res.nh->fib_nh_gw_family)
3467 			err = -EHOSTUNREACH;
3468 
3469 		if (err)
3470 			return err;
3471 
3472 		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
3473 				 cfg->fc_ifindex != 0, NULL, flags);
3474 	}
3475 
3476 	err = 0;
3477 	if (dev) {
3478 		if (dev != res.nh->fib_nh_dev)
3479 			err = -EHOSTUNREACH;
3480 	} else {
3481 		*_dev = dev = res.nh->fib_nh_dev;
3482 		netdev_hold(dev, dev_tracker, GFP_ATOMIC);
3483 		*idev = in6_dev_get(dev);
3484 	}
3485 
3486 	return err;
3487 }
3488 
3489 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
3490 			   struct net_device **_dev,
3491 			   netdevice_tracker *dev_tracker,
3492 			   struct inet6_dev **idev,
3493 			   struct netlink_ext_ack *extack)
3494 {
3495 	const struct in6_addr *gw_addr = &cfg->fc_gateway;
3496 	int gwa_type = ipv6_addr_type(gw_addr);
3497 	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
3498 	const struct net_device *dev = *_dev;
3499 	bool need_addr_check = !dev;
3500 	int err = -EINVAL;
3501 
3502 	/* If gw_addr is a local address, we can fail to detect that here
3503 	 * while the address is still TENTATIVE (DAD in progress):
3504 	 * rt6_lookup() returns the already-added prefix route via the
3505 	 * interface that route was assigned to, which might be non-loopback.
3506 	 */
3507 	if (dev &&
3508 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3509 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3510 		goto out;
3511 	}
3512 
3513 	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
3514 		/* IPv6 strongly prefers link-local addresses as nexthop
3515 		 * addresses; otherwise, the router will not be able to
3516 		 * send redirects.
3517 		 * That is a good rule, but in some (rare!) circumstances
3518 		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
3519 		 * some exceptions. --ANK
3520 		 * We allow IPv4-mapped nexthops to support RFC 4798-style
3521 		 * addressing.
3522 		 */
3523 		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
3524 			NL_SET_ERR_MSG(extack, "Invalid gateway address");
3525 			goto out;
3526 		}
3527 
3528 		rcu_read_lock();
3529 
3530 		if (cfg->fc_flags & RTNH_F_ONLINK)
3531 			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
3532 		else
3533 			err = ip6_route_check_nh(net, cfg, _dev, dev_tracker,
3534 						 idev);
3535 
3536 		rcu_read_unlock();
3537 
3538 		if (err)
3539 			goto out;
3540 	}
3541 
3542 	/* reload in case device was changed */
3543 	dev = *_dev;
3544 
3545 	err = -EINVAL;
3546 	if (!dev) {
3547 		NL_SET_ERR_MSG(extack, "Egress device not specified");
3548 		goto out;
3549 	} else if (dev->flags & IFF_LOOPBACK) {
3550 		NL_SET_ERR_MSG(extack,
3551 			       "Egress device can not be loopback device for this route");
3552 		goto out;
3553 	}
3554 
3555 	/* if we did not check gw_addr above, do so now that the
3556 	 * egress device has been resolved.
3557 	 */
3558 	if (need_addr_check &&
3559 	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
3560 		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
3561 		goto out;
3562 	}
3563 
3564 	err = 0;
3565 out:
3566 	return err;
3567 }
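/* Two illustrative (hypothetical) invocations of the checks above:
 *
 *   ip -6 route add 2001:db8:2::/64 via fe80::1 dev eth0
 *
 * takes the short path: a link-local unicast gateway needs no
 * recursive lookup, only the local-address and egress-device checks.
 *
 *   ip -6 route add 2001:db8:2::/64 via 2001:db8:1::1
 *
 * uses a global gateway, so ip6_route_check_nh() (or the onlink
 * variant) must resolve which device reaches the gateway and reject
 * gateways that are unreachable or sit behind another gateway.
 */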
3568 
3569 static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3570 {
3571 	if ((flags & RTF_REJECT) ||
3572 	    (dev && (dev->flags & IFF_LOOPBACK) &&
3573 	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
3574 	     !(flags & (RTF_ANYCAST | RTF_LOCAL))))
3575 		return true;
3576 
3577 	return false;
3578 }
3579 
3580 int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
3581 		 struct fib6_config *cfg, gfp_t gfp_flags,
3582 		 struct netlink_ext_ack *extack)
3583 {
3584 	netdevice_tracker *dev_tracker = &fib6_nh->fib_nh_dev_tracker;
3585 	struct net_device *dev = NULL;
3586 	struct inet6_dev *idev = NULL;
3587 	int err;
3588 
3589 	if (!ipv6_mod_enabled()) {
3590 		NL_SET_ERR_MSG(extack, "IPv6 support not enabled in kernel");
3591 		return -EAFNOSUPPORT;
3592 	}
3593 
3594 	fib6_nh->fib_nh_family = AF_INET6;
3595 #ifdef CONFIG_IPV6_ROUTER_PREF
3596 	fib6_nh->last_probe = jiffies;
3597 #endif
3598 	if (cfg->fc_is_fdb) {
3599 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3600 		fib6_nh->fib_nh_gw_family = AF_INET6;
3601 		return 0;
3602 	}
3603 
3604 	err = -ENODEV;
3605 	if (cfg->fc_ifindex) {
3606 		dev = netdev_get_by_index(net, cfg->fc_ifindex,
3607 					  dev_tracker, gfp_flags);
3608 		if (!dev)
3609 			goto out;
3610 		idev = in6_dev_get(dev);
3611 		if (!idev)
3612 			goto out;
3613 	}
3614 
3615 	if (cfg->fc_flags & RTNH_F_ONLINK) {
3616 		if (!dev) {
3617 			NL_SET_ERR_MSG(extack,
3618 				       "Nexthop device required for onlink");
3619 			goto out;
3620 		}
3621 
3622 		if (!(dev->flags & IFF_UP)) {
3623 			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3624 			err = -ENETDOWN;
3625 			goto out;
3626 		}
3627 
3628 		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
3629 	}
3630 
3631 	fib6_nh->fib_nh_weight = 1;
3632 
3633 	/* Reset the nexthop device to the loopback device in case of reject
3634 	 * routes.
3635 	 */
3636 	if (cfg->fc_flags & RTF_REJECT) {
3637 		/* hold loopback dev/idev if we haven't done so. */
3638 		if (dev != net->loopback_dev) {
3639 			if (dev) {
3640 				netdev_put(dev, dev_tracker);
3641 				in6_dev_put(idev);
3642 			}
3643 			dev = net->loopback_dev;
3644 			netdev_hold(dev, dev_tracker, gfp_flags);
3645 			idev = in6_dev_get(dev);
3646 			if (!idev) {
3647 				err = -ENODEV;
3648 				goto out;
3649 			}
3650 		}
3651 		goto pcpu_alloc;
3652 	}
3653 
3654 	if (cfg->fc_flags & RTF_GATEWAY) {
3655 		err = ip6_validate_gw(net, cfg, &dev, dev_tracker,
3656 				      &idev, extack);
3657 		if (err)
3658 			goto out;
3659 
3660 		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
3661 		fib6_nh->fib_nh_gw_family = AF_INET6;
3662 	}
3663 
3664 	err = -ENODEV;
3665 	if (!dev)
3666 		goto out;
3667 
3668 	if (!idev || idev->cnf.disable_ipv6) {
3669 		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
3670 		err = -EACCES;
3671 		goto out;
3672 	}
3673 
3674 	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
3675 		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
3676 		err = -ENETDOWN;
3677 		goto out;
3678 	}
3679 
3680 	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
3681 	    !netif_carrier_ok(dev))
3682 		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
3683 
3684 	err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
3685 				 cfg->fc_encap_type, cfg, gfp_flags, extack);
3686 	if (err)
3687 		goto out;
3688 
3689 pcpu_alloc:
3690 	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
3691 	if (!fib6_nh->rt6i_pcpu) {
3692 		err = -ENOMEM;
3693 		goto out;
3694 	}
3695 
3696 	fib6_nh->fib_nh_dev = dev;
3697 	fib6_nh->fib_nh_oif = dev->ifindex;
3698 	err = 0;
3699 out:
3700 	if (idev)
3701 		in6_dev_put(idev);
3702 
3703 	if (err) {
3704 		fib_nh_common_release(&fib6_nh->nh_common);
3705 		fib6_nh->nh_common.nhc_pcpu_rth_output = NULL;
3706 		fib6_nh->fib_nh_lws = NULL;
3707 		netdev_put(dev, dev_tracker);
3708 	}
3709 
3710 	return err;
3711 }
3712 
3713 void fib6_nh_release(struct fib6_nh *fib6_nh)
3714 {
3715 	struct rt6_exception_bucket *bucket;
3716 
3717 	rcu_read_lock();
3718 
3719 	fib6_nh_flush_exceptions(fib6_nh, NULL);
3720 	bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3721 	if (bucket) {
3722 		rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3723 		kfree(bucket);
3724 	}
3725 
3726 	rcu_read_unlock();
3727 
3728 	fib6_nh_release_dsts(fib6_nh);
3729 	free_percpu(fib6_nh->rt6i_pcpu);
3730 
3731 	fib_nh_common_release(&fib6_nh->nh_common);
3732 }
3733 
3734 void fib6_nh_release_dsts(struct fib6_nh *fib6_nh)
3735 {
3736 	int cpu;
3737 
3738 	if (!fib6_nh->rt6i_pcpu)
3739 		return;
3740 
3741 	for_each_possible_cpu(cpu) {
3742 		struct rt6_info *pcpu_rt, **ppcpu_rt;
3743 
3744 		ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3745 		pcpu_rt = xchg(ppcpu_rt, NULL);
3746 		if (pcpu_rt) {
3747 			dst_dev_put(&pcpu_rt->dst);
3748 			dst_release(&pcpu_rt->dst);
3749 		}
3750 	}
3751 }
3752 
3753 static int fib6_config_validate(struct fib6_config *cfg,
3754 				struct netlink_ext_ack *extack)
3755 {
3756 	/* RTF_PCPU is an internal flag; can not be set by userspace */
3757 	if (cfg->fc_flags & RTF_PCPU) {
3758 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
3759 		goto errout;
3760 	}
3761 
3762 	/* RTF_CACHE is an internal flag; can not be set by userspace */
3763 	if (cfg->fc_flags & RTF_CACHE) {
3764 		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3765 		goto errout;
3766 	}
3767 
3768 	if (cfg->fc_type > RTN_MAX) {
3769 		NL_SET_ERR_MSG(extack, "Invalid route type");
3770 		goto errout;
3771 	}
3772 
3773 	if (cfg->fc_dst_len > 128) {
3774 		NL_SET_ERR_MSG(extack, "Invalid prefix length");
3775 		goto errout;
3776 	}
3777 
3778 #ifdef CONFIG_IPV6_SUBTREES
3779 	if (cfg->fc_src_len > 128) {
3780 		NL_SET_ERR_MSG(extack, "Invalid source address length");
3781 		goto errout;
3782 	}
3783 
3784 	if (cfg->fc_nh_id && cfg->fc_src_len) {
3785 		NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
3786 		goto errout;
3787 	}
3788 #else
3789 	if (cfg->fc_src_len) {
3790 		NL_SET_ERR_MSG(extack,
3791 			       "Specifying source address requires IPV6_SUBTREES to be enabled");
3792 		goto errout;
3793 	}
3794 #endif
3795 	return 0;
3796 errout:
3797 	return -EINVAL;
3798 }
3799 
3800 static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
3801 					       gfp_t gfp_flags,
3802 					       struct netlink_ext_ack *extack)
3803 {
3804 	struct net *net = cfg->fc_nlinfo.nl_net;
3805 	struct fib6_table *table;
3806 	struct fib6_info *rt;
3807 	int err;
3808 
3809 	if (cfg->fc_nlinfo.nlh &&
3810 	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
3811 		table = fib6_get_table(net, cfg->fc_table);
3812 		if (!table) {
3813 			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
3814 			table = fib6_new_table(net, cfg->fc_table);
3815 		}
3816 	} else {
3817 		table = fib6_new_table(net, cfg->fc_table);
3818 	}
3819 	if (!table) {
3820 		err = -ENOBUFS;
3821 		goto err;
3822 	}
3823 
3824 	rt = fib6_info_alloc(gfp_flags, !cfg->fc_nh_id);
3825 	if (!rt) {
3826 		err = -ENOMEM;
3827 		goto err;
3828 	}
3829 
3830 	rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len,
3831 					       extack);
3832 	if (IS_ERR(rt->fib6_metrics)) {
3833 		err = PTR_ERR(rt->fib6_metrics);
3834 		goto free;
3835 	}
3836 
3837 	if (cfg->fc_flags & RTF_ADDRCONF)
3838 		rt->dst_nocount = true;
3839 
3840 	if (cfg->fc_flags & RTF_EXPIRES)
3841 		fib6_set_expires(rt, jiffies +
3842 				 clock_t_to_jiffies(cfg->fc_expires));
3843 
3844 	if (cfg->fc_protocol == RTPROT_UNSPEC)
3845 		cfg->fc_protocol = RTPROT_BOOT;
3846 
3847 	rt->fib6_protocol = cfg->fc_protocol;
3848 	rt->fib6_table = table;
3849 	rt->fib6_metric = cfg->fc_metric;
3850 	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
3851 	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
3852 
3853 	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3854 	rt->fib6_dst.plen = cfg->fc_dst_len;
3855 
3856 #ifdef CONFIG_IPV6_SUBTREES
3857 	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3858 	rt->fib6_src.plen = cfg->fc_src_len;
3859 #endif
3860 	return rt;
3861 free:
3862 	kfree(rt);
3863 err:
3864 	return ERR_PTR(err);
3865 }
3866 
3867 static int ip6_route_info_create_nh(struct fib6_info *rt,
3868 				    struct fib6_config *cfg,
3869 				    gfp_t gfp_flags,
3870 				    struct netlink_ext_ack *extack)
3871 {
3872 	struct net *net = cfg->fc_nlinfo.nl_net;
3873 	struct fib6_nh *fib6_nh;
3874 	int err;
3875 
3876 	if (cfg->fc_nh_id) {
3877 		struct nexthop *nh;
3878 
3879 		rcu_read_lock();
3880 
3881 		nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3882 		if (!nh) {
3883 			err = -EINVAL;
3884 			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3885 			goto out_free;
3886 		}
3887 
3888 		err = fib6_check_nexthop(nh, cfg, extack);
3889 		if (err)
3890 			goto out_free;
3891 
3892 		if (!nexthop_get(nh)) {
3893 			NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3894 			err = -ENOENT;
3895 			goto out_free;
3896 		}
3897 
3898 		rt->nh = nh;
3899 		fib6_nh = nexthop_fib6_nh(rt->nh);
3900 
3901 		rcu_read_unlock();
3902 	} else {
3903 		int addr_type;
3904 
3905 		err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3906 		if (err)
3907 			goto out_release;
3908 
3909 		fib6_nh = rt->fib6_nh;
3910 
3911 		/* We cannot add true routes via loopback here, they would
3912 		 * result in kernel looping; promote them to reject routes
3913 		 */
3914 		addr_type = ipv6_addr_type(&cfg->fc_dst);
3915 		if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3916 				   addr_type))
3917 			rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3918 	}
3919 
3920 	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
3921 		struct net_device *dev = fib6_nh->fib_nh_dev;
3922 
3923 		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
3924 			NL_SET_ERR_MSG(extack, "Invalid source address");
3925 			err = -EINVAL;
3926 			goto out_release;
3927 		}
3928 		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3929 		rt->fib6_prefsrc.plen = 128;
3930 	}
3931 
3932 	return 0;
3933 out_release:
3934 	fib6_info_release(rt);
3935 	return err;
3936 out_free:
3937 	rcu_read_unlock();
3938 	ip_fib_metrics_put(rt->fib6_metrics);
3939 	kfree(rt);
3940 	return err;
3941 }
3942 
3943 int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
3944 		  struct netlink_ext_ack *extack)
3945 {
3946 	struct fib6_info *rt;
3947 	int err;
3948 
3949 	err = fib6_config_validate(cfg, extack);
3950 	if (err)
3951 		return err;
3952 
3953 	rt = ip6_route_info_create(cfg, gfp_flags, extack);
3954 	if (IS_ERR(rt))
3955 		return PTR_ERR(rt);
3956 
3957 	err = ip6_route_info_create_nh(rt, cfg, gfp_flags, extack);
3958 	if (err)
3959 		return err;
3960 
3961 	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
3962 	fib6_info_release(rt);
3963 
3964 	return err;
3965 }
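/* Reference-count sketch of the flow above: ip6_route_info_create()
 * returns a fib6_info holding the creation reference; on failure
 * ip6_route_info_create_nh() consumes that reference itself, and on
 * success __ip6_ins_rt() takes its own references for the tree, so
 * the final fib6_info_release() drops only the creation reference.
 */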
3966 
3967 static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
3968 {
3969 	struct net *net = info->nl_net;
3970 	struct fib6_table *table;
3971 	int err;
3972 
3973 	if (rt == net->ipv6.fib6_null_entry) {
3974 		err = -ENOENT;
3975 		goto out;
3976 	}
3977 
3978 	table = rt->fib6_table;
3979 	spin_lock_bh(&table->tb6_lock);
3980 	err = fib6_del(rt, info);
3981 	spin_unlock_bh(&table->tb6_lock);
3982 
3983 out:
3984 	fib6_info_release(rt);
3985 	return err;
3986 }
3987 
3988 int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
3989 {
3990 	struct nl_info info = {
3991 		.nl_net = net,
3992 		.skip_notify = skip_notify
3993 	};
3994 
3995 	return __ip6_del_rt(rt, &info);
3996 }
3997 
3998 static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
3999 {
4000 	struct nl_info *info = &cfg->fc_nlinfo;
4001 	struct net *net = info->nl_net;
4002 	struct sk_buff *skb = NULL;
4003 	struct fib6_table *table;
4004 	int err = -ENOENT;
4005 
4006 	if (rt == net->ipv6.fib6_null_entry)
4007 		goto out_put;
4008 	table = rt->fib6_table;
4009 	spin_lock_bh(&table->tb6_lock);
4010 
4011 	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
4012 		struct fib6_info *sibling, *next_sibling;
4013 		struct fib6_node *fn;
4014 
4015 		/* prefer to send a single notification with all hops */
4016 		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4017 		if (skb) {
4018 			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4019 
4020 			if (rt6_fill_node(net, skb, rt, NULL,
4021 					  NULL, NULL, 0, RTM_DELROUTE,
4022 					  info->portid, seq, 0) < 0) {
4023 				kfree_skb(skb);
4024 				skb = NULL;
4025 			} else
4026 				info->skip_notify = 1;
4027 		}
4028 
4029 		/* 'rt' points to the first sibling route. If it is not the
4030 		 * leaf, then we do not need to send a notification. Otherwise,
4031 		 * we need to check if the last sibling has a next route or not
4032 		 * and emit a replace or delete notification, respectively.
4033 		 */
4034 		info->skip_notify_kernel = 1;
4035 		fn = rcu_dereference_protected(rt->fib6_node,
4036 					    lockdep_is_held(&table->tb6_lock));
4037 		if (rcu_access_pointer(fn->leaf) == rt) {
4038 			struct fib6_info *last_sibling, *replace_rt;
4039 
4040 			last_sibling = list_last_entry(&rt->fib6_siblings,
4041 						       struct fib6_info,
4042 						       fib6_siblings);
4043 			replace_rt = rcu_dereference_protected(
4044 					    last_sibling->fib6_next,
4045 					    lockdep_is_held(&table->tb6_lock));
4046 			if (replace_rt)
4047 				call_fib6_entry_notifiers_replace(net,
4048 								  replace_rt);
4049 			else
4050 				call_fib6_multipath_entry_notifiers(net,
4051 						       FIB_EVENT_ENTRY_DEL,
4052 						       rt, rt->fib6_nsiblings,
4053 						       NULL);
4054 		}
4055 		list_for_each_entry_safe(sibling, next_sibling,
4056 					 &rt->fib6_siblings,
4057 					 fib6_siblings) {
4058 			err = fib6_del(sibling, info);
4059 			if (err)
4060 				goto out_unlock;
4061 		}
4062 	}
4063 
4064 	err = fib6_del(rt, info);
4065 out_unlock:
4066 	spin_unlock_bh(&table->tb6_lock);
4067 out_put:
4068 	fib6_info_release(rt);
4069 
4070 	if (skb) {
4071 		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4072 			    info->nlh, gfp_any());
4073 	}
4074 	return err;
4075 }
4076 
4077 static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
4078 {
4079 	int rc = -ESRCH;
4080 
4081 	if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
4082 		goto out;
4083 
4084 	if (cfg->fc_flags & RTF_GATEWAY &&
4085 	    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
4086 		goto out;
4087 
4088 	rc = rt6_remove_exception_rt(rt);
4089 out:
4090 	return rc;
4091 }
4092 
4093 static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
4094 			     struct fib6_nh *nh)
4095 {
4096 	struct fib6_result res = {
4097 		.f6i = rt,
4098 		.nh = nh,
4099 	};
4100 	struct rt6_info *rt_cache;
4101 
4102 	rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
4103 	if (rt_cache)
4104 		return __ip6_del_cached_rt(rt_cache, cfg);
4105 
4106 	return 0;
4107 }
4108 
4109 struct fib6_nh_del_cached_rt_arg {
4110 	struct fib6_config *cfg;
4111 	struct fib6_info *f6i;
4112 };
4113 
4114 static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
4115 {
4116 	struct fib6_nh_del_cached_rt_arg *arg = _arg;
4117 	int rc;
4118 
4119 	rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
4120 	return rc != -ESRCH ? rc : 0;
4121 }
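/* -ESRCH serves as a "no matching cached route on this nexthop"
 * marker here: nexthop_for_each_fib6_nh() stops at the first nonzero
 * return, so mapping -ESRCH to 0 keeps the walk going over the
 * remaining fib6_nh entries while any other error aborts it.
 */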
4122 
4123 static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
4124 {
4125 	struct fib6_nh_del_cached_rt_arg arg = {
4126 		.cfg = cfg,
4127 		.f6i = f6i
4128 	};
4129 
4130 	return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
4131 }
4132 
4133 static int ip6_route_del(struct fib6_config *cfg,
4134 			 struct netlink_ext_ack *extack)
4135 {
4136 	struct fib6_table *table;
4137 	struct fib6_info *rt;
4138 	struct fib6_node *fn;
4139 	int err = -ESRCH;
4140 
4141 	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
4142 	if (!table) {
4143 		NL_SET_ERR_MSG(extack, "FIB table does not exist");
4144 		return err;
4145 	}
4146 
4147 	rcu_read_lock();
4148 
4149 	fn = fib6_locate(&table->tb6_root,
4150 			 &cfg->fc_dst, cfg->fc_dst_len,
4151 			 &cfg->fc_src, cfg->fc_src_len,
4152 			 !(cfg->fc_flags & RTF_CACHE));
4153 
4154 	if (fn) {
4155 		for_each_fib6_node_rt_rcu(fn) {
4156 			struct fib6_nh *nh;
4157 
4158 			if (rt->nh && cfg->fc_nh_id &&
4159 			    rt->nh->id != cfg->fc_nh_id)
4160 				continue;
4161 
4162 			if (cfg->fc_flags & RTF_CACHE) {
4163 				int rc = 0;
4164 
4165 				if (rt->nh) {
4166 					rc = ip6_del_cached_rt_nh(cfg, rt);
4167 				} else if (cfg->fc_nh_id) {
4168 					continue;
4169 				} else {
4170 					nh = rt->fib6_nh;
4171 					rc = ip6_del_cached_rt(cfg, rt, nh);
4172 				}
4173 				if (rc != -ESRCH) {
4174 					rcu_read_unlock();
4175 					return rc;
4176 				}
4177 				continue;
4178 			}
4179 
4180 			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
4181 				continue;
4182 			if (cfg->fc_protocol &&
4183 			    cfg->fc_protocol != rt->fib6_protocol)
4184 				continue;
4185 
4186 			if (rt->nh) {
4187 				if (!fib6_info_hold_safe(rt))
4188 					continue;
4189 
4190 				err =  __ip6_del_rt(rt, &cfg->fc_nlinfo);
4191 				break;
4192 			}
4193 			if (cfg->fc_nh_id)
4194 				continue;
4195 
4196 			nh = rt->fib6_nh;
4197 			if (cfg->fc_ifindex &&
4198 			    (!nh->fib_nh_dev ||
4199 			     nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
4200 				continue;
4201 			if (cfg->fc_flags & RTF_GATEWAY &&
4202 			    !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
4203 				continue;
4204 			if (!fib6_info_hold_safe(rt))
4205 				continue;
4206 
4207 			/* if gateway was specified only delete the one hop */
4208 			if (cfg->fc_flags & RTF_GATEWAY)
4209 				err = __ip6_del_rt(rt, &cfg->fc_nlinfo);
4210 			else
4211 				err = __ip6_del_rt_siblings(rt, cfg);
4212 			break;
4213 		}
4214 	}
4215 	rcu_read_unlock();
4216 
4217 	return err;
4218 }
4219 
4220 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
4221 {
4222 	struct netevent_redirect netevent;
4223 	struct rt6_info *rt, *nrt = NULL;
4224 	struct fib6_result res = {};
4225 	struct ndisc_options ndopts;
4226 	struct inet6_dev *in6_dev;
4227 	struct neighbour *neigh;
4228 	struct rd_msg *msg;
4229 	int optlen, on_link;
4230 	u8 *lladdr;
4231 
4232 	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
4233 	optlen -= sizeof(*msg);
4234 
4235 	if (optlen < 0) {
4236 		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
4237 		return;
4238 	}
4239 
4240 	msg = (struct rd_msg *)icmp6_hdr(skb);
4241 
4242 	if (ipv6_addr_is_multicast(&msg->dest)) {
4243 		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
4244 		return;
4245 	}
4246 
4247 	on_link = 0;
4248 	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
4249 		on_link = 1;
4250 	} else if (ipv6_addr_type(&msg->target) !=
4251 		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
4252 		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
4253 		return;
4254 	}
4255 
4256 	in6_dev = __in6_dev_get(skb->dev);
4257 	if (!in6_dev)
4258 		return;
4259 	if (READ_ONCE(in6_dev->cnf.forwarding) ||
4260 	    !READ_ONCE(in6_dev->cnf.accept_redirects))
4261 		return;
4262 
4263 	/* RFC2461 8.1:
4264 	 *	The IP source address of the Redirect MUST be the same as the current
4265 	 *	first-hop router for the specified ICMP Destination Address.
4266 	 */
4267 
4268 	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
4269 		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4270 		return;
4271 	}
4272 
4273 	lladdr = NULL;
4274 	if (ndopts.nd_opts_tgt_lladdr) {
4275 		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4276 					     skb->dev);
4277 		if (!lladdr) {
4278 			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4279 			return;
4280 		}
4281 	}
4282 
4283 	rt = dst_rt6_info(dst);
4284 	if (rt->rt6i_flags & RTF_REJECT) {
4285 		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4286 		return;
4287 	}
4288 
4289 	/* Redirect received -> path was valid.
4290 	 * Redirects are sent only in response to data packets,
4291 	 * so this nexthop is apparently reachable. --ANK
4292 	 */
4293 	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
4294 
4295 	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
4296 	if (!neigh)
4297 		return;
4298 
4299 	/*
4300 	 *	We have finally decided to accept it.
4301 	 */
4302 
4303 	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
4304 		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
4305 		     NEIGH_UPDATE_F_OVERRIDE|
4306 		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
4307 				     NEIGH_UPDATE_F_ISROUTER)),
4308 		     NDISC_REDIRECT, &ndopts);
4309 
4310 	rcu_read_lock();
4311 	res.f6i = rcu_dereference(rt->from);
4312 	if (!res.f6i)
4313 		goto out;
4314 
4315 	if (res.f6i->nh) {
4316 		struct fib6_nh_match_arg arg = {
4317 			.dev = dst_dev_rcu(dst),
4318 			.gw = &rt->rt6i_gateway,
4319 		};
4320 
4321 		nexthop_for_each_fib6_nh(res.f6i->nh,
4322 					 fib6_nh_find_match, &arg);
4323 
4324 		/* The fib6_info uses a nexthop with no fib6_nh that
4325 		 * uses dst->dev. Should be impossible.
4326 		 */
4327 		if (!arg.match)
4328 			goto out;
4329 		res.nh = arg.match;
4330 	} else {
4331 		res.nh = res.f6i->fib6_nh;
4332 	}
4333 
4334 	res.fib6_flags = res.f6i->fib6_flags;
4335 	res.fib6_type = res.f6i->fib6_type;
4336 	nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
4337 	if (!nrt)
4338 		goto out;
4339 
4340 	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4341 	if (on_link)
4342 		nrt->rt6i_flags &= ~RTF_GATEWAY;
4343 
4344 	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
4345 
4346 	/* rt6_insert_exception() will take care of duplicated exceptions */
4347 	if (rt6_insert_exception(nrt, &res)) {
4348 		dst_release_immediate(&nrt->dst);
4349 		goto out;
4350 	}
4351 
4352 	netevent.old = &rt->dst;
4353 	netevent.new = &nrt->dst;
4354 	netevent.daddr = &msg->dest;
4355 	netevent.neigh = neigh;
4356 	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4357 
4358 out:
4359 	rcu_read_unlock();
4360 	neigh_release(neigh);
4361 }
4362 
4363 #ifdef CONFIG_IPV6_ROUTE_INFO
4364 static struct fib6_info *rt6_get_route_info(struct net *net,
4365 					   const struct in6_addr *prefix, int prefixlen,
4366 					   const struct in6_addr *gwaddr,
4367 					   struct net_device *dev)
4368 {
4369 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4370 	int ifindex = dev->ifindex;
4371 	struct fib6_node *fn;
4372 	struct fib6_info *rt = NULL;
4373 	struct fib6_table *table;
4374 
4375 	table = fib6_get_table(net, tb_id);
4376 	if (!table)
4377 		return NULL;
4378 
4379 	rcu_read_lock();
4380 	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
4381 	if (!fn)
4382 		goto out;
4383 
4384 	for_each_fib6_node_rt_rcu(fn) {
4385 		/* these routes do not use nexthops */
4386 		if (rt->nh)
4387 			continue;
4388 		if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
4389 			continue;
4390 		if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
4391 		    !rt->fib6_nh->fib_nh_gw_family)
4392 			continue;
4393 		if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
4394 			continue;
4395 		if (!fib6_info_hold_safe(rt))
4396 			continue;
4397 		break;
4398 	}
4399 out:
4400 	rcu_read_unlock();
4401 	return rt;
4402 }
4403 
4404 static struct fib6_info *rt6_add_route_info(struct net *net,
4405 					   const struct in6_addr *prefix, int prefixlen,
4406 					   const struct in6_addr *gwaddr,
4407 					   struct net_device *dev,
4408 					   unsigned int pref)
4409 {
4410 	struct fib6_config cfg = {
4411 		.fc_metric	= IP6_RT_PRIO_USER,
4412 		.fc_ifindex	= dev->ifindex,
4413 		.fc_dst_len	= prefixlen,
4414 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4415 				  RTF_UP | RTF_PREF(pref),
4416 		.fc_protocol = RTPROT_RA,
4417 		.fc_type = RTN_UNICAST,
4418 		.fc_nlinfo.portid = 0,
4419 		.fc_nlinfo.nlh = NULL,
4420 		.fc_nlinfo.nl_net = net,
4421 	};
4422 
4423 	cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4424 	cfg.fc_dst = *prefix;
4425 	cfg.fc_gateway = *gwaddr;
4426 
4427 	/* We should treat it as a default route if prefix length is 0. */
4428 	if (!prefixlen)
4429 		cfg.fc_flags |= RTF_DEFAULT;
4430 
4431 	ip6_route_add(&cfg, GFP_ATOMIC, NULL);
4432 
4433 	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
4434 }
4435 #endif
4436 
4437 struct fib6_info *rt6_get_dflt_router(struct net *net,
4438 				     const struct in6_addr *addr,
4439 				     struct net_device *dev)
4440 {
4441 	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
4442 	struct fib6_info *rt;
4443 	struct fib6_table *table;
4444 
4445 	table = fib6_get_table(net, tb_id);
4446 	if (!table)
4447 		return NULL;
4448 
4449 	rcu_read_lock();
4450 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4451 		struct fib6_nh *nh;
4452 
4453 		/* RA routes do not use nexthops */
4454 		if (rt->nh)
4455 			continue;
4456 
4457 		nh = rt->fib6_nh;
4458 		if (dev == nh->fib_nh_dev &&
4459 		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
4460 		    ipv6_addr_equal(&nh->fib_nh_gw6, addr))
4461 			break;
4462 	}
4463 	if (rt && !fib6_info_hold_safe(rt))
4464 		rt = NULL;
4465 	rcu_read_unlock();
4466 	return rt;
4467 }
4468 
4469 struct fib6_info *rt6_add_dflt_router(struct net *net,
4470 				     const struct in6_addr *gwaddr,
4471 				     struct net_device *dev,
4472 				     unsigned int pref,
4473 				     u32 defrtr_usr_metric,
4474 				     int lifetime)
4475 {
4476 	struct fib6_config cfg = {
4477 		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
4478 		.fc_metric	= defrtr_usr_metric,
4479 		.fc_ifindex	= dev->ifindex,
4480 		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4481 				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
4482 		.fc_protocol = RTPROT_RA,
4483 		.fc_type = RTN_UNICAST,
4484 		.fc_nlinfo.portid = 0,
4485 		.fc_nlinfo.nlh = NULL,
4486 		.fc_nlinfo.nl_net = net,
4487 		.fc_expires = jiffies_to_clock_t(lifetime * HZ),
4488 	};
4489 
4490 	cfg.fc_gateway = *gwaddr;
4491 
4492 	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
4493 		struct fib6_table *table;
4494 
4495 		table = fib6_get_table(dev_net(dev), cfg.fc_table);
4496 		if (table)
4497 			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4498 	}
4499 
4500 	return rt6_get_dflt_router(net, gwaddr, dev);
4501 }
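/* Sketch of the intended RA flow, assuming an advertisement with a
 * finite router lifetime: ndisc calls rt6_add_dflt_router(), which
 * installs an RTF_ADDRCONF | RTF_DEFAULT | RTF_EXPIRES route in
 * RT6_TABLE_DFLT; a later RA with lifetime 0 lets ndisc find the
 * route again via rt6_get_dflt_router() and delete it.
 */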
4502 
4503 static void __rt6_purge_dflt_routers(struct net *net,
4504 				     struct fib6_table *table)
4505 {
4506 	struct fib6_info *rt;
4507 
4508 restart:
4509 	rcu_read_lock();
4510 	for_each_fib6_node_rt_rcu(&table->tb6_root) {
4511 		struct net_device *dev = fib6_info_nh_dev(rt);
4512 		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
4513 
4514 		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
4515 		    (!idev || idev->cnf.accept_ra != 2) &&
4516 		    fib6_info_hold_safe(rt)) {
4517 			rcu_read_unlock();
4518 			ip6_del_rt(net, rt, false);
4519 			goto restart;
4520 		}
4521 	}
4522 	rcu_read_unlock();
4523 
4524 	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
4525 }
4526 
4527 void rt6_purge_dflt_routers(struct net *net)
4528 {
4529 	struct fib6_table *table;
4530 	struct hlist_head *head;
4531 	unsigned int h;
4532 
4533 	rcu_read_lock();
4534 
4535 	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4536 		head = &net->ipv6.fib_table_hash[h];
4537 		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4538 			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
4539 				__rt6_purge_dflt_routers(net, table);
4540 		}
4541 	}
4542 
4543 	rcu_read_unlock();
4544 }
4545 
4546 static void rtmsg_to_fib6_config(struct net *net,
4547 				 struct in6_rtmsg *rtmsg,
4548 				 struct fib6_config *cfg)
4549 {
4550 	*cfg = (struct fib6_config){
4551 		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4552 			 : RT6_TABLE_MAIN,
4553 		.fc_ifindex = rtmsg->rtmsg_ifindex,
4554 		.fc_metric = rtmsg->rtmsg_metric,
4555 		.fc_expires = rtmsg->rtmsg_info,
4556 		.fc_dst_len = rtmsg->rtmsg_dst_len,
4557 		.fc_src_len = rtmsg->rtmsg_src_len,
4558 		.fc_flags = rtmsg->rtmsg_flags,
4559 		.fc_type = rtmsg->rtmsg_type,
4560 
4561 		.fc_nlinfo.nl_net = net,
4562 
4563 		.fc_dst = rtmsg->rtmsg_dst,
4564 		.fc_src = rtmsg->rtmsg_src,
4565 		.fc_gateway = rtmsg->rtmsg_gateway,
4566 	};
4567 }
4568 
4569 int ipv6_route_ioctl(struct net *net, unsigned int cmd, struct in6_rtmsg *rtmsg)
4570 {
4571 	struct fib6_config cfg;
4572 	int err;
4573 
4574 	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
4575 		return -EINVAL;
4576 	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
4577 		return -EPERM;
4578 
4579 	rtmsg_to_fib6_config(net, rtmsg, &cfg);
4580 
4581 	switch (cmd) {
4582 	case SIOCADDRT:
4583 		/* Only apply the default fc_metric when adding a route */
4584 		if (cfg.fc_metric == 0)
4585 			cfg.fc_metric = IP6_RT_PRIO_USER;
4586 		err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
4587 		break;
4588 	case SIOCDELRT:
4589 		err = ip6_route_del(&cfg, NULL);
4590 		break;
4591 	}
4592 
4593 	return err;
4594 }
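/* This is the legacy ioctl entry point used by tools such as
 * net-tools' "route -A inet6"; the rtnetlink path (rtm_to_fib6_config()
 * below feeding RTM_NEWROUTE/RTM_DELROUTE) is the modern interface and
 * exposes the full fib6_config, including metrics and nexthop objects.
 */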
4595 
4596 /*
4597  *	Drop the packet on the floor
4598  */
4599 
4600 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
4601 {
4602 	struct dst_entry *dst = skb_dst(skb);
4603 	struct net_device *dev = dst_dev(dst);
4604 	struct net *net = dev_net(dev);
4605 	struct inet6_dev *idev;
4606 	SKB_DR(reason);
4607 	int type;
4608 
4609 	if (netif_is_l3_master(skb->dev) ||
4610 	    dev == net->loopback_dev)
4611 		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
4612 	else
4613 		idev = ip6_dst_idev(dst);
4614 
4615 	switch (ipstats_mib_noroutes) {
4616 	case IPSTATS_MIB_INNOROUTES:
4617 		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
4618 		if (type == IPV6_ADDR_ANY) {
4619 			SKB_DR_SET(reason, IP_INADDRERRORS);
4620 			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
4621 			break;
4622 		}
4623 		SKB_DR_SET(reason, IP_INNOROUTES);
4624 		fallthrough;
4625 	case IPSTATS_MIB_OUTNOROUTES:
4626 		SKB_DR_OR(reason, IP_OUTNOROUTES);
4627 		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
4628 		break;
4629 	}
4630 
4631 	/* Start over by dropping the dst for l3mdev case */
4632 	if (netif_is_l3_master(skb->dev))
4633 		skb_dst_drop(skb);
4634 
4635 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
4636 	kfree_skb_reason(skb, reason);
4637 	return 0;
4638 }
4639 
4640 static int ip6_pkt_discard(struct sk_buff *skb)
4641 {
4642 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
4643 }
4644 
4645 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4646 {
4647 	skb->dev = skb_dst_dev(skb);
4648 	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
4649 }
4650 
4651 static int ip6_pkt_prohibit(struct sk_buff *skb)
4652 {
4653 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
4654 }
4655 
4656 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
4657 {
4658 	skb->dev = skb_dst_dev(skb);
4659 	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
4660 }
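/* These handlers back the ICMP-generating reject route types:
 * unreachable/throw routes use the discard variants (ICMPV6_NOROUTE),
 * prohibit routes the ICMPV6_ADM_PROHIBITED variants, while blackhole
 * routes drop silently via dst_discard and never reach ip6_pkt_drop().
 * The _out variants differ only in resetting skb->dev to the output
 * device and counting against OUTNOROUTES instead of INNOROUTES.
 */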
4661 
4662 /*
4663  *	Allocate a dst for local (unicast / anycast) address.
4664  */
4665 
4666 struct fib6_info *addrconf_f6i_alloc(struct net *net,
4667 				     struct inet6_dev *idev,
4668 				     const struct in6_addr *addr,
4669 				     bool anycast, gfp_t gfp_flags,
4670 				     struct netlink_ext_ack *extack)
4671 {
4672 	struct fib6_config cfg = {
4673 		.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4674 		.fc_ifindex = idev->dev->ifindex,
4675 		.fc_flags = RTF_UP | RTF_NONEXTHOP,
4676 		.fc_dst = *addr,
4677 		.fc_dst_len = 128,
4678 		.fc_protocol = RTPROT_KERNEL,
4679 		.fc_nlinfo.nl_net = net,
4680 		.fc_ignore_dev_down = true,
4681 	};
4682 	struct fib6_info *f6i;
4683 	int err;
4684 
4685 	if (anycast) {
4686 		cfg.fc_type = RTN_ANYCAST;
4687 		cfg.fc_flags |= RTF_ANYCAST;
4688 	} else {
4689 		cfg.fc_type = RTN_LOCAL;
4690 		cfg.fc_flags |= RTF_LOCAL;
4691 	}
4692 
4693 	f6i = ip6_route_info_create(&cfg, gfp_flags, extack);
4694 	if (IS_ERR(f6i))
4695 		return f6i;
4696 
4697 	err = ip6_route_info_create_nh(f6i, &cfg, gfp_flags, extack);
4698 	if (err)
4699 		return ERR_PTR(err);
4700 
4701 	f6i->dst_nocount = true;
4702 
4703 	if (!anycast &&
4704 	    (READ_ONCE(net->ipv6.devconf_all->disable_policy) ||
4705 	     READ_ONCE(idev->cnf.disable_policy)))
4706 		f6i->dst_nopolicy = true;
4707 
4708 	return f6i;
4709 }
4710 
4711 /* remove deleted ip from prefsrc entries */
4712 struct arg_dev_net_ip {
4713 	struct net *net;
4714 	struct in6_addr *addr;
4715 };
4716 
4717 static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
4718 {
4719 	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
4720 	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
4721 
4722 	if (!rt->nh &&
4723 	    rt != net->ipv6.fib6_null_entry &&
4724 	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr) &&
4725 	    !ipv6_chk_addr(net, addr, rt->fib6_nh->fib_nh_dev, 0)) {
4726 		spin_lock_bh(&rt6_exception_lock);
4727 		/* remove prefsrc entry */
4728 		rt->fib6_prefsrc.plen = 0;
4729 		spin_unlock_bh(&rt6_exception_lock);
4730 	}
4731 	return 0;
4732 }
4733 
4734 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4735 {
4736 	struct net *net = dev_net(ifp->idev->dev);
4737 	struct arg_dev_net_ip adni = {
4738 		.net = net,
4739 		.addr = &ifp->addr,
4740 	};
4741 	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
4742 }
4743 
4744 #define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT)
4745 
4746 /* Remove routers and update dst entries when a gateway turns into a host. */
4747 static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
4748 {
4749 	struct in6_addr *gateway = (struct in6_addr *)arg;
4750 	struct fib6_nh *nh;
4751 
4752 	/* RA routes do not use nexthops */
4753 	if (rt->nh)
4754 		return 0;
4755 
4756 	nh = rt->fib6_nh;
4757 	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
4758 	    nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
4759 		return -1;
4760 
4761 	/* Further clean up cached routes in the exception table.
4762 	 * This is needed because a cached route may have a different
4763 	 * gateway than its 'parent' in the case of an IP redirect.
4764 	 */
4765 	fib6_nh_exceptions_clean_tohost(nh, gateway);
4766 
4767 	return 0;
4768 }
4769 
4770 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
4771 {
4772 	fib6_clean_all(net, fib6_clean_tohost, gateway);
4773 }
4774 
4775 struct arg_netdev_event {
4776 	const struct net_device *dev;
4777 	union {
4778 		unsigned char nh_flags;
4779 		unsigned long event;
4780 	};
4781 };
4782 
4783 static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
4784 {
4785 	struct fib6_info *iter;
4786 	struct fib6_node *fn;
4787 
4788 	fn = rcu_dereference_protected(rt->fib6_node,
4789 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4790 	iter = rcu_dereference_protected(fn->leaf,
4791 			lockdep_is_held(&rt->fib6_table->tb6_lock));
4792 	while (iter) {
4793 		if (iter->fib6_metric == rt->fib6_metric &&
4794 		    rt6_qualify_for_ecmp(iter))
4795 			return iter;
4796 		iter = rcu_dereference_protected(iter->fib6_next,
4797 				lockdep_is_held(&rt->fib6_table->tb6_lock));
4798 	}
4799 
4800 	return NULL;
4801 }
4802 
4803 /* only called for fib entries with builtin fib6_nh */
4804 static bool rt6_is_dead(const struct fib6_info *rt)
4805 {
4806 	if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4807 	    (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4808 	     ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
4809 		return true;
4810 
4811 	return false;
4812 }
4813 
4814 static int rt6_multipath_total_weight(const struct fib6_info *rt)
4815 {
4816 	struct fib6_info *iter;
4817 	int total = 0;
4818 
4819 	if (!rt6_is_dead(rt))
4820 		total += rt->fib6_nh->fib_nh_weight;
4821 
4822 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
4823 		if (!rt6_is_dead(iter))
4824 			total += iter->fib6_nh->fib_nh_weight;
4825 	}
4826 
4827 	return total;
4828 }
4829 
4830 static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
4831 {
4832 	int upper_bound = -1;
4833 
4834 	if (!rt6_is_dead(rt)) {
4835 		*weight += rt->fib6_nh->fib_nh_weight;
4836 		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
4837 						    total) - 1;
4838 	}
4839 	atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
4840 }
4841 
4842 static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
4843 {
4844 	struct fib6_info *iter;
4845 	int weight = 0;
4846 
4847 	rt6_upper_bound_set(rt, &weight, total);
4848 
4849 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4850 		rt6_upper_bound_set(iter, &weight, total);
4851 }
4852 
4853 void rt6_multipath_rebalance(struct fib6_info *rt)
4854 {
4855 	struct fib6_info *first;
4856 	int total;
4857 
4858 	/* If the entire multipath route was marked for flushing,
4859 	 * there is no need to rebalance upon the removal of every
4860 	 * sibling route.
4861 	 */
4862 	if (!rt->fib6_nsiblings || rt->should_flush)
4863 		return;
4864 
4865 	/* During lookup routes are evaluated in order, so we need to
4866 	 * make sure upper bounds are assigned from the first sibling
4867 	 * onwards.
4868 	 */
4869 	first = rt6_multipath_first_sibling(rt);
4870 	if (WARN_ON_ONCE(!first))
4871 		return;
4872 
4873 	total = rt6_multipath_total_weight(first);
4874 	rt6_multipath_upper_bound_set(first, total);
4875 }
4876 
4877 static int fib6_ifup(struct fib6_info *rt, void *p_arg)
4878 {
4879 	const struct arg_netdev_event *arg = p_arg;
4880 	struct net *net = dev_net(arg->dev);
4881 
4882 	if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
4883 	    rt->fib6_nh->fib_nh_dev == arg->dev) {
4884 		rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
4885 		fib6_update_sernum_upto_root(net, rt);
4886 		rt6_multipath_rebalance(rt);
4887 	}
4888 
4889 	return 0;
4890 }
4891 
4892 void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
4893 {
4894 	struct arg_netdev_event arg = {
4895 		.dev = dev,
4896 		{
4897 			.nh_flags = nh_flags,
4898 		},
4899 	};
4900 
4901 	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
4902 		arg.nh_flags |= RTNH_F_LINKDOWN;
4903 
4904 	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
4905 }
4906 
4907 /* only called for fib entries with inline fib6_nh */
4908 static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
4909 				   const struct net_device *dev)
4910 {
4911 	struct fib6_info *iter;
4912 
4913 	if (rt->fib6_nh->fib_nh_dev == dev)
4914 		return true;
4915 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4916 		if (iter->fib6_nh->fib_nh_dev == dev)
4917 			return true;
4918 
4919 	return false;
4920 }
4921 
4922 static void rt6_multipath_flush(struct fib6_info *rt)
4923 {
4924 	struct fib6_info *iter;
4925 
4926 	rt->should_flush = 1;
4927 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4928 		iter->should_flush = 1;
4929 }
4930 
4931 static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
4932 					     const struct net_device *down_dev)
4933 {
4934 	struct fib6_info *iter;
4935 	unsigned int dead = 0;
4936 
4937 	if (rt->fib6_nh->fib_nh_dev == down_dev ||
4938 	    rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4939 		dead++;
4940 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4941 		if (iter->fib6_nh->fib_nh_dev == down_dev ||
4942 		    iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
4943 			dead++;
4944 
4945 	return dead;
4946 }
4947 
4948 static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
4949 				       const struct net_device *dev,
4950 				       unsigned char nh_flags)
4951 {
4952 	struct fib6_info *iter;
4953 
4954 	if (rt->fib6_nh->fib_nh_dev == dev)
4955 		rt->fib6_nh->fib_nh_flags |= nh_flags;
4956 	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
4957 		if (iter->fib6_nh->fib_nh_dev == dev)
4958 			iter->fib6_nh->fib_nh_flags |= nh_flags;
4959 }
4960 
4961 /* called with write lock held for table with rt */
4962 static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
4963 {
4964 	const struct arg_netdev_event *arg = p_arg;
4965 	const struct net_device *dev = arg->dev;
4966 	struct net *net = dev_net(dev);
4967 
4968 	if (rt == net->ipv6.fib6_null_entry || rt->nh)
4969 		return 0;
4970 
4971 	switch (arg->event) {
4972 	case NETDEV_UNREGISTER:
4973 		return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4974 	case NETDEV_DOWN:
4975 		if (rt->should_flush)
4976 			return -1;
4977 		if (!rt->fib6_nsiblings)
4978 			return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
4979 		if (rt6_multipath_uses_dev(rt, dev)) {
4980 			unsigned int count;
4981 
4982 			count = rt6_multipath_dead_count(rt, dev);
4983 			if (rt->fib6_nsiblings + 1 == count) {
4984 				rt6_multipath_flush(rt);
4985 				return -1;
4986 			}
4987 			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
4988 						   RTNH_F_LINKDOWN);
4989 			fib6_update_sernum(net, rt);
4990 			rt6_multipath_rebalance(rt);
4991 		}
4992 		return -2;
4993 	case NETDEV_CHANGE:
4994 		if (rt->fib6_nh->fib_nh_dev != dev ||
4995 		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
4996 			break;
4997 		rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
4998 		rt6_multipath_rebalance(rt);
4999 		break;
5000 	}
5001 
5002 	return 0;
5003 }
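/* Return-value convention for this walker callback (as consumed by
 * fib6_clean_node()): 0 keeps the entry, -1 asks the walker to delete
 * it, and -2 signals that a whole multipath route was handled here so
 * the walker may skip past the remaining siblings.
 */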
5004 
5005 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
5006 {
5007 	struct arg_netdev_event arg = {
5008 		.dev = dev,
5009 		{
5010 			.event = event,
5011 		},
5012 	};
5013 	struct net *net = dev_net(dev);
5014 
5015 	if (READ_ONCE(net->ipv6.sysctl.skip_notify_on_dev_down))
5016 		fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
5017 	else
5018 		fib6_clean_all(net, fib6_ifdown, &arg);
5019 }
5020 
5021 void rt6_disable_ip(struct net_device *dev, unsigned long event)
5022 {
5023 	rt6_sync_down_dev(dev, event);
5024 	rt6_uncached_list_flush_dev(dev);
5025 	neigh_ifdown(&nd_tbl, dev);
5026 }
5027 
5028 struct rt6_mtu_change_arg {
5029 	struct net_device *dev;
5030 	unsigned int mtu;
5031 	struct fib6_info *f6i;
5032 };
5033 
5034 static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
5035 {
5036 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
5037 	struct fib6_info *f6i = arg->f6i;
5038 
5039 	/* After an administrative MTU increase there is no way to
5040 	 * discover an IPv6 PMTU increase, so the PMTU must be updated
5041 	 * here. RFC 1981 doesn't cover administrative MTU increases
5042 	 * (e.g. enabling jumbo frames), so updating here is a MUST.
5043 	 */
5044 	if (nh->fib_nh_dev == arg->dev) {
5045 		struct inet6_dev *idev = __in6_dev_get(arg->dev);
5046 		u32 mtu = f6i->fib6_pmtu;
5047 
5048 		if (mtu >= arg->mtu ||
5049 		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
5050 			fib6_metric_set(f6i, RTAX_MTU, arg->mtu);
5051 
5052 		spin_lock_bh(&rt6_exception_lock);
5053 		rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
5054 		spin_unlock_bh(&rt6_exception_lock);
5055 	}
5056 
5057 	return 0;
5058 }
5059 
5060 static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
5061 {
5062 	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
5063 	struct inet6_dev *idev;
5064 
5065 	/* In IPv6, PMTU discovery is not optional,
5066 	 * so an RTAX_MTU lock cannot disable it.
5067 	 * We still use this lock to block changes
5068 	 * caused by addrconf/ndisc.
5069 	 */
5070 
5071 	idev = __in6_dev_get(arg->dev);
5072 	if (!idev)
5073 		return 0;
5074 
5075 	if (fib6_metric_locked(f6i, RTAX_MTU))
5076 		return 0;
5077 
5078 	arg->f6i = f6i;
5079 	if (f6i->nh) {
5080 		/* fib6_nh_mtu_change only returns 0, so this is safe */
5081 		return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
5082 						arg);
5083 	}
5084 
5085 	return fib6_nh_mtu_change(f6i->fib6_nh, arg);
5086 }
5087 
5088 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
5089 {
5090 	struct rt6_mtu_change_arg arg = {
5091 		.dev = dev,
5092 		.mtu = mtu,
5093 	};
5094 
5095 	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
5096 }
5097 
5098 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
5099 	[RTA_UNSPEC]		= { .strict_start_type = RTA_DPORT + 1 },
5100 	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
5101 	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
5102 	[RTA_OIF]               = { .type = NLA_U32 },
5103 	[RTA_IIF]		= { .type = NLA_U32 },
5104 	[RTA_PRIORITY]          = { .type = NLA_U32 },
5105 	[RTA_METRICS]           = { .type = NLA_NESTED },
5106 	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
5107 	[RTA_PREF]              = { .type = NLA_U8 },
5108 	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
5109 	[RTA_ENCAP]		= { .type = NLA_NESTED },
5110 	[RTA_EXPIRES]		= { .type = NLA_U32 },
5111 	[RTA_UID]		= { .type = NLA_U32 },
5112 	[RTA_MARK]		= { .type = NLA_U32 },
5113 	[RTA_TABLE]		= { .type = NLA_U32 },
5114 	[RTA_IP_PROTO]		= { .type = NLA_U8 },
5115 	[RTA_SPORT]		= { .type = NLA_U16 },
5116 	[RTA_DPORT]		= { .type = NLA_U16 },
5117 	[RTA_NH_ID]		= { .type = NLA_U32 },
5118 	[RTA_FLOWLABEL]		= { .type = NLA_BE32 },
5119 };
5120 
5121 static int rtm_to_fib6_multipath_config(struct fib6_config *cfg,
5122 					struct netlink_ext_ack *extack,
5123 					bool newroute)
5124 {
5125 	struct rtnexthop *rtnh;
5126 	int remaining;
5127 
5128 	remaining = cfg->fc_mp_len;
5129 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5130 
5131 	if (!rtnh_ok(rtnh, remaining)) {
5132 		NL_SET_ERR_MSG(extack, "Invalid nexthop configuration - no valid nexthops");
5133 		return -EINVAL;
5134 	}
5135 
5136 	do {
5137 		bool has_gateway = cfg->fc_flags & RTF_GATEWAY;
5138 		int attrlen = rtnh_attrlen(rtnh);
5139 
5140 		if (attrlen > 0) {
5141 			struct nlattr *nla, *attrs;
5142 
5143 			attrs = rtnh_attrs(rtnh);
5144 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5145 			if (nla) {
5146 				if (nla_len(nla) < sizeof(cfg->fc_gateway)) {
5147 					NL_SET_ERR_MSG(extack,
5148 						       "Invalid IPv6 address in RTA_GATEWAY");
5149 					return -EINVAL;
5150 				}
5151 
5152 				has_gateway = true;
5153 			}
5154 		}
5155 
5156 		if (newroute && (cfg->fc_nh_id || !has_gateway)) {
5157 			NL_SET_ERR_MSG(extack,
5158 				       "Device only routes can not be added for IPv6 using the multipath API.");
5159 			return -EINVAL;
5160 		}
5161 
5162 		rtnh = rtnh_next(rtnh, &remaining);
5163 	} while (rtnh_ok(rtnh, remaining));
5164 
5165 	return lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len, extack);
5166 }
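/* For example, a request such as
 *
 *   ip -6 route add 2001:db8::/64 nexthop dev eth0 nexthop dev eth1
 *
 * carries RTA_MULTIPATH hops without gateways, so a new route fails
 * the has_gateway check above: device-only IPv6 ECMP must be
 * expressed via nexthop objects rather than the legacy multipath API.
 */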
5167 
5168 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
5169 			      struct fib6_config *cfg,
5170 			      struct netlink_ext_ack *extack)
5171 {
5172 	bool newroute = nlh->nlmsg_type == RTM_NEWROUTE;
5173 	struct nlattr *tb[RTA_MAX+1];
5174 	struct rtmsg *rtm;
5175 	unsigned int pref;
5176 	int err;
5177 
5178 	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5179 				     rtm_ipv6_policy, extack);
5180 	if (err < 0)
5181 		goto errout;
5182 
5183 	err = -EINVAL;
5184 	rtm = nlmsg_data(nlh);
5185 
5186 	if (rtm->rtm_tos) {
5187 		NL_SET_ERR_MSG(extack,
5188 			       "Invalid dsfield (tos): option not available for IPv6");
5189 		goto errout;
5190 	}
5191 
5192 	if (tb[RTA_FLOWLABEL]) {
5193 		NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
5194 				    "Flow label cannot be specified for this operation");
5195 		goto errout;
5196 	}
5197 
5198 	*cfg = (struct fib6_config){
5199 		.fc_table = rtm->rtm_table,
5200 		.fc_dst_len = rtm->rtm_dst_len,
5201 		.fc_src_len = rtm->rtm_src_len,
5202 		.fc_flags = RTF_UP,
5203 		.fc_protocol = rtm->rtm_protocol,
5204 		.fc_type = rtm->rtm_type,
5205 
5206 		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
5207 		.fc_nlinfo.nlh = nlh,
5208 		.fc_nlinfo.nl_net = sock_net(skb->sk),
5209 	};
5210 
5211 	if (rtm->rtm_type == RTN_UNREACHABLE ||
5212 	    rtm->rtm_type == RTN_BLACKHOLE ||
5213 	    rtm->rtm_type == RTN_PROHIBIT ||
5214 	    rtm->rtm_type == RTN_THROW)
5215 		cfg->fc_flags |= RTF_REJECT;
5216 
5217 	if (rtm->rtm_type == RTN_LOCAL)
5218 		cfg->fc_flags |= RTF_LOCAL;
5219 
5220 	if (rtm->rtm_flags & RTM_F_CLONED)
5221 		cfg->fc_flags |= RTF_CACHE;
5222 
5223 	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
5224 
5225 	if (tb[RTA_NH_ID]) {
5226 		if (tb[RTA_GATEWAY]   || tb[RTA_OIF] ||
5227 		    tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
5228 			NL_SET_ERR_MSG(extack,
5229 				       "Nexthop specification and nexthop id are mutually exclusive");
5230 			goto errout;
5231 		}
5232 		cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
5233 	}
5234 
5235 	if (tb[RTA_GATEWAY]) {
5236 		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
5237 		cfg->fc_flags |= RTF_GATEWAY;
5238 	}
5239 	if (tb[RTA_VIA]) {
5240 		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
5241 		goto errout;
5242 	}
5243 
5244 	if (tb[RTA_DST]) {
5245 		int plen = (rtm->rtm_dst_len + 7) >> 3;
5246 
5247 		if (nla_len(tb[RTA_DST]) < plen)
5248 			goto errout;
5249 
5250 		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
5251 	}
5252 
5253 	if (tb[RTA_SRC]) {
5254 		int plen = (rtm->rtm_src_len + 7) >> 3;
5255 
5256 		if (nla_len(tb[RTA_SRC]) < plen)
5257 			goto errout;
5258 
5259 		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
5260 	}
5261 
5262 	if (tb[RTA_PREFSRC])
5263 		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
5264 
5265 	if (tb[RTA_OIF])
5266 		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
5267 
5268 	if (tb[RTA_PRIORITY])
5269 		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
5270 
5271 	if (tb[RTA_METRICS]) {
5272 		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
5273 		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
5274 	}
5275 
5276 	if (tb[RTA_TABLE])
5277 		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
5278 
5279 	if (tb[RTA_MULTIPATH]) {
5280 		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
5281 		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
5282 
5283 		err = rtm_to_fib6_multipath_config(cfg, extack, newroute);
5284 		if (err < 0)
5285 			goto errout;
5286 	}
5287 
5288 	if (tb[RTA_PREF]) {
5289 		pref = nla_get_u8(tb[RTA_PREF]);
5290 		if (pref != ICMPV6_ROUTER_PREF_LOW &&
5291 		    pref != ICMPV6_ROUTER_PREF_HIGH)
5292 			pref = ICMPV6_ROUTER_PREF_MEDIUM;
5293 		cfg->fc_flags |= RTF_PREF(pref);
5294 	}
5295 
5296 	if (tb[RTA_ENCAP])
5297 		cfg->fc_encap = tb[RTA_ENCAP];
5298 
5299 	if (tb[RTA_ENCAP_TYPE]) {
5300 		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
5301 
5302 		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
5303 		if (err < 0)
5304 			goto errout;
5305 	}
5306 
5307 	if (tb[RTA_EXPIRES]) {
5308 		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
5309 
5310 		if (addrconf_finite_timeout(timeout)) {
5311 			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
5312 			cfg->fc_flags |= RTF_EXPIRES;
5313 		}
5314 	}
5315 
5316 	err = 0;
5317 errout:
5318 	return err;
5319 }
5320 
5321 struct rt6_nh {
5322 	struct fib6_info *fib6_info;
5323 	struct fib6_config r_cfg;
5324 	struct list_head list;
5325 };
5326 
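/* Queue one fib6_info on the temporary rt6_nh_list used while a
 * multipath route is assembled.  Nexthops that rt6_duplicate_nexthop()
 * considers duplicates are rejected with -EEXIST.
 */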
5327 static int ip6_route_info_append(struct list_head *rt6_nh_list,
5328 				 struct fib6_info *rt,
5329 				 struct fib6_config *r_cfg)
5330 {
5331 	struct rt6_nh *nh;
5332 
5333 	list_for_each_entry(nh, rt6_nh_list, list) {
5334 		/* check if fib6_info already exists */
5335 		if (rt6_duplicate_nexthop(nh->fib6_info, rt))
5336 			return -EEXIST;
5337 	}
5338 
5339 	nh = kzalloc_obj(*nh);
5340 	if (!nh)
5341 		return -ENOMEM;
5342 
5343 	nh->fib6_info = rt;
5344 	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5345 	list_add_tail(&nh->list, rt6_nh_list);
5346 
5347 	return 0;
5348 }
5349 
5350 static void ip6_route_mpath_notify(struct fib6_info *rt,
5351 				   struct fib6_info *rt_last,
5352 				   struct nl_info *info,
5353 				   __u16 nlflags)
5354 {
5355 	/* if this is an APPEND route, then rt points to the first route
5356 	 * inserted and rt_last points to the last route inserted. Userspace
5357 	 * wants a consistent dump of the route which starts at the first
5358 	 * nexthop. Since sibling routes are always added at the end of
5359 	 * the list, find the first sibling of the last route appended.
5360 	 */
5361 	rcu_read_lock();
5362 
5363 	if ((nlflags & NLM_F_APPEND) && rt_last &&
5364 	    READ_ONCE(rt_last->fib6_nsiblings)) {
5365 		rt = list_first_or_null_rcu(&rt_last->fib6_siblings,
5366 					    struct fib6_info,
5367 					    fib6_siblings);
5368 	}
5369 
5370 	if (rt)
5371 		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5372 
5373 	rcu_read_unlock();
5374 }
5375 
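/* An in-kernel (FIB notifier) notification is only needed when the
 * new route became the leaf of its fib6_node or qualifies for ECMP
 * with the current leaf at the same metric, i.e. it was inserted as
 * the first route in the node or appended to it as a sibling.
 */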
5376 static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
5377 {
5378 	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
5379 	bool should_notify = false;
5380 	struct fib6_info *leaf;
5381 	struct fib6_node *fn;
5382 
5383 	rcu_read_lock();
5384 	fn = rcu_dereference(rt->fib6_node);
5385 	if (!fn)
5386 		goto out;
5387 
5388 	leaf = rcu_dereference(fn->leaf);
5389 	if (!leaf)
5390 		goto out;
5391 
5392 	if (rt == leaf ||
5393 	    (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
5394 	     rt6_qualify_for_ecmp(leaf)))
5395 		should_notify = true;
5396 out:
5397 	rcu_read_unlock();
5398 
5399 	return should_notify;
5400 }
5401 
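/* Add a multipath route: each rtnexthop in RTA_MULTIPATH is turned
 * into its own fib6_info, collected on rt6_nh_list and then inserted
 * one by one; if any insertion fails, the entries added so far are
 * deleted again.
 *
 * Illustrative userspace trigger (iproute2 syntax; addresses and
 * devices are examples only):
 *
 *   ip -6 route add 2001:db8::/64 \
 *           nexthop via fe80::1 dev eth0 \
 *           nexthop via fe80::2 dev eth1
 *
 * which sends one RTM_NEWROUTE message whose RTA_MULTIPATH attribute
 * carries one rtnexthop per "nexthop" keyword.
 */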
5402 static int ip6_route_multipath_add(struct fib6_config *cfg,
5403 				   struct netlink_ext_ack *extack)
5404 {
5405 	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
5406 	struct nl_info *info = &cfg->fc_nlinfo;
5407 	struct rt6_nh *nh, *nh_safe;
5408 	struct fib6_config r_cfg;
5409 	struct rtnexthop *rtnh;
5410 	LIST_HEAD(rt6_nh_list);
5411 	struct rt6_nh *err_nh;
5412 	struct fib6_info *rt;
5413 	__u16 nlflags;
5414 	int remaining;
5415 	int attrlen;
5416 	int replace;
5417 	int nhn = 0;
5418 	int err;
5419 
5420 	err = fib6_config_validate(cfg, extack);
5421 	if (err)
5422 		return err;
5423 
5424 	replace = (cfg->fc_nlinfo.nlh &&
5425 		   (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
5426 
5427 	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
5428 	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
5429 		nlflags |= NLM_F_APPEND;
5430 
5431 	remaining = cfg->fc_mp_len;
5432 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5433 
5434 	/* Parse each multipath entry and build a list (rt6_nh_list) with
5435 	 * one fib6_info per nexthop.
5436 	 */
5437 	while (rtnh_ok(rtnh, remaining)) {
5438 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5439 		if (rtnh->rtnh_ifindex)
5440 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5441 
5442 		attrlen = rtnh_attrlen(rtnh);
5443 		if (attrlen > 0) {
5444 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5445 
5446 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5447 			if (nla) {
5448 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
5449 				r_cfg.fc_flags |= RTF_GATEWAY;
5450 			}
5451 
5452 			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
5453 			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
5454 			if (nla)
5455 				r_cfg.fc_encap_type = nla_get_u16(nla);
5456 		}
5457 
5458 		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
5459 		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
5460 		if (IS_ERR(rt)) {
5461 			err = PTR_ERR(rt);
5462 			rt = NULL;
5463 			goto cleanup;
5464 		}
5465 
5466 		err = ip6_route_info_create_nh(rt, &r_cfg, GFP_KERNEL, extack);
5467 		if (err) {
5468 			rt = NULL;
5469 			goto cleanup;
5470 		}
5471 
5472 		rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;
5473 
5474 		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
5475 		if (err) {
5476 			fib6_info_release(rt);
5477 			goto cleanup;
5478 		}
5479 
5480 		rtnh = rtnh_next(rtnh, &remaining);
5481 	}
5482 
5483 	/* For add and replace, send one notification with all nexthops.
5484 	 * Skip the notification in fib6_add_rt2node() and send one with
5485 	 * the full route when done.
5486 	 */
5487 	info->skip_notify = 1;
5488 
5489 	/* For add and replace, send one notification with all nexthops. For
5490 	 * append, send one notification with all appended nexthops.
5491 	 */
5492 	info->skip_notify_kernel = 1;
5493 
5494 	err_nh = NULL;
5495 	list_for_each_entry(nh, &rt6_nh_list, list) {
5496 		err = __ip6_ins_rt(nh->fib6_info, info, extack);
5497 
5498 		if (err) {
5499 			if (replace && nhn)
5500 				NL_SET_ERR_MSG_MOD(extack,
5501 						   "multipath route replace failed (check consistency of installed routes)");
5502 			err_nh = nh;
5503 			goto add_errout;
5504 		}
5505 		/* save reference to last route successfully inserted */
5506 		rt_last = nh->fib6_info;
5507 
5508 		/* save reference to first route for notification */
5509 		if (!rt_notif)
5510 			rt_notif = nh->fib6_info;
5511 
5512 		/* Because each route is added like a single route, we remove
5513 		 * these flags after the first nexthop: if there is a collision,
5514 		 * we have already failed to add the first nexthop, since
5515 		 * fib6_add_rt2node() has rejected it; when replacing, the old
5516 		 * nexthops have been replaced by the first new one, and the
5517 		 * rest should be added to it.
5518 		 */
5519 		if (cfg->fc_nlinfo.nlh) {
5520 			cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
5521 							     NLM_F_REPLACE);
5522 			cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
5523 		}
5524 		nhn++;
5525 	}
5526 
5527 	/* An in-kernel notification should only be sent in case the new
5528 	 * multipath route is added as the first route in the node, or if
5529 	 * it was appended to it. We pass 'rt_notif' since it is the first
5530 	 * sibling and might allow us to skip some checks in the replace case.
5531 	 */
5532 	if (ip6_route_mpath_should_notify(rt_notif)) {
5533 		enum fib_event_type fib_event;
5534 
5535 		if (rt_notif->fib6_nsiblings != nhn - 1)
5536 			fib_event = FIB_EVENT_ENTRY_APPEND;
5537 		else
5538 			fib_event = FIB_EVENT_ENTRY_REPLACE;
5539 
5540 		err = call_fib6_multipath_entry_notifiers(info->nl_net,
5541 							  fib_event, rt_notif,
5542 							  nhn - 1, extack);
5543 		if (err) {
5544 			/* Delete all the siblings that were just added */
5545 			err_nh = NULL;
5546 			goto add_errout;
5547 		}
5548 	}
5549 
5550 	/* success ... tell user about new route */
5551 	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5552 	goto cleanup;
5553 
5554 add_errout:
5555 	/* send notification for routes that were added so that
5556 	 * the delete notifications sent by ip6_route_del are
5557 	 * coherent
5558 	 */
5559 	if (rt_notif)
5560 		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
5561 
5562 	/* Delete routes that were already added */
5563 	list_for_each_entry(nh, &rt6_nh_list, list) {
5564 		if (err_nh == nh)
5565 			break;
5566 		ip6_route_del(&nh->r_cfg, extack);
5567 	}
5568 
5569 cleanup:
5570 	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, list) {
5571 		fib6_info_release(nh->fib6_info);
5572 		list_del(&nh->list);
5573 		kfree(nh);
5574 	}
5575 
5576 	return err;
5577 }
5578 
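/* Delete a multipath route nexthop by nexthop: each rtnexthop is
 * mapped back to a single-path fib6_config and handed to
 * ip6_route_del().  All entries are processed even on error; the
 * last error seen is returned.
 */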
5579 static int ip6_route_multipath_del(struct fib6_config *cfg,
5580 				   struct netlink_ext_ack *extack)
5581 {
5582 	struct fib6_config r_cfg;
5583 	struct rtnexthop *rtnh;
5584 	int last_err = 0;
5585 	int remaining;
5586 	int attrlen;
5587 	int err;
5588 
5589 	remaining = cfg->fc_mp_len;
5590 	rtnh = (struct rtnexthop *)cfg->fc_mp;
5591 
5592 	/* Parse a Multipath Entry */
5593 	while (rtnh_ok(rtnh, remaining)) {
5594 		memcpy(&r_cfg, cfg, sizeof(*cfg));
5595 		if (rtnh->rtnh_ifindex)
5596 			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
5597 
5598 		attrlen = rtnh_attrlen(rtnh);
5599 		if (attrlen > 0) {
5600 			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
5601 
5602 			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
5603 			if (nla) {
5604 				r_cfg.fc_gateway = nla_get_in6_addr(nla);
5605 				r_cfg.fc_flags |= RTF_GATEWAY;
5606 			}
5607 		}
5608 
5609 		err = ip6_route_del(&r_cfg, extack);
5610 		if (err)
5611 			last_err = err;
5612 
5613 		rtnh = rtnh_next(rtnh, &remaining);
5614 	}
5615 
5616 	return last_err;
5617 }
5618 
5619 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5620 			      struct netlink_ext_ack *extack)
5621 {
5622 	struct fib6_config cfg;
5623 	int err;
5624 
5625 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5626 	if (err < 0)
5627 		return err;
5628 
5629 	if (cfg.fc_nh_id) {
5630 		rcu_read_lock();
5631 		err = !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id);
5632 		rcu_read_unlock();
5633 
5634 		if (err) {
5635 			NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5636 			return -EINVAL;
5637 		}
5638 	}
5639 
5640 	if (cfg.fc_mp) {
5641 		return ip6_route_multipath_del(&cfg, extack);
5642 	} else {
5643 		cfg.fc_delete_all_nh = 1;
5644 		return ip6_route_del(&cfg, extack);
5645 	}
5646 }
5647 
5648 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5649 			      struct netlink_ext_ack *extack)
5650 {
5651 	struct fib6_config cfg;
5652 	int err;
5653 
5654 	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
5655 	if (err < 0)
5656 		return err;
5657 
5658 	if (cfg.fc_metric == 0)
5659 		cfg.fc_metric = IP6_RT_PRIO_USER;
5660 
5661 	if (cfg.fc_mp)
5662 		return ip6_route_multipath_add(&cfg, extack);
5663 	else
5664 		return ip6_route_add(&cfg, GFP_KERNEL, extack);
5665 }
5666 
5667 /* add the overhead of this fib6_nh to nexthop_len */
5668 static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
5669 {
5670 	int *nexthop_len = arg;
5671 
5672 	*nexthop_len += nla_total_size(0)	 /* RTA_MULTIPATH */
5673 		     + NLA_ALIGN(sizeof(struct rtnexthop))
5674 		     + nla_total_size(16); /* RTA_GATEWAY */
5675 
5676 	if (nh->fib_nh_lws) {
5677 		/* RTA_ENCAP_TYPE */
5678 		*nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5679 		/* RTA_ENCAP */
5680 		*nexthop_len += nla_total_size(2);
5681 	}
5682 
5683 	return 0;
5684 }
5685 
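/* Compute a worst-case netlink message size for this route.  For
 * nexthop-object routes the per-fib6_nh overhead is accumulated via
 * the nexthop walker; for inline ECMP routes the sibling list is
 * walked under RCU, restarting if the sibling count drops to zero
 * underneath us.
 */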
5686 static size_t rt6_nlmsg_size(struct fib6_info *f6i)
5687 {
5688 	struct fib6_info *sibling;
5689 	struct fib6_nh *nh;
5690 	int nexthop_len;
5691 
5692 	if (f6i->nh) {
5693 		nexthop_len = nla_total_size(4); /* RTA_NH_ID */
5694 		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
5695 					 &nexthop_len);
5696 		goto common;
5697 	}
5698 
5699 	rcu_read_lock();
5700 retry:
5701 	nh = f6i->fib6_nh;
5702 	nexthop_len = 0;
5703 	if (READ_ONCE(f6i->fib6_nsiblings)) {
5704 		rt6_nh_nlmsg_size(nh, &nexthop_len);
5705 
5706 		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5707 					fib6_siblings) {
5708 			rt6_nh_nlmsg_size(sibling->fib6_nh, &nexthop_len);
5709 			if (!READ_ONCE(f6i->fib6_nsiblings))
5710 				goto retry;
5711 		}
5712 	}
5713 	rcu_read_unlock();
5714 	nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5715 common:
5716 	return NLMSG_ALIGN(sizeof(struct rtmsg))
5717 	       + nla_total_size(16) /* RTA_SRC */
5718 	       + nla_total_size(16) /* RTA_DST */
5719 	       + nla_total_size(16) /* RTA_GATEWAY */
5720 	       + nla_total_size(16) /* RTA_PREFSRC */
5721 	       + nla_total_size(4) /* RTA_TABLE */
5722 	       + nla_total_size(4) /* RTA_IIF */
5723 	       + nla_total_size(4) /* RTA_OIF */
5724 	       + nla_total_size(4) /* RTA_PRIORITY */
5725 	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
5726 	       + nla_total_size(sizeof(struct rta_cacheinfo))
5727 	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
5728 	       + nla_total_size(1) /* RTA_PREF */
5729 	       + nexthop_len;
5730 }
5731 
5732 static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5733 				 unsigned char *flags)
5734 {
5735 	if (nexthop_is_multipath(nh)) {
5736 		struct nlattr *mp;
5737 
5738 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5739 		if (!mp)
5740 			goto nla_put_failure;
5741 
5742 		if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
5743 			goto nla_put_failure;
5744 
5745 		nla_nest_end(skb, mp);
5746 	} else {
5747 		struct fib6_nh *fib6_nh;
5748 
5749 		fib6_nh = nexthop_fib6_nh(nh);
5750 		if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
5751 				     flags, false) < 0)
5752 			goto nla_put_failure;
5753 	}
5754 
5755 	return 0;
5756 
5757 nla_put_failure:
5758 	return -EMSGSIZE;
5759 }
5760 
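/* Fill one route netlink message of the given @type.  @rt is the FIB
 * entry; @dst, when non-NULL, is a cached rt6_info whose keys, flags
 * and metrics take precedence over the FIB entry's.  When @dest/@src
 * are set, the exact addresses of the lookup are reported (with a
 * /128 prefix length) instead of the route's own prefixes.
 */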
5761 static int rt6_fill_node(struct net *net, struct sk_buff *skb,
5762 			 struct fib6_info *rt, struct dst_entry *dst,
5763 			 struct in6_addr *dest, struct in6_addr *src,
5764 			 int iif, int type, u32 portid, u32 seq,
5765 			 unsigned int flags)
5766 {
5767 	struct rt6_info *rt6 = dst_rt6_info(dst);
5768 	struct rt6key *rt6_dst, *rt6_src;
5769 	u32 *pmetrics, table, rt6_flags;
5770 	unsigned char nh_flags = 0;
5771 	struct nlmsghdr *nlh;
5772 	struct rtmsg *rtm;
5773 	long expires = 0;
5774 
5775 	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
5776 	if (!nlh)
5777 		return -EMSGSIZE;
5778 
5779 	if (rt6) {
5780 		rt6_dst = &rt6->rt6i_dst;
5781 		rt6_src = &rt6->rt6i_src;
5782 		rt6_flags = rt6->rt6i_flags;
5783 	} else {
5784 		rt6_dst = &rt->fib6_dst;
5785 		rt6_src = &rt->fib6_src;
5786 		rt6_flags = rt->fib6_flags;
5787 	}
5788 
5789 	rtm = nlmsg_data(nlh);
5790 	rtm->rtm_family = AF_INET6;
5791 	rtm->rtm_dst_len = rt6_dst->plen;
5792 	rtm->rtm_src_len = rt6_src->plen;
5793 	rtm->rtm_tos = 0;
5794 	if (rt->fib6_table)
5795 		table = rt->fib6_table->tb6_id;
5796 	else
5797 		table = RT6_TABLE_UNSPEC;
5798 	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
5799 	if (nla_put_u32(skb, RTA_TABLE, table))
5800 		goto nla_put_failure;
5801 
5802 	rtm->rtm_type = rt->fib6_type;
5803 	rtm->rtm_flags = 0;
5804 	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
5805 	rtm->rtm_protocol = rt->fib6_protocol;
5806 
5807 	if (rt6_flags & RTF_CACHE)
5808 		rtm->rtm_flags |= RTM_F_CLONED;
5809 
5810 	if (dest) {
5811 		if (nla_put_in6_addr(skb, RTA_DST, dest))
5812 			goto nla_put_failure;
5813 		rtm->rtm_dst_len = 128;
5814 	} else if (rtm->rtm_dst_len)
5815 		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
5816 			goto nla_put_failure;
5817 #ifdef CONFIG_IPV6_SUBTREES
5818 	if (src) {
5819 		if (nla_put_in6_addr(skb, RTA_SRC, src))
5820 			goto nla_put_failure;
5821 		rtm->rtm_src_len = 128;
5822 	} else if (rtm->rtm_src_len &&
5823 		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
5824 		goto nla_put_failure;
5825 #endif
5826 	if (iif) {
5827 #ifdef CONFIG_IPV6_MROUTE
5828 		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
5829 			int err = ip6mr_get_route(net, skb, rtm, portid);
5830 
5831 			if (err == 0)
5832 				return 0;
5833 			if (err < 0)
5834 				goto nla_put_failure;
5835 		} else
5836 #endif
5837 			if (nla_put_u32(skb, RTA_IIF, iif))
5838 				goto nla_put_failure;
5839 	} else if (dest) {
5840 		struct in6_addr saddr_buf;
5841 		if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 &&
5842 		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5843 			goto nla_put_failure;
5844 	}
5845 
5846 	if (rt->fib6_prefsrc.plen) {
5847 		struct in6_addr saddr_buf;
5848 		saddr_buf = rt->fib6_prefsrc.addr;
5849 		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
5850 			goto nla_put_failure;
5851 	}
5852 
5853 	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5854 	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
5855 		goto nla_put_failure;
5856 
5857 	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
5858 		goto nla_put_failure;
5859 
5860 	/* For multipath routes, walk the siblings list and add
5861 	 * each as a nexthop within RTA_MULTIPATH.
5862 	 */
5863 	if (rt6) {
5864 		struct net_device *dev;
5865 
5866 		if (rt6_flags & RTF_GATEWAY &&
5867 		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5868 			goto nla_put_failure;
5869 
5870 		dev = dst_dev(dst);
5871 		if (dev && nla_put_u32(skb, RTA_OIF, dev->ifindex))
5872 			goto nla_put_failure;
5873 
5874 		if (lwtunnel_fill_encap(skb, dst->lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
5875 			goto nla_put_failure;
5876 	} else if (READ_ONCE(rt->fib6_nsiblings)) {
5877 		struct fib6_info *sibling;
5878 		struct nlattr *mp;
5879 
5880 		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
5881 		if (!mp)
5882 			goto nla_put_failure;
5883 
5884 		if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
5885 				    rt->fib6_nh->fib_nh_weight, AF_INET6,
5886 				    0) < 0)
5887 			goto nla_put_failure;
5888 
5889 		rcu_read_lock();
5890 
5891 		list_for_each_entry_rcu(sibling, &rt->fib6_siblings,
5892 					fib6_siblings) {
5893 			if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
5894 					    sibling->fib6_nh->fib_nh_weight,
5895 					    AF_INET6, 0) < 0) {
5896 				rcu_read_unlock();
5897 
5898 				goto nla_put_failure;
5899 			}
5900 		}
5901 
5902 		rcu_read_unlock();
5903 
5904 		nla_nest_end(skb, mp);
5905 	} else if (rt->nh) {
5906 		if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5907 			goto nla_put_failure;
5908 
5909 		if (nexthop_is_blackhole(rt->nh))
5910 			rtm->rtm_type = RTN_BLACKHOLE;
5911 
5912 		if (READ_ONCE(net->ipv4.sysctl_nexthop_compat_mode) &&
5913 		    rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
5914 			goto nla_put_failure;
5915 
5916 		rtm->rtm_flags |= nh_flags;
5917 	} else {
5918 		if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
5919 				     &nh_flags, false) < 0)
5920 			goto nla_put_failure;
5921 
5922 		rtm->rtm_flags |= nh_flags;
5923 	}
5924 
5925 	if (rt6_flags & RTF_EXPIRES) {
5926 		expires = dst ? READ_ONCE(dst->expires) : rt->expires;
5927 		expires -= jiffies;
5928 	}
5929 
5930 	if (!dst) {
5931 		if (READ_ONCE(rt->offload))
5932 			rtm->rtm_flags |= RTM_F_OFFLOAD;
5933 		if (READ_ONCE(rt->trap))
5934 			rtm->rtm_flags |= RTM_F_TRAP;
5935 		if (READ_ONCE(rt->offload_failed))
5936 			rtm->rtm_flags |= RTM_F_OFFLOAD_FAILED;
5937 	}
5938 
5939 	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
5940 		goto nla_put_failure;
5941 
5942 	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
5943 		goto nla_put_failure;
5944 
5945 
5946 	nlmsg_end(skb, nlh);
5947 	return 0;
5948 
5949 nla_put_failure:
5950 	nlmsg_cancel(skb, nlh);
5951 	return -EMSGSIZE;
5952 }
5953 
5954 static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5955 {
5956 	const struct net_device *dev = arg;
5957 
5958 	if (nh->fib_nh_dev == dev)
5959 		return 1;
5960 
5961 	return 0;
5962 }
5963 
5964 static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5965 			       const struct net_device *dev)
5966 {
5967 	if (f6i->nh) {
5968 		struct net_device *_dev = (struct net_device *)dev;
5969 
5970 		return !!nexthop_for_each_fib6_nh(f6i->nh,
5971 						  fib6_info_nh_uses_dev,
5972 						  _dev);
5973 	}
5974 
5975 	if (f6i->fib6_nh->fib_nh_dev == dev)
5976 		return true;
5977 
5978 	if (READ_ONCE(f6i->fib6_nsiblings)) {
5979 		const struct fib6_info *sibling;
5980 
5981 		rcu_read_lock();
5982 		list_for_each_entry_rcu(sibling, &f6i->fib6_siblings,
5983 					fib6_siblings) {
5984 			if (sibling->fib6_nh->fib_nh_dev == dev) {
5985 				rcu_read_unlock();
5986 				return true;
5987 			}
5988 			if (!READ_ONCE(f6i->fib6_nsiblings))
5989 				break;
5990 		}
5991 		rcu_read_unlock();
5992 	}
5993 	return false;
5994 }
5995 
5996 struct fib6_nh_exception_dump_walker {
5997 	struct rt6_rtnl_dump_arg *dump;
5998 	struct fib6_info *rt;
5999 	unsigned int flags;
6000 	unsigned int skip;
6001 	unsigned int count;
6002 };
6003 
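/* Dump the exception (cached) routes hanging off one fib6_nh.
 * Expired entries are counted as handled so the skip offset stays
 * consistent across partial dumps; see the sernum comment in the
 * loop below.
 */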
6004 static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
6005 {
6006 	struct fib6_nh_exception_dump_walker *w = arg;
6007 	struct rt6_rtnl_dump_arg *dump = w->dump;
6008 	struct rt6_exception_bucket *bucket;
6009 	struct rt6_exception *rt6_ex;
6010 	int i, err;
6011 
6012 	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
6013 	if (!bucket)
6014 		return 0;
6015 
6016 	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
6017 		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
6018 			if (w->skip) {
6019 				w->skip--;
6020 				continue;
6021 			}
6022 
6023 			/* Expiration of entries doesn't bump sernum, insertion
6024 			 * does. Removal is triggered by insertion, so we can
6025 			 * rely on the fact that if entries change between two
6026 			 * partial dumps, this node is scanned again completely,
6027 			 * see rt6_insert_exception() and fib6_dump_table().
6028 			 *
6029 			 * Count expired entries we go through as handled
6030 			 * entries that we'll skip next time, in case of partial
6031 			 * node dump. Otherwise, if entries expire meanwhile,
6032 			 * we'll skip the wrong amount.
6033 			 */
6034 			if (rt6_check_expired(rt6_ex->rt6i)) {
6035 				w->count++;
6036 				continue;
6037 			}
6038 
6039 			err = rt6_fill_node(dump->net, dump->skb, w->rt,
6040 					    &rt6_ex->rt6i->dst, NULL, NULL, 0,
6041 					    RTM_NEWROUTE,
6042 					    NETLINK_CB(dump->cb->skb).portid,
6043 					    dump->cb->nlh->nlmsg_seq, w->flags);
6044 			if (err)
6045 				return err;
6046 
6047 			w->count++;
6048 		}
6049 		bucket++;
6050 	}
6051 
6052 	return 0;
6053 }
6054 
6055 /* Return -1 if done with the node, or the number of handled routes on a partial dump */
6056 int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
6057 {
6058 	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
6059 	struct fib_dump_filter *filter = &arg->filter;
6060 	unsigned int flags = NLM_F_MULTI;
6061 	struct net *net = arg->net;
6062 	int count = 0;
6063 
6064 	if (rt == net->ipv6.fib6_null_entry)
6065 		return -1;
6066 
6067 	if ((filter->flags & RTM_F_PREFIX) &&
6068 	    !(rt->fib6_flags & RTF_PREFIX_RT)) {
6069 		/* success since this is not a prefix route */
6070 		return -1;
6071 	}
6072 	if (filter->filter_set &&
6073 	    ((filter->rt_type  && rt->fib6_type != filter->rt_type) ||
6074 	     (filter->dev      && !fib6_info_uses_dev(rt, filter->dev)) ||
6075 	     (filter->protocol && rt->fib6_protocol != filter->protocol))) {
6076 		return -1;
6077 	}
6078 
6079 	if (filter->filter_set ||
6080 	    !filter->dump_routes || !filter->dump_exceptions) {
6081 		flags |= NLM_F_DUMP_FILTERED;
6082 	}
6083 
6084 	if (filter->dump_routes) {
6085 		if (skip) {
6086 			skip--;
6087 		} else {
6088 			if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
6089 					  0, RTM_NEWROUTE,
6090 					  NETLINK_CB(arg->cb->skb).portid,
6091 					  arg->cb->nlh->nlmsg_seq, flags)) {
6092 				return 0;
6093 			}
6094 			count++;
6095 		}
6096 	}
6097 
6098 	if (filter->dump_exceptions) {
6099 		struct fib6_nh_exception_dump_walker w = { .dump = arg,
6100 							   .rt = rt,
6101 							   .flags = flags,
6102 							   .skip = skip,
6103 							   .count = 0 };
6104 		int err;
6105 
6106 		rcu_read_lock();
6107 		if (rt->nh) {
6108 			err = nexthop_for_each_fib6_nh(rt->nh,
6109 						       rt6_nh_dump_exceptions,
6110 						       &w);
6111 		} else {
6112 			err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
6113 		}
6114 		rcu_read_unlock();
6115 
6116 		if (err)
6117 			return count + w.count;
6118 	}
6119 
6120 	return -1;
6121 }
6122 
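/* Validate an RTM_GETROUTE request.  Legacy sockets get the
 * permissive deprecated parser; sockets that opted into strict
 * checking additionally have the header fields, the flags and the
 * set of permitted attributes verified, presumably so malformed
 * requests fail loudly instead of being silently ignored.
 */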
6123 static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
6124 					const struct nlmsghdr *nlh,
6125 					struct nlattr **tb,
6126 					struct netlink_ext_ack *extack)
6127 {
6128 	struct rtmsg *rtm;
6129 	int i, err;
6130 
6131 	rtm = nlmsg_payload(nlh, sizeof(*rtm));
6132 	if (!rtm) {
6133 		NL_SET_ERR_MSG_MOD(extack,
6134 				   "Invalid header for get route request");
6135 		return -EINVAL;
6136 	}
6137 
6138 	if (!netlink_strict_get_check(skb))
6139 		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
6140 					      rtm_ipv6_policy, extack);
6141 
6142 	if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
6143 	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
6144 	    rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
6145 	    rtm->rtm_type) {
6146 		NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
6147 		return -EINVAL;
6148 	}
6149 	if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
6150 		NL_SET_ERR_MSG_MOD(extack,
6151 				   "Invalid flags for get route request");
6152 		return -EINVAL;
6153 	}
6154 
6155 	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
6156 					    rtm_ipv6_policy, extack);
6157 	if (err)
6158 		return err;
6159 
6160 	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
6161 	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
6162 		NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
6163 		return -EINVAL;
6164 	}
6165 
6166 	if (tb[RTA_FLOWLABEL] &&
6167 	    (nla_get_be32(tb[RTA_FLOWLABEL]) & ~IPV6_FLOWLABEL_MASK)) {
6168 		NL_SET_ERR_MSG_ATTR(extack, tb[RTA_FLOWLABEL],
6169 				    "Invalid flow label");
6170 		return -EINVAL;
6171 	}
6172 
6173 	for (i = 0; i <= RTA_MAX; i++) {
6174 		if (!tb[i])
6175 			continue;
6176 
6177 		switch (i) {
6178 		case RTA_SRC:
6179 		case RTA_DST:
6180 		case RTA_IIF:
6181 		case RTA_OIF:
6182 		case RTA_MARK:
6183 		case RTA_UID:
6184 		case RTA_SPORT:
6185 		case RTA_DPORT:
6186 		case RTA_IP_PROTO:
6187 		case RTA_FLOWLABEL:
6188 			break;
6189 		default:
6190 			NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
6191 			return -EINVAL;
6192 		}
6193 	}
6194 
6195 	return 0;
6196 }
6197 
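/* RTM_GETROUTE handler: build a flowi6 from the request attributes,
 * resolve it through the input path when RTA_IIF is given (or the
 * output path otherwise) and unicast the result back to the
 * requester.  With RTM_F_FIB_MATCH, the matching FIB entry is
 * reported instead of the dst produced by the lookup.
 */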
6198 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
6199 			      struct netlink_ext_ack *extack)
6200 {
6201 	struct net *net = sock_net(in_skb->sk);
6202 	struct nlattr *tb[RTA_MAX+1];
6203 	int err, iif = 0, oif = 0;
6204 	struct fib6_info *from;
6205 	struct dst_entry *dst;
6206 	struct rt6_info *rt;
6207 	struct sk_buff *skb;
6208 	struct rtmsg *rtm;
6209 	struct flowi6 fl6 = {};
6210 	__be32 flowlabel;
6211 	bool fibmatch;
6212 
6213 	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
6214 	if (err < 0)
6215 		goto errout;
6216 
6217 	err = -EINVAL;
6218 	rtm = nlmsg_data(nlh);
6219 	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
6220 
6221 	if (tb[RTA_SRC]) {
6222 		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
6223 			goto errout;
6224 
6225 		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
6226 	}
6227 
6228 	if (tb[RTA_DST]) {
6229 		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
6230 			goto errout;
6231 
6232 		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
6233 	}
6234 
6235 	if (tb[RTA_IIF])
6236 		iif = nla_get_u32(tb[RTA_IIF]);
6237 
6238 	if (tb[RTA_OIF])
6239 		oif = nla_get_u32(tb[RTA_OIF]);
6240 
6241 	if (tb[RTA_MARK])
6242 		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
6243 
6244 	if (tb[RTA_UID])
6245 		fl6.flowi6_uid = make_kuid(current_user_ns(),
6246 					   nla_get_u32(tb[RTA_UID]));
6247 	else
6248 		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
6249 
6250 	if (tb[RTA_SPORT])
6251 		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
6252 
6253 	if (tb[RTA_DPORT])
6254 		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
6255 
6256 	if (tb[RTA_IP_PROTO]) {
6257 		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
6258 						  &fl6.flowi6_proto, AF_INET6,
6259 						  extack);
6260 		if (err)
6261 			goto errout;
6262 	}
6263 
6264 	flowlabel = nla_get_be32_default(tb[RTA_FLOWLABEL], 0);
6265 	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, flowlabel);
6266 
6267 	if (iif) {
6268 		struct net_device *dev;
6269 		int flags = 0;
6270 
6271 		rcu_read_lock();
6272 
6273 		dev = dev_get_by_index_rcu(net, iif);
6274 		if (!dev) {
6275 			rcu_read_unlock();
6276 			err = -ENODEV;
6277 			goto errout;
6278 		}
6279 
6280 		fl6.flowi6_iif = iif;
6281 
6282 		if (!ipv6_addr_any(&fl6.saddr))
6283 			flags |= RT6_LOOKUP_F_HAS_SADDR;
6284 
6285 		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
6286 
6287 		rcu_read_unlock();
6288 	} else {
6289 		fl6.flowi6_oif = oif;
6290 
6291 		dst = ip6_route_output(net, NULL, &fl6);
6292 	}
6293 
6294 
6295 	rt = dst_rt6_info(dst);
6296 	if (rt->dst.error) {
6297 		err = rt->dst.error;
6298 		ip6_rt_put(rt);
6299 		goto errout;
6300 	}
6301 
6302 	if (rt == net->ipv6.ip6_null_entry) {
6303 		err = rt->dst.error;
6304 		ip6_rt_put(rt);
6305 		goto errout;
6306 	}
6307 
6308 	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
6309 	if (!skb) {
6310 		ip6_rt_put(rt);
6311 		err = -ENOBUFS;
6312 		goto errout;
6313 	}
6314 
6315 	skb_dst_set(skb, &rt->dst);
6316 
6317 	rcu_read_lock();
6318 	from = rcu_dereference(rt->from);
6319 	if (from) {
6320 		if (fibmatch)
6321 			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
6322 					    iif, RTM_NEWROUTE,
6323 					    NETLINK_CB(in_skb).portid,
6324 					    nlh->nlmsg_seq, 0);
6325 		else
6326 			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
6327 					    &fl6.saddr, iif, RTM_NEWROUTE,
6328 					    NETLINK_CB(in_skb).portid,
6329 					    nlh->nlmsg_seq, 0);
6330 	} else {
6331 		err = -ENETUNREACH;
6332 	}
6333 	rcu_read_unlock();
6334 
6335 	if (err < 0) {
6336 		kfree_skb(skb);
6337 		goto errout;
6338 	}
6339 
6340 	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
6341 errout:
6342 	return err;
6343 }
6344 
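/* Notify RTNLGRP_IPV6_ROUTE listeners about @rt.  The message size
 * is computed under RCU, and the allocation is retried with a larger
 * buffer if rt6_fill_node() returns -EMSGSIZE because siblings were
 * added while the skb was being filled.
 */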
6345 void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
6346 		     unsigned int nlm_flags)
6347 {
6348 	struct net *net = info->nl_net;
6349 	struct sk_buff *skb;
6350 	size_t sz;
6351 	u32 seq;
6352 	int err;
6353 
6354 	err = -ENOBUFS;
6355 	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6356 
6357 	rcu_read_lock();
6358 	sz = rt6_nlmsg_size(rt);
6359 retry:
6360 	skb = nlmsg_new(sz, GFP_ATOMIC);
6361 	if (!skb)
6362 		goto errout;
6363 
6364 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6365 			    event, info->portid, seq, nlm_flags);
6366 	if (err < 0) {
6367 		kfree_skb(skb);
6368 		/* -EMSGSIZE implies needed space grew under us. */
6369 		if (err == -EMSGSIZE) {
6370 			sz = max(rt6_nlmsg_size(rt), sz << 1);
6371 			goto retry;
6372 		}
6373 		goto errout;
6374 	}
6375 
6376 	rcu_read_unlock();
6377 
6378 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6379 		    info->nlh, GFP_ATOMIC);
6380 	return;
6381 errout:
6382 	rcu_read_unlock();
6383 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6384 }
6385 
6386 void fib6_rt_update(struct net *net, struct fib6_info *rt,
6387 		    struct nl_info *info)
6388 {
6389 	u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6390 	struct sk_buff *skb;
6391 	int err = -ENOBUFS;
6392 
6393 	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6394 	if (!skb)
6395 		goto errout;
6396 
6397 	err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6398 			    RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6399 	if (err < 0) {
6400 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6401 		WARN_ON(err == -EMSGSIZE);
6402 		kfree_skb(skb);
6403 		goto errout;
6404 	}
6405 	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6406 		    info->nlh, gfp_any());
6407 	return;
6408 errout:
6409 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6410 }
6411 
6412 void fib6_info_hw_flags_set(struct net *net, struct fib6_info *f6i,
6413 			    bool offload, bool trap, bool offload_failed)
6414 {
6415 	u8 fib_notify_on_flag_change;
6416 	struct sk_buff *skb;
6417 	int err;
6418 
6419 	if (READ_ONCE(f6i->offload) == offload &&
6420 	    READ_ONCE(f6i->trap) == trap &&
6421 	    READ_ONCE(f6i->offload_failed) == offload_failed)
6422 		return;
6423 
6424 	WRITE_ONCE(f6i->offload, offload);
6425 	WRITE_ONCE(f6i->trap, trap);
6426 
6427 	fib_notify_on_flag_change = READ_ONCE(net->ipv6.sysctl.fib_notify_on_flag_change);
6428 	/* 2 means send notifications only if offload_failed was changed. */
6429 	if (fib_notify_on_flag_change == 2 &&
6430 	    READ_ONCE(f6i->offload_failed) == offload_failed)
6431 		return;
6432 
6433 	WRITE_ONCE(f6i->offload_failed, offload_failed);
6434 
6435 	if (!rcu_access_pointer(f6i->fib6_node))
6436 		/* The route was removed from the tree, do not send
6437 		 * notification.
6438 		 */
6439 		return;
6440 
6441 	if (!fib_notify_on_flag_change)
6442 		return;
6443 
6444 	skb = nlmsg_new(rt6_nlmsg_size(f6i), GFP_KERNEL);
6445 	if (!skb) {
6446 		err = -ENOBUFS;
6447 		goto errout;
6448 	}
6449 
6450 	err = rt6_fill_node(net, skb, f6i, NULL, NULL, NULL, 0, RTM_NEWROUTE, 0,
6451 			    0, 0);
6452 	if (err < 0) {
6453 		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6454 		WARN_ON(err == -EMSGSIZE);
6455 		kfree_skb(skb);
6456 		goto errout;
6457 	}
6458 
6459 	rtnl_notify(skb, net, 0, RTNLGRP_IPV6_ROUTE, NULL, GFP_KERNEL);
6460 	return;
6461 
6462 errout:
6463 	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6464 }
6465 EXPORT_SYMBOL(fib6_info_hw_flags_set);
6466 
6467 static int ip6_route_dev_notify(struct notifier_block *this,
6468 				unsigned long event, void *ptr)
6469 {
6470 	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6471 	struct net *net = dev_net(dev);
6472 
6473 	if (!(dev->flags & IFF_LOOPBACK))
6474 		return NOTIFY_OK;
6475 
6476 	if (event == NETDEV_REGISTER) {
6477 		net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
6478 		net->ipv6.ip6_null_entry->dst.dev = dev;
6479 		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6480 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6481 		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
6482 		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
6483 		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
6484 		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6485 #endif
6486 	 } else if (event == NETDEV_UNREGISTER &&
6487 		    dev->reg_state != NETREG_UNREGISTERED) {
6488 		/* NETDEV_UNREGISTER could be fired multiple times by
6489 		 * netdev_wait_allrefs(). Make sure we only call this once.
6490 		 */
6491 		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
6492 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6493 		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6494 		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
6495 #endif
6496 	}
6497 
6498 	return NOTIFY_OK;
6499 }
6500 
6501 /*
6502  *	/proc
6503  */
6504 
6505 #ifdef CONFIG_PROC_FS
6506 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6507 {
6508 	struct net *net = (struct net *)seq->private;
6509 	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
6510 		   net->ipv6.rt6_stats->fib_nodes,
6511 		   net->ipv6.rt6_stats->fib_route_nodes,
6512 		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
6513 		   net->ipv6.rt6_stats->fib_rt_entries,
6514 		   net->ipv6.rt6_stats->fib_rt_cache,
6515 		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
6516 		   net->ipv6.rt6_stats->fib_discarded_routes);
6517 
6518 	return 0;
6519 }
6520 #endif	/* CONFIG_PROC_FS */
6521 
6522 #ifdef CONFIG_SYSCTL
6523 
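/* Handler for the write-only net.ipv6.route.flush sysctl: any write
 * triggers fib6_run_gc() with the written value as the delay.  A
 * typical invocation (illustrative):
 *
 *   sysctl -w net.ipv6.route.flush=1
 */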
6524 static int ipv6_sysctl_rtcache_flush(const struct ctl_table *ctl, int write,
6525 			      void *buffer, size_t *lenp, loff_t *ppos)
6526 {
6527 	struct net *net;
6528 	int delay;
6529 	int ret;
6530 	if (!write)
6531 		return -EINVAL;
6532 
6533 	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6534 	if (ret)
6535 		return ret;
6536 
6537 	net = (struct net *)ctl->extra1;
6538 	delay = READ_ONCE(net->ipv6.sysctl.flush_delay);
6539 	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
6540 	return 0;
6541 }
6542 
6543 static struct ctl_table ipv6_route_table_template[] = {
6544 	{
6545 		.procname	=	"max_size",
6546 		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
6547 		.maxlen		=	sizeof(int),
6548 		.mode		=	0644,
6549 		.proc_handler	=	proc_dointvec,
6550 	},
6551 	{
6552 		.procname	=	"gc_thresh",
6553 		.data		=	&ip6_dst_ops_template.gc_thresh,
6554 		.maxlen		=	sizeof(int),
6555 		.mode		=	0644,
6556 		.proc_handler	=	proc_dointvec,
6557 	},
6558 	{
6559 		.procname	=	"flush",
6560 		.data		=	&init_net.ipv6.sysctl.flush_delay,
6561 		.maxlen		=	sizeof(int),
6562 		.mode		=	0200,
6563 		.proc_handler	=	ipv6_sysctl_rtcache_flush
6564 	},
6565 	{
6566 		.procname	=	"gc_min_interval",
6567 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6568 		.maxlen		=	sizeof(int),
6569 		.mode		=	0644,
6570 		.proc_handler	=	proc_dointvec_jiffies,
6571 	},
6572 	{
6573 		.procname	=	"gc_timeout",
6574 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
6575 		.maxlen		=	sizeof(int),
6576 		.mode		=	0644,
6577 		.proc_handler	=	proc_dointvec_jiffies,
6578 	},
6579 	{
6580 		.procname	=	"gc_interval",
6581 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
6582 		.maxlen		=	sizeof(int),
6583 		.mode		=	0644,
6584 		.proc_handler	=	proc_dointvec_jiffies,
6585 	},
6586 	{
6587 		.procname	=	"gc_elasticity",
6588 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
6589 		.maxlen		=	sizeof(int),
6590 		.mode		=	0644,
6591 		.proc_handler	=	proc_dointvec,
6592 	},
6593 	{
6594 		.procname	=	"mtu_expires",
6595 		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
6596 		.maxlen		=	sizeof(int),
6597 		.mode		=	0644,
6598 		.proc_handler	=	proc_dointvec_jiffies,
6599 	},
6600 	{
6601 		.procname	=	"min_adv_mss",
6602 		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
6603 		.maxlen		=	sizeof(int),
6604 		.mode		=	0644,
6605 		.proc_handler	=	proc_dointvec,
6606 	},
6607 	{
6608 		.procname	=	"gc_min_interval_ms",
6609 		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
6610 		.maxlen		=	sizeof(int),
6611 		.mode		=	0644,
6612 		.proc_handler	=	proc_dointvec_ms_jiffies,
6613 	},
6614 	{
6615 		.procname	=	"skip_notify_on_dev_down",
6616 		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
6617 		.maxlen		=	sizeof(u8),
6618 		.mode		=	0644,
6619 		.proc_handler	=	proc_dou8vec_minmax,
6620 		.extra1		=	SYSCTL_ZERO,
6621 		.extra2		=	SYSCTL_ONE,
6622 	},
6623 };
6624 
6625 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
6626 {
6627 	struct ctl_table *table;
6628 
6629 	table = kmemdup(ipv6_route_table_template,
6630 			sizeof(ipv6_route_table_template),
6631 			GFP_KERNEL);
6632 
6633 	if (table) {
6634 		table[0].data = &net->ipv6.sysctl.ip6_rt_max_size;
6635 		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
6636 		table[2].data = &net->ipv6.sysctl.flush_delay;
6637 		table[2].extra1 = net;
6638 		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6639 		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
6640 		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
6641 		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
6642 		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
6643 		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
6644 		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
6645 		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
6646 	}
6647 
6648 	return table;
6649 }
6650 
6651 size_t ipv6_route_sysctl_table_size(struct net *net)
6652 {
6653 	/* Don't export sysctls to unprivileged users */
6654 	if (net->user_ns != &init_user_ns)
6655 		return 1;
6656 
6657 	return ARRAY_SIZE(ipv6_route_table_template);
6658 }
6659 #endif
6660 
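/* Per-netns setup: clone the dst_ops template, allocate this
 * namespace's null (and, with multiple tables, prohibit/blackhole)
 * template routes and seed the routing sysctl defaults.  Errors
 * unwind in reverse order of allocation.
 */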
6661 static int __net_init ip6_route_net_init(struct net *net)
6662 {
6663 	int ret = -ENOMEM;
6664 
6665 	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
6666 	       sizeof(net->ipv6.ip6_dst_ops));
6667 
6668 	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
6669 		goto out_ip6_dst_ops;
6670 
6671 	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
6672 	if (!net->ipv6.fib6_null_entry)
6673 		goto out_ip6_dst_entries;
6674 	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
6675 	       sizeof(*net->ipv6.fib6_null_entry));
6676 
6677 	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
6678 					   sizeof(*net->ipv6.ip6_null_entry),
6679 					   GFP_KERNEL);
6680 	if (!net->ipv6.ip6_null_entry)
6681 		goto out_fib6_null_entry;
6682 	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6683 	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
6684 			 ip6_template_metrics, true);
6685 	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->dst.rt_uncached);
6686 
6687 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6688 	net->ipv6.fib6_has_custom_rules = false;
6689 	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
6690 					       sizeof(*net->ipv6.ip6_prohibit_entry),
6691 					       GFP_KERNEL);
6692 	if (!net->ipv6.ip6_prohibit_entry)
6693 		goto out_ip6_null_entry;
6694 	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6695 	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
6696 			 ip6_template_metrics, true);
6697 	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->dst.rt_uncached);
6698 
6699 	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
6700 					       sizeof(*net->ipv6.ip6_blk_hole_entry),
6701 					       GFP_KERNEL);
6702 	if (!net->ipv6.ip6_blk_hole_entry)
6703 		goto out_ip6_prohibit_entry;
6704 	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
6705 	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
6706 			 ip6_template_metrics, true);
6707 	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->dst.rt_uncached);
6708 #ifdef CONFIG_IPV6_SUBTREES
6709 	net->ipv6.fib6_routes_require_src = 0;
6710 #endif
6711 #endif
6712 
6713 	net->ipv6.sysctl.flush_delay = 0;
6714 	net->ipv6.sysctl.ip6_rt_max_size = INT_MAX;
6715 	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
6716 	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
6717 	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
6718 	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
6719 	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
6720 	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
6721 	net->ipv6.sysctl.skip_notify_on_dev_down = 0;
6722 
6723 	atomic_set(&net->ipv6.ip6_rt_gc_expire, 30*HZ);
6724 
6725 	ret = 0;
6726 out:
6727 	return ret;
6728 
6729 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6730 out_ip6_prohibit_entry:
6731 	kfree(net->ipv6.ip6_prohibit_entry);
6732 out_ip6_null_entry:
6733 	kfree(net->ipv6.ip6_null_entry);
6734 #endif
6735 out_fib6_null_entry:
6736 	kfree(net->ipv6.fib6_null_entry);
6737 out_ip6_dst_entries:
6738 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6739 out_ip6_dst_ops:
6740 	goto out;
6741 }
6742 
6743 static void __net_exit ip6_route_net_exit(struct net *net)
6744 {
6745 	kfree(net->ipv6.fib6_null_entry);
6746 	kfree(net->ipv6.ip6_null_entry);
6747 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6748 	kfree(net->ipv6.ip6_prohibit_entry);
6749 	kfree(net->ipv6.ip6_blk_hole_entry);
6750 #endif
6751 	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
6752 }
6753 
6754 static int __net_init ip6_route_net_init_late(struct net *net)
6755 {
6756 #ifdef CONFIG_PROC_FS
6757 	if (!proc_create_net("ipv6_route", 0, net->proc_net,
6758 			     &ipv6_route_seq_ops,
6759 			     sizeof(struct ipv6_route_iter)))
6760 		return -ENOMEM;
6761 
6762 	if (!proc_create_net_single("rt6_stats", 0444, net->proc_net,
6763 				    rt6_stats_seq_show, NULL)) {
6764 		remove_proc_entry("ipv6_route", net->proc_net);
6765 		return -ENOMEM;
6766 	}
6767 #endif
6768 	return 0;
6769 }
6770 
6771 static void __net_exit ip6_route_net_exit_late(struct net *net)
6772 {
6773 #ifdef CONFIG_PROC_FS
6774 	remove_proc_entry("ipv6_route", net->proc_net);
6775 	remove_proc_entry("rt6_stats", net->proc_net);
6776 #endif
6777 }
6778 
6779 static struct pernet_operations ip6_route_net_ops = {
6780 	.init = ip6_route_net_init,
6781 	.exit = ip6_route_net_exit,
6782 };
6783 
6784 static int __net_init ipv6_inetpeer_init(struct net *net)
6785 {
6786 	struct inet_peer_base *bp = kmalloc_obj(*bp);
6787 
6788 	if (!bp)
6789 		return -ENOMEM;
6790 	inet_peer_base_init(bp);
6791 	net->ipv6.peers = bp;
6792 	return 0;
6793 }
6794 
6795 static void __net_exit ipv6_inetpeer_exit(struct net *net)
6796 {
6797 	struct inet_peer_base *bp = net->ipv6.peers;
6798 
6799 	net->ipv6.peers = NULL;
6800 	inetpeer_invalidate_tree(bp);
6801 	kfree(bp);
6802 }
6803 
6804 static struct pernet_operations ipv6_inetpeer_ops = {
6805 	.init	=	ipv6_inetpeer_init,
6806 	.exit	=	ipv6_inetpeer_exit,
6807 };
6808 
6809 static struct pernet_operations ip6_route_net_late_ops = {
6810 	.init = ip6_route_net_init_late,
6811 	.exit = ip6_route_net_exit_late,
6812 };
6813 
6814 static struct notifier_block ip6_route_dev_notifier = {
6815 	.notifier_call = ip6_route_dev_notify,
6816 	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
6817 };
6818 
6819 void __init ip6_route_init_special_entries(void)
6820 {
6821 	/* Registration of the loopback device is done before this portion
6822 	 * of code, so the loopback reference in rt6_info will not be taken;
6823 	 * do it manually for init_net. */
6824 	init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
6825 	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6826 	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6827   #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6828 	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6829 	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6830 	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6831 	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6832   #endif
6833 }
6834 
6835 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6836 DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)
6837 
6838 BTF_ID_LIST_SINGLE(btf_fib6_info_id, struct, fib6_info)
6839 
6840 static const struct bpf_iter_seq_info ipv6_route_seq_info = {
6841 	.seq_ops		= &ipv6_route_seq_ops,
6842 	.init_seq_private	= bpf_iter_init_seq_net,
6843 	.fini_seq_private	= bpf_iter_fini_seq_net,
6844 	.seq_priv_size		= sizeof(struct ipv6_route_iter),
6845 };
6846 
6847 static struct bpf_iter_reg ipv6_route_reg_info = {
6848 	.target			= "ipv6_route",
6849 	.ctx_arg_info_size	= 1,
6850 	.ctx_arg_info		= {
6851 		{ offsetof(struct bpf_iter__ipv6_route, rt),
6852 		  PTR_TO_BTF_ID_OR_NULL },
6853 	},
6854 	.seq_info		= &ipv6_route_seq_info,
6855 };
6856 
6857 static int __init bpf_iter_register(void)
6858 {
6859 	ipv6_route_reg_info.ctx_arg_info[0].btf_id = *btf_fib6_info_id;
6860 	return bpf_iter_reg_target(&ipv6_route_reg_info);
6861 }
6862 
6863 static void bpf_iter_unregister(void)
6864 {
6865 	bpf_iter_unreg_target(&ipv6_route_reg_info);
6866 }
6867 #endif
6868 
6869 static const struct rtnl_msg_handler ip6_route_rtnl_msg_handlers[] __initconst_or_module = {
6870 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_NEWROUTE,
6871 	 .doit = inet6_rtm_newroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6872 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_DELROUTE,
6873 	 .doit = inet6_rtm_delroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6874 	{.owner = THIS_MODULE, .protocol = PF_INET6, .msgtype = RTM_GETROUTE,
6875 	 .doit = inet6_rtm_getroute, .flags = RTNL_FLAG_DOIT_UNLOCKED},
6876 };
6877 
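/* Subsystem init: create the dst kmem cache and register the pernet
 * subsystems before fib6, xfrm6 and fib6 rules come up; the
 * rtnetlink handlers and the netdevice notifier are registered last.
 * Each failure label below unwinds exactly the steps that already
 * succeeded.
 */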
6878 int __init ip6_route_init(void)
6879 {
6880 	int ret;
6881 	int cpu;
6882 
6883 	ret = -ENOMEM;
6884 	ip6_dst_ops_template.kmem_cachep =
6885 		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
6886 				  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT, NULL);
6887 	if (!ip6_dst_ops_template.kmem_cachep)
6888 		goto out;
6889 
6890 	ret = dst_entries_init(&ip6_dst_blackhole_ops);
6891 	if (ret)
6892 		goto out_kmem_cache;
6893 
6894 	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
6895 	if (ret)
6896 		goto out_dst_entries;
6897 
6898 	ret = register_pernet_subsys(&ip6_route_net_ops);
6899 	if (ret)
6900 		goto out_register_inetpeer;
6901 
6902 	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
6903 
6904 	ret = fib6_init();
6905 	if (ret)
6906 		goto out_register_subsys;
6907 
6908 	ret = xfrm6_init();
6909 	if (ret)
6910 		goto out_fib6_init;
6911 
6912 	ret = fib6_rules_init();
6913 	if (ret)
6914 		goto xfrm6_init;
6915 
6916 	ret = register_pernet_subsys(&ip6_route_net_late_ops);
6917 	if (ret)
6918 		goto fib6_rules_init;
6919 
6920 	ret = rtnl_register_many(ip6_route_rtnl_msg_handlers);
6921 	if (ret < 0)
6922 		goto out_register_late_subsys;
6923 
6924 	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
6925 	if (ret)
6926 		goto out_register_late_subsys;
6927 
6928 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6929 	ret = bpf_iter_register();
6930 	if (ret)
6931 		goto out_register_late_subsys;
6932 #endif
6933 
6934 	for_each_possible_cpu(cpu) {
6935 		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
6936 
6937 		INIT_LIST_HEAD(&ul->head);
6938 		spin_lock_init(&ul->lock);
6939 	}
6940 
6941 out:
6942 	return ret;
6943 
6944 out_register_late_subsys:
6945 	rtnl_unregister_all(PF_INET6);
6946 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6947 fib6_rules_init:
6948 	fib6_rules_cleanup();
6949 xfrm6_init:
6950 	xfrm6_fini();
6951 out_fib6_init:
6952 	fib6_gc_cleanup();
6953 out_register_subsys:
6954 	unregister_pernet_subsys(&ip6_route_net_ops);
6955 out_register_inetpeer:
6956 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6957 out_dst_entries:
6958 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6959 out_kmem_cache:
6960 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6961 	goto out;
6962 }
6963 
6964 void ip6_route_cleanup(void)
6965 {
6966 #if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
6967 	bpf_iter_unregister();
6968 #endif
6969 	unregister_netdevice_notifier(&ip6_route_dev_notifier);
6970 	unregister_pernet_subsys(&ip6_route_net_late_ops);
6971 	fib6_rules_cleanup();
6972 	xfrm6_fini();
6973 	fib6_gc_cleanup();
6974 	unregister_pernet_subsys(&ipv6_inetpeer_ops);
6975 	unregister_pernet_subsys(&ip6_route_net_ops);
6976 	dst_entries_destroy(&ip6_dst_blackhole_ops);
6977 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
6978 }
6979