/*
 *	Handle firewalling
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 *	Bart De Schuymer		<bdschuym@pandora.be>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Lennert dedicates this file to Kerstin Wurdinger.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/if_pppox.h>
#include <linux/ppp_defs.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_arp.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>

#include <net/ip.h>
#include <net/ipv6.h>
#include <net/route.h>

#include <asm/uaccess.h>
#include "br_private.h"
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif

#define skb_origaddr(skb)	 (((struct bridge_skb_cb *) \
				 (skb->nf_bridge->data))->daddr.ipv4)
#define store_orig_dstaddr(skb)	 (skb_origaddr(skb) = ip_hdr(skb)->daddr)
#define dnat_took_place(skb)	 (skb_origaddr(skb) != ip_hdr(skb)->daddr)

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *brnf_sysctl_header;
static int brnf_call_iptables __read_mostly = 1;
static int brnf_call_ip6tables __read_mostly = 1;
static int brnf_call_arptables __read_mostly = 1;
static int brnf_filter_vlan_tagged __read_mostly = 0;
static int brnf_filter_pppoe_tagged __read_mostly = 0;
#else
#define brnf_call_iptables 1
#define brnf_call_ip6tables 1
#define brnf_call_arptables 1
#define brnf_filter_vlan_tagged 0
#define brnf_filter_pppoe_tagged 0
#endif

#define IS_IP(skb) \
	(!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))

#define IS_IPV6(skb) \
	(!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))

#define IS_ARP(skb) \
	(!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))

static inline __be16 vlan_proto(const struct sk_buff *skb)
{
	if (vlan_tx_tag_present(skb))
		return skb->protocol;
	else if (skb->protocol == htons(ETH_P_8021Q))
		return vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
	else
		return 0;
}

#define IS_VLAN_IP(skb) \
	(vlan_proto(skb) == htons(ETH_P_IP) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_IPV6(skb) \
	(vlan_proto(skb) == htons(ETH_P_IPV6) && \
	 brnf_filter_vlan_tagged)

#define IS_VLAN_ARP(skb) \
	(vlan_proto(skb) == htons(ETH_P_ARP) && \
	 brnf_filter_vlan_tagged)

static inline __be16 pppoe_proto(const struct sk_buff *skb)
{
	return *((__be16 *)(skb_mac_header(skb) + ETH_HLEN +
			    sizeof(struct pppoe_hdr)));
}

#define IS_PPPOE_IP(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IP) && \
	 brnf_filter_pppoe_tagged)

#define IS_PPPOE_IPV6(skb) \
	(skb->protocol == htons(ETH_P_PPP_SES) && \
	 pppoe_proto(skb) == htons(PPP_IPV6) && \
	 brnf_filter_pppoe_tagged)

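/* Stub dst_ops used by the bridge's fake_rtable below.  They intentionally
 * do nothing (or simply report the device MTU), so that IP-layer code which
 * pokes at skb_dst() of a bridged packet stays happy without a real route. */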
static void fake_update_pmtu(struct dst_entry *dst, u32 mtu)
{
}

static u32 *fake_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	return NULL;
}

static struct neighbour *fake_neigh_lookup(const struct dst_entry *dst, const void *daddr)
{
	return NULL;
}

static unsigned int fake_mtu(const struct dst_entry *dst)
{
	return dst->dev->mtu;
}

static struct dst_ops fake_dst_ops = {
	.family =		AF_INET,
	.protocol =		cpu_to_be16(ETH_P_IP),
	.update_pmtu =		fake_update_pmtu,
	.cow_metrics =		fake_cow_metrics,
	.neigh_lookup =		fake_neigh_lookup,
	.mtu =			fake_mtu,
};

/*
 * Initialize bogus route table used to keep netfilter happy.
 * Currently, we fill in the PMTU entry because netfilter
 * refragmentation needs it, and the rt_flags entry because
 * ipt_REJECT needs it.  Future netfilter modules might
 * require us to fill additional fields.
 */
static const u32 br_dst_default_metrics[RTAX_MAX] = {
	[RTAX_MTU - 1] = 1500,
};

void br_netfilter_rtable_init(struct net_bridge *br)
{
	struct rtable *rt = &br->fake_rtable;

	atomic_set(&rt->dst.__refcnt, 1);
	rt->dst.dev = br->dev;
	rt->dst.path = &rt->dst;
	dst_init_metrics(&rt->dst, br_dst_default_metrics, true);
	rt->dst.flags	= DST_NOXFRM | DST_NOPEER;
	rt->dst.ops = &fake_dst_ops;
}

static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? &port->br->fake_rtable : NULL;
}

static inline struct net_device *bridge_parent(const struct net_device *dev)
{
	struct net_bridge_port *port;

	port = br_port_get_rcu(dev);
	return port ? port->br->dev : NULL;
}

static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
{
	skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
	if (likely(skb->nf_bridge))
		atomic_set(&(skb->nf_bridge->use), 1);

	return skb->nf_bridge;
}

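/* Get a writable copy of the skb's nf_bridge_info when it is shared with
 * other skbs (use count > 1); returns NULL if the allocation fails. */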
static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (atomic_read(&nf_bridge->use) > 1) {
		struct nf_bridge_info *tmp = nf_bridge_alloc(skb);

		if (tmp) {
			memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
			atomic_set(&tmp->use, 1);
		}
		nf_bridge_put(nf_bridge);
		nf_bridge = tmp;
	}
	return nf_bridge;
}

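/* The helpers below strip a VLAN or PPPoE encapsulation header before the
 * packet is handed to the IPv4/IPv6 netfilter hooks and put it back
 * afterwards, keeping skb->network_header in sync. */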
static inline void nf_bridge_push_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_push(skb, len);
	skb->network_header -= len;
}

static inline void nf_bridge_pull_encap_header(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull(skb, len);
	skb->network_header += len;
}

static inline void nf_bridge_pull_encap_header_rcsum(struct sk_buff *skb)
{
	unsigned int len = nf_bridge_encap_header_len(skb);

	skb_pull_rcsum(skb, len);
	skb->network_header += len;
}

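/* Save the Ethernet header (plus any encapsulation header) in nf_bridge->data
 * so that nf_bridge_copy_header() can restore it after IP-layer
 * refragmentation. */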
static inline void nf_bridge_save_header(struct sk_buff *skb)
{
	int header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);

	skb_copy_from_linear_data_offset(skb, -header_size,
					 skb->nf_bridge->data, header_size);
}

static inline void nf_bridge_update_protocol(struct sk_buff *skb)
{
	if (skb->nf_bridge->mask & BRNF_8021Q)
		skb->protocol = htons(ETH_P_8021Q);
	else if (skb->nf_bridge->mask & BRNF_PPPoE)
		skb->protocol = htons(ETH_P_PPP_SES);
}

/* When handing a packet over to the IP layer
 * check whether we have a skb that is in the
 * expected format
 */

static int br_parse_ip_options(struct sk_buff *skb)
{
	struct ip_options *opt;
	const struct iphdr *iph;
	struct net_device *dev = skb->dev;
	u32 len;

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);

	/* Basic sanity checks */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);
	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto inhdr_error;

	len = ntohs(iph->tot_len);
	if (skb->len < len) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	if (pskb_trim_rcsum(skb, len)) {
		IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	if (iph->ihl == 5)
		return 0;

	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
	if (ip_options_compile(dev_net(dev), opt, skb))
		goto inhdr_error;

	/* Check correct handling of SRR option */
	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
			goto drop;

		if (ip_options_rcv_srr(skb))
			goto drop;
	}

	return 0;

inhdr_error:
	IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
drop:
	return -1;
}

/* Fill in the header for fragmented IP packets handled by
 * the IPv4 connection tracking code.
 */
int nf_bridge_copy_header(struct sk_buff *skb)
{
	int err;
	unsigned int header_size;

	nf_bridge_update_protocol(skb);
	header_size = ETH_HLEN + nf_bridge_encap_header_len(skb);
	err = skb_cow_head(skb, header_size);
	if (err)
		return err;

	skb_copy_to_linear_data_offset(skb, -header_size,
				       skb->nf_bridge->data, header_size);
	__skb_push(skb, nf_bridge_encap_header_len(skb));
	return 0;
}

/* PF_BRIDGE/PRE_ROUTING *********************************************/
/* Undo the changes made for ip6tables PREROUTING and continue the
 * bridge PRE_ROUTING hook. */
static int br_nf_pre_routing_finish_ipv6(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;

	rt = bridge_parent_rtable(nf_bridge->physindev);
	if (!rt) {
		kfree_skb(skb);
		return 0;
	}
	skb_dst_set_noref(skb, &rt->dst);

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

/* Obtain the correct destination MAC address, while preserving the original
 * source MAC address. If we already know this address, we just copy it. If we
 * don't, we use the neighbour framework to find out. In both cases, we make
 * sure that br_handle_frame_finish() is called afterwards.
 */
static int br_nf_pre_routing_finish_bridge(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct neighbour *neigh;
	struct dst_entry *dst;

	skb->dev = bridge_parent(skb->dev);
	if (!skb->dev)
		goto free_skb;
	dst = skb_dst(skb);
	neigh = dst_get_neighbour_noref(dst);
	if (neigh->hh.hh_len) {
		neigh_hh_bridge(&neigh->hh, skb);
		skb->dev = nf_bridge->physindev;
		return br_handle_frame_finish(skb);
	} else {
		/* the neighbour function below overwrites the complete
		 * MAC header, so we save the Ethernet source address and
		 * protocol number. */
		skb_copy_from_linear_data_offset(skb, -(ETH_HLEN-ETH_ALEN),
						 skb->nf_bridge->data,
						 ETH_HLEN-ETH_ALEN);
		/* tell br_dev_xmit to continue with forwarding */
		nf_bridge->mask |= BRNF_BRIDGED_DNAT;
		return neigh->output(neigh, skb);
	}
free_skb:
	kfree_skb(skb);
	return 0;
}

/* This requires some explaining. If DNAT has taken place,
 * we will need to fix up the destination Ethernet address.
 *
 * There are two cases to consider:
 * 1. The packet was DNAT'ed to a device in the same bridge
 *    port group as it was received on. We can still bridge
 *    the packet.
 * 2. The packet was DNAT'ed to a different device, either
 *    a non-bridged device or another bridge port group.
 *    The packet will need to be routed.
 *
 * The correct way of distinguishing between these two cases is to
 * call ip_route_input() and to look at skb->dst->dev, which is
 * changed to the destination device if ip_route_input() succeeds.
 *
 * Let's first consider the case that ip_route_input() succeeds:
 *
 * If the output device equals the logical bridge device the packet
 * came in on, we can consider this bridging. The corresponding MAC
 * address will be obtained in br_nf_pre_routing_finish_bridge.
 * Otherwise, the packet is considered to be routed and we just
 * change the destination MAC address so that the packet will
 * later be passed up to the IP stack to be routed. For a redirected
 * packet, ip_route_input() will give back the localhost as output device,
 * which differs from the bridge device.
 *
 * Let's now consider the case that ip_route_input() fails:
 *
 * This can be because the destination address is martian, in which case
 * the packet will be dropped.
 * If IP forwarding is disabled, ip_route_input() will fail, while
 * ip_route_output_key() can return success. The source
 * address for ip_route_output_key() is set to zero, so ip_route_output_key()
 * thinks we're handling a locally generated packet and won't care
 * if IP forwarding is enabled. If the output device equals the logical bridge
 * device, we proceed as if ip_route_input() succeeded. If it differs from the
 * logical bridge port or if ip_route_output_key() fails we drop the packet.
 */
static int br_nf_pre_routing_finish(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct iphdr *iph = ip_hdr(skb);
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct rtable *rt;
	int err;

	if (nf_bridge->mask & BRNF_PKT_TYPE) {
		skb->pkt_type = PACKET_OTHERHOST;
		nf_bridge->mask ^= BRNF_PKT_TYPE;
	}
	nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
	if (dnat_took_place(skb)) {
		if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
			struct in_device *in_dev = __in_dev_get_rcu(dev);

			/* If err equals -EHOSTUNREACH the error is due to a
			 * martian destination or due to the fact that
			 * forwarding is disabled. For most martian packets,
			 * ip_route_output_key() will fail. It won't fail for 2 types of
			 * martian destinations: loopback destinations and destination
			 * 0.0.0.0. In both cases the packet will be dropped because the
			 * destination is the loopback device and not the bridge. */
			if (err != -EHOSTUNREACH || !in_dev || IN_DEV_FORWARD(in_dev))
				goto free_skb;

			rt = ip_route_output(dev_net(dev), iph->daddr, 0,
					     RT_TOS(iph->tos), 0);
			if (!IS_ERR(rt)) {
				/* - Bridged-and-DNAT'ed traffic doesn't
				 *   require ip_forwarding. */
				if (rt->dst.dev == dev) {
					skb_dst_set(skb, &rt->dst);
					goto bridged_dnat;
				}
				ip_rt_put(rt);
			}
free_skb:
			kfree_skb(skb);
			return 0;
		} else {
			if (skb_dst(skb)->dev == dev) {
bridged_dnat:
				skb->dev = nf_bridge->physindev;
				nf_bridge_update_protocol(skb);
				nf_bridge_push_encap_header(skb);
				NF_HOOK_THRESH(NFPROTO_BRIDGE,
					       NF_BR_PRE_ROUTING,
					       skb, skb->dev, NULL,
					       br_nf_pre_routing_finish_bridge,
					       1);
				return 0;
			}
			memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
			skb->pkt_type = PACKET_HOST;
		}
	} else {
		rt = bridge_parent_rtable(nf_bridge->physindev);
		if (!rt) {
			kfree_skb(skb);
			return 0;
		}
		skb_dst_set_noref(skb, &rt->dst);
	}

	skb->dev = nf_bridge->physindev;
	nf_bridge_update_protocol(skb);
	nf_bridge_push_encap_header(skb);
	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_PRE_ROUTING, skb, skb->dev, NULL,
		       br_handle_frame_finish, 1);

	return 0;
}

/* Some common code for IPv4/IPv6 */
static struct net_device *setup_pre_routing(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge->mask |= BRNF_NF_BRIDGE_PREROUTING;
	nf_bridge->physindev = skb->dev;
	skb->dev = bridge_parent(skb->dev);
	if (skb->protocol == htons(ETH_P_8021Q))
		nf_bridge->mask |= BRNF_8021Q;
	else if (skb->protocol == htons(ETH_P_PPP_SES))
		nf_bridge->mask |= BRNF_PPPoE;

	return skb->dev;
}

/* We only check the length. A bridge shouldn't do any hop-by-hop stuff anyway */
static int check_hbh_len(struct sk_buff *skb)
{
	unsigned char *raw = (u8 *)(ipv6_hdr(skb) + 1);
	u32 pkt_len;
	const unsigned char *nh = skb_network_header(skb);
	int off = raw - nh;
	int len = (raw[1] + 1) << 3;

	if ((raw + len) - skb->data > skb_headlen(skb))
		goto bad;

	off += 2;
	len -= 2;

	while (len > 0) {
		int optlen = nh[off + 1] + 2;

		switch (nh[off]) {
		case IPV6_TLV_PAD0:
			optlen = 1;
			break;

		case IPV6_TLV_PADN:
			break;

		case IPV6_TLV_JUMBO:
			if (nh[off + 1] != 4 || (off & 3) != 2)
				goto bad;
			pkt_len = ntohl(*(__be32 *) (nh + off + 2));
			if (pkt_len <= IPV6_MAXPLEN ||
			    ipv6_hdr(skb)->payload_len)
				goto bad;
			if (pkt_len > skb->len - sizeof(struct ipv6hdr))
				goto bad;
			if (pskb_trim_rcsum(skb,
					    pkt_len + sizeof(struct ipv6hdr)))
				goto bad;
			nh = skb_network_header(skb);
			break;
		default:
			if (optlen > len)
				goto bad;
			break;
		}
		off += optlen;
		len -= optlen;
	}
	if (len == 0)
		return 0;
bad:
	return -1;
}

/* Replicate the checks that IPv6 does on packet reception and pass the packet
 * to ip6tables, which doesn't support NAT, so things are fairly simple. */
static unsigned int br_nf_pre_routing_ipv6(unsigned int hook,
					   struct sk_buff *skb,
					   const struct net_device *in,
					   const struct net_device *out,
					   int (*okfn)(struct sk_buff *))
{
	const struct ipv6hdr *hdr;
	u32 pkt_len;

	if (skb->len < sizeof(struct ipv6hdr))
		return NF_DROP;

	if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
		return NF_DROP;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6)
		return NF_DROP;

	pkt_len = ntohs(hdr->payload_len);

	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len)
			return NF_DROP;
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			return NF_DROP;
	}
	if (hdr->nexthdr == NEXTHDR_HOP && check_hbh_len(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;

	skb->protocol = htons(ETH_P_IPV6);
	NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish_ipv6);

	return NF_STOLEN;
}

/* Direct IPv6 traffic to br_nf_pre_routing_ipv6.
 * Replicate the checks that IPv4 does on packet reception.
 * Set skb->dev to the bridge device (i.e. parent of the
 * receiving device) to make netfilter happy, the REDIRECT
 * target in particular.  Save the original destination IP
 * address to be able to detect DNAT afterwards. */
static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	__u32 len = nf_bridge_encap_header_len(skb);

	if (unlikely(!pskb_may_pull(skb, len)))
		return NF_DROP;

	p = br_port_get_rcu(in);
	if (p == NULL)
		return NF_DROP;
	br = p->br;

	if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
		if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
			return NF_ACCEPT;

		nf_bridge_pull_encap_header_rcsum(skb);
		return br_nf_pre_routing_ipv6(hook, skb, in, out, okfn);
	}

	if (!brnf_call_iptables && !br->nf_call_iptables)
		return NF_ACCEPT;

	if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
		return NF_ACCEPT;

	nf_bridge_pull_encap_header_rcsum(skb);

	if (br_parse_ip_options(skb))
		return NF_DROP;

	nf_bridge_put(skb->nf_bridge);
	if (!nf_bridge_alloc(skb))
		return NF_DROP;
	if (!setup_pre_routing(skb))
		return NF_DROP;
	store_orig_dstaddr(skb);
	skb->protocol = htons(ETH_P_IP);

	NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, skb->dev, NULL,
		br_nf_pre_routing_finish);

	return NF_STOLEN;
}


/* PF_BRIDGE/LOCAL_IN ************************************************/
/* The packet is locally destined, which requires a real
 * dst_entry, so detach the fake one.  On the way up, the
 * packet would pass through PRE_ROUTING again (which already
 * took place when the packet entered the bridge), but we
 * register an IPv4 PRE_ROUTING 'sabotage' hook that will
 * prevent this from happening. */
static unsigned int br_nf_local_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	struct rtable *rt = skb_rtable(skb);

	if (rt && rt == bridge_parent_rtable(in))
		skb_dst_drop(skb);

	return NF_ACCEPT;
}

/* PF_BRIDGE/FORWARD *************************************************/
static int br_nf_forward_finish(struct sk_buff *skb)
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *in;

	if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
		in = nf_bridge->physindev;
		if (nf_bridge->mask & BRNF_PKT_TYPE) {
			skb->pkt_type = PACKET_OTHERHOST;
			nf_bridge->mask ^= BRNF_PKT_TYPE;
		}
		nf_bridge_update_protocol(skb);
	} else {
		in = *((struct net_device **)(skb->cb));
	}
	nf_bridge_push_encap_header(skb);

	NF_HOOK_THRESH(NFPROTO_BRIDGE, NF_BR_FORWARD, skb, in,
		       skb->dev, br_forward_finish, 1);
	return 0;
}

/* This is the 'purely bridged' case.  For IP, we pass the packet to
 * netfilter with indev and outdev set to the bridge device,
 * but we are still able to filter on the 'real' indev/outdev
 * because of the physdev module. For ARP, indev and outdev are the
 * bridge ports. */
static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
				     const struct net_device *in,
				     const struct net_device *out,
				     int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge;
	struct net_device *parent;
	u_int8_t pf;

	if (!skb->nf_bridge)
		return NF_ACCEPT;

	/* Need exclusive nf_bridge_info since we might have multiple
	 * different physoutdevs. */
	if (!nf_bridge_unshare(skb))
		return NF_DROP;

	parent = bridge_parent(out);
	if (!parent)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	nf_bridge_pull_encap_header(skb);

	nf_bridge = skb->nf_bridge;
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	if (pf == PF_INET && br_parse_ip_options(skb))
		return NF_DROP;

	/* The physdev module checks on this */
	nf_bridge->mask |= BRNF_BRIDGED;
	nf_bridge->physoutdev = skb->dev;
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_FORWARD, skb, bridge_parent(in), parent,
		br_nf_forward_finish);

	return NF_STOLEN;
}

static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net_bridge_port *p;
	struct net_bridge *br;
	struct net_device **d = (struct net_device **)(skb->cb);

	p = br_port_get_rcu(out);
	if (p == NULL)
		return NF_ACCEPT;
	br = p->br;

	if (!brnf_call_arptables && !br->nf_call_arptables)
		return NF_ACCEPT;

	if (!IS_ARP(skb)) {
		if (!IS_VLAN_ARP(skb))
			return NF_ACCEPT;
		nf_bridge_pull_encap_header(skb);
	}

	if (arp_hdr(skb)->ar_pln != 4) {
		if (IS_VLAN_ARP(skb))
			nf_bridge_push_encap_header(skb);
		return NF_ACCEPT;
	}
	*d = (struct net_device *)in;
	NF_HOOK(NFPROTO_ARP, NF_ARP_FORWARD, skb, (struct net_device *)in,
		(struct net_device *)out, br_nf_forward_finish);

	return NF_STOLEN;
}

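/* When IPv4 conntrack has reassembled a packet on the bridge, it may now be
 * bigger than the outgoing device's MTU; re-fragment it before transmit.
 * Without conntrack support this reduces to a plain br_dev_queue_push_xmit. */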
#if IS_ENABLED(CONFIG_NF_CONNTRACK_IPV4)
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	int ret;

	if (skb->nfct != NULL && skb->protocol == htons(ETH_P_IP) &&
	    skb->len + nf_bridge_mtu_reduction(skb) > skb->dev->mtu &&
	    !skb_is_gso(skb)) {
		if (br_parse_ip_options(skb))
			/* Drop invalid packet */
			return NF_DROP;
		ret = ip_fragment(skb, br_dev_queue_push_xmit);
	} else
		ret = br_dev_queue_push_xmit(skb);

	return ret;
}
#else
static int br_nf_dev_queue_xmit(struct sk_buff *skb)
{
	return br_dev_queue_push_xmit(skb);
}
#endif

/* PF_BRIDGE/POST_ROUTING ********************************************/
static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
				       const struct net_device *in,
				       const struct net_device *out,
				       int (*okfn)(struct sk_buff *))
{
	struct nf_bridge_info *nf_bridge = skb->nf_bridge;
	struct net_device *realoutdev = bridge_parent(skb->dev);
	u_int8_t pf;

	if (!nf_bridge || !(nf_bridge->mask & BRNF_BRIDGED))
		return NF_ACCEPT;

	if (!realoutdev)
		return NF_DROP;

	if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
		pf = PF_INET;
	else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
		pf = PF_INET6;
	else
		return NF_ACCEPT;

	/* We assume any code from br_dev_queue_push_xmit onwards doesn't care
	 * about the value of skb->pkt_type. */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		skb->pkt_type = PACKET_HOST;
		nf_bridge->mask |= BRNF_PKT_TYPE;
	}

	nf_bridge_pull_encap_header(skb);
	nf_bridge_save_header(skb);
	if (pf == PF_INET)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	NF_HOOK(pf, NF_INET_POST_ROUTING, skb, NULL, realoutdev,
		br_nf_dev_queue_xmit);

	return NF_STOLEN;
}

/* IP/SABOTAGE *****************************************************/
/* Don't hand locally destined packets to PF_INET(6)/PRE_ROUTING
 * for the second time. */
static unsigned int ip_sabotage_in(unsigned int hook, struct sk_buff *skb,
				   const struct net_device *in,
				   const struct net_device *out,
				   int (*okfn)(struct sk_buff *))
{
	if (skb->nf_bridge &&
	    !(skb->nf_bridge->mask & BRNF_NF_BRIDGE_PREROUTING)) {
		return NF_STOP;
	}

	return NF_ACCEPT;
}

/* For br_nf_post_routing, we need (prio = NF_BR_PRI_LAST), because
 * br_dev_queue_push_xmit is called afterwards */
static struct nf_hook_ops br_nf_ops[] __read_mostly = {
	{
		.hook = br_nf_pre_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_PRE_ROUTING,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_local_in,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_LOCAL_IN,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_forward_ip,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF - 1,
	},
	{
		.hook = br_nf_forward_arp,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_FORWARD,
		.priority = NF_BR_PRI_BRNF,
	},
	{
		.hook = br_nf_post_routing,
		.owner = THIS_MODULE,
		.pf = PF_BRIDGE,
		.hooknum = NF_BR_POST_ROUTING,
		.priority = NF_BR_PRI_LAST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP_PRI_FIRST,
	},
	{
		.hook = ip_sabotage_in,
		.owner = THIS_MODULE,
		.pf = PF_INET6,
		.hooknum = NF_INET_PRE_ROUTING,
		.priority = NF_IP6_PRI_FIRST,
	},
};

#ifdef CONFIG_SYSCTL
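/* proc handler for the /proc/sys/net/bridge/ entries below: any nonzero
 * value written (e.g. "echo 2 > /proc/sys/net/bridge/bridge-nf-call-iptables")
 * is normalized to 1. */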
static
int brnf_sysctl_call_tables(ctl_table * ctl, int write,
			    void __user * buffer, size_t * lenp, loff_t * ppos)
{
	int ret;

	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);

	if (write && *(int *)(ctl->data))
		*(int *)(ctl->data) = 1;
	return ret;
}

static ctl_table brnf_table[] = {
	{
		.procname	= "bridge-nf-call-arptables",
		.data		= &brnf_call_arptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-iptables",
		.data		= &brnf_call_iptables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-call-ip6tables",
		.data		= &brnf_call_ip6tables,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-vlan-tagged",
		.data		= &brnf_filter_vlan_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{
		.procname	= "bridge-nf-filter-pppoe-tagged",
		.data		= &brnf_filter_pppoe_tagged,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= brnf_sysctl_call_tables,
	},
	{ }
};

static struct ctl_path brnf_path[] = {
	{ .procname = "net", },
	{ .procname = "bridge", },
	{ }
};
#endif

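/* Register the fake dst_ops entry accounting, the bridge netfilter hooks and,
 * when CONFIG_SYSCTL is enabled, the /proc/sys/net/bridge entries. */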
int __init br_netfilter_init(void)
{
	int ret;

	ret = dst_entries_init(&fake_dst_ops);
	if (ret < 0)
		return ret;

	ret = nf_register_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
	if (ret < 0) {
		dst_entries_destroy(&fake_dst_ops);
		return ret;
	}
#ifdef CONFIG_SYSCTL
	brnf_sysctl_header = register_sysctl_paths(brnf_path, brnf_table);
	if (brnf_sysctl_header == NULL) {
		printk(KERN_WARNING
		       "br_netfilter: can't register to sysctl.\n");
		nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
		dst_entries_destroy(&fake_dst_ops);
		return -ENOMEM;
	}
#endif
	printk(KERN_NOTICE "Bridge firewalling registered\n");
	return 0;
}

void br_netfilter_fini(void)
{
	nf_unregister_hooks(br_nf_ops, ARRAY_SIZE(br_nf_ops));
#ifdef CONFIG_SYSCTL
	unregister_sysctl_table(brnf_sysctl_header);
#endif
	dst_entries_destroy(&fake_dst_ops);
}