1 /*
2  * Packet matching code.
3  *
4  * Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
5  * Copyright (C) 2000-2005 Netfilter Core Team <coreteam@netfilter.org>
6  *
7  * This program is free software; you can redistribute it and/or modify
8  * it under the terms of the GNU General Public License version 2 as
9  * published by the Free Software Foundation.
10  */
11 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
12 #include <linux/capability.h>
13 #include <linux/in.h>
14 #include <linux/skbuff.h>
15 #include <linux/kmod.h>
16 #include <linux/vmalloc.h>
17 #include <linux/netdevice.h>
18 #include <linux/module.h>
19 #include <linux/poison.h>
20 #include <linux/icmpv6.h>
21 #include <net/ipv6.h>
22 #include <net/compat.h>
23 #include <asm/uaccess.h>
24 #include <linux/mutex.h>
25 #include <linux/proc_fs.h>
26 #include <linux/err.h>
27 #include <linux/cpumask.h>
28 
29 #include <linux/netfilter_ipv6/ip6_tables.h>
30 #include <linux/netfilter/x_tables.h>
31 #include <net/netfilter/nf_log.h>
32 #include "../../netfilter/xt_repldata.h"
33 
34 MODULE_LICENSE("GPL");
35 MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
36 MODULE_DESCRIPTION("IPv6 packet filter");
37 
38 /*#define DEBUG_IP_FIREWALL*/
39 /*#define DEBUG_ALLOW_ALL*/ /* Useful for remote debugging */
40 /*#define DEBUG_IP_FIREWALL_USER*/
41 
42 #ifdef DEBUG_IP_FIREWALL
43 #define dprintf(format, args...) pr_info(format , ## args)
44 #else
45 #define dprintf(format, args...)
46 #endif
47 
48 #ifdef DEBUG_IP_FIREWALL_USER
49 #define duprintf(format, args...) pr_info(format , ## args)
50 #else
51 #define duprintf(format, args...)
52 #endif
53 
54 #ifdef CONFIG_NETFILTER_DEBUG
55 #define IP_NF_ASSERT(x)	WARN_ON(!(x))
56 #else
57 #define IP_NF_ASSERT(x)
58 #endif
59 
60 #if 0
61 /* All the better to debug you with... */
62 #define static
63 #define inline
64 #endif
65 
66 void *ip6t_alloc_initial_table(const struct xt_table *info)
67 {
68 	return xt_alloc_initial_table(ip6t, IP6T);
69 }
70 EXPORT_SYMBOL_GPL(ip6t_alloc_initial_table);
71 
72 /*
73    We keep a copy of the rules for each CPU, so counters can be updated
74    in the softirq without taking a lock; writers wrap counter updates in
75    the per-CPU xt_recseq sequence counter, and user context reading the
76    counters (get_counters()) retries until it sees a consistent snapshot,
77    while rule replacement swaps in a completely new table.
78 
79    Hence the start of any table is given by get_entry() below.  */
80 
81 /* Check for an extension */
82 int
83 ip6t_ext_hdr(u8 nexthdr)
84 {
85 	return  (nexthdr == IPPROTO_HOPOPTS)   ||
86 		(nexthdr == IPPROTO_ROUTING)   ||
87 		(nexthdr == IPPROTO_FRAGMENT)  ||
88 		(nexthdr == IPPROTO_ESP)       ||
89 		(nexthdr == IPPROTO_AH)        ||
90 		(nexthdr == IPPROTO_NONE)      ||
91 		(nexthdr == IPPROTO_DSTOPTS);
92 }
93 
94 /* Returns whether matches rule or not. */
95 /* Performance critical - called for every packet */
96 static inline bool
97 ip6_packet_match(const struct sk_buff *skb,
98 		 const char *indev,
99 		 const char *outdev,
100 		 const struct ip6t_ip6 *ip6info,
101 		 unsigned int *protoff,
102 		 int *fragoff, bool *hotdrop)
103 {
104 	unsigned long ret;
105 	const struct ipv6hdr *ipv6 = ipv6_hdr(skb);
106 
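/*
 * FWINV(test, flag): XOR the raw test result with the rule's IP6T_INV_* bit,
 * so a set inversion flag in ip6info->invflags flips the sense of the
 * corresponding check below.
 */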
107 #define FWINV(bool, invflg) ((bool) ^ !!(ip6info->invflags & (invflg)))
108 
109 	if (FWINV(ipv6_masked_addr_cmp(&ipv6->saddr, &ip6info->smsk,
110 				       &ip6info->src), IP6T_INV_SRCIP) ||
111 	    FWINV(ipv6_masked_addr_cmp(&ipv6->daddr, &ip6info->dmsk,
112 				       &ip6info->dst), IP6T_INV_DSTIP)) {
113 		dprintf("Source or dest mismatch.\n");
114 /*
115 		dprintf("SRC: %u. Mask: %u. Target: %u.%s\n", ip->saddr,
116 			ipinfo->smsk.s_addr, ipinfo->src.s_addr,
117 			ipinfo->invflags & IP6T_INV_SRCIP ? " (INV)" : "");
118 		dprintf("DST: %u. Mask: %u. Target: %u.%s\n", ip->daddr,
119 			ipinfo->dmsk.s_addr, ipinfo->dst.s_addr,
120 			ipinfo->invflags & IP6T_INV_DSTIP ? " (INV)" : "");*/
121 		return false;
122 	}
123 
124 	ret = ifname_compare_aligned(indev, ip6info->iniface, ip6info->iniface_mask);
125 
126 	if (FWINV(ret != 0, IP6T_INV_VIA_IN)) {
127 		dprintf("VIA in mismatch (%s vs %s).%s\n",
128 			indev, ip6info->iniface,
129 			ip6info->invflags&IP6T_INV_VIA_IN ?" (INV)":"");
130 		return false;
131 	}
132 
133 	ret = ifname_compare_aligned(outdev, ip6info->outiface, ip6info->outiface_mask);
134 
135 	if (FWINV(ret != 0, IP6T_INV_VIA_OUT)) {
136 		dprintf("VIA out mismatch (%s vs %s).%s\n",
137 			outdev, ip6info->outiface,
138 			ip6info->invflags&IP6T_INV_VIA_OUT ?" (INV)":"");
139 		return false;
140 	}
141 
142 /* ... might want to do something with class and flowlabel here ... */
143 
144 	/* look for the desired protocol header */
145 	if((ip6info->flags & IP6T_F_PROTO)) {
146 		int protohdr;
147 		unsigned short _frag_off;
148 
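		/*
		 * ipv6_find_hdr() returns a negative value when the protocol
		 * header cannot be found or parsed.  If that happens on a
		 * non-fragment or first fragment (_frag_off == 0) the packet
		 * is malformed, so hotdrop it; on later fragments the headers
		 * simply are not present, so just report no match.
		 */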
149 		protohdr = ipv6_find_hdr(skb, protoff, -1, &_frag_off);
150 		if (protohdr < 0) {
151 			if (_frag_off == 0)
152 				*hotdrop = true;
153 			return false;
154 		}
155 		*fragoff = _frag_off;
156 
157 		dprintf("Packet protocol %hi ?= %s%hi.\n",
158 				protohdr,
159 				ip6info->invflags & IP6T_INV_PROTO ? "!":"",
160 				ip6info->proto);
161 
162 		if (ip6info->proto == protohdr) {
163 			if(ip6info->invflags & IP6T_INV_PROTO) {
164 				return false;
165 			}
166 			return true;
167 		}
168 
169 		/* We need match for the '-p all', too! */
170 		if ((ip6info->proto != 0) &&
171 			!(ip6info->invflags & IP6T_INV_PROTO))
172 			return false;
173 	}
174 	return true;
175 }
176 
177 /* should be ip6 safe */
178 static bool
179 ip6_checkentry(const struct ip6t_ip6 *ipv6)
180 {
181 	if (ipv6->flags & ~IP6T_F_MASK) {
182 		duprintf("Unknown flag bits set: %08X\n",
183 			 ipv6->flags & ~IP6T_F_MASK);
184 		return false;
185 	}
186 	if (ipv6->invflags & ~IP6T_INV_MASK) {
187 		duprintf("Unknown invflag bits set: %08X\n",
188 			 ipv6->invflags & ~IP6T_INV_MASK);
189 		return false;
190 	}
191 	return true;
192 }
193 
194 static unsigned int
195 ip6t_error(struct sk_buff *skb, const struct xt_action_param *par)
196 {
197 	if (net_ratelimit())
198 		pr_info("error: `%s'\n", (const char *)par->targinfo);
199 
200 	return NF_DROP;
201 }
202 
203 static inline struct ip6t_entry *
204 get_entry(const void *base, unsigned int offset)
205 {
206 	return (struct ip6t_entry *)(base + offset);
207 }
208 
209 /* All zeroes == unconditional rule. */
210 /* Mildly perf critical (only if packet tracing is on) */
211 static inline bool unconditional(const struct ip6t_ip6 *ipv6)
212 {
213 	static const struct ip6t_ip6 uncond;
214 
215 	return memcmp(ipv6, &uncond, sizeof(uncond)) == 0;
216 }
217 
218 static inline const struct xt_entry_target *
219 ip6t_get_target_c(const struct ip6t_entry *e)
220 {
221 	return ip6t_get_target((struct ip6t_entry *)e);
222 }
223 
224 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
225     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
226 /* This cries for unification! */
227 static const char *const hooknames[] = {
228 	[NF_INET_PRE_ROUTING]		= "PREROUTING",
229 	[NF_INET_LOCAL_IN]		= "INPUT",
230 	[NF_INET_FORWARD]		= "FORWARD",
231 	[NF_INET_LOCAL_OUT]		= "OUTPUT",
232 	[NF_INET_POST_ROUTING]		= "POSTROUTING",
233 };
234 
235 enum nf_ip_trace_comments {
236 	NF_IP6_TRACE_COMMENT_RULE,
237 	NF_IP6_TRACE_COMMENT_RETURN,
238 	NF_IP6_TRACE_COMMENT_POLICY,
239 };
240 
241 static const char *const comments[] = {
242 	[NF_IP6_TRACE_COMMENT_RULE]	= "rule",
243 	[NF_IP6_TRACE_COMMENT_RETURN]	= "return",
244 	[NF_IP6_TRACE_COMMENT_POLICY]	= "policy",
245 };
246 
247 static struct nf_loginfo trace_loginfo = {
248 	.type = NF_LOG_TYPE_LOG,
249 	.u = {
250 		.log = {
251 			.level = 4,
252 			.logflags = NF_LOG_MASK,
253 		},
254 	},
255 };
256 
257 /* Mildly perf critical (only if packet tracing is on) */
258 static inline int
259 get_chainname_rulenum(const struct ip6t_entry *s, const struct ip6t_entry *e,
260 		      const char *hookname, const char **chainname,
261 		      const char **comment, unsigned int *rulenum)
262 {
263 	const struct xt_standard_target *t = (void *)ip6t_get_target_c(s);
264 
265 	if (strcmp(t->target.u.kernel.target->name, XT_ERROR_TARGET) == 0) {
266 		/* Head of user chain: ERROR target with chainname */
267 		*chainname = t->target.data;
268 		(*rulenum) = 0;
269 	} else if (s == e) {
270 		(*rulenum)++;
271 
272 		if (s->target_offset == sizeof(struct ip6t_entry) &&
273 		    strcmp(t->target.u.kernel.target->name,
274 			   XT_STANDARD_TARGET) == 0 &&
275 		    t->verdict < 0 &&
276 		    unconditional(&s->ipv6)) {
277 			/* Tail of chains: STANDARD target (return/policy) */
278 			*comment = *chainname == hookname
279 				? comments[NF_IP6_TRACE_COMMENT_POLICY]
280 				: comments[NF_IP6_TRACE_COMMENT_RETURN];
281 		}
282 		return 1;
283 	} else
284 		(*rulenum)++;
285 
286 	return 0;
287 }
288 
289 static void trace_packet(const struct sk_buff *skb,
290 			 unsigned int hook,
291 			 const struct net_device *in,
292 			 const struct net_device *out,
293 			 const char *tablename,
294 			 const struct xt_table_info *private,
295 			 const struct ip6t_entry *e)
296 {
297 	const void *table_base;
298 	const struct ip6t_entry *root;
299 	const char *hookname, *chainname, *comment;
300 	const struct ip6t_entry *iter;
301 	unsigned int rulenum = 0;
302 
303 	table_base = private->entries[smp_processor_id()];
304 	root = get_entry(table_base, private->hook_entry[hook]);
305 
306 	hookname = chainname = hooknames[hook];
307 	comment = comments[NF_IP6_TRACE_COMMENT_RULE];
308 
309 	xt_entry_foreach(iter, root, private->size - private->hook_entry[hook])
310 		if (get_chainname_rulenum(iter, e, hookname,
311 		    &chainname, &comment, &rulenum) != 0)
312 			break;
313 
314 	nf_log_packet(AF_INET6, hook, skb, in, out, &trace_loginfo,
315 		      "TRACE: %s:%s:%s:%u ",
316 		      tablename, chainname, comment, rulenum);
317 }
318 #endif
319 
320 static inline __pure struct ip6t_entry *
321 ip6t_next_entry(const struct ip6t_entry *entry)
322 {
323 	return (void *)entry + entry->next_offset;
324 }
325 
326 /* Returns one of the generic firewall policies, like NF_ACCEPT. */
327 unsigned int
328 ip6t_do_table(struct sk_buff *skb,
329 	      unsigned int hook,
330 	      const struct net_device *in,
331 	      const struct net_device *out,
332 	      struct xt_table *table)
333 {
334 	static const char nulldevname[IFNAMSIZ] __attribute__((aligned(sizeof(long))));
335 	/* Initializing verdict to NF_DROP keeps gcc happy. */
336 	unsigned int verdict = NF_DROP;
337 	const char *indev, *outdev;
338 	const void *table_base;
339 	struct ip6t_entry *e, **jumpstack;
340 	unsigned int *stackptr, origptr, cpu;
341 	const struct xt_table_info *private;
342 	struct xt_action_param acpar;
343 	unsigned int addend;
344 
345 	/* Initialization */
346 	indev = in ? in->name : nulldevname;
347 	outdev = out ? out->name : nulldevname;
348 	/* We handle fragments by dealing with the first fragment as
349 	 * if it was a normal packet.  All other fragments are treated
350 	 * normally, except that they will NEVER match rules that ask
351 	 * things we don't know, ie. tcp syn flag or ports.  If the
352 	 * rule is also a fragment-specific rule, non-fragments won't
353 	 * match it. */
354 	acpar.hotdrop = false;
355 	acpar.in      = in;
356 	acpar.out     = out;
357 	acpar.family  = NFPROTO_IPV6;
358 	acpar.hooknum = hook;
359 
360 	IP_NF_ASSERT(table->valid_hooks & (1 << hook));
361 
362 	local_bh_disable();
363 	addend = xt_write_recseq_begin();
364 	private = table->private;
365 	cpu        = smp_processor_id();
366 	table_base = private->entries[cpu];
367 	jumpstack  = (struct ip6t_entry **)private->jumpstack[cpu];
368 	stackptr   = per_cpu_ptr(private->stackptr, cpu);
369 	origptr    = *stackptr;
370 
371 	e = get_entry(table_base, private->hook_entry[hook]);
372 
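	/*
	 * Rule traversal: a standard target's verdict v < 0 encodes a final
	 * verdict as -v - 1, except XT_RETURN, which pops the jump stack or
	 * falls back to the hook's underflow rule; v >= 0 is the offset of
	 * the rule to jump to, and unless the rule uses goto (IP6T_F_GOTO)
	 * the current position is pushed on jumpstack so a later RETURN can
	 * resume after it.
	 */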
373 	do {
374 		const struct xt_entry_target *t;
375 		const struct xt_entry_match *ematch;
376 
377 		IP_NF_ASSERT(e);
378 		if (!ip6_packet_match(skb, indev, outdev, &e->ipv6,
379 		    &acpar.thoff, &acpar.fragoff, &acpar.hotdrop)) {
380  no_match:
381 			e = ip6t_next_entry(e);
382 			continue;
383 		}
384 
385 		xt_ematch_foreach(ematch, e) {
386 			acpar.match     = ematch->u.kernel.match;
387 			acpar.matchinfo = ematch->data;
388 			if (!acpar.match->match(skb, &acpar))
389 				goto no_match;
390 		}
391 
392 		ADD_COUNTER(e->counters, skb->len, 1);
393 
394 		t = ip6t_get_target_c(e);
395 		IP_NF_ASSERT(t->u.kernel.target);
396 
397 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
398     defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
399 		/* The packet is traced: log it */
400 		if (unlikely(skb->nf_trace))
401 			trace_packet(skb, hook, in, out,
402 				     table->name, private, e);
403 #endif
404 		/* Standard target? */
405 		if (!t->u.kernel.target->target) {
406 			int v;
407 
408 			v = ((struct xt_standard_target *)t)->verdict;
409 			if (v < 0) {
410 				/* Pop from stack? */
411 				if (v != XT_RETURN) {
412 					verdict = (unsigned)(-v) - 1;
413 					break;
414 				}
415 				if (*stackptr <= origptr)
416 					e = get_entry(table_base,
417 					    private->underflow[hook]);
418 				else
419 					e = ip6t_next_entry(jumpstack[--*stackptr]);
420 				continue;
421 			}
422 			if (table_base + v != ip6t_next_entry(e) &&
423 			    !(e->ipv6.flags & IP6T_F_GOTO)) {
424 				if (*stackptr >= private->stacksize) {
425 					verdict = NF_DROP;
426 					break;
427 				}
428 				jumpstack[(*stackptr)++] = e;
429 			}
430 
431 			e = get_entry(table_base, v);
432 			continue;
433 		}
434 
435 		acpar.target   = t->u.kernel.target;
436 		acpar.targinfo = t->data;
437 
438 		verdict = t->u.kernel.target->target(skb, &acpar);
439 		if (verdict == XT_CONTINUE)
440 			e = ip6t_next_entry(e);
441 		else
442 			/* Verdict */
443 			break;
444 	} while (!acpar.hotdrop);
445 
446 	*stackptr = origptr;
447 
448  	xt_write_recseq_end(addend);
449  	local_bh_enable();
450 
451 #ifdef DEBUG_ALLOW_ALL
452 	return NF_ACCEPT;
453 #else
454 	if (acpar.hotdrop)
455 		return NF_DROP;
456 	else return verdict;
457 #endif
458 }
459 
460 /* Figures out from what hook each rule can be called: returns 0 if
461    there are loops.  Puts hook bitmask in comefrom. */
462 static int
463 mark_source_chains(const struct xt_table_info *newinfo,
464 		   unsigned int valid_hooks, void *entry0)
465 {
466 	unsigned int hook;
467 
468 	/* No recursion; use packet counter to save back ptrs (reset
469 	   to 0 as we leave), and comefrom to save source hook bitmask */
470 	for (hook = 0; hook < NF_INET_NUMHOOKS; hook++) {
471 		unsigned int pos = newinfo->hook_entry[hook];
472 		struct ip6t_entry *e = (struct ip6t_entry *)(entry0 + pos);
473 
474 		if (!(valid_hooks & (1 << hook)))
475 			continue;
476 
477 		/* Set initial back pointer. */
478 		e->counters.pcnt = pos;
479 
480 		for (;;) {
481 			const struct xt_standard_target *t
482 				= (void *)ip6t_get_target_c(e);
483 			int visited = e->comefrom & (1 << hook);
484 
485 			if (e->comefrom & (1 << NF_INET_NUMHOOKS)) {
486 				pr_err("iptables: loop hook %u pos %u %08X.\n",
487 				       hook, pos, e->comefrom);
488 				return 0;
489 			}
490 			e->comefrom |= ((1 << hook) | (1 << NF_INET_NUMHOOKS));
491 
492 			/* Unconditional return/END. */
493 			if ((e->target_offset == sizeof(struct ip6t_entry) &&
494 			     (strcmp(t->target.u.user.name,
495 				     XT_STANDARD_TARGET) == 0) &&
496 			     t->verdict < 0 &&
497 			     unconditional(&e->ipv6)) || visited) {
498 				unsigned int oldpos, size;
499 
500 				if ((strcmp(t->target.u.user.name,
501 					    XT_STANDARD_TARGET) == 0) &&
502 				    t->verdict < -NF_MAX_VERDICT - 1) {
503 					duprintf("mark_source_chains: bad "
504 						"negative verdict (%i)\n",
505 								t->verdict);
506 					return 0;
507 				}
508 
509 				/* Return: backtrack through the last
510 				   big jump. */
511 				do {
512 					e->comefrom ^= (1<<NF_INET_NUMHOOKS);
513 #ifdef DEBUG_IP_FIREWALL_USER
514 					if (e->comefrom
515 					    & (1 << NF_INET_NUMHOOKS)) {
516 						duprintf("Back unset "
517 							 "on hook %u "
518 							 "rule %u\n",
519 							 hook, pos);
520 					}
521 #endif
522 					oldpos = pos;
523 					pos = e->counters.pcnt;
524 					e->counters.pcnt = 0;
525 
526 					/* We're at the start. */
527 					if (pos == oldpos)
528 						goto next;
529 
530 					e = (struct ip6t_entry *)
531 						(entry0 + pos);
532 				} while (oldpos == pos + e->next_offset);
533 
534 				/* Move along one */
535 				size = e->next_offset;
536 				e = (struct ip6t_entry *)
537 					(entry0 + pos + size);
538 				e->counters.pcnt = pos;
539 				pos += size;
540 			} else {
541 				int newpos = t->verdict;
542 
543 				if (strcmp(t->target.u.user.name,
544 					   XT_STANDARD_TARGET) == 0 &&
545 				    newpos >= 0) {
546 					if (newpos > newinfo->size -
547 						sizeof(struct ip6t_entry)) {
548 						duprintf("mark_source_chains: "
549 							"bad verdict (%i)\n",
550 								newpos);
551 						return 0;
552 					}
553 					/* This a jump; chase it. */
554 					duprintf("Jump rule %u -> %u\n",
555 						 pos, newpos);
556 				} else {
557 					/* ... this is a fallthru */
558 					newpos = pos + e->next_offset;
559 				}
560 				e = (struct ip6t_entry *)
561 					(entry0 + newpos);
562 				e->counters.pcnt = pos;
563 				pos = newpos;
564 			}
565 		}
566 		next:
567 		duprintf("Finished chain %u\n", hook);
568 	}
569 	return 1;
570 }
571 
572 static void cleanup_match(struct xt_entry_match *m, struct net *net)
573 {
574 	struct xt_mtdtor_param par;
575 
576 	par.net       = net;
577 	par.match     = m->u.kernel.match;
578 	par.matchinfo = m->data;
579 	par.family    = NFPROTO_IPV6;
580 	if (par.match->destroy != NULL)
581 		par.match->destroy(&par);
582 	module_put(par.match->me);
583 }
584 
585 static int
586 check_entry(const struct ip6t_entry *e, const char *name)
587 {
588 	const struct xt_entry_target *t;
589 
590 	if (!ip6_checkentry(&e->ipv6)) {
591 		duprintf("ip_tables: ip check failed %p %s.\n", e, name);
592 		return -EINVAL;
593 	}
594 
595 	if (e->target_offset + sizeof(struct xt_entry_target) >
596 	    e->next_offset)
597 		return -EINVAL;
598 
599 	t = ip6t_get_target_c(e);
600 	if (e->target_offset + t->u.target_size > e->next_offset)
601 		return -EINVAL;
602 
603 	return 0;
604 }
605 
606 static int check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
607 {
608 	const struct ip6t_ip6 *ipv6 = par->entryinfo;
609 	int ret;
610 
611 	par->match     = m->u.kernel.match;
612 	par->matchinfo = m->data;
613 
614 	ret = xt_check_match(par, m->u.match_size - sizeof(*m),
615 			     ipv6->proto, ipv6->invflags & IP6T_INV_PROTO);
616 	if (ret < 0) {
617 		duprintf("ip_tables: check failed for `%s'.\n",
618 			 par->match->name);
619 		return ret;
620 	}
621 	return 0;
622 }
623 
624 static int
625 find_check_match(struct xt_entry_match *m, struct xt_mtchk_param *par)
626 {
627 	struct xt_match *match;
628 	int ret;
629 
630 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
631 				      m->u.user.revision);
632 	if (IS_ERR(match)) {
633 		duprintf("find_check_match: `%s' not found\n", m->u.user.name);
634 		return PTR_ERR(match);
635 	}
636 	m->u.kernel.match = match;
637 
638 	ret = check_match(m, par);
639 	if (ret)
640 		goto err;
641 
642 	return 0;
643 err:
644 	module_put(m->u.kernel.match->me);
645 	return ret;
646 }
647 
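/*
 * Validate the entry's (already looked-up) target with xt_check_target(),
 * passing the rule's protocol, inversion flag and hook mask (e->comefrom)
 * so the target can refuse invalid placements.
 */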
648 static int check_target(struct ip6t_entry *e, struct net *net, const char *name)
649 {
650 	struct xt_entry_target *t = ip6t_get_target(e);
651 	struct xt_tgchk_param par = {
652 		.net       = net,
653 		.table     = name,
654 		.entryinfo = e,
655 		.target    = t->u.kernel.target,
656 		.targinfo  = t->data,
657 		.hook_mask = e->comefrom,
658 		.family    = NFPROTO_IPV6,
659 	};
660 	int ret;
661 
662 	t = ip6t_get_target(e);
663 	ret = xt_check_target(&par, t->u.target_size - sizeof(*t),
664 	      e->ipv6.proto, e->ipv6.invflags & IP6T_INV_PROTO);
665 	if (ret < 0) {
666 		duprintf("ip_tables: check failed for `%s'.\n",
667 			 t->u.kernel.target->name);
668 		return ret;
669 	}
670 	return 0;
671 }
672 
673 static int
674 find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
675 		 unsigned int size)
676 {
677 	struct xt_entry_target *t;
678 	struct xt_target *target;
679 	int ret;
680 	unsigned int j;
681 	struct xt_mtchk_param mtpar;
682 	struct xt_entry_match *ematch;
683 
684 	ret = check_entry(e, name);
685 	if (ret)
686 		return ret;
687 
688 	j = 0;
689 	mtpar.net	= net;
690 	mtpar.table     = name;
691 	mtpar.entryinfo = &e->ipv6;
692 	mtpar.hook_mask = e->comefrom;
693 	mtpar.family    = NFPROTO_IPV6;
694 	xt_ematch_foreach(ematch, e) {
695 		ret = find_check_match(ematch, &mtpar);
696 		if (ret != 0)
697 			goto cleanup_matches;
698 		++j;
699 	}
700 
701 	t = ip6t_get_target(e);
702 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
703 					t->u.user.revision);
704 	if (IS_ERR(target)) {
705 		duprintf("find_check_entry: `%s' not found\n", t->u.user.name);
706 		ret = PTR_ERR(target);
707 		goto cleanup_matches;
708 	}
709 	t->u.kernel.target = target;
710 
711 	ret = check_target(e, net, name);
712 	if (ret)
713 		goto err;
714 	return 0;
715  err:
716 	module_put(t->u.kernel.target->me);
717  cleanup_matches:
718 	xt_ematch_foreach(ematch, e) {
719 		if (j-- == 0)
720 			break;
721 		cleanup_match(ematch, net);
722 	}
723 	return ret;
724 }
725 
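/*
 * An underflow entry (the base chain policy) must be an unconditional
 * STANDARD target whose verdict decodes (via -verdict - 1) to NF_DROP or
 * NF_ACCEPT.
 */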
726 static bool check_underflow(const struct ip6t_entry *e)
727 {
728 	const struct xt_entry_target *t;
729 	unsigned int verdict;
730 
731 	if (!unconditional(&e->ipv6))
732 		return false;
733 	t = ip6t_get_target_c(e);
734 	if (strcmp(t->u.user.name, XT_STANDARD_TARGET) != 0)
735 		return false;
736 	verdict = ((struct xt_standard_target *)t)->verdict;
737 	verdict = -verdict - 1;
738 	return verdict == NF_DROP || verdict == NF_ACCEPT;
739 }
740 
741 static int
742 check_entry_size_and_hooks(struct ip6t_entry *e,
743 			   struct xt_table_info *newinfo,
744 			   const unsigned char *base,
745 			   const unsigned char *limit,
746 			   const unsigned int *hook_entries,
747 			   const unsigned int *underflows,
748 			   unsigned int valid_hooks)
749 {
750 	unsigned int h;
751 
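	/*
	 * The entry must be properly aligned and its header must lie fully
	 * inside the blob supplied from userspace; next_offset must leave
	 * room for at least the entry header plus a minimal target.
	 */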
752 	if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
753 	    (unsigned char *)e + sizeof(struct ip6t_entry) >= limit) {
754 		duprintf("Bad offset %p\n", e);
755 		return -EINVAL;
756 	}
757 
758 	if (e->next_offset
759 	    < sizeof(struct ip6t_entry) + sizeof(struct xt_entry_target)) {
760 		duprintf("checking: element %p size %u\n",
761 			 e, e->next_offset);
762 		return -EINVAL;
763 	}
764 
765 	/* Check hooks & underflows */
766 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
767 		if (!(valid_hooks & (1 << h)))
768 			continue;
769 		if ((unsigned char *)e - base == hook_entries[h])
770 			newinfo->hook_entry[h] = hook_entries[h];
771 		if ((unsigned char *)e - base == underflows[h]) {
772 			if (!check_underflow(e)) {
773 				pr_err("Underflows must be unconditional and "
774 				       "use the STANDARD target with "
775 				       "ACCEPT/DROP\n");
776 				return -EINVAL;
777 			}
778 			newinfo->underflow[h] = underflows[h];
779 		}
780 	}
781 
782 	/* Clear counters and comefrom */
783 	e->counters = ((struct xt_counters) { 0, 0 });
784 	e->comefrom = 0;
785 	return 0;
786 }
787 
788 static void cleanup_entry(struct ip6t_entry *e, struct net *net)
789 {
790 	struct xt_tgdtor_param par;
791 	struct xt_entry_target *t;
792 	struct xt_entry_match *ematch;
793 
794 	/* Cleanup all matches */
795 	xt_ematch_foreach(ematch, e)
796 		cleanup_match(ematch, net);
797 	t = ip6t_get_target(e);
798 
799 	par.net      = net;
800 	par.target   = t->u.kernel.target;
801 	par.targinfo = t->data;
802 	par.family   = NFPROTO_IPV6;
803 	if (par.target->destroy != NULL)
804 		par.target->destroy(&par);
805 	module_put(par.target->me);
806 }
807 
808 /* Checks and translates the user-supplied table segment (held in
809    newinfo) */
810 static int
811 translate_table(struct net *net, struct xt_table_info *newinfo, void *entry0,
812                 const struct ip6t_replace *repl)
813 {
814 	struct ip6t_entry *iter;
815 	unsigned int i;
816 	int ret = 0;
817 
818 	newinfo->size = repl->size;
819 	newinfo->number = repl->num_entries;
820 
821 	/* Init all hooks to impossible value. */
822 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
823 		newinfo->hook_entry[i] = 0xFFFFFFFF;
824 		newinfo->underflow[i] = 0xFFFFFFFF;
825 	}
826 
827 	duprintf("translate_table: size %u\n", newinfo->size);
828 	i = 0;
829 	/* Walk through entries, checking offsets. */
830 	xt_entry_foreach(iter, entry0, newinfo->size) {
831 		ret = check_entry_size_and_hooks(iter, newinfo, entry0,
832 						 entry0 + repl->size,
833 						 repl->hook_entry,
834 						 repl->underflow,
835 						 repl->valid_hooks);
836 		if (ret != 0)
837 			return ret;
838 		++i;
839 		if (strcmp(ip6t_get_target(iter)->u.user.name,
840 		    XT_ERROR_TARGET) == 0)
841 			++newinfo->stacksize;
842 	}
843 
844 	if (i != repl->num_entries) {
845 		duprintf("translate_table: %u not %u entries\n",
846 			 i, repl->num_entries);
847 		return -EINVAL;
848 	}
849 
850 	/* Check hooks all assigned */
851 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
852 		/* Only hooks which are valid */
853 		if (!(repl->valid_hooks & (1 << i)))
854 			continue;
855 		if (newinfo->hook_entry[i] == 0xFFFFFFFF) {
856 			duprintf("Invalid hook entry %u %u\n",
857 				 i, repl->hook_entry[i]);
858 			return -EINVAL;
859 		}
860 		if (newinfo->underflow[i] == 0xFFFFFFFF) {
861 			duprintf("Invalid underflow %u %u\n",
862 				 i, repl->underflow[i]);
863 			return -EINVAL;
864 		}
865 	}
866 
867 	if (!mark_source_chains(newinfo, repl->valid_hooks, entry0))
868 		return -ELOOP;
869 
870 	/* Finally, each sanity check must pass */
871 	i = 0;
872 	xt_entry_foreach(iter, entry0, newinfo->size) {
873 		ret = find_check_entry(iter, net, repl->name, repl->size);
874 		if (ret != 0)
875 			break;
876 		++i;
877 	}
878 
879 	if (ret != 0) {
880 		xt_entry_foreach(iter, entry0, newinfo->size) {
881 			if (i-- == 0)
882 				break;
883 			cleanup_entry(iter, net);
884 		}
885 		return ret;
886 	}
887 
888 	/* And one copy for every other CPU */
889 	for_each_possible_cpu(i) {
890 		if (newinfo->entries[i] && newinfo->entries[i] != entry0)
891 			memcpy(newinfo->entries[i], entry0, newinfo->size);
892 	}
893 
894 	return ret;
895 }
896 
897 static void
898 get_counters(const struct xt_table_info *t,
899 	     struct xt_counters counters[])
900 {
901 	struct ip6t_entry *iter;
902 	unsigned int cpu;
903 	unsigned int i;
904 
905 	for_each_possible_cpu(cpu) {
906 		seqcount_t *s = &per_cpu(xt_recseq, cpu);
907 
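		/*
		 * Read each entry's counters under the per-CPU xt_recseq
		 * sequence counter, retrying if a writer (ip6t_do_table or
		 * do_add_counters) updated them meanwhile, so the 64-bit
		 * values are snapshotted consistently.
		 */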
908 		i = 0;
909 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
910 			u64 bcnt, pcnt;
911 			unsigned int start;
912 
913 			do {
914 				start = read_seqcount_begin(s);
915 				bcnt = iter->counters.bcnt;
916 				pcnt = iter->counters.pcnt;
917 			} while (read_seqcount_retry(s, start));
918 
919 			ADD_COUNTER(counters[i], bcnt, pcnt);
920 			++i;
921 		}
922 	}
923 }
924 
925 static struct xt_counters *alloc_counters(const struct xt_table *table)
926 {
927 	unsigned int countersize;
928 	struct xt_counters *counters;
929 	const struct xt_table_info *private = table->private;
930 
931 	/* We need atomic snapshot of counters: rest doesn't change
932 	   (other than comefrom, which userspace doesn't care
933 	   about). */
934 	countersize = sizeof(struct xt_counters) * private->number;
935 	counters = vzalloc(countersize);
936 
937 	if (counters == NULL)
938 		return ERR_PTR(-ENOMEM);
939 
940 	get_counters(private, counters);
941 
942 	return counters;
943 }
944 
945 static int
946 copy_entries_to_user(unsigned int total_size,
947 		     const struct xt_table *table,
948 		     void __user *userptr)
949 {
950 	unsigned int off, num;
951 	const struct ip6t_entry *e;
952 	struct xt_counters *counters;
953 	const struct xt_table_info *private = table->private;
954 	int ret = 0;
955 	const void *loc_cpu_entry;
956 
957 	counters = alloc_counters(table);
958 	if (IS_ERR(counters))
959 		return PTR_ERR(counters);
960 
961 	/* choose the copy that is on our node/cpu, ...
962 	 * This choice is lazy (because current thread is
963 	 * allowed to migrate to another cpu)
964 	 */
965 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
966 	if (copy_to_user(userptr, loc_cpu_entry, total_size) != 0) {
967 		ret = -EFAULT;
968 		goto free_counters;
969 	}
970 
971 	/* FIXME: use iterator macros --RR */
972 	/* ... then go back and fix counters and names */
973 	for (off = 0, num = 0; off < total_size; off += e->next_offset, num++){
974 		unsigned int i;
975 		const struct xt_entry_match *m;
976 		const struct xt_entry_target *t;
977 
978 		e = (struct ip6t_entry *)(loc_cpu_entry + off);
979 		if (copy_to_user(userptr + off
980 				 + offsetof(struct ip6t_entry, counters),
981 				 &counters[num],
982 				 sizeof(counters[num])) != 0) {
983 			ret = -EFAULT;
984 			goto free_counters;
985 		}
986 
987 		for (i = sizeof(struct ip6t_entry);
988 		     i < e->target_offset;
989 		     i += m->u.match_size) {
990 			m = (void *)e + i;
991 
992 			if (copy_to_user(userptr + off + i
993 					 + offsetof(struct xt_entry_match,
994 						    u.user.name),
995 					 m->u.kernel.match->name,
996 					 strlen(m->u.kernel.match->name)+1)
997 			    != 0) {
998 				ret = -EFAULT;
999 				goto free_counters;
1000 			}
1001 		}
1002 
1003 		t = ip6t_get_target_c(e);
1004 		if (copy_to_user(userptr + off + e->target_offset
1005 				 + offsetof(struct xt_entry_target,
1006 					    u.user.name),
1007 				 t->u.kernel.target->name,
1008 				 strlen(t->u.kernel.target->name)+1) != 0) {
1009 			ret = -EFAULT;
1010 			goto free_counters;
1011 		}
1012 	}
1013 
1014  free_counters:
1015 	vfree(counters);
1016 	return ret;
1017 }
1018 
1019 #ifdef CONFIG_COMPAT
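/*
 * Standard-target verdicts that are jump offsets (> 0) must be shifted by
 * the accumulated size difference between compat and native entries
 * (xt_compat_calc_jump()); negative verdicts (the ACCEPT/DROP/RETURN
 * encodings) pass through unchanged.
 */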
1020 static void compat_standard_from_user(void *dst, const void *src)
1021 {
1022 	int v = *(compat_int_t *)src;
1023 
1024 	if (v > 0)
1025 		v += xt_compat_calc_jump(AF_INET6, v);
1026 	memcpy(dst, &v, sizeof(v));
1027 }
1028 
1029 static int compat_standard_to_user(void __user *dst, const void *src)
1030 {
1031 	compat_int_t cv = *(int *)src;
1032 
1033 	if (cv > 0)
1034 		cv -= xt_compat_calc_jump(AF_INET6, cv);
1035 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
1036 }
1037 
1038 static int compat_calc_entry(const struct ip6t_entry *e,
1039 			     const struct xt_table_info *info,
1040 			     const void *base, struct xt_table_info *newinfo)
1041 {
1042 	const struct xt_entry_match *ematch;
1043 	const struct xt_entry_target *t;
1044 	unsigned int entry_offset;
1045 	int off, i, ret;
1046 
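	/*
	 * off accumulates how much larger the native entry is than its compat
	 * counterpart (entry header plus every match and the target); the
	 * per-entry delta is recorded with xt_compat_add_offset() so jump
	 * targets can be translated later.
	 */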
1047 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1048 	entry_offset = (void *)e - base;
1049 	xt_ematch_foreach(ematch, e)
1050 		off += xt_compat_match_offset(ematch->u.kernel.match);
1051 	t = ip6t_get_target_c(e);
1052 	off += xt_compat_target_offset(t->u.kernel.target);
1053 	newinfo->size -= off;
1054 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1055 	if (ret)
1056 		return ret;
1057 
1058 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1059 		if (info->hook_entry[i] &&
1060 		    (e < (struct ip6t_entry *)(base + info->hook_entry[i])))
1061 			newinfo->hook_entry[i] -= off;
1062 		if (info->underflow[i] &&
1063 		    (e < (struct ip6t_entry *)(base + info->underflow[i])))
1064 			newinfo->underflow[i] -= off;
1065 	}
1066 	return 0;
1067 }
1068 
1069 static int compat_table_info(const struct xt_table_info *info,
1070 			     struct xt_table_info *newinfo)
1071 {
1072 	struct ip6t_entry *iter;
1073 	void *loc_cpu_entry;
1074 	int ret;
1075 
1076 	if (!newinfo || !info)
1077 		return -EINVAL;
1078 
1079 	/* we dont care about newinfo->entries[] */
1080 	memcpy(newinfo, info, offsetof(struct xt_table_info, entries));
1081 	newinfo->initial_entries = 0;
1082 	loc_cpu_entry = info->entries[raw_smp_processor_id()];
1083 	xt_compat_init_offsets(AF_INET6, info->number);
1084 	xt_entry_foreach(iter, loc_cpu_entry, info->size) {
1085 		ret = compat_calc_entry(iter, info, loc_cpu_entry, newinfo);
1086 		if (ret != 0)
1087 			return ret;
1088 	}
1089 	return 0;
1090 }
1091 #endif
1092 
1093 static int get_info(struct net *net, void __user *user,
1094                     const int *len, int compat)
1095 {
1096 	char name[XT_TABLE_MAXNAMELEN];
1097 	struct xt_table *t;
1098 	int ret;
1099 
1100 	if (*len != sizeof(struct ip6t_getinfo)) {
1101 		duprintf("length %u != %zu\n", *len,
1102 			 sizeof(struct ip6t_getinfo));
1103 		return -EINVAL;
1104 	}
1105 
1106 	if (copy_from_user(name, user, sizeof(name)) != 0)
1107 		return -EFAULT;
1108 
1109 	name[XT_TABLE_MAXNAMELEN-1] = '\0';
1110 #ifdef CONFIG_COMPAT
1111 	if (compat)
1112 		xt_compat_lock(AF_INET6);
1113 #endif
1114 	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1115 				    "ip6table_%s", name);
1116 	if (t && !IS_ERR(t)) {
1117 		struct ip6t_getinfo info;
1118 		const struct xt_table_info *private = t->private;
1119 #ifdef CONFIG_COMPAT
1120 		struct xt_table_info tmp;
1121 
1122 		if (compat) {
1123 			ret = compat_table_info(private, &tmp);
1124 			xt_compat_flush_offsets(AF_INET6);
1125 			private = &tmp;
1126 		}
1127 #endif
1128 		memset(&info, 0, sizeof(info));
1129 		info.valid_hooks = t->valid_hooks;
1130 		memcpy(info.hook_entry, private->hook_entry,
1131 		       sizeof(info.hook_entry));
1132 		memcpy(info.underflow, private->underflow,
1133 		       sizeof(info.underflow));
1134 		info.num_entries = private->number;
1135 		info.size = private->size;
1136 		strcpy(info.name, name);
1137 
1138 		if (copy_to_user(user, &info, *len) != 0)
1139 			ret = -EFAULT;
1140 		else
1141 			ret = 0;
1142 
1143 		xt_table_unlock(t);
1144 		module_put(t->me);
1145 	} else
1146 		ret = t ? PTR_ERR(t) : -ENOENT;
1147 #ifdef CONFIG_COMPAT
1148 	if (compat)
1149 		xt_compat_unlock(AF_INET6);
1150 #endif
1151 	return ret;
1152 }
1153 
1154 static int
1155 get_entries(struct net *net, struct ip6t_get_entries __user *uptr,
1156             const int *len)
1157 {
1158 	int ret;
1159 	struct ip6t_get_entries get;
1160 	struct xt_table *t;
1161 
1162 	if (*len < sizeof(get)) {
1163 		duprintf("get_entries: %u < %zu\n", *len, sizeof(get));
1164 		return -EINVAL;
1165 	}
1166 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1167 		return -EFAULT;
1168 	if (*len != sizeof(struct ip6t_get_entries) + get.size) {
1169 		duprintf("get_entries: %u != %zu\n",
1170 			 *len, sizeof(get) + get.size);
1171 		return -EINVAL;
1172 	}
1173 
1174 	t = xt_find_table_lock(net, AF_INET6, get.name);
1175 	if (t && !IS_ERR(t)) {
1176 		struct xt_table_info *private = t->private;
1177 		duprintf("t->private->number = %u\n", private->number);
1178 		if (get.size == private->size)
1179 			ret = copy_entries_to_user(private->size,
1180 						   t, uptr->entrytable);
1181 		else {
1182 			duprintf("get_entries: I've got %u not %u!\n",
1183 				 private->size, get.size);
1184 			ret = -EAGAIN;
1185 		}
1186 		module_put(t->me);
1187 		xt_table_unlock(t);
1188 	} else
1189 		ret = t ? PTR_ERR(t) : -ENOENT;
1190 
1191 	return ret;
1192 }
1193 
1194 static int
1195 __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
1196 	     struct xt_table_info *newinfo, unsigned int num_counters,
1197 	     void __user *counters_ptr)
1198 {
1199 	int ret;
1200 	struct xt_table *t;
1201 	struct xt_table_info *oldinfo;
1202 	struct xt_counters *counters;
1203 	const void *loc_cpu_old_entry;
1204 	struct ip6t_entry *iter;
1205 
1206 	ret = 0;
1207 	counters = vzalloc(num_counters * sizeof(struct xt_counters));
1208 	if (!counters) {
1209 		ret = -ENOMEM;
1210 		goto out;
1211 	}
1212 
1213 	t = try_then_request_module(xt_find_table_lock(net, AF_INET6, name),
1214 				    "ip6table_%s", name);
1215 	if (!t || IS_ERR(t)) {
1216 		ret = t ? PTR_ERR(t) : -ENOENT;
1217 		goto free_newinfo_counters_untrans;
1218 	}
1219 
1220 	/* You lied! */
1221 	if (valid_hooks != t->valid_hooks) {
1222 		duprintf("Valid hook crap: %08X vs %08X\n",
1223 			 valid_hooks, t->valid_hooks);
1224 		ret = -EINVAL;
1225 		goto put_module;
1226 	}
1227 
1228 	oldinfo = xt_replace_table(t, num_counters, newinfo, &ret);
1229 	if (!oldinfo)
1230 		goto put_module;
1231 
1232 	/* Update module usage count based on number of rules */
1233 	duprintf("do_replace: oldnum=%u, initnum=%u, newnum=%u\n",
1234 		oldinfo->number, oldinfo->initial_entries, newinfo->number);
1235 	if ((oldinfo->number > oldinfo->initial_entries) ||
1236 	    (newinfo->number <= oldinfo->initial_entries))
1237 		module_put(t->me);
1238 	if ((oldinfo->number > oldinfo->initial_entries) &&
1239 	    (newinfo->number <= oldinfo->initial_entries))
1240 		module_put(t->me);
1241 
1242 	/* Get the old counters, and synchronize with replace */
1243 	get_counters(oldinfo, counters);
1244 
1245 	/* Decrease module usage counts and free resource */
1246 	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];
1247 	xt_entry_foreach(iter, loc_cpu_old_entry, oldinfo->size)
1248 		cleanup_entry(iter, net);
1249 
1250 	xt_free_table_info(oldinfo);
1251 	if (copy_to_user(counters_ptr, counters,
1252 			 sizeof(struct xt_counters) * num_counters) != 0)
1253 		ret = -EFAULT;
1254 	vfree(counters);
1255 	xt_table_unlock(t);
1256 	return ret;
1257 
1258  put_module:
1259 	module_put(t->me);
1260 	xt_table_unlock(t);
1261  free_newinfo_counters_untrans:
1262 	vfree(counters);
1263  out:
1264 	return ret;
1265 }
1266 
1267 static int
1268 do_replace(struct net *net, const void __user *user, unsigned int len)
1269 {
1270 	int ret;
1271 	struct ip6t_replace tmp;
1272 	struct xt_table_info *newinfo;
1273 	void *loc_cpu_entry;
1274 	struct ip6t_entry *iter;
1275 
1276 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1277 		return -EFAULT;
1278 
1279 	/* overflow check */
1280 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1281 		return -ENOMEM;
1282 	tmp.name[sizeof(tmp.name)-1] = 0;
1283 
1284 	newinfo = xt_alloc_table_info(tmp.size);
1285 	if (!newinfo)
1286 		return -ENOMEM;
1287 
1288 	/* choose the copy that is on our node/cpu */
1289 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1290 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1291 			   tmp.size) != 0) {
1292 		ret = -EFAULT;
1293 		goto free_newinfo;
1294 	}
1295 
1296 	ret = translate_table(net, newinfo, loc_cpu_entry, &tmp);
1297 	if (ret != 0)
1298 		goto free_newinfo;
1299 
1300 	duprintf("ip_tables: Translated table\n");
1301 
1302 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1303 			   tmp.num_counters, tmp.counters);
1304 	if (ret)
1305 		goto free_newinfo_untrans;
1306 	return 0;
1307 
1308  free_newinfo_untrans:
1309 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1310 		cleanup_entry(iter, net);
1311  free_newinfo:
1312 	xt_free_table_info(newinfo);
1313 	return ret;
1314 }
1315 
1316 static int
1317 do_add_counters(struct net *net, const void __user *user, unsigned int len,
1318 		int compat)
1319 {
1320 	unsigned int i, curcpu;
1321 	struct xt_counters_info tmp;
1322 	struct xt_counters *paddc;
1323 	unsigned int num_counters;
1324 	char *name;
1325 	int size;
1326 	void *ptmp;
1327 	struct xt_table *t;
1328 	const struct xt_table_info *private;
1329 	int ret = 0;
1330 	const void *loc_cpu_entry;
1331 	struct ip6t_entry *iter;
1332 	unsigned int addend;
1333 #ifdef CONFIG_COMPAT
1334 	struct compat_xt_counters_info compat_tmp;
1335 
1336 	if (compat) {
1337 		ptmp = &compat_tmp;
1338 		size = sizeof(struct compat_xt_counters_info);
1339 	} else
1340 #endif
1341 	{
1342 		ptmp = &tmp;
1343 		size = sizeof(struct xt_counters_info);
1344 	}
1345 
1346 	if (copy_from_user(ptmp, user, size) != 0)
1347 		return -EFAULT;
1348 
1349 #ifdef CONFIG_COMPAT
1350 	if (compat) {
1351 		num_counters = compat_tmp.num_counters;
1352 		name = compat_tmp.name;
1353 	} else
1354 #endif
1355 	{
1356 		num_counters = tmp.num_counters;
1357 		name = tmp.name;
1358 	}
1359 
1360 	if (len != size + num_counters * sizeof(struct xt_counters))
1361 		return -EINVAL;
1362 
1363 	paddc = vmalloc(len - size);
1364 	if (!paddc)
1365 		return -ENOMEM;
1366 
1367 	if (copy_from_user(paddc, user + size, len - size) != 0) {
1368 		ret = -EFAULT;
1369 		goto free;
1370 	}
1371 
1372 	t = xt_find_table_lock(net, AF_INET6, name);
1373 	if (!t || IS_ERR(t)) {
1374 		ret = t ? PTR_ERR(t) : -ENOENT;
1375 		goto free;
1376 	}
1377 
1378 
1379 	local_bh_disable();
1380 	private = t->private;
1381 	if (private->number != num_counters) {
1382 		ret = -EINVAL;
1383 		goto unlock_up_free;
1384 	}
1385 
1386 	i = 0;
1387 	/* Choose the copy that is on our node */
1388 	curcpu = smp_processor_id();
1389 	addend = xt_write_recseq_begin();
1390 	loc_cpu_entry = private->entries[curcpu];
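	/*
	 * Add the userspace-supplied deltas to this CPU's copy only; readers
	 * (get_counters()) sum over all possible CPUs, so the totals remain
	 * correct.
	 */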
1391 	xt_entry_foreach(iter, loc_cpu_entry, private->size) {
1392 		ADD_COUNTER(iter->counters, paddc[i].bcnt, paddc[i].pcnt);
1393 		++i;
1394 	}
1395 	xt_write_recseq_end(addend);
1396 
1397  unlock_up_free:
1398 	local_bh_enable();
1399 	xt_table_unlock(t);
1400 	module_put(t->me);
1401  free:
1402 	vfree(paddc);
1403 
1404 	return ret;
1405 }
1406 
1407 #ifdef CONFIG_COMPAT
1408 struct compat_ip6t_replace {
1409 	char			name[XT_TABLE_MAXNAMELEN];
1410 	u32			valid_hooks;
1411 	u32			num_entries;
1412 	u32			size;
1413 	u32			hook_entry[NF_INET_NUMHOOKS];
1414 	u32			underflow[NF_INET_NUMHOOKS];
1415 	u32			num_counters;
1416 	compat_uptr_t		counters;	/* struct xt_counters * */
1417 	struct compat_ip6t_entry entries[0];
1418 };
1419 
1420 static int
1421 compat_copy_entry_to_user(struct ip6t_entry *e, void __user **dstptr,
1422 			  unsigned int *size, struct xt_counters *counters,
1423 			  unsigned int i)
1424 {
1425 	struct xt_entry_target *t;
1426 	struct compat_ip6t_entry __user *ce;
1427 	u_int16_t target_offset, next_offset;
1428 	compat_uint_t origsize;
1429 	const struct xt_entry_match *ematch;
1430 	int ret = 0;
1431 
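	/*
	 * *size shrinks as each structure is converted to its compat layout;
	 * (origsize - *size) is the running number of bytes saved and is used
	 * to rebase target_offset and next_offset for the compat entry.
	 */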
1432 	origsize = *size;
1433 	ce = (struct compat_ip6t_entry __user *)*dstptr;
1434 	if (copy_to_user(ce, e, sizeof(struct ip6t_entry)) != 0 ||
1435 	    copy_to_user(&ce->counters, &counters[i],
1436 	    sizeof(counters[i])) != 0)
1437 		return -EFAULT;
1438 
1439 	*dstptr += sizeof(struct compat_ip6t_entry);
1440 	*size -= sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1441 
1442 	xt_ematch_foreach(ematch, e) {
1443 		ret = xt_compat_match_to_user(ematch, dstptr, size);
1444 		if (ret != 0)
1445 			return ret;
1446 	}
1447 	target_offset = e->target_offset - (origsize - *size);
1448 	t = ip6t_get_target(e);
1449 	ret = xt_compat_target_to_user(t, dstptr, size);
1450 	if (ret)
1451 		return ret;
1452 	next_offset = e->next_offset - (origsize - *size);
1453 	if (put_user(target_offset, &ce->target_offset) != 0 ||
1454 	    put_user(next_offset, &ce->next_offset) != 0)
1455 		return -EFAULT;
1456 	return 0;
1457 }
1458 
1459 static int
1460 compat_find_calc_match(struct xt_entry_match *m,
1461 		       const char *name,
1462 		       const struct ip6t_ip6 *ipv6,
1463 		       unsigned int hookmask,
1464 		       int *size)
1465 {
1466 	struct xt_match *match;
1467 
1468 	match = xt_request_find_match(NFPROTO_IPV6, m->u.user.name,
1469 				      m->u.user.revision);
1470 	if (IS_ERR(match)) {
1471 		duprintf("compat_check_calc_match: `%s' not found\n",
1472 			 m->u.user.name);
1473 		return PTR_ERR(match);
1474 	}
1475 	m->u.kernel.match = match;
1476 	*size += xt_compat_match_offset(match);
1477 	return 0;
1478 }
1479 
1480 static void compat_release_entry(struct compat_ip6t_entry *e)
1481 {
1482 	struct xt_entry_target *t;
1483 	struct xt_entry_match *ematch;
1484 
1485 	/* Cleanup all matches */
1486 	xt_ematch_foreach(ematch, e)
1487 		module_put(ematch->u.kernel.match->me);
1488 	t = compat_ip6t_get_target(e);
1489 	module_put(t->u.kernel.target->me);
1490 }
1491 
1492 static int
1493 check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
1494 				  struct xt_table_info *newinfo,
1495 				  unsigned int *size,
1496 				  const unsigned char *base,
1497 				  const unsigned char *limit,
1498 				  const unsigned int *hook_entries,
1499 				  const unsigned int *underflows,
1500 				  const char *name)
1501 {
1502 	struct xt_entry_match *ematch;
1503 	struct xt_entry_target *t;
1504 	struct xt_target *target;
1505 	unsigned int entry_offset;
1506 	unsigned int j;
1507 	int ret, off, h;
1508 
1509 	duprintf("check_compat_entry_size_and_hooks %p\n", e);
1510 	if ((unsigned long)e % __alignof__(struct compat_ip6t_entry) != 0 ||
1511 	    (unsigned char *)e + sizeof(struct compat_ip6t_entry) >= limit) {
1512 		duprintf("Bad offset %p, limit = %p\n", e, limit);
1513 		return -EINVAL;
1514 	}
1515 
1516 	if (e->next_offset < sizeof(struct compat_ip6t_entry) +
1517 			     sizeof(struct compat_xt_entry_target)) {
1518 		duprintf("checking: element %p size %u\n",
1519 			 e, e->next_offset);
1520 		return -EINVAL;
1521 	}
1522 
1523 	/* For purposes of check_entry casting the compat entry is fine */
1524 	ret = check_entry((struct ip6t_entry *)e, name);
1525 	if (ret)
1526 		return ret;
1527 
1528 	off = sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1529 	entry_offset = (void *)e - (void *)base;
1530 	j = 0;
1531 	xt_ematch_foreach(ematch, e) {
1532 		ret = compat_find_calc_match(ematch, name,
1533 					     &e->ipv6, e->comefrom, &off);
1534 		if (ret != 0)
1535 			goto release_matches;
1536 		++j;
1537 	}
1538 
1539 	t = compat_ip6t_get_target(e);
1540 	target = xt_request_find_target(NFPROTO_IPV6, t->u.user.name,
1541 					t->u.user.revision);
1542 	if (IS_ERR(target)) {
1543 		duprintf("check_compat_entry_size_and_hooks: `%s' not found\n",
1544 			 t->u.user.name);
1545 		ret = PTR_ERR(target);
1546 		goto release_matches;
1547 	}
1548 	t->u.kernel.target = target;
1549 
1550 	off += xt_compat_target_offset(target);
1551 	*size += off;
1552 	ret = xt_compat_add_offset(AF_INET6, entry_offset, off);
1553 	if (ret)
1554 		goto out;
1555 
1556 	/* Check hooks & underflows */
1557 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1558 		if ((unsigned char *)e - base == hook_entries[h])
1559 			newinfo->hook_entry[h] = hook_entries[h];
1560 		if ((unsigned char *)e - base == underflows[h])
1561 			newinfo->underflow[h] = underflows[h];
1562 	}
1563 
1564 	/* Clear counters and comefrom */
1565 	memset(&e->counters, 0, sizeof(e->counters));
1566 	e->comefrom = 0;
1567 	return 0;
1568 
1569 out:
1570 	module_put(t->u.kernel.target->me);
1571 release_matches:
1572 	xt_ematch_foreach(ematch, e) {
1573 		if (j-- == 0)
1574 			break;
1575 		module_put(ematch->u.kernel.match->me);
1576 	}
1577 	return ret;
1578 }
1579 
1580 static int
1581 compat_copy_entry_from_user(struct compat_ip6t_entry *e, void **dstptr,
1582 			    unsigned int *size, const char *name,
1583 			    struct xt_table_info *newinfo, unsigned char *base)
1584 {
1585 	struct xt_entry_target *t;
1586 	struct ip6t_entry *de;
1587 	unsigned int origsize;
1588 	int ret, h;
1589 	struct xt_entry_match *ematch;
1590 
1591 	ret = 0;
1592 	origsize = *size;
1593 	de = (struct ip6t_entry *)*dstptr;
1594 	memcpy(de, e, sizeof(struct ip6t_entry));
1595 	memcpy(&de->counters, &e->counters, sizeof(e->counters));
1596 
1597 	*dstptr += sizeof(struct ip6t_entry);
1598 	*size += sizeof(struct ip6t_entry) - sizeof(struct compat_ip6t_entry);
1599 
1600 	xt_ematch_foreach(ematch, e) {
1601 		ret = xt_compat_match_from_user(ematch, dstptr, size);
1602 		if (ret != 0)
1603 			return ret;
1604 	}
1605 	de->target_offset = e->target_offset - (origsize - *size);
1606 	t = compat_ip6t_get_target(e);
1607 	xt_compat_target_from_user(t, dstptr, size);
1608 
1609 	de->next_offset = e->next_offset - (origsize - *size);
1610 	for (h = 0; h < NF_INET_NUMHOOKS; h++) {
1611 		if ((unsigned char *)de - base < newinfo->hook_entry[h])
1612 			newinfo->hook_entry[h] -= origsize - *size;
1613 		if ((unsigned char *)de - base < newinfo->underflow[h])
1614 			newinfo->underflow[h] -= origsize - *size;
1615 	}
1616 	return ret;
1617 }
1618 
1619 static int compat_check_entry(struct ip6t_entry *e, struct net *net,
1620 			      const char *name)
1621 {
1622 	unsigned int j;
1623 	int ret = 0;
1624 	struct xt_mtchk_param mtpar;
1625 	struct xt_entry_match *ematch;
1626 
1627 	j = 0;
1628 	mtpar.net	= net;
1629 	mtpar.table     = name;
1630 	mtpar.entryinfo = &e->ipv6;
1631 	mtpar.hook_mask = e->comefrom;
1632 	mtpar.family    = NFPROTO_IPV6;
1633 	xt_ematch_foreach(ematch, e) {
1634 		ret = check_match(ematch, &mtpar);
1635 		if (ret != 0)
1636 			goto cleanup_matches;
1637 		++j;
1638 	}
1639 
1640 	ret = check_target(e, net, name);
1641 	if (ret)
1642 		goto cleanup_matches;
1643 	return 0;
1644 
1645  cleanup_matches:
1646 	xt_ematch_foreach(ematch, e) {
1647 		if (j-- == 0)
1648 			break;
1649 		cleanup_match(ematch, net);
1650 	}
1651 	return ret;
1652 }
1653 
1654 static int
1655 translate_compat_table(struct net *net,
1656 		       const char *name,
1657 		       unsigned int valid_hooks,
1658 		       struct xt_table_info **pinfo,
1659 		       void **pentry0,
1660 		       unsigned int total_size,
1661 		       unsigned int number,
1662 		       unsigned int *hook_entries,
1663 		       unsigned int *underflows)
1664 {
1665 	unsigned int i, j;
1666 	struct xt_table_info *newinfo, *info;
1667 	void *pos, *entry0, *entry1;
1668 	struct compat_ip6t_entry *iter0;
1669 	struct ip6t_entry *iter1;
1670 	unsigned int size;
1671 	int ret = 0;
1672 
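	/*
	 * Two passes: first walk the compat blob to validate sizes and record
	 * per-entry offset deltas, then copy everything into a freshly
	 * allocated native table, and finally run the usual checks
	 * (mark_source_chains, compat_check_entry) on the native copy.
	 */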
1673 	info = *pinfo;
1674 	entry0 = *pentry0;
1675 	size = total_size;
1676 	info->number = number;
1677 
1678 	/* Init all hooks to impossible value. */
1679 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1680 		info->hook_entry[i] = 0xFFFFFFFF;
1681 		info->underflow[i] = 0xFFFFFFFF;
1682 	}
1683 
1684 	duprintf("translate_compat_table: size %u\n", info->size);
1685 	j = 0;
1686 	xt_compat_lock(AF_INET6);
1687 	xt_compat_init_offsets(AF_INET6, number);
1688 	/* Walk through entries, checking offsets. */
1689 	xt_entry_foreach(iter0, entry0, total_size) {
1690 		ret = check_compat_entry_size_and_hooks(iter0, info, &size,
1691 							entry0,
1692 							entry0 + total_size,
1693 							hook_entries,
1694 							underflows,
1695 							name);
1696 		if (ret != 0)
1697 			goto out_unlock;
1698 		++j;
1699 	}
1700 
1701 	ret = -EINVAL;
1702 	if (j != number) {
1703 		duprintf("translate_compat_table: %u not %u entries\n",
1704 			 j, number);
1705 		goto out_unlock;
1706 	}
1707 
1708 	/* Check hooks all assigned */
1709 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1710 		/* Only hooks which are valid */
1711 		if (!(valid_hooks & (1 << i)))
1712 			continue;
1713 		if (info->hook_entry[i] == 0xFFFFFFFF) {
1714 			duprintf("Invalid hook entry %u %u\n",
1715 				 i, hook_entries[i]);
1716 			goto out_unlock;
1717 		}
1718 		if (info->underflow[i] == 0xFFFFFFFF) {
1719 			duprintf("Invalid underflow %u %u\n",
1720 				 i, underflows[i]);
1721 			goto out_unlock;
1722 		}
1723 	}
1724 
1725 	ret = -ENOMEM;
1726 	newinfo = xt_alloc_table_info(size);
1727 	if (!newinfo)
1728 		goto out_unlock;
1729 
1730 	newinfo->number = number;
1731 	for (i = 0; i < NF_INET_NUMHOOKS; i++) {
1732 		newinfo->hook_entry[i] = info->hook_entry[i];
1733 		newinfo->underflow[i] = info->underflow[i];
1734 	}
1735 	entry1 = newinfo->entries[raw_smp_processor_id()];
1736 	pos = entry1;
1737 	size = total_size;
1738 	xt_entry_foreach(iter0, entry0, total_size) {
1739 		ret = compat_copy_entry_from_user(iter0, &pos, &size,
1740 						  name, newinfo, entry1);
1741 		if (ret != 0)
1742 			break;
1743 	}
1744 	xt_compat_flush_offsets(AF_INET6);
1745 	xt_compat_unlock(AF_INET6);
1746 	if (ret)
1747 		goto free_newinfo;
1748 
1749 	ret = -ELOOP;
1750 	if (!mark_source_chains(newinfo, valid_hooks, entry1))
1751 		goto free_newinfo;
1752 
1753 	i = 0;
1754 	xt_entry_foreach(iter1, entry1, newinfo->size) {
1755 		ret = compat_check_entry(iter1, net, name);
1756 		if (ret != 0)
1757 			break;
1758 		++i;
1759 		if (strcmp(ip6t_get_target(iter1)->u.user.name,
1760 		    XT_ERROR_TARGET) == 0)
1761 			++newinfo->stacksize;
1762 	}
1763 	if (ret) {
1764 		/*
1765 		 * The first i matches need cleanup_entry (calls ->destroy)
1766 		 * because they had called ->check already. The other j-i
1767 		 * entries need only release.
1768 		 */
1769 		int skip = i;
1770 		j -= i;
1771 		xt_entry_foreach(iter0, entry0, newinfo->size) {
1772 			if (skip-- > 0)
1773 				continue;
1774 			if (j-- == 0)
1775 				break;
1776 			compat_release_entry(iter0);
1777 		}
1778 		xt_entry_foreach(iter1, entry1, newinfo->size) {
1779 			if (i-- == 0)
1780 				break;
1781 			cleanup_entry(iter1, net);
1782 		}
1783 		xt_free_table_info(newinfo);
1784 		return ret;
1785 	}
1786 
1787 	/* And one copy for every other CPU */
1788 	for_each_possible_cpu(i)
1789 		if (newinfo->entries[i] && newinfo->entries[i] != entry1)
1790 			memcpy(newinfo->entries[i], entry1, newinfo->size);
1791 
1792 	*pinfo = newinfo;
1793 	*pentry0 = entry1;
1794 	xt_free_table_info(info);
1795 	return 0;
1796 
1797 free_newinfo:
1798 	xt_free_table_info(newinfo);
1799 out:
1800 	xt_entry_foreach(iter0, entry0, total_size) {
1801 		if (j-- == 0)
1802 			break;
1803 		compat_release_entry(iter0);
1804 	}
1805 	return ret;
1806 out_unlock:
1807 	xt_compat_flush_offsets(AF_INET6);
1808 	xt_compat_unlock(AF_INET6);
1809 	goto out;
1810 }
1811 
1812 static int
1813 compat_do_replace(struct net *net, void __user *user, unsigned int len)
1814 {
1815 	int ret;
1816 	struct compat_ip6t_replace tmp;
1817 	struct xt_table_info *newinfo;
1818 	void *loc_cpu_entry;
1819 	struct ip6t_entry *iter;
1820 
1821 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1822 		return -EFAULT;
1823 
1824 	/* overflow check */
1825 	if (tmp.size >= INT_MAX / num_possible_cpus())
1826 		return -ENOMEM;
1827 	if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
1828 		return -ENOMEM;
1829 	tmp.name[sizeof(tmp.name)-1] = 0;
1830 
1831 	newinfo = xt_alloc_table_info(tmp.size);
1832 	if (!newinfo)
1833 		return -ENOMEM;
1834 
1835 	/* choose the copy that is on our node/cpu */
1836 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
1837 	if (copy_from_user(loc_cpu_entry, user + sizeof(tmp),
1838 			   tmp.size) != 0) {
1839 		ret = -EFAULT;
1840 		goto free_newinfo;
1841 	}
1842 
1843 	ret = translate_compat_table(net, tmp.name, tmp.valid_hooks,
1844 				     &newinfo, &loc_cpu_entry, tmp.size,
1845 				     tmp.num_entries, tmp.hook_entry,
1846 				     tmp.underflow);
1847 	if (ret != 0)
1848 		goto free_newinfo;
1849 
1850 	duprintf("compat_do_replace: Translated table\n");
1851 
1852 	ret = __do_replace(net, tmp.name, tmp.valid_hooks, newinfo,
1853 			   tmp.num_counters, compat_ptr(tmp.counters));
1854 	if (ret)
1855 		goto free_newinfo_untrans;
1856 	return 0;
1857 
1858  free_newinfo_untrans:
1859 	xt_entry_foreach(iter, loc_cpu_entry, newinfo->size)
1860 		cleanup_entry(iter, net);
1861  free_newinfo:
1862 	xt_free_table_info(newinfo);
1863 	return ret;
1864 }
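
/*
 * Layout sketch (illustrative, matching the copies performed above): a
 * 32-bit ip6tables binary passes IP6T_SO_SET_REPLACE one blob consisting
 * of a struct compat_ip6t_replace header immediately followed by
 * tmp.size bytes of compat_ip6t_entry records:
 *
 *	struct compat_ip6t_replace *hdr = blob;
 *	void *entries = blob + sizeof(*hdr);	(hdr->size bytes long)
 *
 * which is why compat_do_replace() first copies sizeof(tmp) and then
 * reads tmp.size bytes starting at user + sizeof(tmp).
 */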
1865 
1866 static int
1867 compat_do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user,
1868 		       unsigned int len)
1869 {
1870 	int ret;
1871 
1872 	if (!capable(CAP_NET_ADMIN))
1873 		return -EPERM;
1874 
1875 	switch (cmd) {
1876 	case IP6T_SO_SET_REPLACE:
1877 		ret = compat_do_replace(sock_net(sk), user, len);
1878 		break;
1879 
1880 	case IP6T_SO_SET_ADD_COUNTERS:
1881 		ret = do_add_counters(sock_net(sk), user, len, 1);
1882 		break;
1883 
1884 	default:
1885 		duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
1886 		ret = -EINVAL;
1887 	}
1888 
1889 	return ret;
1890 }
1891 
1892 struct compat_ip6t_get_entries {
1893 	char name[XT_TABLE_MAXNAMELEN];
1894 	compat_uint_t size;
1895 	struct compat_ip6t_entry entrytable[0];
1896 };
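
/*
 * Sizing sketch (illustrative): entrytable[] is a zero-length array, so a
 * 32-bit IP6T_SO_GET_ENTRIES caller is expected to size its buffer and
 * *len as
 *
 *	*len == sizeof(struct compat_ip6t_get_entries) + get.size
 *
 * where get.size is the table size previously reported by
 * IP6T_SO_GET_INFO; compat_get_entries() below rejects any other length.
 */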
1897 
1898 static int
1899 compat_copy_entries_to_user(unsigned int total_size, struct xt_table *table,
1900 			    void __user *userptr)
1901 {
1902 	struct xt_counters *counters;
1903 	const struct xt_table_info *private = table->private;
1904 	void __user *pos;
1905 	unsigned int size;
1906 	int ret = 0;
1907 	const void *loc_cpu_entry;
1908 	unsigned int i = 0;
1909 	struct ip6t_entry *iter;
1910 
1911 	counters = alloc_counters(table);
1912 	if (IS_ERR(counters))
1913 		return PTR_ERR(counters);
1914 
1915 	/* choose the copy that is on our node/cpu; this choice is lazy
1916 	 * (the current thread may migrate to another cpu), which is fine
1917 	 * because every cpu holds an identical copy of the rules.
1918 	 */
1919 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
1920 	pos = userptr;
1921 	size = total_size;
1922 	xt_entry_foreach(iter, loc_cpu_entry, total_size) {
1923 		ret = compat_copy_entry_to_user(iter, &pos,
1924 						&size, counters, i++);
1925 		if (ret != 0)
1926 			break;
1927 	}
1928 
1929 	vfree(counters);
1930 	return ret;
1931 }
1932 
1933 static int
1934 compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
1935 		   int *len)
1936 {
1937 	int ret;
1938 	struct compat_ip6t_get_entries get;
1939 	struct xt_table *t;
1940 
1941 	if (*len < sizeof(get)) {
1942 		duprintf("compat_get_entries: %u < %zu\n", *len, sizeof(get));
1943 		return -EINVAL;
1944 	}
1945 
1946 	if (copy_from_user(&get, uptr, sizeof(get)) != 0)
1947 		return -EFAULT;
1948 
1949 	if (*len != sizeof(struct compat_ip6t_get_entries) + get.size) {
1950 		duprintf("compat_get_entries: %u != %zu\n",
1951 			 *len, sizeof(get) + get.size);
1952 		return -EINVAL;
1953 	}
1954 
1955 	xt_compat_lock(AF_INET6);
1956 	t = xt_find_table_lock(net, AF_INET6, get.name);
1957 	if (t && !IS_ERR(t)) {
1958 		const struct xt_table_info *private = t->private;
1959 		struct xt_table_info info;
1960 		duprintf("t->private->number = %u\n", private->number);
1961 		ret = compat_table_info(private, &info);
1962 		if (!ret && get.size == info.size) {
1963 			ret = compat_copy_entries_to_user(private->size,
1964 							  t, uptr->entrytable);
1965 		} else if (!ret) {
1966 			duprintf("compat_get_entries: I've got %u not %u!\n",
1967 				 private->size, get.size);
1968 			ret = -EAGAIN;
1969 		}
1970 		xt_compat_flush_offsets(AF_INET6);
1971 		module_put(t->me);
1972 		xt_table_unlock(t);
1973 	} else
1974 		ret = t ? PTR_ERR(t) : -ENOENT;
1975 
1976 	xt_compat_unlock(AF_INET6);
1977 	return ret;
1978 }
1979 
1980 static int do_ip6t_get_ctl(struct sock *, int, void __user *, int *);
1981 
1982 static int
1983 compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1984 {
1985 	int ret;
1986 
1987 	if (!capable(CAP_NET_ADMIN))
1988 		return -EPERM;
1989 
1990 	switch (cmd) {
1991 	case IP6T_SO_GET_INFO:
1992 		ret = get_info(sock_net(sk), user, len, 1);
1993 		break;
1994 	case IP6T_SO_GET_ENTRIES:
1995 		ret = compat_get_entries(sock_net(sk), user, len);
1996 		break;
1997 	default:
1998 		ret = do_ip6t_get_ctl(sk, cmd, user, len);
1999 	}
2000 	return ret;
2001 }
2002 #endif
2003 
2004 static int
2005 do_ip6t_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len)
2006 {
2007 	int ret;
2008 
2009 	if (!capable(CAP_NET_ADMIN))
2010 		return -EPERM;
2011 
2012 	switch (cmd) {
2013 	case IP6T_SO_SET_REPLACE:
2014 		ret = do_replace(sock_net(sk), user, len);
2015 		break;
2016 
2017 	case IP6T_SO_SET_ADD_COUNTERS:
2018 		ret = do_add_counters(sock_net(sk), user, len, 0);
2019 		break;
2020 
2021 	default:
2022 		duprintf("do_ip6t_set_ctl:  unknown request %i\n", cmd);
2023 		ret = -EINVAL;
2024 	}
2025 
2026 	return ret;
2027 }
2028 
2029 static int
2030 do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
2031 {
2032 	int ret;
2033 
2034 	if (!capable(CAP_NET_ADMIN))
2035 		return -EPERM;
2036 
2037 	switch (cmd) {
2038 	case IP6T_SO_GET_INFO:
2039 		ret = get_info(sock_net(sk), user, len, 0);
2040 		break;
2041 
2042 	case IP6T_SO_GET_ENTRIES:
2043 		ret = get_entries(sock_net(sk), user, len);
2044 		break;
2045 
2046 	case IP6T_SO_GET_REVISION_MATCH:
2047 	case IP6T_SO_GET_REVISION_TARGET: {
2048 		struct xt_get_revision rev;
2049 		int target;
2050 
2051 		if (*len != sizeof(rev)) {
2052 			ret = -EINVAL;
2053 			break;
2054 		}
2055 		if (copy_from_user(&rev, user, sizeof(rev)) != 0) {
2056 			ret = -EFAULT;
2057 			break;
2058 		}
2059 		rev.name[sizeof(rev.name)-1] = 0;
2060 
2061 		if (cmd == IP6T_SO_GET_REVISION_TARGET)
2062 			target = 1;
2063 		else
2064 			target = 0;
2065 
2066 		try_then_request_module(xt_find_revision(AF_INET6, rev.name,
2067 							 rev.revision,
2068 							 target, &ret),
2069 					"ip6t_%s", rev.name);
2070 		break;
2071 	}
2072 
2073 	default:
2074 		duprintf("do_ip6t_get_ctl: unknown request %i\n", cmd);
2075 		ret = -EINVAL;
2076 	}
2077 
2078 	return ret;
2079 }
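
/*
 * Userspace sketch (hedged; not part of this file): a tool such as
 * ip6tables can probe whether a target revision is supported roughly as
 * follows, assuming the AF_INET6 raw socket and IPPROTO_IPV6 sockopt
 * level used by libip6tc:
 *
 *	struct xt_get_revision rev = { .revision = 1 };
 *	socklen_t len = sizeof(rev);
 *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_RAW);
 *
 *	strncpy(rev.name, "REJECT", sizeof(rev.name) - 1);
 *	if (getsockopt(fd, IPPROTO_IPV6, IP6T_SO_GET_REVISION_TARGET,
 *		       &rev, &len) >= 0)
 *		revision 1 of the REJECT target is usable;
 *
 * The answer comes from xt_find_revision() above, after an optional
 * request_module("ip6t_REJECT").
 */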
2080 
2081 struct xt_table *ip6t_register_table(struct net *net,
2082 				     const struct xt_table *table,
2083 				     const struct ip6t_replace *repl)
2084 {
2085 	int ret;
2086 	struct xt_table_info *newinfo;
2087 	struct xt_table_info bootstrap = {0};
2088 	void *loc_cpu_entry;
2089 	struct xt_table *new_table;
2090 
2091 	newinfo = xt_alloc_table_info(repl->size);
2092 	if (!newinfo) {
2093 		ret = -ENOMEM;
2094 		goto out;
2095 	}
2096 
2097 	/* choose the copy on our node/cpu, but don't care about preemption */
2098 	loc_cpu_entry = newinfo->entries[raw_smp_processor_id()];
2099 	memcpy(loc_cpu_entry, repl->entries, repl->size);
2100 
2101 	ret = translate_table(net, newinfo, loc_cpu_entry, repl);
2102 	if (ret != 0)
2103 		goto out_free;
2104 
2105 	new_table = xt_register_table(net, table, &bootstrap, newinfo);
2106 	if (IS_ERR(new_table)) {
2107 		ret = PTR_ERR(new_table);
2108 		goto out_free;
2109 	}
2110 	return new_table;
2111 
2112 out_free:
2113 	xt_free_table_info(newinfo);
2114 out:
2115 	return ERR_PTR(ret);
2116 }
2117 
2118 void ip6t_unregister_table(struct net *net, struct xt_table *table)
2119 {
2120 	struct xt_table_info *private;
2121 	void *loc_cpu_entry;
2122 	struct module *table_owner = table->me;
2123 	struct ip6t_entry *iter;
2124 
2125 	private = xt_unregister_table(table);
2126 
2127 	/* Decrease module usage counts and free resources */
2128 	loc_cpu_entry = private->entries[raw_smp_processor_id()];
2129 	xt_entry_foreach(iter, loc_cpu_entry, private->size)
2130 		cleanup_entry(iter, net);
2131 	if (private->number > private->initial_entries)
2132 		module_put(table_owner);
2133 	xt_free_table_info(private);
2134 }
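
/*
 * Usage sketch (hedged; mirrors what table modules such as
 * ip6table_filter do, details vary per module): a table owner builds an
 * initial replace blob and registers it from its pernet init hook, e.g.
 *
 *	static const struct xt_table packet_filter = {
 *		.name		= "filter",
 *		.valid_hooks	= FILTER_VALID_HOOKS,
 *		.me		= THIS_MODULE,
 *		.af		= NFPROTO_IPV6,
 *	};
 *
 *	repl = ip6t_alloc_initial_table(&packet_filter);
 *	net->ipv6.ip6table_filter =
 *		ip6t_register_table(net, &packet_filter, repl);
 *	kfree(repl);
 *
 * and calls ip6t_unregister_table() from the matching pernet exit hook.
 */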
2135 
2136 /* Returns true if the type and code are matched by the range, false otherwise */
2137 static inline bool
2138 icmp6_type_code_match(u_int8_t test_type, u_int8_t min_code, u_int8_t max_code,
2139 		     u_int8_t type, u_int8_t code,
2140 		     bool invert)
2141 {
2142 	return (type == test_type && code >= min_code && code <= max_code)
2143 		^ invert;
2144 }
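
/*
 * Worked example: a rule added with "-p icmpv6 --icmpv6-type 128" is
 * stored as type == 128 with code range [0, 255], so an echo request
 * (type 128, code 0) evaluates to
 * (128 == 128 && 0 >= 0 && 0 <= 255) ^ false == true; setting
 * IP6T_ICMP_INV makes invert flip that result.
 */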
2145 
2146 static bool
2147 icmp6_match(const struct sk_buff *skb, struct xt_action_param *par)
2148 {
2149 	const struct icmp6hdr *ic;
2150 	struct icmp6hdr _icmph;
2151 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2152 
2153 	/* Must not be a fragment. */
2154 	if (par->fragoff != 0)
2155 		return false;
2156 
2157 	ic = skb_header_pointer(skb, par->thoff, sizeof(_icmph), &_icmph);
2158 	if (ic == NULL) {
2159 		/* We've been asked to examine this packet, and we
2160 		 * can't.  Hence, no choice but to drop.
2161 		 */
2162 		duprintf("Dropping evil ICMP tinygram.\n");
2163 		par->hotdrop = true;
2164 		return false;
2165 	}
2166 
2167 	return icmp6_type_code_match(icmpinfo->type,
2168 				     icmpinfo->code[0],
2169 				     icmpinfo->code[1],
2170 				     ic->icmp6_type, ic->icmp6_code,
2171 				     !!(icmpinfo->invflags&IP6T_ICMP_INV));
2172 }
2173 
2174 /* Called when user tries to insert an entry of this type. */
2175 static int icmp6_checkentry(const struct xt_mtchk_param *par)
2176 {
2177 	const struct ip6t_icmp *icmpinfo = par->matchinfo;
2178 
2179 	/* Must specify no unknown invflags */
2180 	return (icmpinfo->invflags & ~IP6T_ICMP_INV) ? -EINVAL : 0;
2181 }
2182 
2183 /* The built-in targets: standard (NULL) and error. */
2184 static struct xt_target ip6t_builtin_tg[] __read_mostly = {
2185 	{
2186 		.name             = XT_STANDARD_TARGET,
2187 		.targetsize       = sizeof(int),
2188 		.family           = NFPROTO_IPV6,
2189 #ifdef CONFIG_COMPAT
2190 		.compatsize       = sizeof(compat_int_t),
2191 		.compat_from_user = compat_standard_from_user,
2192 		.compat_to_user   = compat_standard_to_user,
2193 #endif
2194 	},
2195 	{
2196 		.name             = XT_ERROR_TARGET,
2197 		.target           = ip6t_error,
2198 		.targetsize       = XT_FUNCTION_MAXNAMELEN,
2199 		.family           = NFPROTO_IPV6,
2200 	},
2201 };
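
/*
 * Note on the standard target: its data is the single int verdict that
 * ip6t_do_table() interprets. A value v >= 0 is a byte offset to jump to
 * within the table; v < 0 encodes the builtin verdict -v - 1 (so -1 is
 * NF_DROP, -2 is NF_ACCEPT), with XT_RETURN reserved for returning from
 * a user-defined chain.
 */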
2202 
2203 static struct nf_sockopt_ops ip6t_sockopts = {
2204 	.pf		= PF_INET6,
2205 	.set_optmin	= IP6T_BASE_CTL,
2206 	.set_optmax	= IP6T_SO_SET_MAX+1,
2207 	.set		= do_ip6t_set_ctl,
2208 #ifdef CONFIG_COMPAT
2209 	.compat_set	= compat_do_ip6t_set_ctl,
2210 #endif
2211 	.get_optmin	= IP6T_BASE_CTL,
2212 	.get_optmax	= IP6T_SO_GET_MAX+1,
2213 	.get		= do_ip6t_get_ctl,
2214 #ifdef CONFIG_COMPAT
2215 	.compat_get	= compat_do_ip6t_get_ctl,
2216 #endif
2217 	.owner		= THIS_MODULE,
2218 };
2219 
2220 static struct xt_match ip6t_builtin_mt[] __read_mostly = {
2221 	{
2222 		.name       = "icmp6",
2223 		.match      = icmp6_match,
2224 		.matchsize  = sizeof(struct ip6t_icmp),
2225 		.checkentry = icmp6_checkentry,
2226 		.proto      = IPPROTO_ICMPV6,
2227 		.family     = NFPROTO_IPV6,
2228 	},
2229 };
2230 
2231 static int __net_init ip6_tables_net_init(struct net *net)
2232 {
2233 	return xt_proto_init(net, NFPROTO_IPV6);
2234 }
2235 
2236 static void __net_exit ip6_tables_net_exit(struct net *net)
2237 {
2238 	xt_proto_fini(net, NFPROTO_IPV6);
2239 }
2240 
2241 static struct pernet_operations ip6_tables_net_ops = {
2242 	.init = ip6_tables_net_init,
2243 	.exit = ip6_tables_net_exit,
2244 };
2245 
2246 static int __init ip6_tables_init(void)
2247 {
2248 	int ret;
2249 
2250 	ret = register_pernet_subsys(&ip6_tables_net_ops);
2251 	if (ret < 0)
2252 		goto err1;
2253 
2254 	/* No one else will be downing sem now, so we won't sleep */
2255 	ret = xt_register_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2256 	if (ret < 0)
2257 		goto err2;
2258 	ret = xt_register_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2259 	if (ret < 0)
2260 		goto err4;
2261 
2262 	/* Register setsockopt */
2263 	ret = nf_register_sockopt(&ip6t_sockopts);
2264 	if (ret < 0)
2265 		goto err5;
2266 
2267 	pr_info("(C) 2000-2006 Netfilter Core Team\n");
2268 	return 0;
2269 
2270 err5:
2271 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2272 err4:
2273 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2274 err2:
2275 	unregister_pernet_subsys(&ip6_tables_net_ops);
2276 err1:
2277 	return ret;
2278 }
2279 
2280 static void __exit ip6_tables_fini(void)
2281 {
2282 	nf_unregister_sockopt(&ip6t_sockopts);
2283 
2284 	xt_unregister_matches(ip6t_builtin_mt, ARRAY_SIZE(ip6t_builtin_mt));
2285 	xt_unregister_targets(ip6t_builtin_tg, ARRAY_SIZE(ip6t_builtin_tg));
2286 	unregister_pernet_subsys(&ip6_tables_net_ops);
2287 }
2288 
2289 /*
2290  * find the offset to specified header or the protocol number of last header
2291  * if target < 0. "last header" is transport protocol header, ESP, or
2292  * "No next header".
2293  *
2294  * If target header is found, its offset is set in *offset and return protocol
2295  * number. Otherwise, return -1.
2296  *
2297  * If the first fragment doesn't contain the final protocol header or
2298  * NEXTHDR_NONE it is considered invalid.
2299  *
2300  * Note that non-1st fragment is special case that "the protocol number
2301  * of last header" is "next header" field in Fragment header. In this case,
2302  * *offset is meaningless and fragment offset is stored in *fragoff if fragoff
2303  * isn't NULL.
2304  *
2305  */
2306 int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
2307 		  int target, unsigned short *fragoff)
2308 {
2309 	unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr);
2310 	u8 nexthdr = ipv6_hdr(skb)->nexthdr;
2311 	unsigned int len = skb->len - start;
2312 
2313 	if (fragoff)
2314 		*fragoff = 0;
2315 
2316 	while (nexthdr != target) {
2317 		struct ipv6_opt_hdr _hdr, *hp;
2318 		unsigned int hdrlen;
2319 
2320 		if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
2321 			if (target < 0)
2322 				break;
2323 			return -ENOENT;
2324 		}
2325 
2326 		hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr);
2327 		if (hp == NULL)
2328 			return -EBADMSG;
2329 		if (nexthdr == NEXTHDR_FRAGMENT) {
2330 			unsigned short _frag_off;
2331 			__be16 *fp;
2332 			fp = skb_header_pointer(skb,
2333 						start+offsetof(struct frag_hdr,
2334 							       frag_off),
2335 						sizeof(_frag_off),
2336 						&_frag_off);
2337 			if (fp == NULL)
2338 				return -EBADMSG;
2339 
2340 			_frag_off = ntohs(*fp) & ~0x7;
2341 			if (_frag_off) {
2342 				if (target < 0 &&
2343 				    ((!ipv6_ext_hdr(hp->nexthdr)) ||
2344 				     hp->nexthdr == NEXTHDR_NONE)) {
2345 					if (fragoff)
2346 						*fragoff = _frag_off;
2347 					return hp->nexthdr;
2348 				}
2349 				return -ENOENT;
2350 			}
2351 			hdrlen = 8;
2352 		} else if (nexthdr == NEXTHDR_AUTH)
2353 			hdrlen = (hp->hdrlen + 2) << 2;
2354 		else
2355 			hdrlen = ipv6_optlen(hp);
2356 
2357 		nexthdr = hp->nexthdr;
2358 		len -= hdrlen;
2359 		start += hdrlen;
2360 	}
2361 
2362 	*offset = start;
2363 	return nexthdr;
2364 }
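
/*
 * Usage sketch (illustrative): a caller that wants the transport header,
 * e.g. a TCP match, can do
 *
 *	unsigned int thoff;
 *	int nexthdr = ipv6_find_hdr(skb, &thoff, IPPROTO_TCP, NULL);
 *
 * where nexthdr < 0 means the header is absent, lies beyond the first
 * fragment, or the packet is malformed, and otherwise the TCP header
 * starts at skb offset thoff. Passing target == -1 returns the protocol
 * number of the last header instead, and for non-first fragments reports
 * the fragment offset via a non-NULL fragoff.
 */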
2365 
2366 EXPORT_SYMBOL(ip6t_register_table);
2367 EXPORT_SYMBOL(ip6t_unregister_table);
2368 EXPORT_SYMBOL(ip6t_do_table);
2369 EXPORT_SYMBOL(ip6t_ext_hdr);
2370 EXPORT_SYMBOL(ipv6_find_hdr);
2371 
2372 module_init(ip6_tables_init);
2373 module_exit(ip6_tables_fini);
2374