1 /*
2  *  ebtables
3  *
4  *  Author:
5  *  Bart De Schuymer		<bdschuym@pandora.be>
6  *
7  *  ebtables.c,v 2.0, July, 2002
8  *
9  *  This code is strongly inspired by the iptables code which is
10  *  Copyright (C) 1999 Paul `Rusty' Russell & Michael J. Neuling
11  *
12  *  This program is free software; you can redistribute it and/or
13  *  modify it under the terms of the GNU General Public License
14  *  as published by the Free Software Foundation; either version
15  *  2 of the License, or (at your option) any later version.
16  */
17 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/kmod.h>
19 #include <linux/module.h>
20 #include <linux/vmalloc.h>
21 #include <linux/netfilter/x_tables.h>
22 #include <linux/netfilter_bridge/ebtables.h>
23 #include <linux/spinlock.h>
24 #include <linux/mutex.h>
25 #include <linux/slab.h>
26 #include <asm/uaccess.h>
27 #include <linux/smp.h>
28 #include <linux/cpumask.h>
29 #include <net/sock.h>
30 /* needed for logical [in,out]-dev filtering */
31 #include "../br_private.h"
32 
33 #define BUGPRINT(format, args...) printk("kernel msg: ebtables bug: please "\
34 					 "report to author: "format, ## args)
35 /* #define BUGPRINT(format, args...) */
36 
37 /*
38  * Each cpu has its own set of counters, so there is no need for write_lock in
39  * the softirq
40  * For reading or updating the counters, the user context needs to
41  * get a write_lock
42  */
43 
44 /* The size of each set of counters is altered to get cache alignment */
45 #define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
46 #define COUNTER_OFFSET(n) (SMP_ALIGN(n * sizeof(struct ebt_counter)))
47 #define COUNTER_BASE(c, n, cpu) ((struct ebt_counter *)(((char *)c) + \
48    COUNTER_OFFSET(n) * cpu))
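/* Illustrative sketch (editor's note, not part of the original source):
 * how the per-cpu counter blocks are laid out.  Assuming SMP_CACHE_BYTES
 * is 64 and sizeof(struct ebt_counter) is 16, a table with n = 10 entries
 * uses COUNTER_OFFSET(10) = SMP_ALIGN(160) = 192 bytes per cpu, so the
 * counters of cpu 2 start at (char *)c + 2 * 192.  COUNTER_BASE() does
 * exactly this pointer arithmetic.
 */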
49 
50 
51 
52 static DEFINE_MUTEX(ebt_mutex);
53 
54 #ifdef CONFIG_COMPAT
55 static void ebt_standard_compat_from_user(void *dst, const void *src)
56 {
57 	int v = *(compat_int_t *)src;
58 
59 	if (v >= 0)
60 		v += xt_compat_calc_jump(NFPROTO_BRIDGE, v);
61 	memcpy(dst, &v, sizeof(v));
62 }
63 
64 static int ebt_standard_compat_to_user(void __user *dst, const void *src)
65 {
66 	compat_int_t cv = *(int *)src;
67 
68 	if (cv >= 0)
69 		cv -= xt_compat_calc_jump(NFPROTO_BRIDGE, cv);
70 	return copy_to_user(dst, &cv, sizeof(cv)) ? -EFAULT : 0;
71 }
72 #endif
73 
74 
75 static struct xt_target ebt_standard_target = {
76 	.name       = "standard",
77 	.revision   = 0,
78 	.family     = NFPROTO_BRIDGE,
79 	.targetsize = sizeof(int),
80 #ifdef CONFIG_COMPAT
81 	.compatsize = sizeof(compat_int_t),
82 	.compat_from_user = ebt_standard_compat_from_user,
83 	.compat_to_user =  ebt_standard_compat_to_user,
84 #endif
85 };
86 
87 static inline int
88 ebt_do_watcher(const struct ebt_entry_watcher *w, struct sk_buff *skb,
89 	       struct xt_action_param *par)
90 {
91 	par->target   = w->u.watcher;
92 	par->targinfo = w->data;
93 	w->u.watcher->target(skb, par);
94 	/* watchers don't give a verdict */
95 	return 0;
96 }
97 
98 static inline int
99 ebt_do_match(struct ebt_entry_match *m, const struct sk_buff *skb,
100 	     struct xt_action_param *par)
101 {
102 	par->match     = m->u.match;
103 	par->matchinfo = m->data;
104 	return m->u.match->match(skb, par) ? EBT_MATCH : EBT_NOMATCH;
105 }
106 
107 static inline int
108 ebt_dev_check(const char *entry, const struct net_device *device)
109 {
110 	int i = 0;
111 	const char *devname;
112 
113 	if (*entry == '\0')
114 		return 0;
115 	if (!device)
116 		return 1;
117 	devname = device->name;
118 	/* 1 is the wildcard token */
119 	while (entry[i] != '\0' && entry[i] != 1 && entry[i] == devname[i])
120 		i++;
121 	return (devname[i] != entry[i] && entry[i] != 1);
122 }
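/* Example (editor's sketch, assumptions noted): the wildcard token 1 is
 * what userspace is expected to store in place of a trailing wildcard
 * character (e.g. "eth+").  With entry = "eth\1" the loop above stops at
 * the wildcard byte and ebt_dev_check() returns 0 (match) for "eth0",
 * "eth1", ...; with entry = "eth0" it returns 0 only for the exact name.
 * A non-zero return means "no match".
 */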
123 
124 #define FWINV2(bool,invflg) ((bool) ^ !!(e->invflags & invflg))
125 /* process standard matches */
126 static inline int
127 ebt_basic_match(const struct ebt_entry *e, const struct sk_buff *skb,
128                 const struct net_device *in, const struct net_device *out)
129 {
130 	const struct ethhdr *h = eth_hdr(skb);
131 	const struct net_bridge_port *p;
132 	__be16 ethproto;
133 	int verdict, i;
134 
135 	if (vlan_tx_tag_present(skb))
136 		ethproto = htons(ETH_P_8021Q);
137 	else
138 		ethproto = h->h_proto;
139 
140 	if (e->bitmask & EBT_802_3) {
141 		if (FWINV2(ntohs(ethproto) >= 1536, EBT_IPROTO))
142 			return 1;
143 	} else if (!(e->bitmask & EBT_NOPROTO) &&
144 	   FWINV2(e->ethproto != ethproto, EBT_IPROTO))
145 		return 1;
146 
147 	if (FWINV2(ebt_dev_check(e->in, in), EBT_IIN))
148 		return 1;
149 	if (FWINV2(ebt_dev_check(e->out, out), EBT_IOUT))
150 		return 1;
151 	/* rcu_read_lock()ed by nf_hook_slow */
152 	if (in && (p = br_port_get_rcu(in)) != NULL &&
153 	    FWINV2(ebt_dev_check(e->logical_in, p->br->dev), EBT_ILOGICALIN))
154 		return 1;
155 	if (out && (p = br_port_get_rcu(out)) != NULL &&
156 	    FWINV2(ebt_dev_check(e->logical_out, p->br->dev), EBT_ILOGICALOUT))
157 		return 1;
158 
159 	if (e->bitmask & EBT_SOURCEMAC) {
160 		verdict = 0;
161 		for (i = 0; i < 6; i++)
162 			verdict |= (h->h_source[i] ^ e->sourcemac[i]) &
163 			   e->sourcemsk[i];
164 		if (FWINV2(verdict != 0, EBT_ISOURCE) )
165 			return 1;
166 	}
167 	if (e->bitmask & EBT_DESTMAC) {
168 		verdict = 0;
169 		for (i = 0; i < 6; i++)
170 			verdict |= (h->h_dest[i] ^ e->destmac[i]) &
171 			   e->destmsk[i];
172 		if (FWINV2(verdict != 0, EBT_IDEST) )
173 			return 1;
174 	}
175 	return 0;
176 }
177 
178 static inline __pure
179 struct ebt_entry *ebt_next_entry(const struct ebt_entry *entry)
180 {
181 	return (void *)entry + entry->next_offset;
182 }
183 
184 /* Do some firewalling */
185 unsigned int ebt_do_table(unsigned int hook, struct sk_buff *skb,
186    const struct net_device *in, const struct net_device *out,
187    struct ebt_table *table)
188 {
189 	int i, nentries;
190 	struct ebt_entry *point;
191 	struct ebt_counter *counter_base, *cb_base;
192 	const struct ebt_entry_target *t;
193 	int verdict, sp = 0;
194 	struct ebt_chainstack *cs;
195 	struct ebt_entries *chaininfo;
196 	const char *base;
197 	const struct ebt_table_info *private;
198 	struct xt_action_param acpar;
199 
200 	acpar.family  = NFPROTO_BRIDGE;
201 	acpar.in      = in;
202 	acpar.out     = out;
203 	acpar.hotdrop = false;
204 	acpar.hooknum = hook;
205 
206 	read_lock_bh(&table->lock);
207 	private = table->private;
208 	cb_base = COUNTER_BASE(private->counters, private->nentries,
209 	   smp_processor_id());
210 	if (private->chainstack)
211 		cs = private->chainstack[smp_processor_id()];
212 	else
213 		cs = NULL;
214 	chaininfo = private->hook_entry[hook];
215 	nentries = private->hook_entry[hook]->nentries;
216 	point = (struct ebt_entry *)(private->hook_entry[hook]->data);
217 	counter_base = cb_base + private->hook_entry[hook]->counter_offset;
218 	/* base for chain jumps */
219 	base = private->entries;
220 	i = 0;
221 	while (i < nentries) {
222 		if (ebt_basic_match(point, skb, in, out))
223 			goto letscontinue;
224 
225 		if (EBT_MATCH_ITERATE(point, ebt_do_match, skb, &acpar) != 0)
226 			goto letscontinue;
227 		if (acpar.hotdrop) {
228 			read_unlock_bh(&table->lock);
229 			return NF_DROP;
230 		}
231 
232 		/* increase counter */
233 		(*(counter_base + i)).pcnt++;
234 		(*(counter_base + i)).bcnt += skb->len;
235 
236 		/* these should only watch: not modify, nor tell us
237 		   what to do with the packet */
238 		EBT_WATCHER_ITERATE(point, ebt_do_watcher, skb, &acpar);
239 
240 		t = (struct ebt_entry_target *)
241 		   (((char *)point) + point->target_offset);
242 		/* standard target */
243 		if (!t->u.target->target)
244 			verdict = ((struct ebt_standard_target *)t)->verdict;
245 		else {
246 			acpar.target   = t->u.target;
247 			acpar.targinfo = t->data;
248 			verdict = t->u.target->target(skb, &acpar);
249 		}
250 		if (verdict == EBT_ACCEPT) {
251 			read_unlock_bh(&table->lock);
252 			return NF_ACCEPT;
253 		}
254 		if (verdict == EBT_DROP) {
255 			read_unlock_bh(&table->lock);
256 			return NF_DROP;
257 		}
258 		if (verdict == EBT_RETURN) {
259 letsreturn:
260 #ifdef CONFIG_NETFILTER_DEBUG
261 			if (sp == 0) {
262 				BUGPRINT("RETURN on base chain");
263 				/* act like this is EBT_CONTINUE */
264 				goto letscontinue;
265 			}
266 #endif
267 			sp--;
268 			/* put all the local variables right */
269 			i = cs[sp].n;
270 			chaininfo = cs[sp].chaininfo;
271 			nentries = chaininfo->nentries;
272 			point = cs[sp].e;
273 			counter_base = cb_base +
274 			   chaininfo->counter_offset;
275 			continue;
276 		}
277 		if (verdict == EBT_CONTINUE)
278 			goto letscontinue;
279 #ifdef CONFIG_NETFILTER_DEBUG
280 		if (verdict < 0) {
281 			BUGPRINT("bogus standard verdict\n");
282 			read_unlock_bh(&table->lock);
283 			return NF_DROP;
284 		}
285 #endif
286 		/* jump to a udc */
287 		cs[sp].n = i + 1;
288 		cs[sp].chaininfo = chaininfo;
289 		cs[sp].e = ebt_next_entry(point);
290 		i = 0;
291 		chaininfo = (struct ebt_entries *) (base + verdict);
292 #ifdef CONFIG_NETFILTER_DEBUG
293 		if (chaininfo->distinguisher) {
294 			BUGPRINT("jump to non-chain\n");
295 			read_unlock_bh(&table->lock);
296 			return NF_DROP;
297 		}
298 #endif
299 		nentries = chaininfo->nentries;
300 		point = (struct ebt_entry *)chaininfo->data;
301 		counter_base = cb_base + chaininfo->counter_offset;
302 		sp++;
303 		continue;
304 letscontinue:
305 		point = ebt_next_entry(point);
306 		i++;
307 	}
308 
309 	/* I actually like this :) */
310 	if (chaininfo->policy == EBT_RETURN)
311 		goto letsreturn;
312 	if (chaininfo->policy == EBT_ACCEPT) {
313 		read_unlock_bh(&table->lock);
314 		return NF_ACCEPT;
315 	}
316 	read_unlock_bh(&table->lock);
317 	return NF_DROP;
318 }
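/* Usage sketch (editor's note, modelled on ebtable_filter.c of the same
 * kernel generation; names outside this file are assumptions): a table
 * module registers nf_hook_ops whose hook function simply hands the
 * packet to ebt_do_table() together with the table it stored per net
 * namespace, roughly:
 *
 *	static unsigned int
 *	ebt_in_hook(unsigned int hook, struct sk_buff *skb,
 *		    const struct net_device *in,
 *		    const struct net_device *out,
 *		    int (*okfn)(struct sk_buff *))
 *	{
 *		return ebt_do_table(hook, skb, in, out,
 *				    dev_net(in)->xt.frame_filter);
 *	}
 *
 * The NF_ACCEPT/NF_DROP verdict returned above is what the bridge
 * netfilter core then acts on.
 */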
319 
320 /* If it succeeds, returns element and locks mutex */
321 static inline void *
322 find_inlist_lock_noload(struct list_head *head, const char *name, int *error,
323    struct mutex *mutex)
324 {
325 	struct {
326 		struct list_head list;
327 		char name[EBT_FUNCTION_MAXNAMELEN];
328 	} *e;
329 
330 	*error = mutex_lock_interruptible(mutex);
331 	if (*error != 0)
332 		return NULL;
333 
334 	list_for_each_entry(e, head, list) {
335 		if (strcmp(e->name, name) == 0)
336 			return e;
337 	}
338 	*error = -ENOENT;
339 	mutex_unlock(mutex);
340 	return NULL;
341 }
342 
343 static void *
344 find_inlist_lock(struct list_head *head, const char *name, const char *prefix,
345    int *error, struct mutex *mutex)
346 {
347 	return try_then_request_module(
348 			find_inlist_lock_noload(head, name, error, mutex),
349 			"%s%s", prefix, name);
350 }
351 
352 static inline struct ebt_table *
353 find_table_lock(struct net *net, const char *name, int *error,
354 		struct mutex *mutex)
355 {
356 	return find_inlist_lock(&net->xt.tables[NFPROTO_BRIDGE], name,
357 				"ebtable_", error, mutex);
358 }
359 
360 static inline int
361 ebt_check_match(struct ebt_entry_match *m, struct xt_mtchk_param *par,
362 		unsigned int *cnt)
363 {
364 	const struct ebt_entry *e = par->entryinfo;
365 	struct xt_match *match;
366 	size_t left = ((char *)e + e->watchers_offset) - (char *)m;
367 	int ret;
368 
369 	if (left < sizeof(struct ebt_entry_match) ||
370 	    left - sizeof(struct ebt_entry_match) < m->match_size)
371 		return -EINVAL;
372 
373 	match = xt_request_find_match(NFPROTO_BRIDGE, m->u.name, 0);
374 	if (IS_ERR(match))
375 		return PTR_ERR(match);
376 	m->u.match = match;
377 
378 	par->match     = match;
379 	par->matchinfo = m->data;
380 	ret = xt_check_match(par, m->match_size,
381 	      e->ethproto, e->invflags & EBT_IPROTO);
382 	if (ret < 0) {
383 		module_put(match->me);
384 		return ret;
385 	}
386 
387 	(*cnt)++;
388 	return 0;
389 }
390 
391 static inline int
392 ebt_check_watcher(struct ebt_entry_watcher *w, struct xt_tgchk_param *par,
393 		  unsigned int *cnt)
394 {
395 	const struct ebt_entry *e = par->entryinfo;
396 	struct xt_target *watcher;
397 	size_t left = ((char *)e + e->target_offset) - (char *)w;
398 	int ret;
399 
400 	if (left < sizeof(struct ebt_entry_watcher) ||
401 	   left - sizeof(struct ebt_entry_watcher) < w->watcher_size)
402 		return -EINVAL;
403 
404 	watcher = xt_request_find_target(NFPROTO_BRIDGE, w->u.name, 0);
405 	if (IS_ERR(watcher))
406 		return PTR_ERR(watcher);
407 	w->u.watcher = watcher;
408 
409 	par->target   = watcher;
410 	par->targinfo = w->data;
411 	ret = xt_check_target(par, w->watcher_size,
412 	      e->ethproto, e->invflags & EBT_IPROTO);
413 	if (ret < 0) {
414 		module_put(watcher->me);
415 		return ret;
416 	}
417 
418 	(*cnt)++;
419 	return 0;
420 }
421 
422 static int ebt_verify_pointers(const struct ebt_replace *repl,
423 			       struct ebt_table_info *newinfo)
424 {
425 	unsigned int limit = repl->entries_size;
426 	unsigned int valid_hooks = repl->valid_hooks;
427 	unsigned int offset = 0;
428 	int i;
429 
430 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
431 		newinfo->hook_entry[i] = NULL;
432 
433 	newinfo->entries_size = repl->entries_size;
434 	newinfo->nentries = repl->nentries;
435 
436 	while (offset < limit) {
437 		size_t left = limit - offset;
438 		struct ebt_entry *e = (void *)newinfo->entries + offset;
439 
440 		if (left < sizeof(unsigned int))
441 			break;
442 
443 		for (i = 0; i < NF_BR_NUMHOOKS; i++) {
444 			if ((valid_hooks & (1 << i)) == 0)
445 				continue;
446 			if ((char __user *)repl->hook_entry[i] ==
447 			     repl->entries + offset)
448 				break;
449 		}
450 
451 		if (i != NF_BR_NUMHOOKS || !(e->bitmask & EBT_ENTRY_OR_ENTRIES)) {
452 			if (e->bitmask != 0) {
453 				/* we make userspace set this right,
454 				   so there is no misunderstanding */
455 				BUGPRINT("EBT_ENTRY_OR_ENTRIES shouldn't be set "
456 					 "in distinguisher\n");
457 				return -EINVAL;
458 			}
459 			if (i != NF_BR_NUMHOOKS)
460 				newinfo->hook_entry[i] = (struct ebt_entries *)e;
461 			if (left < sizeof(struct ebt_entries))
462 				break;
463 			offset += sizeof(struct ebt_entries);
464 		} else {
465 			if (left < sizeof(struct ebt_entry))
466 				break;
467 			if (left < e->next_offset)
468 				break;
469 			if (e->next_offset < sizeof(struct ebt_entry))
470 				return -EINVAL;
471 			offset += e->next_offset;
472 		}
473 	}
474 	if (offset != limit) {
475 		BUGPRINT("entries_size too small\n");
476 		return -EINVAL;
477 	}
478 
479 	/* check if all valid hooks have a chain */
480 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
481 		if (!newinfo->hook_entry[i] &&
482 		   (valid_hooks & (1 << i))) {
483 			BUGPRINT("Valid hook without chain\n");
484 			return -EINVAL;
485 		}
486 	}
487 	return 0;
488 }
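/* Layout sketch (editor's note): the blob that ebt_verify_pointers()
 * walks, e.g. for a filter table with one user defined chain:
 *
 *	struct ebt_entries (INPUT)    <- repl->hook_entry[NF_BR_LOCAL_IN]
 *	struct ebt_entry   rules ...
 *	struct ebt_entries (FORWARD)  <- repl->hook_entry[NF_BR_FORWARD]
 *	struct ebt_entry   rules ...
 *	struct ebt_entries (OUTPUT)   <- repl->hook_entry[NF_BR_LOCAL_OUT]
 *	struct ebt_entry   rules ...
 *	struct ebt_entries (udc)      <- no hook_entry points here
 *	struct ebt_entry   rules ...
 *
 * A chain header is recognized either because a valid hook_entry points
 * at it or because its first field (the distinguisher aliased by
 * e->bitmask) is 0; every real rule must have EBT_ENTRY_OR_ENTRIES set.
 */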
489 
490 /*
491  * this one is very careful, as it is the first function
492  * to parse the userspace data
493  */
494 static inline int
495 ebt_check_entry_size_and_hooks(const struct ebt_entry *e,
496    const struct ebt_table_info *newinfo,
497    unsigned int *n, unsigned int *cnt,
498    unsigned int *totalcnt, unsigned int *udc_cnt)
499 {
500 	int i;
501 
502 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
503 		if ((void *)e == (void *)newinfo->hook_entry[i])
504 			break;
505 	}
506 	/* beginning of a new chain
507 	   if i == NF_BR_NUMHOOKS it must be a user defined chain */
508 	if (i != NF_BR_NUMHOOKS || !e->bitmask) {
509 		/* this checks if the previous chain has as many entries
510 		   as it said it has */
511 		if (*n != *cnt) {
512 			BUGPRINT("nentries does not equal the nr of entries "
513 				 "in the chain\n");
514 			return -EINVAL;
515 		}
516 		if (((struct ebt_entries *)e)->policy != EBT_DROP &&
517 		   ((struct ebt_entries *)e)->policy != EBT_ACCEPT) {
518 			/* only RETURN from udc */
519 			if (i != NF_BR_NUMHOOKS ||
520 			   ((struct ebt_entries *)e)->policy != EBT_RETURN) {
521 				BUGPRINT("bad policy\n");
522 				return -EINVAL;
523 			}
524 		}
525 		if (i == NF_BR_NUMHOOKS) /* it's a user defined chain */
526 			(*udc_cnt)++;
527 		if (((struct ebt_entries *)e)->counter_offset != *totalcnt) {
528 			BUGPRINT("counter_offset != totalcnt");
529 			return -EINVAL;
530 		}
531 		*n = ((struct ebt_entries *)e)->nentries;
532 		*cnt = 0;
533 		return 0;
534 	}
535 	/* a plain old entry, heh */
536 	if (sizeof(struct ebt_entry) > e->watchers_offset ||
537 	   e->watchers_offset > e->target_offset ||
538 	   e->target_offset >= e->next_offset) {
539 		BUGPRINT("entry offsets not in right order\n");
540 		return -EINVAL;
541 	}
542 	/* this is not checked anywhere else */
543 	if (e->next_offset - e->target_offset < sizeof(struct ebt_entry_target)) {
544 		BUGPRINT("target size too small\n");
545 		return -EINVAL;
546 	}
547 	(*cnt)++;
548 	(*totalcnt)++;
549 	return 0;
550 }
551 
552 struct ebt_cl_stack
553 {
554 	struct ebt_chainstack cs;
555 	int from;
556 	unsigned int hookmask;
557 };
558 
559 /*
560  * we need these positions to check that the jumps to a different part of the
561  * entries is a jump to the beginning of a new chain.
562  */
563 static inline int
564 ebt_get_udc_positions(struct ebt_entry *e, struct ebt_table_info *newinfo,
565    unsigned int *n, struct ebt_cl_stack *udc)
566 {
567 	int i;
568 
569 	/* we're only interested in chain starts */
570 	if (e->bitmask)
571 		return 0;
572 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
573 		if (newinfo->hook_entry[i] == (struct ebt_entries *)e)
574 			break;
575 	}
576 	/* only care about udc */
577 	if (i != NF_BR_NUMHOOKS)
578 		return 0;
579 
580 	udc[*n].cs.chaininfo = (struct ebt_entries *)e;
581 	/* these initialisations are depended on later in check_chainloops() */
582 	udc[*n].cs.n = 0;
583 	udc[*n].hookmask = 0;
584 
585 	(*n)++;
586 	return 0;
587 }
588 
589 static inline int
590 ebt_cleanup_match(struct ebt_entry_match *m, struct net *net, unsigned int *i)
591 {
592 	struct xt_mtdtor_param par;
593 
594 	if (i && (*i)-- == 0)
595 		return 1;
596 
597 	par.net       = net;
598 	par.match     = m->u.match;
599 	par.matchinfo = m->data;
600 	par.family    = NFPROTO_BRIDGE;
601 	if (par.match->destroy != NULL)
602 		par.match->destroy(&par);
603 	module_put(par.match->me);
604 	return 0;
605 }
606 
607 static inline int
608 ebt_cleanup_watcher(struct ebt_entry_watcher *w, struct net *net, unsigned int *i)
609 {
610 	struct xt_tgdtor_param par;
611 
612 	if (i && (*i)-- == 0)
613 		return 1;
614 
615 	par.net      = net;
616 	par.target   = w->u.watcher;
617 	par.targinfo = w->data;
618 	par.family   = NFPROTO_BRIDGE;
619 	if (par.target->destroy != NULL)
620 		par.target->destroy(&par);
621 	module_put(par.target->me);
622 	return 0;
623 }
624 
625 static inline int
626 ebt_cleanup_entry(struct ebt_entry *e, struct net *net, unsigned int *cnt)
627 {
628 	struct xt_tgdtor_param par;
629 	struct ebt_entry_target *t;
630 
631 	if (e->bitmask == 0)
632 		return 0;
633 	/* we're done */
634 	if (cnt && (*cnt)-- == 0)
635 		return 1;
636 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, NULL);
637 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, NULL);
638 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
639 
640 	par.net      = net;
641 	par.target   = t->u.target;
642 	par.targinfo = t->data;
643 	par.family   = NFPROTO_BRIDGE;
644 	if (par.target->destroy != NULL)
645 		par.target->destroy(&par);
646 	module_put(par.target->me);
647 	return 0;
648 }
649 
650 static inline int
651 ebt_check_entry(struct ebt_entry *e, struct net *net,
652    const struct ebt_table_info *newinfo,
653    const char *name, unsigned int *cnt,
654    struct ebt_cl_stack *cl_s, unsigned int udc_cnt)
655 {
656 	struct ebt_entry_target *t;
657 	struct xt_target *target;
658 	unsigned int i, j, hook = 0, hookmask = 0;
659 	size_t gap;
660 	int ret;
661 	struct xt_mtchk_param mtpar;
662 	struct xt_tgchk_param tgpar;
663 
664 	/* don't mess with the struct ebt_entries */
665 	if (e->bitmask == 0)
666 		return 0;
667 
668 	if (e->bitmask & ~EBT_F_MASK) {
669 		BUGPRINT("Unknown flag for bitmask\n");
670 		return -EINVAL;
671 	}
672 	if (e->invflags & ~EBT_INV_MASK) {
673 		BUGPRINT("Unknown flag for inv bitmask\n");
674 		return -EINVAL;
675 	}
676 	if ( (e->bitmask & EBT_NOPROTO) && (e->bitmask & EBT_802_3) ) {
677 		BUGPRINT("NOPROTO & 802_3 not allowed\n");
678 		return -EINVAL;
679 	}
680 	/* what hook do we belong to? */
681 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
682 		if (!newinfo->hook_entry[i])
683 			continue;
684 		if ((char *)newinfo->hook_entry[i] < (char *)e)
685 			hook = i;
686 		else
687 			break;
688 	}
689 	/* (1 << NF_BR_NUMHOOKS) tells the check functions the rule is on
690 	   a base chain */
691 	if (i < NF_BR_NUMHOOKS)
692 		hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
693 	else {
694 		for (i = 0; i < udc_cnt; i++)
695 			if ((char *)(cl_s[i].cs.chaininfo) > (char *)e)
696 				break;
697 		if (i == 0)
698 			hookmask = (1 << hook) | (1 << NF_BR_NUMHOOKS);
699 		else
700 			hookmask = cl_s[i - 1].hookmask;
701 	}
702 	i = 0;
703 
704 	mtpar.net	= tgpar.net       = net;
705 	mtpar.table     = tgpar.table     = name;
706 	mtpar.entryinfo = tgpar.entryinfo = e;
707 	mtpar.hook_mask = tgpar.hook_mask = hookmask;
708 	mtpar.family    = tgpar.family    = NFPROTO_BRIDGE;
709 	ret = EBT_MATCH_ITERATE(e, ebt_check_match, &mtpar, &i);
710 	if (ret != 0)
711 		goto cleanup_matches;
712 	j = 0;
713 	ret = EBT_WATCHER_ITERATE(e, ebt_check_watcher, &tgpar, &j);
714 	if (ret != 0)
715 		goto cleanup_watchers;
716 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
717 	gap = e->next_offset - e->target_offset;
718 
719 	target = xt_request_find_target(NFPROTO_BRIDGE, t->u.name, 0);
720 	if (IS_ERR(target)) {
721 		ret = PTR_ERR(target);
722 		goto cleanup_watchers;
723 	}
724 
725 	t->u.target = target;
726 	if (t->u.target == &ebt_standard_target) {
727 		if (gap < sizeof(struct ebt_standard_target)) {
728 			BUGPRINT("Standard target size too big\n");
729 			ret = -EFAULT;
730 			goto cleanup_watchers;
731 		}
732 		if (((struct ebt_standard_target *)t)->verdict <
733 		   -NUM_STANDARD_TARGETS) {
734 			BUGPRINT("Invalid standard target\n");
735 			ret = -EFAULT;
736 			goto cleanup_watchers;
737 		}
738 	} else if (t->target_size > gap - sizeof(struct ebt_entry_target)) {
739 		module_put(t->u.target->me);
740 		ret = -EFAULT;
741 		goto cleanup_watchers;
742 	}
743 
744 	tgpar.target   = target;
745 	tgpar.targinfo = t->data;
746 	ret = xt_check_target(&tgpar, t->target_size,
747 	      e->ethproto, e->invflags & EBT_IPROTO);
748 	if (ret < 0) {
749 		module_put(target->me);
750 		goto cleanup_watchers;
751 	}
752 	(*cnt)++;
753 	return 0;
754 cleanup_watchers:
755 	EBT_WATCHER_ITERATE(e, ebt_cleanup_watcher, net, &j);
756 cleanup_matches:
757 	EBT_MATCH_ITERATE(e, ebt_cleanup_match, net, &i);
758 	return ret;
759 }
760 
761 /*
762  * checks for loops and sets the hook mask for udc
763  * the hook mask for udc tells us from which base chains the udc can be
764  * accessed. This mask is a parameter to the check() functions of the extensions
765  */
766 static int check_chainloops(const struct ebt_entries *chain, struct ebt_cl_stack *cl_s,
767    unsigned int udc_cnt, unsigned int hooknr, char *base)
768 {
769 	int i, chain_nr = -1, pos = 0, nentries = chain->nentries, verdict;
770 	const struct ebt_entry *e = (struct ebt_entry *)chain->data;
771 	const struct ebt_entry_target *t;
772 
773 	while (pos < nentries || chain_nr != -1) {
774 		/* end of udc, go back one 'recursion' step */
775 		if (pos == nentries) {
776 			/* put back values of the time when this chain was called */
777 			e = cl_s[chain_nr].cs.e;
778 			if (cl_s[chain_nr].from != -1)
779 				nentries =
780 				cl_s[cl_s[chain_nr].from].cs.chaininfo->nentries;
781 			else
782 				nentries = chain->nentries;
783 			pos = cl_s[chain_nr].cs.n;
784 			/* make sure we won't see a loop that isn't one */
785 			cl_s[chain_nr].cs.n = 0;
786 			chain_nr = cl_s[chain_nr].from;
787 			if (pos == nentries)
788 				continue;
789 		}
790 		t = (struct ebt_entry_target *)
791 		   (((char *)e) + e->target_offset);
792 		if (strcmp(t->u.name, EBT_STANDARD_TARGET))
793 			goto letscontinue;
794 		if (e->target_offset + sizeof(struct ebt_standard_target) >
795 		   e->next_offset) {
796 			BUGPRINT("Standard target size too big\n");
797 			return -1;
798 		}
799 		verdict = ((struct ebt_standard_target *)t)->verdict;
800 		if (verdict >= 0) { /* jump to another chain */
801 			struct ebt_entries *hlp2 =
802 			   (struct ebt_entries *)(base + verdict);
803 			for (i = 0; i < udc_cnt; i++)
804 				if (hlp2 == cl_s[i].cs.chaininfo)
805 					break;
806 			/* bad destination or loop */
807 			if (i == udc_cnt) {
808 				BUGPRINT("bad destination\n");
809 				return -1;
810 			}
811 			if (cl_s[i].cs.n) {
812 				BUGPRINT("loop\n");
813 				return -1;
814 			}
815 			if (cl_s[i].hookmask & (1 << hooknr))
816 				goto letscontinue;
817 			/* this can't be 0, so the loop test is correct */
818 			cl_s[i].cs.n = pos + 1;
819 			pos = 0;
820 			cl_s[i].cs.e = ebt_next_entry(e);
821 			e = (struct ebt_entry *)(hlp2->data);
822 			nentries = hlp2->nentries;
823 			cl_s[i].from = chain_nr;
824 			chain_nr = i;
825 			/* this udc is accessible from the base chain for hooknr */
826 			cl_s[i].hookmask |= (1 << hooknr);
827 			continue;
828 		}
829 letscontinue:
830 		e = ebt_next_entry(e);
831 		pos++;
832 	}
833 	return 0;
834 }
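/* Worked example (editor's note): suppose user defined chain A jumps to
 * chain B and B jumps back to A.  Entering A sets cl_s[A].cs.n = pos + 1
 * (never 0); entering B does the same.  When B's jump back to A is
 * examined, cl_s[A].cs.n is still non-zero, so the "loop" branch above
 * triggers and the whole table is rejected.
 */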
835 
836 /* do the parsing of the table/chains/entries/matches/watchers/targets, heh */
837 static int translate_table(struct net *net, const char *name,
838 			   struct ebt_table_info *newinfo)
839 {
840 	unsigned int i, j, k, udc_cnt;
841 	int ret;
842 	struct ebt_cl_stack *cl_s = NULL; /* used in the checking for chain loops */
843 
844 	i = 0;
845 	while (i < NF_BR_NUMHOOKS && !newinfo->hook_entry[i])
846 		i++;
847 	if (i == NF_BR_NUMHOOKS) {
848 		BUGPRINT("No valid hooks specified\n");
849 		return -EINVAL;
850 	}
851 	if (newinfo->hook_entry[i] != (struct ebt_entries *)newinfo->entries) {
852 		BUGPRINT("Chains don't start at beginning\n");
853 		return -EINVAL;
854 	}
855 	/* make sure chains are ordered after each other in the same order
856 	   as their corresponding hooks */
857 	for (j = i + 1; j < NF_BR_NUMHOOKS; j++) {
858 		if (!newinfo->hook_entry[j])
859 			continue;
860 		if (newinfo->hook_entry[j] <= newinfo->hook_entry[i]) {
861 			BUGPRINT("Hook order must be followed\n");
862 			return -EINVAL;
863 		}
864 		i = j;
865 	}
866 
867 	/* do some early checkings and initialize some things */
868 	i = 0; /* holds the expected nr. of entries for the chain */
869 	j = 0; /* holds the up to now counted entries for the chain */
870 	k = 0; /* holds the total nr. of entries, should equal
871 		  newinfo->nentries afterwards */
872 	udc_cnt = 0; /* will hold the nr. of user defined chains (udc) */
873 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
874 	   ebt_check_entry_size_and_hooks, newinfo,
875 	   &i, &j, &k, &udc_cnt);
876 
877 	if (ret != 0)
878 		return ret;
879 
880 	if (i != j) {
881 		BUGPRINT("nentries does not equal the nr of entries in the "
882 			 "(last) chain\n");
883 		return -EINVAL;
884 	}
885 	if (k != newinfo->nentries) {
886 		BUGPRINT("Total nentries is wrong\n");
887 		return -EINVAL;
888 	}
889 
890 	/* get the location of the udc, put them in an array
891 	   while we're at it, allocate the chainstack */
892 	if (udc_cnt) {
893 		/* this will get free'd in do_replace()/ebt_register_table()
894 		   if an error occurs */
895 		newinfo->chainstack =
896 			vmalloc(nr_cpu_ids * sizeof(*(newinfo->chainstack)));
897 		if (!newinfo->chainstack)
898 			return -ENOMEM;
899 		for_each_possible_cpu(i) {
900 			newinfo->chainstack[i] =
901 			  vmalloc(udc_cnt * sizeof(*(newinfo->chainstack[0])));
902 			if (!newinfo->chainstack[i]) {
903 				while (i)
904 					vfree(newinfo->chainstack[--i]);
905 				vfree(newinfo->chainstack);
906 				newinfo->chainstack = NULL;
907 				return -ENOMEM;
908 			}
909 		}
910 
911 		cl_s = vmalloc(udc_cnt * sizeof(*cl_s));
912 		if (!cl_s)
913 			return -ENOMEM;
914 		i = 0; /* the i'th udc */
915 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
916 		   ebt_get_udc_positions, newinfo, &i, cl_s);
917 		/* sanity check */
918 		if (i != udc_cnt) {
919 			BUGPRINT("i != udc_cnt\n");
920 			vfree(cl_s);
921 			return -EFAULT;
922 		}
923 	}
924 
925 	/* Check for loops */
926 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
927 		if (newinfo->hook_entry[i])
928 			if (check_chainloops(newinfo->hook_entry[i],
929 			   cl_s, udc_cnt, i, newinfo->entries)) {
930 				vfree(cl_s);
931 				return -EINVAL;
932 			}
933 
934 	/* we now know the following (along with E=mc²):
935 	   - the nr of entries in each chain is right
936 	   - the size of the allocated space is right
937 	   - all valid hooks have a corresponding chain
938 	   - there are no loops
939 	   - wrong data can still be on the level of a single entry
940 	   - could be there are jumps to places that are not the
941 	     beginning of a chain. This can only occur in chains that
942 	     are not accessible from any base chains, so we don't care. */
943 
944 	/* used to know what we need to clean up if something goes wrong */
945 	i = 0;
946 	ret = EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
947 	   ebt_check_entry, net, newinfo, name, &i, cl_s, udc_cnt);
948 	if (ret != 0) {
949 		EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
950 				  ebt_cleanup_entry, net, &i);
951 	}
952 	vfree(cl_s);
953 	return ret;
954 }
955 
956 /* called under write_lock */
957 static void get_counters(const struct ebt_counter *oldcounters,
958    struct ebt_counter *counters, unsigned int nentries)
959 {
960 	int i, cpu;
961 	struct ebt_counter *counter_base;
962 
963 	/* counters of cpu 0 */
964 	memcpy(counters, oldcounters,
965 	       sizeof(struct ebt_counter) * nentries);
966 
967 	/* add other counters to those of cpu 0 */
968 	for_each_possible_cpu(cpu) {
969 		if (cpu == 0)
970 			continue;
971 		counter_base = COUNTER_BASE(oldcounters, nentries, cpu);
972 		for (i = 0; i < nentries; i++) {
973 			counters[i].pcnt += counter_base[i].pcnt;
974 			counters[i].bcnt += counter_base[i].bcnt;
975 		}
976 	}
977 }
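/* Example (editor's note): with nentries = 2 and two possible cpus, the
 * snapshot for entry 0 is counters[0] = oldcounters[0] (cpu 0) plus
 * COUNTER_BASE(oldcounters, 2, 1)[0] (cpu 1); pcnt and bcnt are summed
 * independently.
 */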
978 
979 static int do_replace_finish(struct net *net, struct ebt_replace *repl,
980 			      struct ebt_table_info *newinfo)
981 {
982 	int ret, i;
983 	struct ebt_counter *counterstmp = NULL;
984 	/* used to be able to unlock earlier */
985 	struct ebt_table_info *table;
986 	struct ebt_table *t;
987 
988 	/* the user wants counters back;
989 	   the check on the size is done later, when we have the lock */
990 	if (repl->num_counters) {
991 		unsigned long size = repl->num_counters * sizeof(*counterstmp);
992 		counterstmp = vmalloc(size);
993 		if (!counterstmp)
994 			return -ENOMEM;
995 	}
996 
997 	newinfo->chainstack = NULL;
998 	ret = ebt_verify_pointers(repl, newinfo);
999 	if (ret != 0)
1000 		goto free_counterstmp;
1001 
1002 	ret = translate_table(net, repl->name, newinfo);
1003 
1004 	if (ret != 0)
1005 		goto free_counterstmp;
1006 
1007 	t = find_table_lock(net, repl->name, &ret, &ebt_mutex);
1008 	if (!t) {
1009 		ret = -ENOENT;
1010 		goto free_iterate;
1011 	}
1012 
1013 	/* the table doesn't like it */
1014 	if (t->check && (ret = t->check(newinfo, repl->valid_hooks)))
1015 		goto free_unlock;
1016 
1017 	if (repl->num_counters && repl->num_counters != t->private->nentries) {
1018 		BUGPRINT("Wrong nr. of counters requested\n");
1019 		ret = -EINVAL;
1020 		goto free_unlock;
1021 	}
1022 
1023 	/* we have the mutex lock, so no danger in reading this pointer */
1024 	table = t->private;
1025 	/* make sure the table can only be rmmod'ed if it contains no rules */
1026 	if (!table->nentries && newinfo->nentries && !try_module_get(t->me)) {
1027 		ret = -ENOENT;
1028 		goto free_unlock;
1029 	} else if (table->nentries && !newinfo->nentries)
1030 		module_put(t->me);
1031 	/* we need an atomic snapshot of the counters */
1032 	write_lock_bh(&t->lock);
1033 	if (repl->num_counters)
1034 		get_counters(t->private->counters, counterstmp,
1035 		   t->private->nentries);
1036 
1037 	t->private = newinfo;
1038 	write_unlock_bh(&t->lock);
1039 	mutex_unlock(&ebt_mutex);
1040 	/* So a user can change the chains even though the counter allocation
1041 	   was messed up. The only reason it is done this way is that the lock
1042 	   then has to be taken only once, and this doesn't bring the kernel
1043 	   into a dangerous state. */
1044 	if (repl->num_counters &&
1045 	   copy_to_user(repl->counters, counterstmp,
1046 	   repl->num_counters * sizeof(struct ebt_counter))) {
1047 		ret = -EFAULT;
1048 	}
1049 	else
1050 		ret = 0;
1051 
1052 	/* decrease module count and free resources */
1053 	EBT_ENTRY_ITERATE(table->entries, table->entries_size,
1054 			  ebt_cleanup_entry, net, NULL);
1055 
1056 	vfree(table->entries);
1057 	if (table->chainstack) {
1058 		for_each_possible_cpu(i)
1059 			vfree(table->chainstack[i]);
1060 		vfree(table->chainstack);
1061 	}
1062 	vfree(table);
1063 
1064 	vfree(counterstmp);
1065 	return ret;
1066 
1067 free_unlock:
1068 	mutex_unlock(&ebt_mutex);
1069 free_iterate:
1070 	EBT_ENTRY_ITERATE(newinfo->entries, newinfo->entries_size,
1071 			  ebt_cleanup_entry, net, NULL);
1072 free_counterstmp:
1073 	vfree(counterstmp);
1074 	/* can be initialized in translate_table() */
1075 	if (newinfo->chainstack) {
1076 		for_each_possible_cpu(i)
1077 			vfree(newinfo->chainstack[i]);
1078 		vfree(newinfo->chainstack);
1079 	}
1080 	return ret;
1081 }
1082 
1083 /* replace the table */
1084 static int do_replace(struct net *net, const void __user *user,
1085 		      unsigned int len)
1086 {
1087 	int ret, countersize;
1088 	struct ebt_table_info *newinfo;
1089 	struct ebt_replace tmp;
1090 
1091 	if (copy_from_user(&tmp, user, sizeof(tmp)) != 0)
1092 		return -EFAULT;
1093 
1094 	if (len != sizeof(tmp) + tmp.entries_size) {
1095 		BUGPRINT("Wrong len argument\n");
1096 		return -EINVAL;
1097 	}
1098 
1099 	if (tmp.entries_size == 0) {
1100 		BUGPRINT("Entries_size never zero\n");
1101 		return -EINVAL;
1102 	}
1103 	/* overflow check */
1104 	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
1105 			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
1106 		return -ENOMEM;
1107 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
1108 		return -ENOMEM;
1109 
1110 	tmp.name[sizeof(tmp.name) - 1] = 0;
1111 
1112 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
1113 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1114 	if (!newinfo)
1115 		return -ENOMEM;
1116 
1117 	if (countersize)
1118 		memset(newinfo->counters, 0, countersize);
1119 
1120 	newinfo->entries = vmalloc(tmp.entries_size);
1121 	if (!newinfo->entries) {
1122 		ret = -ENOMEM;
1123 		goto free_newinfo;
1124 	}
1125 	if (copy_from_user(
1126 	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
1127 		BUGPRINT("Couldn't copy entries from userspace\n");
1128 		ret = -EFAULT;
1129 		goto free_entries;
1130 	}
1131 
1132 	ret = do_replace_finish(net, &tmp, newinfo);
1133 	if (ret == 0)
1134 		return ret;
1135 free_entries:
1136 	vfree(newinfo->entries);
1137 free_newinfo:
1138 	vfree(newinfo);
1139 	return ret;
1140 }
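/* Usage sketch (editor's note; the userspace side is an assumption based
 * on how the ebtables tool drives this interface, not something defined
 * in this file): userspace builds a struct ebt_replace immediately
 * followed by entries_size bytes of entries and pushes the whole blob
 * through the netfilter sockopt layer, roughly:
 *
 *	setsockopt(fd, IPPROTO_IP, EBT_SO_SET_ENTRIES, repl,
 *		   sizeof(*repl) + repl->entries_size);
 *
 * which arrives in do_ebt_set_ctl() -> do_replace() above; note that the
 * len check at the top of do_replace() expects exactly that size.
 */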
1141 
1142 struct ebt_table *
1143 ebt_register_table(struct net *net, const struct ebt_table *input_table)
1144 {
1145 	struct ebt_table_info *newinfo;
1146 	struct ebt_table *t, *table;
1147 	struct ebt_replace_kernel *repl;
1148 	int ret, i, countersize;
1149 	void *p;
1150 
1151 	if (input_table == NULL || (repl = input_table->table) == NULL ||
1152 	    repl->entries == NULL || repl->entries_size == 0 ||
1153 	    repl->counters != NULL || input_table->private != NULL) {
1154 		BUGPRINT("Bad table data for ebt_register_table!!!\n");
1155 		return ERR_PTR(-EINVAL);
1156 	}
1157 
1158 	/* Don't add one table to multiple lists. */
1159 	table = kmemdup(input_table, sizeof(struct ebt_table), GFP_KERNEL);
1160 	if (!table) {
1161 		ret = -ENOMEM;
1162 		goto out;
1163 	}
1164 
1165 	countersize = COUNTER_OFFSET(repl->nentries) * nr_cpu_ids;
1166 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
1167 	ret = -ENOMEM;
1168 	if (!newinfo)
1169 		goto free_table;
1170 
1171 	p = vmalloc(repl->entries_size);
1172 	if (!p)
1173 		goto free_newinfo;
1174 
1175 	memcpy(p, repl->entries, repl->entries_size);
1176 	newinfo->entries = p;
1177 
1178 	newinfo->entries_size = repl->entries_size;
1179 	newinfo->nentries = repl->nentries;
1180 
1181 	if (countersize)
1182 		memset(newinfo->counters, 0, countersize);
1183 
1184 	/* fill in newinfo and parse the entries */
1185 	newinfo->chainstack = NULL;
1186 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1187 		if ((repl->valid_hooks & (1 << i)) == 0)
1188 			newinfo->hook_entry[i] = NULL;
1189 		else
1190 			newinfo->hook_entry[i] = p +
1191 				((char *)repl->hook_entry[i] - repl->entries);
1192 	}
1193 	ret = translate_table(net, repl->name, newinfo);
1194 	if (ret != 0) {
1195 		BUGPRINT("Translate_table failed\n");
1196 		goto free_chainstack;
1197 	}
1198 
1199 	if (table->check && table->check(newinfo, table->valid_hooks)) {
1200 		BUGPRINT("The table doesn't like its own initial data, lol\n");
1201 		ret = -EINVAL;
1202 		goto free_chainstack;
1203 	}
1204 
1205 	table->private = newinfo;
1206 	rwlock_init(&table->lock);
1207 	ret = mutex_lock_interruptible(&ebt_mutex);
1208 	if (ret != 0)
1209 		goto free_chainstack;
1210 
1211 	list_for_each_entry(t, &net->xt.tables[NFPROTO_BRIDGE], list) {
1212 		if (strcmp(t->name, table->name) == 0) {
1213 			ret = -EEXIST;
1214 			BUGPRINT("Table name already exists\n");
1215 			goto free_unlock;
1216 		}
1217 	}
1218 
1219 	/* Hold a reference count if the chains aren't empty */
1220 	if (newinfo->nentries && !try_module_get(table->me)) {
1221 		ret = -ENOENT;
1222 		goto free_unlock;
1223 	}
1224 	list_add(&table->list, &net->xt.tables[NFPROTO_BRIDGE]);
1225 	mutex_unlock(&ebt_mutex);
1226 	return table;
1227 free_unlock:
1228 	mutex_unlock(&ebt_mutex);
1229 free_chainstack:
1230 	if (newinfo->chainstack) {
1231 		for_each_possible_cpu(i)
1232 			vfree(newinfo->chainstack[i]);
1233 		vfree(newinfo->chainstack);
1234 	}
1235 	vfree(newinfo->entries);
1236 free_newinfo:
1237 	vfree(newinfo);
1238 free_table:
1239 	kfree(table);
1240 out:
1241 	return ERR_PTR(ret);
1242 }
1243 
1244 void ebt_unregister_table(struct net *net, struct ebt_table *table)
1245 {
1246 	int i;
1247 
1248 	if (!table) {
1249 		BUGPRINT("Request to unregister NULL table!!!\n");
1250 		return;
1251 	}
1252 	mutex_lock(&ebt_mutex);
1253 	list_del(&table->list);
1254 	mutex_unlock(&ebt_mutex);
1255 	EBT_ENTRY_ITERATE(table->private->entries, table->private->entries_size,
1256 			  ebt_cleanup_entry, net, NULL);
1257 	if (table->private->nentries)
1258 		module_put(table->me);
1259 	vfree(table->private->entries);
1260 	if (table->private->chainstack) {
1261 		for_each_possible_cpu(i)
1262 			vfree(table->private->chainstack[i]);
1263 		vfree(table->private->chainstack);
1264 	}
1265 	vfree(table->private);
1266 	kfree(table);
1267 }
1268 
1269 /* userspace just supplied us with counters */
1270 static int do_update_counters(struct net *net, const char *name,
1271 				struct ebt_counter __user *counters,
1272 				unsigned int num_counters,
1273 				const void __user *user, unsigned int len)
1274 {
1275 	int i, ret;
1276 	struct ebt_counter *tmp;
1277 	struct ebt_table *t;
1278 
1279 	if (num_counters == 0)
1280 		return -EINVAL;
1281 
1282 	tmp = vmalloc(num_counters * sizeof(*tmp));
1283 	if (!tmp)
1284 		return -ENOMEM;
1285 
1286 	t = find_table_lock(net, name, &ret, &ebt_mutex);
1287 	if (!t)
1288 		goto free_tmp;
1289 
1290 	if (num_counters != t->private->nentries) {
1291 		BUGPRINT("Wrong nr of counters\n");
1292 		ret = -EINVAL;
1293 		goto unlock_mutex;
1294 	}
1295 
1296 	if (copy_from_user(tmp, counters, num_counters * sizeof(*counters))) {
1297 		ret = -EFAULT;
1298 		goto unlock_mutex;
1299 	}
1300 
1301 	/* we want an atomic add of the counters */
1302 	write_lock_bh(&t->lock);
1303 
1304 	/* we add to the counters of the first cpu */
1305 	for (i = 0; i < num_counters; i++) {
1306 		t->private->counters[i].pcnt += tmp[i].pcnt;
1307 		t->private->counters[i].bcnt += tmp[i].bcnt;
1308 	}
1309 
1310 	write_unlock_bh(&t->lock);
1311 	ret = 0;
1312 unlock_mutex:
1313 	mutex_unlock(&ebt_mutex);
1314 free_tmp:
1315 	vfree(tmp);
1316 	return ret;
1317 }
1318 
1319 static int update_counters(struct net *net, const void __user *user,
1320 			    unsigned int len)
1321 {
1322 	struct ebt_replace hlp;
1323 
1324 	if (copy_from_user(&hlp, user, sizeof(hlp)))
1325 		return -EFAULT;
1326 
1327 	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
1328 		return -EINVAL;
1329 
1330 	return do_update_counters(net, hlp.name, hlp.counters,
1331 				hlp.num_counters, user, len);
1332 }
1333 
1334 static inline int ebt_make_matchname(const struct ebt_entry_match *m,
1335     const char *base, char __user *ubase)
1336 {
1337 	char __user *hlp = ubase + ((char *)m - base);
1338 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1339 
1340 	/* ebtables expects 32-byte-long names but xt_match names are 29 bytes
1341 	   long. Copy 29 bytes and fill the remaining bytes with zeroes. */
1342 	strncpy(name, m->u.match->name, sizeof(name));
1343 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1344 		return -EFAULT;
1345 	return 0;
1346 }
1347 
1348 static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
1349     const char *base, char __user *ubase)
1350 {
1351 	char __user *hlp = ubase + ((char *)w - base);
1352 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1353 
1354 	strncpy(name, w->u.watcher->name, sizeof(name));
1355 	if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
1356 		return -EFAULT;
1357 	return 0;
1358 }
1359 
1360 static inline int
1361 ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
1362 {
1363 	int ret;
1364 	char __user *hlp;
1365 	const struct ebt_entry_target *t;
1366 	char name[EBT_FUNCTION_MAXNAMELEN] = {};
1367 
1368 	if (e->bitmask == 0)
1369 		return 0;
1370 
1371 	hlp = ubase + (((char *)e + e->target_offset) - base);
1372 	t = (struct ebt_entry_target *)(((char *)e) + e->target_offset);
1373 
1374 	ret = EBT_MATCH_ITERATE(e, ebt_make_matchname, base, ubase);
1375 	if (ret != 0)
1376 		return ret;
1377 	ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
1378 	if (ret != 0)
1379 		return ret;
1380 	strncpy(name, t->u.target->name, sizeof(name));
1381 	if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
1382 		return -EFAULT;
1383 	return 0;
1384 }
1385 
1386 static int copy_counters_to_user(struct ebt_table *t,
1387 				  const struct ebt_counter *oldcounters,
1388 				  void __user *user, unsigned int num_counters,
1389 				  unsigned int nentries)
1390 {
1391 	struct ebt_counter *counterstmp;
1392 	int ret = 0;
1393 
1394 	/* userspace might not need the counters */
1395 	if (num_counters == 0)
1396 		return 0;
1397 
1398 	if (num_counters != nentries) {
1399 		BUGPRINT("Num_counters wrong\n");
1400 		return -EINVAL;
1401 	}
1402 
1403 	counterstmp = vmalloc(nentries * sizeof(*counterstmp));
1404 	if (!counterstmp)
1405 		return -ENOMEM;
1406 
1407 	write_lock_bh(&t->lock);
1408 	get_counters(oldcounters, counterstmp, nentries);
1409 	write_unlock_bh(&t->lock);
1410 
1411 	if (copy_to_user(user, counterstmp,
1412 	   nentries * sizeof(struct ebt_counter)))
1413 		ret = -EFAULT;
1414 	vfree(counterstmp);
1415 	return ret;
1416 }
1417 
1418 /* called with ebt_mutex locked */
1419 static int copy_everything_to_user(struct ebt_table *t, void __user *user,
1420     const int *len, int cmd)
1421 {
1422 	struct ebt_replace tmp;
1423 	const struct ebt_counter *oldcounters;
1424 	unsigned int entries_size, nentries;
1425 	int ret;
1426 	char *entries;
1427 
1428 	if (cmd == EBT_SO_GET_ENTRIES) {
1429 		entries_size = t->private->entries_size;
1430 		nentries = t->private->nentries;
1431 		entries = t->private->entries;
1432 		oldcounters = t->private->counters;
1433 	} else {
1434 		entries_size = t->table->entries_size;
1435 		nentries = t->table->nentries;
1436 		entries = t->table->entries;
1437 		oldcounters = t->table->counters;
1438 	}
1439 
1440 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1441 		return -EFAULT;
1442 
1443 	if (*len != sizeof(struct ebt_replace) + entries_size +
1444 	   (tmp.num_counters? nentries * sizeof(struct ebt_counter): 0))
1445 		return -EINVAL;
1446 
1447 	if (tmp.nentries != nentries) {
1448 		BUGPRINT("Nentries wrong\n");
1449 		return -EINVAL;
1450 	}
1451 
1452 	if (tmp.entries_size != entries_size) {
1453 		BUGPRINT("Wrong size\n");
1454 		return -EINVAL;
1455 	}
1456 
1457 	ret = copy_counters_to_user(t, oldcounters, tmp.counters,
1458 					tmp.num_counters, nentries);
1459 	if (ret)
1460 		return ret;
1461 
1462 	if (copy_to_user(tmp.entries, entries, entries_size)) {
1463 		BUGPRINT("Couldn't copy entries to userspace\n");
1464 		return -EFAULT;
1465 	}
1466 	/* set the match/watcher/target names right */
1467 	return EBT_ENTRY_ITERATE(entries, entries_size,
1468 	   ebt_make_names, entries, tmp.entries);
1469 }
1470 
1471 static int do_ebt_set_ctl(struct sock *sk,
1472 	int cmd, void __user *user, unsigned int len)
1473 {
1474 	int ret;
1475 
1476 	if (!capable(CAP_NET_ADMIN))
1477 		return -EPERM;
1478 
1479 	switch(cmd) {
1480 	case EBT_SO_SET_ENTRIES:
1481 		ret = do_replace(sock_net(sk), user, len);
1482 		break;
1483 	case EBT_SO_SET_COUNTERS:
1484 		ret = update_counters(sock_net(sk), user, len);
1485 		break;
1486 	default:
1487 		ret = -EINVAL;
1488 	}
1489 	return ret;
1490 }
1491 
1492 static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
1493 {
1494 	int ret;
1495 	struct ebt_replace tmp;
1496 	struct ebt_table *t;
1497 
1498 	if (!capable(CAP_NET_ADMIN))
1499 		return -EPERM;
1500 
1501 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1502 		return -EFAULT;
1503 
1504 	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
1505 	if (!t)
1506 		return ret;
1507 
1508 	switch(cmd) {
1509 	case EBT_SO_GET_INFO:
1510 	case EBT_SO_GET_INIT_INFO:
1511 		if (*len != sizeof(struct ebt_replace)){
1512 			ret = -EINVAL;
1513 			mutex_unlock(&ebt_mutex);
1514 			break;
1515 		}
1516 		if (cmd == EBT_SO_GET_INFO) {
1517 			tmp.nentries = t->private->nentries;
1518 			tmp.entries_size = t->private->entries_size;
1519 			tmp.valid_hooks = t->valid_hooks;
1520 		} else {
1521 			tmp.nentries = t->table->nentries;
1522 			tmp.entries_size = t->table->entries_size;
1523 			tmp.valid_hooks = t->table->valid_hooks;
1524 		}
1525 		mutex_unlock(&ebt_mutex);
1526 		if (copy_to_user(user, &tmp, *len) != 0){
1527 			BUGPRINT("c2u Didn't work\n");
1528 			ret = -EFAULT;
1529 			break;
1530 		}
1531 		ret = 0;
1532 		break;
1533 
1534 	case EBT_SO_GET_ENTRIES:
1535 	case EBT_SO_GET_INIT_ENTRIES:
1536 		ret = copy_everything_to_user(t, user, len, cmd);
1537 		mutex_unlock(&ebt_mutex);
1538 		break;
1539 
1540 	default:
1541 		mutex_unlock(&ebt_mutex);
1542 		ret = -EINVAL;
1543 	}
1544 
1545 	return ret;
1546 }
1547 
1548 #ifdef CONFIG_COMPAT
1549 /* 32 bit-userspace compatibility definitions. */
1550 struct compat_ebt_replace {
1551 	char name[EBT_TABLE_MAXNAMELEN];
1552 	compat_uint_t valid_hooks;
1553 	compat_uint_t nentries;
1554 	compat_uint_t entries_size;
1555 	/* start of the chains */
1556 	compat_uptr_t hook_entry[NF_BR_NUMHOOKS];
1557 	/* nr of counters userspace expects back */
1558 	compat_uint_t num_counters;
1559 	/* where the kernel will put the old counters. */
1560 	compat_uptr_t counters;
1561 	compat_uptr_t entries;
1562 };
1563 
1564 /* struct ebt_entry_match, _target and _watcher have same layout */
1565 struct compat_ebt_entry_mwt {
1566 	union {
1567 		char name[EBT_FUNCTION_MAXNAMELEN];
1568 		compat_uptr_t ptr;
1569 	} u;
1570 	compat_uint_t match_size;
1571 	compat_uint_t data[0];
1572 };
1573 
1574 /* account for possible padding between match_size and ->data */
1575 static int ebt_compat_entry_padsize(void)
1576 {
1577 	BUILD_BUG_ON(XT_ALIGN(sizeof(struct ebt_entry_match)) <
1578 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)));
1579 	return (int) XT_ALIGN(sizeof(struct ebt_entry_match)) -
1580 			COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt));
1581 }
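/* Example (editor's note, assuming a 64-bit kernel with 32-bit userspace
 * and EBT_FUNCTION_MAXNAMELEN == 32): XT_ALIGN(sizeof(struct
 * ebt_entry_match)) is 40 because the union holds an 8-byte kernel
 * pointer, while COMPAT_XT_ALIGN(sizeof(struct compat_ebt_entry_mwt)) is
 * 36, so ebt_compat_entry_padsize() returns 4 -- the padding that has to
 * be accounted for between ->match_size and ->data when translating.
 */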
1582 
1583 static int ebt_compat_match_offset(const struct xt_match *match,
1584 				   unsigned int userlen)
1585 {
1586 	/*
1587 	 * ebt_among needs special handling. The kernel .matchsize is
1588 	 * set to -1 at registration time; at runtime an EBT_ALIGN()ed
1589 	 * value is expected.
1590 	 * Example: userspace sends 4500, ebt_among.c wants 4504.
1591 	 */
1592 	if (unlikely(match->matchsize == -1))
1593 		return XT_ALIGN(userlen) - COMPAT_XT_ALIGN(userlen);
1594 	return xt_compat_match_offset(match);
1595 }
1596 
1597 static int compat_match_to_user(struct ebt_entry_match *m, void __user **dstptr,
1598 				unsigned int *size)
1599 {
1600 	const struct xt_match *match = m->u.match;
1601 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1602 	int off = ebt_compat_match_offset(match, m->match_size);
1603 	compat_uint_t msize = m->match_size - off;
1604 
1605 	BUG_ON(off >= m->match_size);
1606 
1607 	if (copy_to_user(cm->u.name, match->name,
1608 	    strlen(match->name) + 1) || put_user(msize, &cm->match_size))
1609 		return -EFAULT;
1610 
1611 	if (match->compat_to_user) {
1612 		if (match->compat_to_user(cm->data, m->data))
1613 			return -EFAULT;
1614 	} else if (copy_to_user(cm->data, m->data, msize))
1615 			return -EFAULT;
1616 
1617 	*size -= ebt_compat_entry_padsize() + off;
1618 	*dstptr = cm->data;
1619 	*dstptr += msize;
1620 	return 0;
1621 }
1622 
1623 static int compat_target_to_user(struct ebt_entry_target *t,
1624 				 void __user **dstptr,
1625 				 unsigned int *size)
1626 {
1627 	const struct xt_target *target = t->u.target;
1628 	struct compat_ebt_entry_mwt __user *cm = *dstptr;
1629 	int off = xt_compat_target_offset(target);
1630 	compat_uint_t tsize = t->target_size - off;
1631 
1632 	BUG_ON(off >= t->target_size);
1633 
1634 	if (copy_to_user(cm->u.name, target->name,
1635 	    strlen(target->name) + 1) || put_user(tsize, &cm->match_size))
1636 		return -EFAULT;
1637 
1638 	if (target->compat_to_user) {
1639 		if (target->compat_to_user(cm->data, t->data))
1640 			return -EFAULT;
1641 	} else if (copy_to_user(cm->data, t->data, tsize))
1642 		return -EFAULT;
1643 
1644 	*size -= ebt_compat_entry_padsize() + off;
1645 	*dstptr = cm->data;
1646 	*dstptr += tsize;
1647 	return 0;
1648 }
1649 
1650 static int compat_watcher_to_user(struct ebt_entry_watcher *w,
1651 				  void __user **dstptr,
1652 				  unsigned int *size)
1653 {
1654 	return compat_target_to_user((struct ebt_entry_target *)w,
1655 							dstptr, size);
1656 }
1657 
1658 static int compat_copy_entry_to_user(struct ebt_entry *e, void __user **dstptr,
1659 				unsigned int *size)
1660 {
1661 	struct ebt_entry_target *t;
1662 	struct ebt_entry __user *ce;
1663 	u32 watchers_offset, target_offset, next_offset;
1664 	compat_uint_t origsize;
1665 	int ret;
1666 
1667 	if (e->bitmask == 0) {
1668 		if (*size < sizeof(struct ebt_entries))
1669 			return -EINVAL;
1670 		if (copy_to_user(*dstptr, e, sizeof(struct ebt_entries)))
1671 			return -EFAULT;
1672 
1673 		*dstptr += sizeof(struct ebt_entries);
1674 		*size -= sizeof(struct ebt_entries);
1675 		return 0;
1676 	}
1677 
1678 	if (*size < sizeof(*ce))
1679 		return -EINVAL;
1680 
1681 	ce = (struct ebt_entry __user *)*dstptr;
1682 	if (copy_to_user(ce, e, sizeof(*ce)))
1683 		return -EFAULT;
1684 
1685 	origsize = *size;
1686 	*dstptr += sizeof(*ce);
1687 
1688 	ret = EBT_MATCH_ITERATE(e, compat_match_to_user, dstptr, size);
1689 	if (ret)
1690 		return ret;
1691 	watchers_offset = e->watchers_offset - (origsize - *size);
1692 
1693 	ret = EBT_WATCHER_ITERATE(e, compat_watcher_to_user, dstptr, size);
1694 	if (ret)
1695 		return ret;
1696 	target_offset = e->target_offset - (origsize - *size);
1697 
1698 	t = (struct ebt_entry_target *) ((char *) e + e->target_offset);
1699 
1700 	ret = compat_target_to_user(t, dstptr, size);
1701 	if (ret)
1702 		return ret;
1703 	next_offset = e->next_offset - (origsize - *size);
1704 
1705 	if (put_user(watchers_offset, &ce->watchers_offset) ||
1706 	    put_user(target_offset, &ce->target_offset) ||
1707 	    put_user(next_offset, &ce->next_offset))
1708 		return -EFAULT;
1709 
1710 	*size -= sizeof(*ce);
1711 	return 0;
1712 }
1713 
1714 static int compat_calc_match(struct ebt_entry_match *m, int *off)
1715 {
1716 	*off += ebt_compat_match_offset(m->u.match, m->match_size);
1717 	*off += ebt_compat_entry_padsize();
1718 	return 0;
1719 }
1720 
1721 static int compat_calc_watcher(struct ebt_entry_watcher *w, int *off)
1722 {
1723 	*off += xt_compat_target_offset(w->u.watcher);
1724 	*off += ebt_compat_entry_padsize();
1725 	return 0;
1726 }
1727 
1728 static int compat_calc_entry(const struct ebt_entry *e,
1729 			     const struct ebt_table_info *info,
1730 			     const void *base,
1731 			     struct compat_ebt_replace *newinfo)
1732 {
1733 	const struct ebt_entry_target *t;
1734 	unsigned int entry_offset;
1735 	int off, ret, i;
1736 
1737 	if (e->bitmask == 0)
1738 		return 0;
1739 
1740 	off = 0;
1741 	entry_offset = (void *)e - base;
1742 
1743 	EBT_MATCH_ITERATE(e, compat_calc_match, &off);
1744 	EBT_WATCHER_ITERATE(e, compat_calc_watcher, &off);
1745 
1746 	t = (const struct ebt_entry_target *) ((char *) e + e->target_offset);
1747 
1748 	off += xt_compat_target_offset(t->u.target);
1749 	off += ebt_compat_entry_padsize();
1750 
1751 	newinfo->entries_size -= off;
1752 
1753 	ret = xt_compat_add_offset(NFPROTO_BRIDGE, entry_offset, off);
1754 	if (ret)
1755 		return ret;
1756 
1757 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
1758 		const void *hookptr = info->hook_entry[i];
1759 		if (info->hook_entry[i] &&
1760 		    (e < (struct ebt_entry *)(base - hookptr))) {
1761 			newinfo->hook_entry[i] -= off;
1762 			pr_debug("0x%08X -> 0x%08X\n",
1763 					newinfo->hook_entry[i] + off,
1764 					newinfo->hook_entry[i]);
1765 		}
1766 	}
1767 
1768 	return 0;
1769 }
1770 
1771 
1772 static int compat_table_info(const struct ebt_table_info *info,
1773 			     struct compat_ebt_replace *newinfo)
1774 {
1775 	unsigned int size = info->entries_size;
1776 	const void *entries = info->entries;
1777 
1778 	newinfo->entries_size = size;
1779 
1780 	xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries);
1781 	return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info,
1782 							entries, newinfo);
1783 }
1784 
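/*
 * Compat counterpart of copy_everything_to_user(): hand back the current
 * ruleset (EBT_SO_GET_ENTRIES) or the initial one, together with the
 * counters and the entries translated to the 32-bit layout.
 */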
1785 static int compat_copy_everything_to_user(struct ebt_table *t,
1786 					  void __user *user, int *len, int cmd)
1787 {
1788 	struct compat_ebt_replace repl, tmp;
1789 	struct ebt_counter *oldcounters;
1790 	struct ebt_table_info tinfo;
1791 	int ret;
1792 	void __user *pos;
1793 
1794 	memset(&tinfo, 0, sizeof(tinfo));
1795 
1796 	if (cmd == EBT_SO_GET_ENTRIES) {
1797 		tinfo.entries_size = t->private->entries_size;
1798 		tinfo.nentries = t->private->nentries;
1799 		tinfo.entries = t->private->entries;
1800 		oldcounters = t->private->counters;
1801 	} else {
1802 		tinfo.entries_size = t->table->entries_size;
1803 		tinfo.nentries = t->table->nentries;
1804 		tinfo.entries = t->table->entries;
1805 		oldcounters = t->table->counters;
1806 	}
1807 
1808 	if (copy_from_user(&tmp, user, sizeof(tmp)))
1809 		return -EFAULT;
1810 
1811 	if (tmp.nentries != tinfo.nentries ||
1812 	   (tmp.num_counters && tmp.num_counters != tinfo.nentries))
1813 		return -EINVAL;
1814 
1815 	memcpy(&repl, &tmp, sizeof(repl));
1816 	if (cmd == EBT_SO_GET_ENTRIES)
1817 		ret = compat_table_info(t->private, &repl);
1818 	else
1819 		ret = compat_table_info(&tinfo, &repl);
1820 	if (ret)
1821 		return ret;
1822 
1823 	if (*len != sizeof(tmp) + repl.entries_size +
1824 	   (tmp.num_counters? tinfo.nentries * sizeof(struct ebt_counter): 0)) {
1825 		pr_err("wrong size: *len %d, entries_size %u, replsz %d\n",
1826 				*len, tinfo.entries_size, repl.entries_size);
1827 		return -EINVAL;
1828 	}
1829 
1830 	/* userspace might not need the counters */
1831 	ret = copy_counters_to_user(t, oldcounters, compat_ptr(tmp.counters),
1832 					tmp.num_counters, tinfo.nentries);
1833 	if (ret)
1834 		return ret;
1835 
1836 	pos = compat_ptr(tmp.entries);
1837 	return EBT_ENTRY_ITERATE(tinfo.entries, tinfo.entries_size,
1838 			compat_copy_entry_to_user, &pos, &tmp.entries_size);
1839 }
1840 
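/*
 * State for the two-pass 32->64 bit translation of the entry blob: the
 * first pass runs with buf_kern_start == NULL and only computes sizes,
 * the second pass copies the translated data into the kernel buffer.
 */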
1841 struct ebt_entries_buf_state {
1842 	char *buf_kern_start;	/* kernel buffer to copy (translated) data to */
1843 	u32 buf_kern_len;	/* total size of kernel buffer */
1844 	u32 buf_kern_offset;	/* amount of data copied so far */
1845 	u32 buf_user_offset;	/* read position in userspace buffer */
1846 };
1847 
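/* advance the kernel-side offset, failing if the addition wrapped around */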
1848 static int ebt_buf_count(struct ebt_entries_buf_state *state, unsigned int sz)
1849 {
1850 	state->buf_kern_offset += sz;
1851 	return state->buf_kern_offset >= sz ? 0 : -EINVAL;
1852 }
1853 
1854 static int ebt_buf_add(struct ebt_entries_buf_state *state,
1855 		       void *data, unsigned int sz)
1856 {
1857 	if (state->buf_kern_start == NULL)
1858 		goto count_only;
1859 
1860 	BUG_ON(state->buf_kern_offset + sz > state->buf_kern_len);
1861 
1862 	memcpy(state->buf_kern_start + state->buf_kern_offset, data, sz);
1863 
1864  count_only:
1865 	state->buf_user_offset += sz;
1866 	return ebt_buf_count(state, sz);
1867 }
1868 
1869 static int ebt_buf_add_pad(struct ebt_entries_buf_state *state, unsigned int sz)
1870 {
1871 	char *b = state->buf_kern_start;
1872 
1873 	BUG_ON(b && state->buf_kern_offset > state->buf_kern_len);
1874 
1875 	if (b != NULL && sz > 0)
1876 		memset(b + state->buf_kern_offset, 0, sz);
1877 	/* do not adjust ->buf_user_offset here, we added kernel-side padding */
1878 	return ebt_buf_count(state, sz);
1879 }
1880 
1881 enum compat_mwt {
1882 	EBT_COMPAT_MATCH,
1883 	EBT_COMPAT_WATCHER,
1884 	EBT_COMPAT_TARGET,
1885 };
1886 
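/*
 * Translate (or, when dst is NULL, merely size) a single compat match,
 * watcher or target.  Returns the number of compat data bytes consumed plus
 * the extra room its kernel representation needs, or a negative errno.
 */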
1887 static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
1888 				enum compat_mwt compat_mwt,
1889 				struct ebt_entries_buf_state *state,
1890 				const unsigned char *base)
1891 {
1892 	char name[EBT_FUNCTION_MAXNAMELEN];
1893 	struct xt_match *match;
1894 	struct xt_target *wt;
1895 	void *dst = NULL;
1896 	int off, pad = 0;
1897 	unsigned int size_kern, match_size = mwt->match_size;
1898 
1899 	strlcpy(name, mwt->u.name, sizeof(name));
1900 
1901 	if (state->buf_kern_start)
1902 		dst = state->buf_kern_start + state->buf_kern_offset;
1903 
1904 	switch (compat_mwt) {
1905 	case EBT_COMPAT_MATCH:
1906 		match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
1907 		if (IS_ERR(match))
1908 			return PTR_ERR(match);
1909 
1910 		off = ebt_compat_match_offset(match, match_size);
1911 		if (dst) {
1912 			if (match->compat_from_user)
1913 				match->compat_from_user(dst, mwt->data);
1914 			else
1915 				memcpy(dst, mwt->data, match_size);
1916 		}
1917 
1918 		size_kern = match->matchsize;
1919 		if (unlikely(size_kern == -1))
1920 			size_kern = match_size;
1921 		module_put(match->me);
1922 		break;
1923 	case EBT_COMPAT_WATCHER: /* fallthrough */
1924 	case EBT_COMPAT_TARGET:
1925 		wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
1926 		if (IS_ERR(wt))
1927 			return PTR_ERR(wt);
1928 		off = xt_compat_target_offset(wt);
1929 
1930 		if (dst) {
1931 			if (wt->compat_from_user)
1932 				wt->compat_from_user(dst, mwt->data);
1933 			else
1934 				memcpy(dst, mwt->data, match_size);
1935 		}
1936 
1937 		size_kern = wt->targetsize;
1938 		module_put(wt->me);
1939 		break;
1940 
1941 	default:
1942 		return -EINVAL;
1943 	}
1944 
1945 	state->buf_kern_offset += match_size + off;
1946 	state->buf_user_offset += match_size;
1947 	pad = XT_ALIGN(size_kern) - size_kern;
1948 
1949 	if (pad > 0 && dst) {
1950 		BUG_ON(state->buf_kern_len <= pad);
1951 		BUG_ON(state->buf_kern_offset - (match_size + off) + size_kern > state->buf_kern_len - pad);
1952 		memset(dst + size_kern, 0, pad);
1953 	}
1954 	return off + match_size;
1955 }
1956 
1957 /*
1958  * Return the extra room (growth) the kernel representation of one entry's
1959  * matches, watchers or target needs, including alignment padding.
1960  */
1961 static int ebt_size_mwt(struct compat_ebt_entry_mwt *match32,
1962 			unsigned int size_left, enum compat_mwt type,
1963 			struct ebt_entries_buf_state *state, const void *base)
1964 {
1965 	int growth = 0;
1966 	char *buf;
1967 
1968 	if (size_left == 0)
1969 		return 0;
1970 
1971 	buf = (char *) match32;
1972 
1973 	while (size_left >= sizeof(*match32)) {
1974 		struct ebt_entry_match *match_kern;
1975 		int ret;
1976 
1977 		match_kern = (struct ebt_entry_match *) state->buf_kern_start;
1978 		if (match_kern) {
1979 			char *tmp;
1980 			tmp = state->buf_kern_start + state->buf_kern_offset;
1981 			match_kern = (struct ebt_entry_match *) tmp;
1982 		}
1983 		ret = ebt_buf_add(state, buf, sizeof(*match32));
1984 		if (ret < 0)
1985 			return ret;
1986 		size_left -= sizeof(*match32);
1987 
1988 		/* add padding before match->data (if any) */
1989 		ret = ebt_buf_add_pad(state, ebt_compat_entry_padsize());
1990 		if (ret < 0)
1991 			return ret;
1992 
1993 		if (match32->match_size > size_left)
1994 			return -EINVAL;
1995 
1996 		size_left -= match32->match_size;
1997 
1998 		ret = compat_mtw_from_user(match32, type, state, base);
1999 		if (ret < 0)
2000 			return ret;
2001 
2002 		BUG_ON(ret < match32->match_size);
2003 		growth += ret - match32->match_size;
2004 		growth += ebt_compat_entry_padsize();
2005 
2006 		buf += sizeof(*match32);
2007 		buf += match32->match_size;
2008 
2009 		if (match_kern)
2010 			match_kern->match_size = ret;
2011 
2012 		WARN_ON(type == EBT_COMPAT_TARGET && size_left);
2013 		match32 = (struct compat_ebt_entry_mwt *) buf;
2014 	}
2015 
2016 	return growth;
2017 }
2018 
2019 /* called for all ebt_entry structures: size (1st pass) or translate (2nd pass) one entry */
2020 static int size_entry_mwt(struct ebt_entry *entry, const unsigned char *base,
2021 			  unsigned int *total,
2022 			  struct ebt_entries_buf_state *state)
2023 {
2024 	unsigned int i, j, startoff, new_offset = 0;
2025 	/* stores match/watchers/targets & offset of next struct ebt_entry: */
2026 	unsigned int offsets[4];
2027 	unsigned int *offsets_update = NULL;
2028 	int ret;
2029 	char *buf_start;
2030 
2031 	if (*total < sizeof(struct ebt_entries))
2032 		return -EINVAL;
2033 
2034 	if (!entry->bitmask) {
2035 		*total -= sizeof(struct ebt_entries);
2036 		return ebt_buf_add(state, entry, sizeof(struct ebt_entries));
2037 	}
2038 	if (*total < sizeof(*entry) || entry->next_offset < sizeof(*entry))
2039 		return -EINVAL;
2040 
2041 	startoff = state->buf_user_offset;
2042 	/* pull in the leading, unchanged part of the ebt_entry (up to watchers_offset). */
2043 	ret = ebt_buf_add(state, entry,
2044 			offsetof(struct ebt_entry, watchers_offset));
2045 	if (ret < 0)
2046 		return ret;
2047 
2048 	offsets[0] = sizeof(struct ebt_entry); /* matches come first */
2049 	memcpy(&offsets[1], &entry->watchers_offset,
2050 			sizeof(offsets) - sizeof(offsets[0]));
2051 
2052 	if (state->buf_kern_start) {
2053 		buf_start = state->buf_kern_start + state->buf_kern_offset;
2054 		offsets_update = (unsigned int *) buf_start;
2055 	}
2056 	ret = ebt_buf_add(state, &offsets[1],
2057 			sizeof(offsets) - sizeof(offsets[0]));
2058 	if (ret < 0)
2059 		return ret;
2060 	buf_start = (char *) entry;
2061 	/*
2062 	 * 0: matches offset, always follows ebt_entry.
2063 	 * 1: watchers offset, from ebt_entry structure
2064 	 * 2: target offset, from ebt_entry structure
2065 	 * 3: next ebt_entry offset, from ebt_entry structure
2066 	 *
2067 	 * offsets are relative to beginning of struct ebt_entry (i.e., 0).
2068 	 */
2069 	for (i = 0, j = 1 ; j < 4 ; j++, i++) {
2070 		struct compat_ebt_entry_mwt *match32;
2071 		unsigned int size;
2072 		char *buf = buf_start;
2073 
2074 		buf = buf_start + offsets[i];
2075 		if (offsets[i] > offsets[j])
2076 			return -EINVAL;
2077 
2078 		match32 = (struct compat_ebt_entry_mwt *) buf;
2079 		size = offsets[j] - offsets[i];
2080 		ret = ebt_size_mwt(match32, size, i, state, base);
2081 		if (ret < 0)
2082 			return ret;
2083 		new_offset += ret;
2084 		if (offsets_update && new_offset) {
2085 			pr_debug("change offset %d to %d\n",
2086 				offsets_update[i], offsets[j] + new_offset);
2087 			offsets_update[i] = offsets[j] + new_offset;
2088 		}
2089 	}
2090 
2091 	if (state->buf_kern_start == NULL) {
2092 		unsigned int offset = buf_start - (char *) base;
2093 
2094 		ret = xt_compat_add_offset(NFPROTO_BRIDGE, offset, new_offset);
2095 		if (ret < 0)
2096 			return ret;
2097 	}
2098 
2099 	startoff = state->buf_user_offset - startoff;
2100 
2101 	BUG_ON(*total < startoff);
2102 	*total -= startoff;
2103 	return 0;
2104 }
2105 
2106 /*
2107  * repl->entries_size is the size of the ebt_entry blob in userspace.
2108  * It might need more memory when copied to a 64 bit kernel in case
2109  * userspace is 32-bit. So, first task: find out how much memory is needed.
2110  *
2111  * Called before validation is performed.
2112  */
2113 static int compat_copy_entries(unsigned char *data, unsigned int size_user,
2114 				struct ebt_entries_buf_state *state)
2115 {
2116 	unsigned int size_remaining = size_user;
2117 	int ret;
2118 
2119 	ret = EBT_ENTRY_ITERATE(data, size_user, size_entry_mwt, data,
2120 					&size_remaining, state);
2121 	if (ret < 0)
2122 		return ret;
2123 
2124 	WARN_ON(size_remaining);
2125 	return state->buf_kern_offset;
2126 }
2127 
2128 
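/*
 * Convert a 32-bit ebt_replace header into the native one and sanity-check
 * the advertised sizes; pointer members are converted via compat_ptr().
 */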
2129 static int compat_copy_ebt_replace_from_user(struct ebt_replace *repl,
2130 					    void __user *user, unsigned int len)
2131 {
2132 	struct compat_ebt_replace tmp;
2133 	int i;
2134 
2135 	if (len < sizeof(tmp))
2136 		return -EINVAL;
2137 
2138 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2139 		return -EFAULT;
2140 
2141 	if (len != sizeof(tmp) + tmp.entries_size)
2142 		return -EINVAL;
2143 
2144 	if (tmp.entries_size == 0)
2145 		return -EINVAL;
2146 
2147 	if (tmp.nentries >= ((INT_MAX - sizeof(struct ebt_table_info)) /
2148 			NR_CPUS - SMP_CACHE_BYTES) / sizeof(struct ebt_counter))
2149 		return -ENOMEM;
2150 	if (tmp.num_counters >= INT_MAX / sizeof(struct ebt_counter))
2151 		return -ENOMEM;
2152 
2153 	memcpy(repl, &tmp, offsetof(struct ebt_replace, hook_entry));
2154 
2155 	/* starting with hook_entry, 32 vs. 64 bit structures are different */
2156 	for (i = 0; i < NF_BR_NUMHOOKS; i++)
2157 		repl->hook_entry[i] = compat_ptr(tmp.hook_entry[i]);
2158 
2159 	repl->num_counters = tmp.num_counters;
2160 	repl->counters = compat_ptr(tmp.counters);
2161 	repl->entries = compat_ptr(tmp.entries);
2162 	return 0;
2163 }
2164 
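/*
 * Compat version of do_replace(): translate the replace header, size the
 * 64-bit image of the entries in a first pass, copy and translate them in
 * a second pass, convert the hook entry pointers, then let
 * do_replace_finish() do the real work.
 */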
2165 static int compat_do_replace(struct net *net, void __user *user,
2166 			     unsigned int len)
2167 {
2168 	int ret, i, countersize, size64;
2169 	struct ebt_table_info *newinfo;
2170 	struct ebt_replace tmp;
2171 	struct ebt_entries_buf_state state;
2172 	void *entries_tmp;
2173 
2174 	ret = compat_copy_ebt_replace_from_user(&tmp, user, len);
2175 	if (ret) {
2176 		/* try real handler in case userland supplied needed padding */
2177 		if (ret == -EINVAL && do_replace(net, user, len) == 0)
2178 			ret = 0;
2179 		return ret;
2180 	}
2181 
2182 	countersize = COUNTER_OFFSET(tmp.nentries) * nr_cpu_ids;
2183 	newinfo = vmalloc(sizeof(*newinfo) + countersize);
2184 	if (!newinfo)
2185 		return -ENOMEM;
2186 
2187 	if (countersize)
2188 		memset(newinfo->counters, 0, countersize);
2189 
2190 	memset(&state, 0, sizeof(state));
2191 
2192 	newinfo->entries = vmalloc(tmp.entries_size);
2193 	if (!newinfo->entries) {
2194 		ret = -ENOMEM;
2195 		goto free_newinfo;
2196 	}
2197 	if (copy_from_user(
2198 	   newinfo->entries, tmp.entries, tmp.entries_size) != 0) {
2199 		ret = -EFAULT;
2200 		goto free_entries;
2201 	}
2202 
2203 	entries_tmp = newinfo->entries;
2204 
2205 	xt_compat_lock(NFPROTO_BRIDGE);
2206 
2207 	xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
2208 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2209 	if (ret < 0)
2210 		goto out_unlock;
2211 
2212 	pr_debug("tmp.entries_size %d, kern off %d, user off %d delta %d\n",
2213 		tmp.entries_size, state.buf_kern_offset, state.buf_user_offset,
2214 		xt_compat_calc_jump(NFPROTO_BRIDGE, tmp.entries_size));
2215 
2216 	size64 = ret;
2217 	newinfo->entries = vmalloc(size64);
2218 	if (!newinfo->entries) {
2219 		vfree(entries_tmp);
2220 		ret = -ENOMEM;
2221 		goto out_unlock;
2222 	}
2223 
2224 	memset(&state, 0, sizeof(state));
2225 	state.buf_kern_start = newinfo->entries;
2226 	state.buf_kern_len = size64;
2227 
2228 	ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
2229 	BUG_ON(ret < 0);	/* parses same data again */
2230 
2231 	vfree(entries_tmp);
2232 	tmp.entries_size = size64;
2233 
2234 	for (i = 0; i < NF_BR_NUMHOOKS; i++) {
2235 		char __user *usrptr;
2236 		if (tmp.hook_entry[i]) {
2237 			unsigned int delta;
2238 			usrptr = (char __user *) tmp.hook_entry[i];
2239 			delta = usrptr - tmp.entries;
2240 			usrptr += xt_compat_calc_jump(NFPROTO_BRIDGE, delta);
2241 			tmp.hook_entry[i] = (struct ebt_entries __user *)usrptr;
2242 		}
2243 	}
2244 
2245 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2246 	xt_compat_unlock(NFPROTO_BRIDGE);
2247 
2248 	ret = do_replace_finish(net, &tmp, newinfo);
2249 	if (ret == 0)
2250 		return ret;
2251 free_entries:
2252 	vfree(newinfo->entries);
2253 free_newinfo:
2254 	vfree(newinfo);
2255 	return ret;
2256 out_unlock:
2257 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2258 	xt_compat_unlock(NFPROTO_BRIDGE);
2259 	goto free_entries;
2260 }
2261 
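/*
 * Compat counterpart of update_counters(); falls back to the native handler
 * when the buffer length does not match the 32-bit layout.
 */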
2262 static int compat_update_counters(struct net *net, void __user *user,
2263 				  unsigned int len)
2264 {
2265 	struct compat_ebt_replace hlp;
2266 
2267 	if (copy_from_user(&hlp, user, sizeof(hlp)))
2268 		return -EFAULT;
2269 
2270 	/* try real handler in case userland supplied needed padding */
2271 	if (len != sizeof(hlp) + hlp.num_counters * sizeof(struct ebt_counter))
2272 		return update_counters(net, user, len);
2273 
2274 	return do_update_counters(net, hlp.name, compat_ptr(hlp.counters),
2275 					hlp.num_counters, user, len);
2276 }
2277 
2278 static int compat_do_ebt_set_ctl(struct sock *sk,
2279 		int cmd, void __user *user, unsigned int len)
2280 {
2281 	int ret;
2282 
2283 	if (!capable(CAP_NET_ADMIN))
2284 		return -EPERM;
2285 
2286 	switch (cmd) {
2287 	case EBT_SO_SET_ENTRIES:
2288 		ret = compat_do_replace(sock_net(sk), user, len);
2289 		break;
2290 	case EBT_SO_SET_COUNTERS:
2291 		ret = compat_update_counters(sock_net(sk), user, len);
2292 		break;
2293 	default:
2294 		ret = -EINVAL;
2295 	}
2296 	return ret;
2297 }
2298 
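/*
 * Compat getsockopt() handler: answers INFO/INIT_INFO requests with sizes
 * recomputed for the 32-bit layout and hands ENTRIES/INIT_ENTRIES requests
 * to compat_copy_everything_to_user() when the native path does not fit.
 */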
2299 static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
2300 		void __user *user, int *len)
2301 {
2302 	int ret;
2303 	struct compat_ebt_replace tmp;
2304 	struct ebt_table *t;
2305 
2306 	if (!capable(CAP_NET_ADMIN))
2307 		return -EPERM;
2308 
2309 	/* try real handler in case userland supplied needed padding */
2310 	if ((cmd == EBT_SO_GET_INFO ||
2311 	     cmd == EBT_SO_GET_INIT_INFO) && *len != sizeof(tmp))
2312 			return do_ebt_get_ctl(sk, cmd, user, len);
2313 
2314 	if (copy_from_user(&tmp, user, sizeof(tmp)))
2315 		return -EFAULT;
2316 
2317 	t = find_table_lock(sock_net(sk), tmp.name, &ret, &ebt_mutex);
2318 	if (!t)
2319 		return ret;
2320 
2321 	xt_compat_lock(NFPROTO_BRIDGE);
2322 	switch (cmd) {
2323 	case EBT_SO_GET_INFO:
2324 		tmp.nentries = t->private->nentries;
2325 		ret = compat_table_info(t->private, &tmp);
2326 		if (ret)
2327 			goto out;
2328 		tmp.valid_hooks = t->valid_hooks;
2329 
2330 		if (copy_to_user(user, &tmp, *len) != 0) {
2331 			ret = -EFAULT;
2332 			break;
2333 		}
2334 		ret = 0;
2335 		break;
2336 	case EBT_SO_GET_INIT_INFO:
2337 		tmp.nentries = t->table->nentries;
2338 		tmp.entries_size = t->table->entries_size;
2339 		tmp.valid_hooks = t->table->valid_hooks;
2340 
2341 		if (copy_to_user(user, &tmp, *len) != 0) {
2342 			ret = -EFAULT;
2343 			break;
2344 		}
2345 		ret = 0;
2346 		break;
2347 	case EBT_SO_GET_ENTRIES:
2348 	case EBT_SO_GET_INIT_ENTRIES:
2349 		/*
2350 		 * try real handler first in case of userland-side padding.
2351 		 * in case we are dealing with an 'ordinary' 32 bit binary
2352 		 * without 64bit compatibility padding, this will fail right
2353 		 * after copy_from_user when the *len argument is validated.
2354 		 *
2355 		 * the compat_ variant needs to do one pass over the kernel
2356 		 * data set to adjust for size differences before the check.
2357 		 */
2358 		if (copy_everything_to_user(t, user, len, cmd) == 0)
2359 			ret = 0;
2360 		else
2361 			ret = compat_copy_everything_to_user(t, user, len, cmd);
2362 		break;
2363 	default:
2364 		ret = -EINVAL;
2365 	}
2366  out:
2367 	xt_compat_flush_offsets(NFPROTO_BRIDGE);
2368 	xt_compat_unlock(NFPROTO_BRIDGE);
2369 	mutex_unlock(&ebt_mutex);
2370 	return ret;
2371 }
2372 #endif
2373 
2374 static struct nf_sockopt_ops ebt_sockopts =
2375 {
2376 	.pf		= PF_INET,
2377 	.set_optmin	= EBT_BASE_CTL,
2378 	.set_optmax	= EBT_SO_SET_MAX + 1,
2379 	.set		= do_ebt_set_ctl,
2380 #ifdef CONFIG_COMPAT
2381 	.compat_set	= compat_do_ebt_set_ctl,
2382 #endif
2383 	.get_optmin	= EBT_BASE_CTL,
2384 	.get_optmax	= EBT_SO_GET_MAX + 1,
2385 	.get		= do_ebt_get_ctl,
2386 #ifdef CONFIG_COMPAT
2387 	.compat_get	= compat_do_ebt_get_ctl,
2388 #endif
2389 	.owner		= THIS_MODULE,
2390 };
2391 
2392 static int __init ebtables_init(void)
2393 {
2394 	int ret;
2395 
2396 	ret = xt_register_target(&ebt_standard_target);
2397 	if (ret < 0)
2398 		return ret;
2399 	ret = nf_register_sockopt(&ebt_sockopts);
2400 	if (ret < 0) {
2401 		xt_unregister_target(&ebt_standard_target);
2402 		return ret;
2403 	}
2404 
2405 	printk(KERN_INFO "Ebtables v2.0 registered\n");
2406 	return 0;
2407 }
2408 
2409 static void __exit ebtables_fini(void)
2410 {
2411 	nf_unregister_sockopt(&ebt_sockopts);
2412 	xt_unregister_target(&ebt_standard_target);
2413 	printk(KERN_INFO "Ebtables v2.0 unregistered\n");
2414 }
2415 
2416 EXPORT_SYMBOL(ebt_register_table);
2417 EXPORT_SYMBOL(ebt_unregister_table);
2418 EXPORT_SYMBOL(ebt_do_table);
2419 module_init(ebtables_init);
2420 module_exit(ebtables_fini);
2421 MODULE_LICENSE("GPL");
2422