xref: /src/sys/netpfil/pf/pf_if.c (revision 349fcf079ca32d5c93e45366d2b27638747affeb)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2003 Cedric Berger
6  * Copyright (c) 2005 Henning Brauer <henning@openbsd.org>
7  * Copyright (c) 2005 Ryan McBride <mcbride@openbsd.org>
8  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
9  * All rights reserved.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  *
15  *    - Redistributions of source code must retain the above copyright
16  *      notice, this list of conditions and the following disclaimer.
17  *    - Redistributions in binary form must reproduce the above
18  *      copyright notice, this list of conditions and the following
19  *      disclaimer in the documentation and/or other materials provided
20  *      with the distribution.
21  *
22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
24  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
25  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
26  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
27  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
28  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
30  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
32  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
33  * POSSIBILITY OF SUCH DAMAGE.
34  *
35  *	$OpenBSD: pf_if.c,v 1.54 2008/06/14 16:55:28 mk Exp $
36  */
37 
38 #include <sys/cdefs.h>
39 #include "opt_inet.h"
40 #include "opt_inet6.h"
41 
42 #include <sys/param.h>
43 #include <sys/kernel.h>
44 #include <sys/eventhandler.h>
45 #include <sys/lock.h>
46 #include <sys/mbuf.h>
47 #include <sys/socket.h>
48 
49 #include <net/if.h>
50 #include <net/if_var.h>
51 #include <net/if_private.h>
52 #include <net/vnet.h>
53 #include <net/pfvar.h>
54 #include <net/route.h>
55 
/* Wildcard "any" interface kif; matches every interface. */
VNET_DEFINE(struct pfi_kkif *,	 pfi_all);
/* Generation counter bumped on every topology change; used to lazily
 * refresh dynamic-address tables in pfi_dynaddr_update(). */
VNET_DEFINE_STATIC(long, pfi_update);
#define	V_pfi_update	VNET(pfi_update)
#define PFI_BUFFER_MAX	0x10000

VNET_DECLARE(int, pf_vnet_active);
#define V_pf_vnet_active	VNET(pf_vnet_active)

/* Scratch buffer used to collect addresses while rebuilding a dynamic
 * table; grows geometrically up to PFI_BUFFER_MAX entries. */
VNET_DEFINE_STATIC(struct pfr_addr *, pfi_buffer);
VNET_DEFINE_STATIC(int, pfi_buffer_cnt);
VNET_DEFINE_STATIC(int,	pfi_buffer_max);
#define	V_pfi_buffer		 VNET(pfi_buffer)
#define	V_pfi_buffer_cnt	 VNET(pfi_buffer_cnt)
#define	V_pfi_buffer_max	 VNET(pfi_buffer_max)

#ifdef PF_WANT_32_TO_64_COUNTER
/* Bookkeeping of all kifs for the 32->64-bit counter rollover helper. */
VNET_DEFINE(struct allkiflist_head, pf_allkiflist);
VNET_DEFINE(size_t, pf_allkifcount);
VNET_DEFINE(struct pfi_kkif *, pf_kifmarker);
#endif

/* Cookies for the ifnet/ifgroup eventhandlers registered in pfi_initialize(). */
static eventhandler_tag	 pfi_attach_cookie;
static eventhandler_tag	 pfi_detach_cookie;
static eventhandler_tag  pfi_rename_cookie;
static eventhandler_tag	 pfi_attach_group_cookie;
static eventhandler_tag	 pfi_change_group_cookie;
static eventhandler_tag	 pfi_detach_group_cookie;
static eventhandler_tag	 pfi_ifaddr_event_cookie;

static void	 pfi_attach_ifnet(struct ifnet *, struct pfi_kkif *);
static void	 pfi_attach_ifgroup(struct ifg_group *, struct pfi_kkif *);

static void	 pfi_kkif_update(struct pfi_kkif *);
static void	 pfi_dynaddr_update(struct pfi_dynaddr *dyn);
static void	 pfi_table_update(struct pfr_ktable *, struct pfi_kkif *, uint8_t,
		    int);
static void	 pfi_instance_add(struct ifnet *, uint8_t, int);
static void	 pfi_address_add(struct sockaddr *, sa_family_t, uint8_t);
static int	 pfi_kkif_compare(struct pfi_kkif *, struct pfi_kkif *);
static int	 pfi_skip_if(const char *, struct pfi_kkif *);
static int	 pfi_unmask(void *);
static void	 pfi_attach_ifnet_event(void * __unused, struct ifnet *);
static void	 pfi_detach_ifnet_event(void * __unused, struct ifnet *);
static void	 pfi_rename_ifnet_event(void * __unused, struct ifnet *);
static void	 pfi_attach_group_event(void * __unused, struct ifg_group *);
static void	 pfi_change_group_event(void * __unused, char *);
static void	 pfi_detach_group_event(void * __unused, struct ifg_group *);
static void	 pfi_ifaddr_event(void * __unused, struct ifnet *);
104 
105 RB_HEAD(pfi_ifhead, pfi_kkif);
106 static RB_PROTOTYPE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
107 static RB_GENERATE(pfi_ifhead, pfi_kkif, pfik_tree, pfi_kkif_compare);
108 VNET_DEFINE_STATIC(struct pfi_ifhead, pfi_ifs);
109 #define	V_pfi_ifs	VNET(pfi_ifs)
110 
111 #define	PFI_BUFFER_MAX		0x10000
112 MALLOC_DEFINE(PFI_MTYPE, "pf_ifnet", "pf(4) interface database");
113 
114 LIST_HEAD(pfi_list, pfi_kkif);
115 VNET_DEFINE_STATIC(struct pfi_list, pfi_unlinked_kifs);
116 #define	V_pfi_unlinked_kifs	VNET(pfi_unlinked_kifs)
117 static struct mtx pfi_unlnkdkifs_mtx;
118 MTX_SYSINIT(pfi_unlnkdkifs_mtx, &pfi_unlnkdkifs_mtx, "pf unlinked interfaces",
119     MTX_DEF);
120 
/*
 * Per-vnet initialization: allocate the address scratch buffer and create
 * kifs for V_pfi_all, every existing interface group, and every existing
 * interface.  All allocations are done with M_WAITOK *before* the rules
 * write lock is taken, so no sleeping happens under the lock.
 */
void
pfi_initialize_vnet(void)
{
	struct pfi_list kifs = LIST_HEAD_INITIALIZER();
	struct epoch_tracker et;
	struct pfi_kkif *kif;
	struct ifg_group *ifg;
	struct ifnet *ifp;
	int nkifs;

	V_pfi_buffer_max = 64;
	V_pfi_buffer = malloc(V_pfi_buffer_max * sizeof(*V_pfi_buffer),
	    PFI_MTYPE, M_WAITOK);

	nkifs = 1;	/* one for V_pfi_all */
	/* Count groups and interfaces under IFNET_RLOCK so the totals
	 * cannot change before we attach below. */
	IFNET_RLOCK();
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next)
		nkifs++;
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link)
		nkifs++;

	/* Preallocate exactly nkifs kifs onto a local free list. */
	for (int n = 0; n < nkifs; n++) {
		kif = pf_kkif_create(M_WAITOK);
		LIST_INSERT_HEAD(&kifs, kif, pfik_list);
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	/* Pop one preallocated kif per consumer, starting with "all". */
	kif = LIST_FIRST(&kifs);
	LIST_REMOVE(kif, pfik_list);
	V_pfi_all = pfi_kkif_attach(kif, IFG_ALL);
	CK_STAILQ_FOREACH(ifg, &V_ifg_head, ifg_next) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifgroup(ifg, kif);
	}
	CK_STAILQ_FOREACH(ifp, &V_ifnet, if_link) {
		kif = LIST_FIRST(&kifs);
		LIST_REMOVE(kif, pfik_list);
		pfi_attach_ifnet(ifp, kif);
	}
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
	IFNET_RUNLOCK();

	/* Every preallocated kif must have been consumed. */
	MPASS(LIST_EMPTY(&kifs));
}
168 
/*
 * Global (non-vnet) initialization: hook into interface and interface-group
 * lifecycle events so pf's view stays in sync with the network stack.
 * Mirrored by pfi_cleanup().
 */
void
pfi_initialize(void)
{

	pfi_attach_cookie = EVENTHANDLER_REGISTER(ifnet_arrival_event,
	    pfi_attach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_cookie = EVENTHANDLER_REGISTER(ifnet_departure_event,
	    pfi_detach_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_rename_cookie = EVENTHANDLER_REGISTER(ifnet_rename_event,
	    pfi_rename_ifnet_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_attach_group_cookie = EVENTHANDLER_REGISTER(group_attach_event,
	    pfi_attach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_change_group_cookie = EVENTHANDLER_REGISTER(group_change_event,
	    pfi_change_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_detach_group_cookie = EVENTHANDLER_REGISTER(group_detach_event,
	    pfi_detach_group_event, NULL, EVENTHANDLER_PRI_ANY);
	pfi_ifaddr_event_cookie = EVENTHANDLER_REGISTER(ifaddr_event,
	    pfi_ifaddr_event, NULL, EVENTHANDLER_PRI_ANY);
}
188 
189 void
pfi_cleanup_vnet(void)190 pfi_cleanup_vnet(void)
191 {
192 	struct pfi_kkif *kif;
193 
194 	PF_RULES_WASSERT();
195 
196 	V_pfi_all = NULL;
197 	while ((kif = RB_MIN(pfi_ifhead, &V_pfi_ifs))) {
198 		RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);
199 		if (kif->pfik_group)
200 			kif->pfik_group->ifg_pf_kif = NULL;
201 		if (kif->pfik_ifp) {
202 			if_rele(kif->pfik_ifp);
203 			kif->pfik_ifp->if_pf_kif = NULL;
204 		}
205 		pf_kkif_free(kif);
206 	}
207 
208 	mtx_lock(&pfi_unlnkdkifs_mtx);
209 	while ((kif = LIST_FIRST(&V_pfi_unlinked_kifs))) {
210 		LIST_REMOVE(kif, pfik_list);
211 		pf_kkif_free(kif);
212 	}
213 	mtx_unlock(&pfi_unlnkdkifs_mtx);
214 
215 	free(V_pfi_buffer, PFI_MTYPE);
216 }
217 
218 void
pfi_cleanup(void)219 pfi_cleanup(void)
220 {
221 
222 	EVENTHANDLER_DEREGISTER(ifnet_arrival_event, pfi_attach_cookie);
223 	EVENTHANDLER_DEREGISTER(ifnet_departure_event, pfi_detach_cookie);
224 	EVENTHANDLER_DEREGISTER(group_attach_event, pfi_attach_group_cookie);
225 	EVENTHANDLER_DEREGISTER(group_change_event, pfi_change_group_cookie);
226 	EVENTHANDLER_DEREGISTER(group_detach_event, pfi_detach_group_cookie);
227 	EVENTHANDLER_DEREGISTER(ifaddr_event, pfi_ifaddr_event_cookie);
228 }
229 
230 struct pfi_kkif*
pf_kkif_create(int flags)231 pf_kkif_create(int flags)
232 {
233 	struct pfi_kkif *kif;
234 #ifdef PF_WANT_32_TO_64_COUNTER
235 	bool wowned;
236 #endif
237 
238 	kif = malloc(sizeof(*kif), PFI_MTYPE, flags | M_ZERO);
239 	if (! kif)
240 		return (kif);
241 
242 	for (int i = 0; i < 2; i++) {
243 		for (int j = 0; j < 2; j++) {
244 			for (int k = 0; k < 2; k++) {
245 				if (pf_counter_u64_init(&kif->pfik_packets[i][j][k], flags) != 0) {
246 					pf_kkif_free(kif);
247 					return (NULL);
248 				}
249 
250 				if (pf_counter_u64_init(&kif->pfik_bytes[i][j][k], flags) != 0) {
251 					pf_kkif_free(kif);
252 					return (NULL);
253 				}
254 			}
255 		}
256 	}
257 
258 #ifdef PF_WANT_32_TO_64_COUNTER
259 	wowned = PF_RULES_WOWNED();
260 	if (!wowned)
261 		PF_RULES_WLOCK();
262 	LIST_INSERT_HEAD(&V_pf_allkiflist, kif, pfik_allkiflist);
263 	V_pf_allkifcount++;
264 	if (!wowned)
265 		PF_RULES_WUNLOCK();
266 #endif
267 
268 	return (kif);
269 }
270 
271 void
pf_kkif_free(struct pfi_kkif * kif)272 pf_kkif_free(struct pfi_kkif *kif)
273 {
274 #ifdef PF_WANT_32_TO_64_COUNTER
275 	bool wowned;
276 #endif
277 
278 	if (! kif)
279 		return;
280 
281 #ifdef INVARIANTS
282 	if (kif->pfik_ifp) {
283 		struct ifnet *ifp = kif->pfik_ifp;
284 		MPASS(ifp->if_pf_kif == NULL || ifp->if_pf_kif == kif);
285 	}
286 #endif
287 
288 #ifdef PF_WANT_32_TO_64_COUNTER
289 	wowned = PF_RULES_WOWNED();
290 	if (!wowned)
291 		PF_RULES_WLOCK();
292 	LIST_REMOVE(kif, pfik_allkiflist);
293 	V_pf_allkifcount--;
294 	if (!wowned)
295 		PF_RULES_WUNLOCK();
296 #endif
297 
298 	for (int i = 0; i < 2; i++) {
299 		for (int j = 0; j < 2; j++) {
300 			for (int k = 0; k < 2; k++) {
301 				pf_counter_u64_deinit(&kif->pfik_packets[i][j][k]);
302 				pf_counter_u64_deinit(&kif->pfik_bytes[i][j][k]);
303 			}
304 		}
305 	}
306 
307 	free(kif, PFI_MTYPE);
308 }
309 
310 void
pf_kkif_zero(struct pfi_kkif * kif)311 pf_kkif_zero(struct pfi_kkif *kif)
312 {
313 
314 	for (int i = 0; i < 2; i++) {
315 		for (int j = 0; j < 2; j++) {
316 			for (int k = 0; k < 2; k++) {
317 				pf_counter_u64_zero(&kif->pfik_packets[i][j][k]);
318 				pf_counter_u64_zero(&kif->pfik_bytes[i][j][k]);
319 			}
320 		}
321 	}
322 	kif->pfik_tzero = time_second;
323 }
324 
325 struct pfi_kkif *
pfi_kkif_find(const char * kif_name)326 pfi_kkif_find(const char *kif_name)
327 {
328 	struct pfi_kif_cmp s;
329 
330 	PF_RULES_ASSERT();
331 
332 	memset(&s, 0, sizeof(s));
333 	strlcpy(s.pfik_name, kif_name, sizeof(s.pfik_name));
334 
335 	return (RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&s));
336 }
337 
338 struct pfi_kkif *
pfi_kkif_attach(struct pfi_kkif * kif,const char * kif_name)339 pfi_kkif_attach(struct pfi_kkif *kif, const char *kif_name)
340 {
341 	struct pfi_kkif *kif1;
342 
343 	PF_RULES_WASSERT();
344 	KASSERT(kif != NULL, ("%s: null kif", __func__));
345 
346 	kif1 = pfi_kkif_find(kif_name);
347 	if (kif1 != NULL) {
348 		pf_kkif_free(kif);
349 		return (kif1);
350 	}
351 
352 	pf_kkif_zero(kif);
353 	strlcpy(kif->pfik_name, kif_name, sizeof(kif->pfik_name));
354 	/*
355 	 * It seems that the value of time_second is in unintialzied state
356 	 * when pf sets interface statistics clear time in boot phase if pf
357 	 * was statically linked to kernel. Instead of setting the bogus
358 	 * time value have pfi_get_ifaces handle this case. In
359 	 * pfi_get_ifaces it uses time_second if it sees the time is 0.
360 	 */
361 	kif->pfik_tzero = time_second > 1 ? time_second : 0;
362 	TAILQ_INIT(&kif->pfik_dynaddrs);
363 
364 	if (!strcmp(kif->pfik_name, "any")) {
365 		/* both so it works in the ioctl and the regular case */
366 		kif->pfik_flags |= PFI_IFLAG_ANY;
367 	}
368 
369 	RB_INSERT(pfi_ifhead, &V_pfi_ifs, kif);
370 
371 	return (kif);
372 }
373 
/*
 * Take a rule reference on a kif.  Caller must hold the rules write lock.
 */
void
pfi_kkif_ref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	kif->pfik_rulerefs++;
}
381 
/*
 * Move a kif that has become fully unreferenced (no rules, no ifnet, no
 * group, no flags) from the interface tree onto the unlinked list, where
 * pfi_kkif_purge() will eventually free it.  No-op while anything still
 * holds on to it.  Caller must hold the rules write lock.
 */
static void
pfi_kkif_remove_if_unref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	if (kif->pfik_rulerefs > 0)
		return;

	/* kif referencing an existing ifnet or group or holding flags should
	 * exist. */
	if (kif->pfik_ifp != NULL || kif->pfik_group != NULL ||
	    kif == V_pfi_all || kif->pfik_flags != 0)
		return;

	/*
	 * We can get here in at least two distinct paths:
	 * - when the struct ifnet is removed, via pfi_detach_ifnet_event()
	 * - when a rule referencing us is removed, via pfi_kkif_unref().
	 * These two events can race against each other, leading us to free this kif
	 * twice. That leads to a loop in V_pfi_unlinked_kifs, and an eventual
	 * deadlock.
	 *
	 * Avoid this by making sure we only ever insert the kif into
	 * V_pfi_unlinked_kifs once.
	 * If we don't find it in V_pfi_ifs it's already been removed. Check that it
	 * exists in V_pfi_unlinked_kifs.
	 */
	if (! RB_FIND(pfi_ifhead, &V_pfi_ifs, kif)) {
#ifdef INVARIANTS
		/* Debug build: verify the already-removed kif really is queued. */
		struct pfi_kkif *tmp;
		bool found = false;
		mtx_lock(&pfi_unlnkdkifs_mtx);
		LIST_FOREACH(tmp, &V_pfi_unlinked_kifs, pfik_list) {
			if (tmp == kif) {
				found = true;
				break;
			}
		}
		mtx_unlock(&pfi_unlnkdkifs_mtx);
		MPASS(found);
#endif
		return;
	}
	RB_REMOVE(pfi_ifhead, &V_pfi_ifs, kif);

	/* Mark it so pfi_kkif_purge() gives states one GC round to let go. */
	kif->pfik_flags |= PFI_IFLAG_REFS;

	mtx_lock(&pfi_unlnkdkifs_mtx);
	LIST_INSERT_HEAD(&V_pfi_unlinked_kifs, kif, pfik_list);
	mtx_unlock(&pfi_unlnkdkifs_mtx);
}
434 
/*
 * Drop a rule reference; if this was the last thing keeping the kif
 * alive, queue it for garbage collection.  Caller must hold the rules
 * write lock.
 */
void
pfi_kkif_unref(struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();
	KASSERT(kif->pfik_rulerefs > 0, ("%s: %p has zero refs", __func__, kif));

	kif->pfik_rulerefs--;

	pfi_kkif_remove_if_unref(kif);
}
446 
447 void
pfi_kkif_purge(void)448 pfi_kkif_purge(void)
449 {
450 	struct pfi_kkif *kif, *kif1;
451 
452 	/*
453 	 * Do naive mark-and-sweep garbage collecting of old kifs.
454 	 * Reference flag is raised by pf_purge_expired_states().
455 	 */
456 	mtx_lock(&pfi_unlnkdkifs_mtx);
457 	LIST_FOREACH_SAFE(kif, &V_pfi_unlinked_kifs, pfik_list, kif1) {
458 		if (!(kif->pfik_flags & PFI_IFLAG_REFS)) {
459 			LIST_REMOVE(kif, pfik_list);
460 			pf_kkif_free(kif);
461 		} else
462 			kif->pfik_flags &= ~PFI_IFLAG_REFS;
463 	}
464 	mtx_unlock(&pfi_unlnkdkifs_mtx);
465 }
466 
467 int
pfi_kkif_match(struct pfi_kkif * rule_kif,struct pfi_kkif * packet_kif)468 pfi_kkif_match(struct pfi_kkif *rule_kif, struct pfi_kkif *packet_kif)
469 {
470 	struct ifg_list	*p;
471 
472 	NET_EPOCH_ASSERT();
473 
474 	MPASS(packet_kif != NULL);
475 	MPASS(packet_kif->pfik_ifp != NULL);
476 
477 	if (rule_kif == NULL || rule_kif == packet_kif)
478 		return (1);
479 
480 	if (rule_kif->pfik_group != NULL) {
481 		CK_STAILQ_FOREACH(p, &packet_kif->pfik_ifp->if_groups, ifgl_next)
482 			if (p->ifgl_group == rule_kif->pfik_group)
483 				return (1);
484 	}
485 
486 	if (rule_kif->pfik_flags & PFI_IFLAG_ANY && packet_kif->pfik_ifp &&
487 	    !(packet_kif->pfik_ifp->if_flags & IFF_LOOPBACK))
488 			return (1);
489 
490 	return (0);
491 }
492 
/*
 * Bind a preallocated kif to an arriving ifnet: attach it into the tree
 * under the interface name, cross-link kif and ifnet (holding an ifnet
 * reference), and refresh any dynamic addresses that depend on it.
 * Caller must hold the rules write lock.
 */
static void
pfi_attach_ifnet(struct ifnet *ifp, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	/* Topology changed; invalidate cached dynamic tables. */
	V_pfi_update++;
	kif = pfi_kkif_attach(kif, ifp->if_xname);
	if_ref(ifp);
	kif->pfik_ifp = ifp;
	ifp->if_pf_kif = kif;
	pfi_kkif_update(kif);
}
506 
/*
 * Bind a preallocated kif to an interface group, cross-linking the two.
 * Caller must hold the rules write lock.
 */
static void
pfi_attach_ifgroup(struct ifg_group *ifg, struct pfi_kkif *kif)
{

	PF_RULES_WASSERT();

	/* Topology changed; invalidate cached dynamic tables. */
	V_pfi_update++;
	kif = pfi_kkif_attach(kif, ifg->ifg_group);
	kif->pfik_group = ifg;
	ifg->ifg_pf_kif = kif;
}
518 
519 int
pfi_match_addr(struct pfi_dynaddr * dyn,struct pf_addr * a,sa_family_t af)520 pfi_match_addr(struct pfi_dynaddr *dyn, struct pf_addr *a, sa_family_t af)
521 {
522 	switch (af) {
523 #ifdef INET
524 	case AF_INET:
525 		switch (dyn->pfid_acnt4) {
526 		case 0:
527 			return (0);
528 		case 1:
529 			return (pf_match_addr(0, &dyn->pfid_addr4,
530 			    &dyn->pfid_mask4, a, AF_INET));
531 		default:
532 			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET));
533 		}
534 		break;
535 #endif /* INET */
536 #ifdef INET6
537 	case AF_INET6:
538 		switch (dyn->pfid_acnt6) {
539 		case 0:
540 			return (0);
541 		case 1:
542 			return (pf_match_addr(0, &dyn->pfid_addr6,
543 			    &dyn->pfid_mask6, a, AF_INET6));
544 		default:
545 			return (pfr_match_addr(dyn->pfid_kt, a, AF_INET6));
546 		}
547 		break;
548 #endif /* INET6 */
549 	default:
550 		return (0);
551 	}
552 }
553 
/*
 * Set up a dynamic address ('(ifname)' operand): create/ref the kif,
 * attach a hidden table under the reserved anchor whose name encodes the
 * interface plus modifiers (e.g. "em0:network/24"), and populate it.
 * Returns 0 or ENOMEM; on failure all partial state is unwound.
 * Caller must hold the rules write lock.
 */
int
pfi_dynaddr_setup(struct pf_addr_wrap *aw, sa_family_t af)
{
	struct epoch_tracker	 et;
	struct pfi_dynaddr	*dyn;
	char			 tblname[PF_TABLE_NAME_SIZE];
	struct pf_kruleset	*ruleset = NULL;
	struct pfi_kkif		*kif;
	int			 rv = 0;

	PF_RULES_WASSERT();
	KASSERT(aw->type == PF_ADDR_DYNIFTL, ("%s: type %u",
	    __func__, aw->type));
	KASSERT(aw->p.dyn == NULL, ("%s: dyn is %p", __func__, aw->p.dyn));

	if ((dyn = malloc(sizeof(*dyn), PFI_MTYPE, M_NOWAIT | M_ZERO)) == NULL)
		return (ENOMEM);

	if ((kif = pf_kkif_create(M_NOWAIT)) == NULL) {
		free(dyn, PFI_MTYPE);
		return (ENOMEM);
	}

	/* "self" is an alias for the all-interfaces group. */
	if (!strcmp(aw->v.ifname, "self"))
		dyn->pfid_kif = pfi_kkif_attach(kif, IFG_ALL);
	else
		dyn->pfid_kif = pfi_kkif_attach(kif, aw->v.ifname);
	kif = NULL;	/* ownership passed to pfi_kkif_attach() */
	pfi_kkif_ref(dyn->pfid_kif);

	/* Normalize an IPv4 host mask (/32) to the internal "full" value 128. */
	dyn->pfid_net = pfi_unmask(&aw->v.a.mask);
	if (af == AF_INET && dyn->pfid_net == 32)
		dyn->pfid_net = 128;
	/* Compose the hidden table name from interface name and modifiers. */
	strlcpy(tblname, aw->v.ifname, sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NETWORK)
		strlcat(tblname, ":network", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_BROADCAST)
		strlcat(tblname, ":broadcast", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_PEER)
		strlcat(tblname, ":peer", sizeof(tblname));
	if (aw->iflags & PFI_AFLAG_NOALIAS)
		strlcat(tblname, ":0", sizeof(tblname));
	if (dyn->pfid_net != 128)
		snprintf(tblname + strlen(tblname),
		    sizeof(tblname) - strlen(tblname), "/%d", dyn->pfid_net);
	if ((ruleset = pf_find_or_create_kruleset(PF_RESERVED_ANCHOR)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	if ((dyn->pfid_kt = pfr_attach_table(ruleset, tblname)) == NULL) {
		rv = ENOMEM;
		goto _bad;
	}

	dyn->pfid_kt->pfrkt_flags |= PFR_TFLAG_ACTIVE;
	dyn->pfid_iflags = aw->iflags;
	dyn->pfid_af = af;

	TAILQ_INSERT_TAIL(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
	aw->p.dyn = dyn;
	/* Initial population of the table with current addresses. */
	NET_EPOCH_ENTER(et);
	pfi_kkif_update(dyn->pfid_kif);
	NET_EPOCH_EXIT(et);

	return (0);

_bad:
	/* Unwind in reverse order of construction. */
	if (dyn->pfid_kt != NULL)
		pfr_detach_table(dyn->pfid_kt);
	if (ruleset != NULL)
		pf_remove_if_empty_kruleset(ruleset);
	pfi_kkif_unref(dyn->pfid_kif);
	free(dyn, PFI_MTYPE);

	return (rv);
}
631 
/*
 * Propagate a topology change rooted at 'kif': refresh all dynamic
 * addresses attached to it, copy group flags to member interfaces, and
 * recurse into the groups this interface belongs to so their dynamic
 * tables also get rebuilt.  Requires epoch and the rules write lock.
 */
static void
pfi_kkif_update(struct pfi_kkif *kif)
{
	struct ifg_list		*ifgl;
	struct ifg_member	*ifgm;
	struct pfi_dynaddr	*p;
	struct pfi_kkif		*tmpkif;

	NET_EPOCH_ASSERT();
	PF_RULES_WASSERT();

	/* update all dynaddr */
	TAILQ_FOREACH(p, &kif->pfik_dynaddrs, entry)
		pfi_dynaddr_update(p);

	/* Apply group flags to new members. */
	if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members,
		    ifgm_next) {
			tmpkif = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;
			if (tmpkif == NULL)
				continue;

			tmpkif->pfik_flags |= kif->pfik_flags;
		}
	}

	/* again for all groups kif is member of */
	if (kif->pfik_ifp != NULL) {
		CK_STAILQ_FOREACH(ifgl, &kif->pfik_ifp->if_groups, ifgl_next)
			if (ifgl->ifgl_group->ifg_pf_kif) {
				/* Recursion terminates: groups are not group members. */
				pfi_kkif_update((struct pfi_kkif *)
				    ifgl->ifgl_group->ifg_pf_kif);
			}
	}
}
668 
/*
 * Bring one dynamic address up to date.  The backing table is rebuilt
 * only if its generation stamp lags behind V_pfi_update, so multiple
 * dynaddrs sharing a table rebuild it just once per topology change.
 */
static void
pfi_dynaddr_update(struct pfi_dynaddr *dyn)
{
	struct pfi_kkif		*kif;
	struct pfr_ktable	*kt;

	PF_RULES_WASSERT();
	KASSERT(dyn && dyn->pfid_kif && dyn->pfid_kt,
	    ("%s: bad argument", __func__));

	kif = dyn->pfid_kif;
	kt = dyn->pfid_kt;

	if (kt->pfrkt_larg != V_pfi_update) {
		/* this table needs to be brought up-to-date */
		pfi_table_update(kt, kif, dyn->pfid_net, dyn->pfid_iflags);
		kt->pfrkt_larg = V_pfi_update;
	}
	/* Refresh the dynaddr's cached single-address fast path. */
	pfr_dynaddr_update(kt, dyn);
}
689 
/*
 * Rebuild a dynamic-address table: collect the current addresses of the
 * interface (or of all members of the group) into V_pfi_buffer, then
 * replace the table contents atomically via pfr_set_addrs().
 */
static void
pfi_table_update(struct pfr_ktable *kt, struct pfi_kkif *kif, uint8_t net,
    int flags)
{
	int			 e, size2 = 0;
	struct ifg_member	*ifgm;

	NET_EPOCH_ASSERT();

	/* Restart the shared scratch buffer for this rebuild. */
	V_pfi_buffer_cnt = 0;

	if (kif->pfik_ifp != NULL)
		pfi_instance_add(kif->pfik_ifp, net, flags);
	else if (kif->pfik_group != NULL) {
		CK_STAILQ_FOREACH(ifgm, &kif->pfik_group->ifg_members, ifgm_next)
			pfi_instance_add(ifgm->ifgm_ifp, net, flags);
	}

	if ((e = pfr_set_addrs(&kt->pfrkt_t, V_pfi_buffer, V_pfi_buffer_cnt, &size2,
	    NULL, NULL, NULL, PFR_FLAG_START | PFR_FLAG_DONE, PFR_TFLAG_ALLMASK)))
		printf("%s: cannot set %d new addresses into table %s: %d\n",
		    __func__, V_pfi_buffer_cnt, kt->pfrkt_name, e);
}
713 
/*
 * Walk the addresses of one interface and append those that pass the
 * modifier filters (:network, :broadcast, :peer, :0) to V_pfi_buffer.
 * 'net' is the prefix length requested by the rule; 128 means "use the
 * address's own mask" when :network is given.
 */
static void
pfi_instance_add(struct ifnet *ifp, uint8_t net, int flags)
{
	struct ifaddr	*ia;
	int		 got4 = 0, got6 = 0;
	sa_family_t	 af;
	uint8_t		 net2;

	NET_EPOCH_ASSERT();

	CK_STAILQ_FOREACH(ia, &ifp->if_addrhead, ifa_link) {
		if (ia->ifa_addr == NULL)
			continue;
		af = ia->ifa_addr->sa_family;
		if (af != AF_INET && af != AF_INET6)
			continue;
		/*
		 * XXX: For point-to-point interfaces, (ifname:0) and IPv4,
		 *      jump over addresses without a proper route to work
		 *      around a problem with ppp not fully removing the
		 *      address used during IPCP.
		 */
		if ((ifp->if_flags & IFF_POINTOPOINT) &&
		    !(ia->ifa_flags & IFA_ROUTE) &&
		    (flags & PFI_AFLAG_NOALIAS) && (af == AF_INET))
			continue;
		/* IPv6 has no broadcast addresses. */
		if ((flags & PFI_AFLAG_BROADCAST) && af == AF_INET6)
			continue;
		if ((flags & PFI_AFLAG_BROADCAST) &&
		    !(ifp->if_flags & IFF_BROADCAST))
			continue;
		if ((flags & PFI_AFLAG_PEER) &&
		    !(ifp->if_flags & IFF_POINTOPOINT))
			continue;
		/* Skip IPv6 link-local addresses for :network / :0. */
		if ((flags & (PFI_AFLAG_NETWORK | PFI_AFLAG_NOALIAS)) &&
		    af == AF_INET6 &&
		    IN6_IS_ADDR_LINKLOCAL(
		    &((struct sockaddr_in6 *)ia->ifa_addr)->sin6_addr))
			continue;
		/* :0 means first address per family only, no aliases. */
		if (flags & PFI_AFLAG_NOALIAS) {
			if (af == AF_INET && got4)
				continue;
			if (af == AF_INET6 && got6)
				continue;
		}
		if (af == AF_INET)
			got4 = 1;
		else if (af == AF_INET6)
			got6 = 1;
		net2 = net;
		/* net == 128 with :network: derive prefix from the ifa netmask. */
		if (net2 == 128 && (flags & PFI_AFLAG_NETWORK)) {
			if (af == AF_INET)
				net2 = pfi_unmask(&((struct sockaddr_in *)
				    ia->ifa_netmask)->sin_addr);
			else if (af == AF_INET6)
				net2 = pfi_unmask(&((struct sockaddr_in6 *)
				    ia->ifa_netmask)->sin6_addr);
		}
		/* Clamp IPv4 prefixes to at most /32. */
		if (af == AF_INET && net2 > 32)
			net2 = 32;
		if (flags & PFI_AFLAG_BROADCAST)
			pfi_address_add(ia->ifa_broadaddr, af, net2);
		else if (flags & PFI_AFLAG_PEER)
			pfi_address_add(ia->ifa_dstaddr, af, net2);
		else
			pfi_address_add(ia->ifa_addr, af, net2);
	}
}
782 
/*
 * Append one address to V_pfi_buffer, growing the buffer geometrically
 * (with M_NOWAIT, dropping addresses on allocation failure) up to
 * PFI_BUFFER_MAX entries.  Host bits beyond the prefix are cleared so
 * the table sees canonical network addresses.
 */
static void
pfi_address_add(struct sockaddr *sa, sa_family_t af, uint8_t net)
{
	struct pfr_addr	*p;
	int		 i;

	if (V_pfi_buffer_cnt >= V_pfi_buffer_max) {
		int		 new_max = V_pfi_buffer_max * 2;

		if (new_max > PFI_BUFFER_MAX) {
			printf("%s: address buffer full (%d/%d)\n", __func__,
			    V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		p = malloc(new_max * sizeof(*V_pfi_buffer), PFI_MTYPE,
		    M_NOWAIT);
		if (p == NULL) {
			printf("%s: no memory to grow buffer (%d/%d)\n",
			    __func__, V_pfi_buffer_cnt, PFI_BUFFER_MAX);
			return;
		}
		memcpy(p, V_pfi_buffer, V_pfi_buffer_max * sizeof(*V_pfi_buffer));
		/* no need to zero buffer */
		free(V_pfi_buffer, PFI_MTYPE);
		V_pfi_buffer = p;
		V_pfi_buffer_max = new_max;
	}
	/* 128 is the internal "host address" marker for IPv4 too. */
	if (af == AF_INET && net > 32)
		net = 128;
	p = V_pfi_buffer + V_pfi_buffer_cnt++;
	memset(p, 0, sizeof(*p));
	p->pfra_af = af;
	p->pfra_net = net;
	if (af == AF_INET)
		p->pfra_ip4addr = ((struct sockaddr_in *)sa)->sin_addr;
	else if (af == AF_INET6) {
		p->pfra_ip6addr = ((struct sockaddr_in6 *)sa)->sin6_addr;
		/* Strip the embedded scope id from link-local style addresses. */
		if (IN6_IS_SCOPE_EMBED(&p->pfra_ip6addr))
			p->pfra_ip6addr.s6_addr16[1] = 0;
	}
	/* mask network address bits */
	/* NOTE: byte-indexing from p relies on the address union sitting at
	 * the start of struct pfr_addr — confirm if the struct ever changes. */
	if (net < 128)
		((caddr_t)p)[p->pfra_net/8] &= ~(0xFF >> (p->pfra_net%8));
	for (i = (p->pfra_net+7)/8; i < sizeof(p->pfra_u); i++)
		((caddr_t)p)[i] = 0;
}
829 
830 void
pfi_dynaddr_remove(struct pfi_dynaddr * dyn)831 pfi_dynaddr_remove(struct pfi_dynaddr *dyn)
832 {
833 
834 	KASSERT(dyn->pfid_kif != NULL, ("%s: null pfid_kif", __func__));
835 	KASSERT(dyn->pfid_kt != NULL, ("%s: null pfid_kt", __func__));
836 
837 	TAILQ_REMOVE(&dyn->pfid_kif->pfik_dynaddrs, dyn, entry);
838 	pfi_kkif_unref(dyn->pfid_kif);
839 	pfr_detach_table(dyn->pfid_kt);
840 	free(dyn, PFI_MTYPE);
841 }
842 
843 void
pfi_dynaddr_copyout(struct pf_addr_wrap * aw)844 pfi_dynaddr_copyout(struct pf_addr_wrap *aw)
845 {
846 
847 	KASSERT(aw->type == PF_ADDR_DYNIFTL,
848 	    ("%s: type %u", __func__, aw->type));
849 
850 	if (aw->p.dyn == NULL || aw->p.dyn->pfid_kif == NULL)
851 		return;
852 	aw->p.dyncnt = aw->p.dyn->pfid_acnt4 + aw->p.dyn->pfid_acnt6;
853 }
854 
855 static int
pfi_kkif_compare(struct pfi_kkif * p,struct pfi_kkif * q)856 pfi_kkif_compare(struct pfi_kkif *p, struct pfi_kkif *q)
857 {
858 	return (strncmp(p->pfik_name, q->pfik_name, IFNAMSIZ));
859 }
860 
/*
 * Sum interface statistics for 'name' into 'pfs', or — when pfs is
 * NULL — clear the statistics instead.  If 'name' is a group, every
 * member interface is included.
 */
void
pfi_update_status(const char *name, struct pf_status *pfs)
{
	struct pfi_kkif		*p;
	struct pfi_kif_cmp	 key;
	struct ifg_member	 p_member, *ifgm;
	CK_STAILQ_HEAD(, ifg_member) ifg_members;
	int			 i, j, k;

	if (pfs) {
		memset(pfs->pcounters, 0, sizeof(pfs->pcounters));
		memset(pfs->bcounters, 0, sizeof(pfs->bcounters));
	}

	strlcpy(key.pfik_name, name, sizeof(key.pfik_name));
	p = RB_FIND(pfi_ifhead, &V_pfi_ifs, (struct pfi_kkif *)&key);
	if (p == NULL) {
		return;
	}

	if (p->pfik_group != NULL) {
		/* NOTE(review): copies the list head so the group's member
		 * list can be walked below — assumes epoch keeps members
		 * stable while we iterate. */
		memcpy(&ifg_members, &p->pfik_group->ifg_members,
		    sizeof(ifg_members));
	} else {
		/* build a temporary list for p only */
		memset(&p_member, 0, sizeof(p_member));
		p_member.ifgm_ifp = p->pfik_ifp;
		CK_STAILQ_INIT(&ifg_members);
		CK_STAILQ_INSERT_TAIL(&ifg_members, &p_member, ifgm_next);
	}
	CK_STAILQ_FOREACH(ifgm, &ifg_members, ifgm_next) {
		if (ifgm->ifgm_ifp == NULL || ifgm->ifgm_ifp->if_pf_kif == NULL)
			continue;
		p = (struct pfi_kkif *)ifgm->ifgm_ifp->if_pf_kif;

		/* just clear statistics */
		if (pfs == NULL) {
			pf_kkif_zero(p);
			continue;
		}
		/* Accumulate [direction][operation][family] counters;
		 * byte counters are aggregated over the family index. */
		for (i = 0; i < 2; i++)
			for (j = 0; j < 2; j++)
				for (k = 0; k < 2; k++) {
					pfs->pcounters[i][j][k] +=
					    pf_counter_u64_fetch(&p->pfik_packets[i][j][k]);
					pfs->bcounters[i][j] +=
					    pf_counter_u64_fetch(&p->pfik_bytes[i][j][k]);
				}
	}
}
911 
912 static void
pf_kkif_to_kif(struct pfi_kkif * kkif,struct pfi_kif * kif)913 pf_kkif_to_kif(struct pfi_kkif *kkif, struct pfi_kif *kif)
914 {
915 
916 	memset(kif, 0, sizeof(*kif));
917 	strlcpy(kif->pfik_name, kkif->pfik_name, sizeof(kif->pfik_name));
918 	for (int i = 0; i < 2; i++) {
919 		for (int j = 0; j < 2; j++) {
920 			for (int k = 0; k < 2; k++) {
921 				kif->pfik_packets[i][j][k] =
922 				    pf_counter_u64_fetch(&kkif->pfik_packets[i][j][k]);
923 				kif->pfik_bytes[i][j][k] =
924 				    pf_counter_u64_fetch(&kkif->pfik_bytes[i][j][k]);
925 			}
926 		}
927 	}
928 	kif->pfik_flags = kkif->pfik_flags;
929 	kif->pfik_tzero = kkif->pfik_tzero;
930 	kif->pfik_rulerefs = kkif->pfik_rulerefs;
931 	/*
932 	 * Userspace relies on this pointer to decide if this is a group or
933 	 * not. We don't want to share the actual pointer, because it's
934 	 * useless to userspace and leaks kernel memory layout information.
935 	 * So instead we provide 0xfeedcode as 'true' and NULL as 'false'.
936 	 */
937 	kif->pfik_group =
938 	    kkif->pfik_group ? (struct ifg_group *)0xfeedc0de : NULL;
939 }
940 
941 void
pfi_get_ifaces(const char * name,struct pfi_kif * buf,int * size)942 pfi_get_ifaces(const char *name, struct pfi_kif *buf, int *size)
943 {
944 	struct epoch_tracker et;
945 	struct pfi_kkif	*p, *nextp;
946 	int		 n = 0;
947 
948 	NET_EPOCH_ENTER(et);
949 	for (p = RB_MIN(pfi_ifhead, &V_pfi_ifs); p; p = nextp) {
950 		nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
951 		if (pfi_skip_if(name, p))
952 			continue;
953 		if (*size <= n++)
954 			break;
955 		if (!p->pfik_tzero)
956 			p->pfik_tzero = time_second;
957 		pf_kkif_to_kif(p, buf++);
958 		nextp = RB_NEXT(pfi_ifhead, &V_pfi_ifs, p);
959 	}
960 	*size = n;
961 	NET_EPOCH_EXIT(et);
962 }
963 
964 static int
pfi_skip_if(const char * filter,struct pfi_kkif * p)965 pfi_skip_if(const char *filter, struct pfi_kkif *p)
966 {
967 	struct ifg_list *i;
968 	int	n;
969 
970 	NET_EPOCH_ASSERT();
971 
972 	if (filter == NULL || !*filter)
973 		return (0);
974 	if (!strcmp(p->pfik_name, filter))
975 		return (0);	/* exact match */
976 	n = strlen(filter);
977 	if (n < 1 || n >= IFNAMSIZ)
978 		return (1);	/* sanity check */
979 	if (filter[n-1] >= '0' && filter[n-1] <= '9')
980 		return (1);	/* group names may not end in a digit */
981 	if (p->pfik_ifp == NULL)
982 		return (1);
983 	CK_STAILQ_FOREACH(i, &p->pfik_ifp->if_groups, ifgl_next)
984 		if (!strncmp(i->ifgl_group->ifg_group, filter, IFNAMSIZ))
985 			return (0); /* iface is in group "filter" */
986 	return (1);
987 }
988 
/*
 * Set 'flags' on the kif named 'name' (creating it if needed) and on
 * every kif matching it per pfi_skip_if().  Returns 0 or ENOMEM.
 * NOTE(review): pfi_kkif_attach() asserts the rules write lock, so the
 * caller (pf ioctl path) is presumed to hold it — confirm at call sites.
 */
int
pfi_set_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif	*p, *kif;

	kif = pf_kkif_create(M_NOWAIT);
	if (kif == NULL)
		return (ENOMEM);

	NET_EPOCH_ENTER(et);

	/* Ensure a kif for 'name' exists so the flags persist on it. */
	kif = pfi_kkif_attach(kif, name);

	RB_FOREACH(p, pfi_ifhead, &V_pfi_ifs) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags |= flags;
	}
	NET_EPOCH_EXIT(et);
	return (0);
}
1011 
/*
 * Clear "flags" on every kif matched by "name" (empty name matches all;
 * see pfi_skip_if()).  Kifs left with no ifnet, no group, no flags and
 * no rule references are garbage-collected.  Always returns 0.
 */
int
pfi_clear_flags(const char *name, int flags)
{
	struct epoch_tracker et;
	struct pfi_kkif *p, *tmp;

	NET_EPOCH_ENTER(et);
	/*
	 * NOTE(review): the walk and RB_REMOVE below run under the net
	 * epoch only -- confirm callers serialize against concurrent
	 * tree modification (e.g. hold PF_RULES_WLOCK).
	 */
	/* _SAFE variant: the body may remove the current node. */
	RB_FOREACH_SAFE(p, pfi_ifhead, &V_pfi_ifs, tmp) {
		if (pfi_skip_if(name, p))
			continue;
		p->pfik_flags &= ~flags;

		if (p->pfik_ifp == NULL && p->pfik_group == NULL &&
		    p->pfik_flags == 0 && p->pfik_rulerefs == 0) {
			/* Delete this kif. */
			RB_REMOVE(pfi_ifhead, &V_pfi_ifs, p);
			pf_kkif_free(p);
		}
	}
	NET_EPOCH_EXIT(et);
	return (0);
}
1034 
1035 /* from pf_print_state.c */
1036 static int
pfi_unmask(void * addr)1037 pfi_unmask(void *addr)
1038 {
1039 	struct pf_addr *m = addr;
1040 	int i = 31, j = 0, b = 0;
1041 	u_int32_t tmp;
1042 
1043 	while (j < 4 && m->addr32[j] == 0xffffffff) {
1044 		b += 32;
1045 		j++;
1046 	}
1047 	if (j < 4) {
1048 		tmp = ntohl(m->addr32[j]);
1049 		for (i = 31; tmp & (1 << i); --i)
1050 			b++;
1051 	}
1052 	return (b);
1053 }
1054 
/*
 * ifnet arrival event handler: create a kif for the new interface and
 * hook it up to pf.
 */
static void
pfi_attach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	/*
	 * NOTE(review): M_NOWAIT may yield NULL, which is passed to
	 * pfi_attach_ifnet() unchecked -- confirm that path tolerates
	 * a NULL kif.
	 */
	kif = pf_kkif_create(M_NOWAIT);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	pfi_attach_ifnet(ifp, kif);
#ifdef ALTQ
	/* Second argument 0: interface is arriving, not departing. */
	pf_altq_ifnet_event(ifp, 0);
#endif
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1075 
/*
 * Interface rename event handler: modelled as a detach followed by a
 * re-attach of the same ifnet.
 */
static void
pfi_rename_ifnet_event(void *arg, struct ifnet *ifp)
{
	/* XXXGL: should be handled better */
	pfi_detach_ifnet_event(arg, ifp);
	pfi_attach_ifnet_event(arg, ifp);
}
1083 
/*
 * ifnet departure event handler: break the association between the
 * departing ifnet and its kif, and notify pfsync and ALTQ.
 */
static void
pfi_detach_ifnet_event(void *arg __unused, struct ifnet *ifp)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif = (struct pfi_kkif *)ifp->if_pf_kif;

	/* Let pfsync react to the departure first, if it is loaded. */
	if (pfsync_detach_ifnet_ptr)
		pfsync_detach_ifnet_ptr(ifp);

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	/* Bump the generation counter so cached interface data refreshes. */
	V_pfi_update++;
	pfi_kkif_update(kif);

	/* Drop pf's reference on the ifnet. */
	if (kif->pfik_ifp)
		if_rele(kif->pfik_ifp);

	/* Unlink kif and ifnet from each other. */
	kif->pfik_ifp = NULL;
	ifp->if_pf_kif = NULL;
#ifdef ALTQ
	/* Second argument 1: interface is departing. */
	pf_altq_ifnet_event(ifp, 1);
#endif
	/* Frees the kif unless rules or groups still reference it. */
	pfi_kkif_remove_if_unref(kif);

	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1119 
/*
 * Interface-group creation event handler: create a kif for the new
 * group and attach it.
 */
static void
pfi_attach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	/* Allocate before taking locks; M_WAITOK cannot fail. */
	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	pfi_attach_ifgroup(ifg, kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1137 
/*
 * Interface-group membership change event handler: make sure a kif
 * exists for group "gname" and refresh state derived from it.
 */
static void
pfi_change_group_event(void *arg __unused, char *gname)
{
	struct epoch_tracker et;
	struct pfi_kkif *kif;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}

	/* Allocate before taking locks; M_WAITOK cannot fail. */
	kif = pf_kkif_create(M_WAITOK);
	NET_EPOCH_ENTER(et);
	PF_RULES_WLOCK();
	V_pfi_update++;
	/*
	 * pfi_kkif_attach() returns the kif to use for "gname" --
	 * presumably an existing one if the group is already known.
	 */
	kif = pfi_kkif_attach(kif, gname);
	pfi_kkif_update(kif);
	PF_RULES_WUNLOCK();
	NET_EPOCH_EXIT(et);
}
1158 
/*
 * Interface-group removal event handler: detach the group's kif and
 * free it if nothing else references it.
 */
static void
pfi_detach_group_event(void *arg __unused, struct ifg_group *ifg)
{
	struct pfi_kkif *kif = (struct pfi_kkif *)ifg->ifg_pf_kif;

	if (kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	/* Bump the generation counter so cached interface data refreshes. */
	V_pfi_update++;

	/* Unlink group and kif from each other. */
	kif->pfik_group = NULL;
	ifg->ifg_pf_kif = NULL;

	/* Frees the kif unless rules or an ifnet still reference it. */
	pfi_kkif_remove_if_unref(kif);
	PF_RULES_WUNLOCK();
}
1180 
/*
 * Interface address change event handler: refresh any state pf derives
 * from the interface's addresses.
 */
static void
pfi_ifaddr_event(void *arg __unused, struct ifnet *ifp)
{

	KASSERT(ifp, ("ifp == NULL"));

	/* Cheap unlocked pre-check; re-tested below under the lock. */
	if (ifp->if_pf_kif == NULL)
		return;

	if (V_pf_vnet_active == 0) {
		/* Avoid teardown race in the least expensive way. */
		return;
	}
	PF_RULES_WLOCK();
	/* Re-check: the kif may have been detached while unlocked. */
	if (ifp->if_pf_kif) {
		struct epoch_tracker et;

		/* Bump the generation counter and refresh the kif. */
		V_pfi_update++;
		NET_EPOCH_ENTER(et);
		pfi_kkif_update(ifp->if_pf_kif);
		NET_EPOCH_EXIT(et);
	}
	PF_RULES_WUNLOCK();
}
1205