xref: /src/sys/netpfil/pf/pf_ioctl.c (revision c6bcf6e6fd507d952a48226b51cc161b8ef972a2)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2001 Daniel Hartmeier
5  * Copyright (c) 2002,2003 Henning Brauer
6  * Copyright (c) 2012 Gleb Smirnoff <glebius@FreeBSD.org>
7  * All rights reserved.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  *
13  *    - Redistributions of source code must retain the above copyright
14  *      notice, this list of conditions and the following disclaimer.
15  *    - Redistributions in binary form must reproduce the above
16  *      copyright notice, this list of conditions and the following
17  *      disclaimer in the documentation and/or other materials provided
18  *      with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
26  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
30  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31  * POSSIBILITY OF SUCH DAMAGE.
32  *
33  * Effort sponsored in part by the Defense Advanced Research Projects
34  * Agency (DARPA) and Air Force Research Laboratory, Air Force
35  * Materiel Command, USAF, under agreement number F30602-01-2-0537.
36  *
37  *	$OpenBSD: pf_ioctl.c,v 1.213 2009/02/15 21:46:12 mbalmer Exp $
38  */
39 
40 #include <sys/cdefs.h>
41 #include "opt_inet.h"
42 #include "opt_inet6.h"
43 #include "opt_bpf.h"
44 #include "opt_pf.h"
45 
46 #include <sys/param.h>
47 #include <sys/_bitset.h>
48 #include <sys/bitset.h>
49 #include <sys/bus.h>
50 #include <sys/conf.h>
51 #include <sys/endian.h>
52 #include <sys/fcntl.h>
53 #include <sys/filio.h>
54 #include <sys/hash.h>
55 #include <sys/interrupt.h>
56 #include <sys/jail.h>
57 #include <sys/kernel.h>
58 #include <sys/kthread.h>
59 #include <sys/lock.h>
60 #include <sys/mbuf.h>
61 #include <sys/module.h>
62 #include <sys/nv.h>
63 #include <sys/proc.h>
64 #include <sys/sdt.h>
65 #include <sys/smp.h>
66 #include <sys/socket.h>
67 #include <sys/sysctl.h>
68 #include <sys/md5.h>
69 #include <sys/ucred.h>
70 
71 #include <net/if.h>
72 #include <net/if_var.h>
73 #include <net/if_private.h>
74 #include <net/vnet.h>
75 #include <net/route.h>
76 #include <net/pfil.h>
77 #include <net/pfvar.h>
78 #include <net/if_pfsync.h>
79 #include <net/if_pflog.h>
80 
81 #include <netinet/in.h>
82 #include <netinet/ip.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/ip6_var.h>
85 #include <netinet/ip_icmp.h>
86 #include <netpfil/pf/pf_nl.h>
87 #include <netpfil/pf/pf_nv.h>
88 
89 #ifdef INET6
90 #include <netinet/ip6.h>
91 #endif /* INET6 */
92 
93 #ifdef ALTQ
94 #include <net/altq/altq.h>
95 #endif
96 
97 SDT_PROBE_DEFINE3(pf, ioctl, ioctl, error, "int", "int", "int");
98 SDT_PROBE_DEFINE3(pf, ioctl, function, error, "char *", "int", "int");
99 SDT_PROBE_DEFINE2(pf, ioctl, addrule, error, "int", "int");
100 SDT_PROBE_DEFINE2(pf, ioctl, nvchk, error, "int", "int");
101 
102 static struct pf_kpool	*pf_get_kpool(const char *, u_int32_t, u_int8_t,
103 			    u_int32_t, u_int8_t, u_int8_t, u_int8_t, int);
104 
105 static void		 pf_mv_kpool(struct pf_kpalist *, struct pf_kpalist *);
106 static void		 pf_empty_kpool(struct pf_kpalist *);
107 static int		 pfioctl(struct cdev *, u_long, caddr_t, int,
108 			    struct thread *);
109 static int		 pf_begin_eth(uint32_t *, const char *);
110 static int		 pf_rollback_eth(uint32_t, const char *);
111 static int		 pf_commit_eth(uint32_t, const char *);
112 static void		 pf_free_eth_rule(struct pf_keth_rule *);
113 #ifdef ALTQ
114 static int		 pf_begin_altq(u_int32_t *);
115 static int		 pf_rollback_altq(u_int32_t);
116 static int		 pf_commit_altq(u_int32_t);
117 static int		 pf_enable_altq(struct pf_altq *);
118 static int		 pf_disable_altq(struct pf_altq *);
119 static void		 pf_qid_unref(uint16_t);
120 #endif /* ALTQ */
121 static int		 pf_begin_rules(u_int32_t *, int, const char *);
122 static int		 pf_rollback_rules(u_int32_t, int, char *);
123 static int		 pf_setup_pfsync_matching(struct pf_kruleset *);
124 static void		 pf_hash_rule_rolling(MD5_CTX *, struct pf_krule *);
125 static void		 pf_hash_rule(struct pf_krule *);
126 static void		 pf_hash_rule_addr(MD5_CTX *, struct pf_rule_addr *);
127 static int		 pf_commit_rules(u_int32_t, int, char *);
128 static int		 pf_addr_setup(struct pf_kruleset *,
129 			    struct pf_addr_wrap *, sa_family_t);
130 static void		 pf_src_node_copy(const struct pf_ksrc_node *,
131 			    struct pf_src_node *);
132 #ifdef ALTQ
133 static int		 pf_export_kaltq(struct pf_altq *,
134 			    struct pfioc_altq_v1 *, size_t);
135 static int		 pf_import_kaltq(struct pfioc_altq_v1 *,
136 			    struct pf_altq *, size_t);
137 #endif /* ALTQ */
138 
139 static void		 pf_statelim_commit(void);
140 static void		 pf_statelim_rollback(void);
141 static int		 pf_sourcelim_check(void);
142 static void		 pf_sourcelim_commit(void);
143 static void		 pf_sourcelim_rollback(void);
144 
145 VNET_DEFINE(struct pf_krule,	pf_default_rule);
146 
147 static __inline int             pf_krule_compare(struct pf_krule *,
148 				    struct pf_krule *);
149 
150 RB_GENERATE(pf_krule_global, pf_krule, entry_global, pf_krule_compare);
151 
152 #ifdef ALTQ
153 VNET_DEFINE_STATIC(int,		pf_altq_running);
154 #define	V_pf_altq_running	VNET(pf_altq_running)
155 #endif
156 
157 #define	TAGID_MAX	 50000
158 struct pf_tagname {
159 	TAILQ_ENTRY(pf_tagname)	namehash_entries;
160 	TAILQ_ENTRY(pf_tagname)	taghash_entries;
161 	char			name[PF_TAG_NAME_SIZE];
162 	uint16_t		tag;
163 	int			ref;
164 };
165 
166 struct pf_tagset {
167 	TAILQ_HEAD(, pf_tagname)	*namehash;
168 	TAILQ_HEAD(, pf_tagname)	*taghash;
169 	unsigned int			 mask;
170 	uint32_t			 seed;
171 	BITSET_DEFINE(, TAGID_MAX)	 avail;
172 };
173 
174 VNET_DEFINE(struct pf_tagset, pf_tags);
175 #define	V_pf_tags	VNET(pf_tags)
176 static unsigned int	pf_rule_tag_hashsize;
177 #define	PF_RULE_TAG_HASH_SIZE_DEFAULT	128
178 SYSCTL_UINT(_net_pf, OID_AUTO, rule_tag_hashsize, CTLFLAG_RDTUN,
179     &pf_rule_tag_hashsize, PF_RULE_TAG_HASH_SIZE_DEFAULT,
180     "Size of pf(4) rule tag hashtable");
181 
182 #ifdef ALTQ
183 VNET_DEFINE(struct pf_tagset, pf_qids);
184 #define	V_pf_qids	VNET(pf_qids)
185 static unsigned int	pf_queue_tag_hashsize;
186 #define	PF_QUEUE_TAG_HASH_SIZE_DEFAULT	128
187 SYSCTL_UINT(_net_pf, OID_AUTO, queue_tag_hashsize, CTLFLAG_RDTUN,
188     &pf_queue_tag_hashsize, PF_QUEUE_TAG_HASH_SIZE_DEFAULT,
189     "Size of pf(4) queue tag hashtable");
190 #endif
191 VNET_DEFINE(uma_zone_t,	 pf_tag_z);
192 #define	V_pf_tag_z		 VNET(pf_tag_z)
193 static MALLOC_DEFINE(M_PFALTQ, "pf_altq", "pf(4) altq configuration db");
194 static MALLOC_DEFINE(M_PFRULE, "pf_rule", "pf(4) rules");
195 MALLOC_DEFINE(M_PF, "pf", "pf(4)");
196 MALLOC_DEFINE(M_PF_STATE_LIM, "pf_state_lim", "pf(4) state limiter");
197 
198 #if (PF_QNAME_SIZE != PF_TAG_NAME_SIZE)
199 #error PF_QNAME_SIZE must be equal to PF_TAG_NAME_SIZE
200 #endif
201 
202 VNET_DEFINE_STATIC(bool, pf_filter_local) = false;
203 #define V_pf_filter_local	VNET(pf_filter_local)
204 SYSCTL_BOOL(_net_pf, OID_AUTO, filter_local, CTLFLAG_VNET | CTLFLAG_RW,
205     &VNET_NAME(pf_filter_local), false,
206     "Enable filtering for packets delivered to local network stack");
207 
208 #ifdef PF_DEFAULT_TO_DROP
209 VNET_DEFINE_STATIC(bool, default_to_drop) = true;
210 #else
211 VNET_DEFINE_STATIC(bool, default_to_drop);
212 #endif
213 #define	V_default_to_drop VNET(default_to_drop)
214 SYSCTL_BOOL(_net_pf, OID_AUTO, default_to_drop, CTLFLAG_RDTUN | CTLFLAG_VNET,
215     &VNET_NAME(default_to_drop), false,
216     "Make the default rule drop all packets.");
217 
218 static void		 pf_init_tagset(struct pf_tagset *, unsigned int *,
219 			    unsigned int);
220 static void		 pf_cleanup_tagset(struct pf_tagset *);
221 static uint16_t		 tagname2hashindex(const struct pf_tagset *, const char *);
222 static uint16_t		 tag2hashindex(const struct pf_tagset *, uint16_t);
223 static u_int16_t	 tagname2tag(struct pf_tagset *, const char *, bool);
224 static void		 tag_unref(struct pf_tagset *, u_int16_t);
225 
226 struct cdev *pf_dev;
227 
228 /*
229  * XXX - These are new and need to be checked when moving to a new version
230  */
231 static void		 pf_clear_all_states(void);
232 static int		 pf_killstates_row(struct pf_kstate_kill *,
233 			    struct pf_idhash *);
234 static int		 pf_killstates_nv(struct pfioc_nv *);
235 static int		 pf_clearstates_nv(struct pfioc_nv *);
236 static int		 pf_getstate(struct pfioc_nv *);
237 static int		 pf_getstatus(struct pfioc_nv *);
238 static int		 pf_clear_tables(void);
239 static void		 pf_kill_srcnodes(struct pfioc_src_node_kill *);
240 static int		 pf_keepcounters(struct pfioc_nv *);
241 static void		 pf_tbladdr_copyout(struct pf_addr_wrap *);
242 
243 /*
244  * Wrapper functions for pfil(9) hooks
245  */
246 static pfil_return_t pf_eth_check_in(struct mbuf **m, struct ifnet *ifp,
247     int flags, void *ruleset __unused, struct inpcb *inp);
248 static pfil_return_t pf_eth_check_out(struct mbuf **m, struct ifnet *ifp,
249     int flags, void *ruleset __unused, struct inpcb *inp);
250 #ifdef INET
251 static pfil_return_t pf_check_in(struct mbuf **m, struct ifnet *ifp,
252     int flags, void *ruleset __unused, struct inpcb *inp);
253 static pfil_return_t pf_check_out(struct mbuf **m, struct ifnet *ifp,
254     int flags, void *ruleset __unused, struct inpcb *inp);
255 #endif
256 #ifdef INET6
257 static pfil_return_t pf_check6_in(struct mbuf **m, struct ifnet *ifp,
258     int flags, void *ruleset __unused, struct inpcb *inp);
259 static pfil_return_t pf_check6_out(struct mbuf **m, struct ifnet *ifp,
260     int flags, void *ruleset __unused, struct inpcb *inp);
261 #endif
262 
263 static void		hook_pf_eth(void);
264 static void		hook_pf(void);
265 static void		dehook_pf_eth(void);
266 static void		dehook_pf(void);
267 static int		shutdown_pf(void);
268 static int		pf_load(void);
269 static void		pf_unload(void *);
270 
271 static struct cdevsw pf_cdevsw = {
272 	.d_ioctl =	pfioctl,
273 	.d_name =	PF_NAME,
274 	.d_version =	D_VERSION,
275 };
276 
277 VNET_DEFINE_STATIC(bool, pf_pfil_hooked);
278 #define V_pf_pfil_hooked	VNET(pf_pfil_hooked)
279 VNET_DEFINE_STATIC(bool, pf_pfil_eth_hooked);
280 #define V_pf_pfil_eth_hooked	VNET(pf_pfil_eth_hooked)
281 
282 /*
283  * We need a flag that is neither hooked nor running to know when
284  * the VNET is "valid".  We primarily need this to control (global)
285  * external events, e.g., eventhandlers.
286  */
287 VNET_DEFINE(int, pf_vnet_active);
288 #define V_pf_vnet_active	VNET(pf_vnet_active)
289 
290 int pf_end_threads;
291 struct proc *pf_purge_proc;
292 
293 VNET_DEFINE(struct rmlock, pf_rules_lock);
294 VNET_DEFINE(struct rmlock, pf_tags_lock);
295 VNET_DEFINE_STATIC(struct sx, pf_ioctl_lock);
296 #define	V_pf_ioctl_lock		VNET(pf_ioctl_lock)
297 struct sx			pf_end_lock;
298 
299 /* pfsync */
300 VNET_DEFINE(pfsync_state_import_t *, pfsync_state_import_ptr);
301 VNET_DEFINE(pfsync_insert_state_t *, pfsync_insert_state_ptr);
302 VNET_DEFINE(pfsync_update_state_t *, pfsync_update_state_ptr);
303 VNET_DEFINE(pfsync_delete_state_t *, pfsync_delete_state_ptr);
304 VNET_DEFINE(pfsync_clear_states_t *, pfsync_clear_states_ptr);
305 VNET_DEFINE(pfsync_defer_t *, pfsync_defer_ptr);
306 VNET_DEFINE(pflow_export_state_t *, pflow_export_state_ptr);
307 pfsync_detach_ifnet_t *pfsync_detach_ifnet_ptr;
308 
309 /* pflog */
310 pflog_packet_t			*pflog_packet_ptr = NULL;
311 
312 /*
313  * Copy a user-provided string, returning an error if truncation would occur.
314  * Avoid scanning past "sz" bytes in the source string since there's no
315  * guarantee that it's nul-terminated.
316  */
317 static int
318 pf_user_strcpy(char *dst, const char *src, size_t sz)
319 {
320 	if (strnlen(src, sz) == sz)
321 		return (EINVAL);
322 	(void)strlcpy(dst, src, sz);
323 	return (0);
324 }
325 
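/*
 * Per-VNET attach: set up pf status, limits, the default rule with its
 * counters, the default timeouts, and the software interrupt used for
 * deferred packet transmission.
 */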
326 static void
327 pfattach_vnet(void)
328 {
329 	u_int32_t *my_timeout = V_pf_default_rule.timeout;
330 
331 	bzero(&V_pf_status, sizeof(V_pf_status));
332 
333 	pf_initialize();
334 	pfr_initialize();
335 	pfi_initialize_vnet();
336 	pf_normalize_init();
337 	pf_syncookies_init();
338 
339 	V_pf_limits[PF_LIMIT_STATES].limit = PFSTATE_HIWAT;
340 	V_pf_limits[PF_LIMIT_SRC_NODES].limit = PFSNODE_HIWAT;
341 	V_pf_limits[PF_LIMIT_ANCHORS].limit = PF_ANCHOR_HIWAT;
342 	V_pf_limits[PF_LIMIT_ETH_ANCHORS].limit = PF_ANCHOR_HIWAT;
343 
344 	RB_INIT(&V_pf_anchors);
345 	pf_init_kruleset(&pf_main_ruleset);
346 
347 	pf_init_keth(V_pf_keth);
348 
349 	/* default rule should never be garbage collected */
350 	V_pf_default_rule.entries.tqe_prev = &V_pf_default_rule.entries.tqe_next;
351 	V_pf_default_rule.action = V_default_to_drop ? PF_DROP : PF_PASS;
352 	V_pf_default_rule.nr = (uint32_t)-1;
353 	V_pf_default_rule.rtableid = -1;
354 
355 	pf_counter_u64_init(&V_pf_default_rule.evaluations, M_WAITOK);
356 	for (int i = 0; i < 2; i++) {
357 		pf_counter_u64_init(&V_pf_default_rule.packets[i], M_WAITOK);
358 		pf_counter_u64_init(&V_pf_default_rule.bytes[i], M_WAITOK);
359 	}
360 	V_pf_default_rule.states_cur = counter_u64_alloc(M_WAITOK);
361 	V_pf_default_rule.states_tot = counter_u64_alloc(M_WAITOK);
362 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
363 		V_pf_default_rule.src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
364 
365 	V_pf_default_rule.timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
366 	    M_WAITOK | M_ZERO);
367 
368 #ifdef PF_WANT_32_TO_64_COUNTER
369 	V_pf_kifmarker = malloc(sizeof(*V_pf_kifmarker), PFI_MTYPE, M_WAITOK | M_ZERO);
370 	V_pf_rulemarker = malloc(sizeof(*V_pf_rulemarker), M_PFRULE, M_WAITOK | M_ZERO);
371 	PF_RULES_WLOCK();
372 	LIST_INSERT_HEAD(&V_pf_allkiflist, V_pf_kifmarker, pfik_allkiflist);
373 	LIST_INSERT_HEAD(&V_pf_allrulelist, &V_pf_default_rule, allrulelist);
374 	V_pf_allrulecount++;
375 	LIST_INSERT_HEAD(&V_pf_allrulelist, V_pf_rulemarker, allrulelist);
376 	PF_RULES_WUNLOCK();
377 #endif
378 
379 	/* initialize default timeouts */
380 	my_timeout[PFTM_TCP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
381 	my_timeout[PFTM_TCP_OPENING] = PFTM_TCP_OPENING_VAL;
382 	my_timeout[PFTM_TCP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
383 	my_timeout[PFTM_TCP_CLOSING] = PFTM_TCP_CLOSING_VAL;
384 	my_timeout[PFTM_TCP_FIN_WAIT] = PFTM_TCP_FIN_WAIT_VAL;
385 	my_timeout[PFTM_TCP_CLOSED] = PFTM_TCP_CLOSED_VAL;
386 	my_timeout[PFTM_SCTP_FIRST_PACKET] = PFTM_TCP_FIRST_PACKET_VAL;
387 	my_timeout[PFTM_SCTP_OPENING] = PFTM_TCP_OPENING_VAL;
388 	my_timeout[PFTM_SCTP_ESTABLISHED] = PFTM_TCP_ESTABLISHED_VAL;
389 	my_timeout[PFTM_SCTP_CLOSING] = PFTM_TCP_CLOSING_VAL;
390 	my_timeout[PFTM_SCTP_CLOSED] = PFTM_TCP_CLOSED_VAL;
391 	my_timeout[PFTM_UDP_FIRST_PACKET] = PFTM_UDP_FIRST_PACKET_VAL;
392 	my_timeout[PFTM_UDP_SINGLE] = PFTM_UDP_SINGLE_VAL;
393 	my_timeout[PFTM_UDP_MULTIPLE] = PFTM_UDP_MULTIPLE_VAL;
394 	my_timeout[PFTM_ICMP_FIRST_PACKET] = PFTM_ICMP_FIRST_PACKET_VAL;
395 	my_timeout[PFTM_ICMP_ERROR_REPLY] = PFTM_ICMP_ERROR_REPLY_VAL;
396 	my_timeout[PFTM_OTHER_FIRST_PACKET] = PFTM_OTHER_FIRST_PACKET_VAL;
397 	my_timeout[PFTM_OTHER_SINGLE] = PFTM_OTHER_SINGLE_VAL;
398 	my_timeout[PFTM_OTHER_MULTIPLE] = PFTM_OTHER_MULTIPLE_VAL;
399 	my_timeout[PFTM_FRAG] = PFTM_FRAG_VAL;
400 	my_timeout[PFTM_INTERVAL] = PFTM_INTERVAL_VAL;
401 	my_timeout[PFTM_SRC_NODE] = PFTM_SRC_NODE_VAL;
402 	my_timeout[PFTM_TS_DIFF] = PFTM_TS_DIFF_VAL;
403 	my_timeout[PFTM_ADAPTIVE_START] = PFSTATE_ADAPT_START;
404 	my_timeout[PFTM_ADAPTIVE_END] = PFSTATE_ADAPT_END;
405 
406 	V_pf_status.debug = PF_DEBUG_URGENT;
407 	/*
408 	 * XXX This is different from OpenBSD, where reassembly is enabled by
409 	 * default. In FreeBSD we expect people to still use scrub rules and
410 	 * switch to the new syntax later. Only when they switch must they
411 	 * explicitly enable reassembly. We could change the default once the
412 	 * scrub rule functionality is hopefully removed some day in the future.
413 	 */
414 	V_pf_status.reass = 0;
415 
416 	V_pf_pfil_hooked = false;
417 	V_pf_pfil_eth_hooked = false;
418 
419 	/* XXX do our best to avoid a conflict */
420 	V_pf_status.hostid = arc4random();
421 
422 	for (int i = 0; i < PFRES_MAX; i++)
423 		V_pf_status.counters[i] = counter_u64_alloc(M_WAITOK);
424 	for (int i = 0; i < KLCNT_MAX; i++)
425 		V_pf_status.lcounters[i] = counter_u64_alloc(M_WAITOK);
426 	for (int i = 0; i < FCNT_MAX; i++)
427 		pf_counter_u64_init(&V_pf_status.fcounters[i], M_WAITOK);
428 	for (int i = 0; i < SCNT_MAX; i++)
429 		V_pf_status.scounters[i] = counter_u64_alloc(M_WAITOK);
430 	for (int i = 0; i < NCNT_MAX; i++)
431 		V_pf_status.ncounters[i] = counter_u64_alloc(M_WAITOK);
432 
433 	if (swi_add(&V_pf_swi_ie, "pf send", pf_intr, curvnet, SWI_NET,
434 	    INTR_MPSAFE, &V_pf_swi_cookie) != 0)
435 		/* XXXGL: leaked all above. */
436 		return;
437 }
438 
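/*
 * Find the NAT, redirect, or route pool ("which") of the rule identified by
 * anchor, rule action, and rule number in the active or inactive ruleset,
 * optionally validating the caller-supplied ticket.
 */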
439 static struct pf_kpool *
440 pf_get_kpool(const char *anchor, u_int32_t ticket, u_int8_t rule_action,
441     u_int32_t rule_number, u_int8_t r_last, u_int8_t active,
442     u_int8_t check_ticket, int which)
443 {
444 	struct pf_kruleset	*ruleset;
445 	struct pf_krule		*rule;
446 	int			 rs_num;
447 
448 	MPASS(which == PF_RDR || which == PF_NAT || which == PF_RT);
449 
450 	ruleset = pf_find_kruleset(anchor);
451 	if (ruleset == NULL)
452 		return (NULL);
453 	rs_num = pf_get_ruleset_number(rule_action);
454 	if (rs_num >= PF_RULESET_MAX)
455 		return (NULL);
456 	if (active) {
457 		if (check_ticket && ticket !=
458 		    ruleset->rules[rs_num].active.ticket)
459 			return (NULL);
460 		if (r_last)
461 			rule = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
462 			    pf_krulequeue);
463 		else
464 			rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
465 	} else {
466 		if (check_ticket && ticket !=
467 		    ruleset->rules[rs_num].inactive.ticket)
468 			return (NULL);
469 		if (r_last)
470 			rule = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
471 			    pf_krulequeue);
472 		else
473 			rule = TAILQ_FIRST(ruleset->rules[rs_num].inactive.ptr);
474 	}
475 	if (!r_last) {
476 		while ((rule != NULL) && (rule->nr != rule_number))
477 			rule = TAILQ_NEXT(rule, entries);
478 	}
479 	if (rule == NULL)
480 		return (NULL);
481 
482 	switch (which) {
483 	case PF_RDR:
484 		return (&rule->rdr);
485 	case PF_NAT:
486 		return (&rule->nat);
487 	case PF_RT:
488 		return (&rule->route);
489 	default:
490 		panic("Unknow pool type %d", which);
491 	}
492 }
493 
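/* Move all pool addresses from poola to the tail of poolb, in order. */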
494 static void
495 pf_mv_kpool(struct pf_kpalist *poola, struct pf_kpalist *poolb)
496 {
497 	struct pf_kpooladdr	*mv_pool_pa;
498 
499 	while ((mv_pool_pa = TAILQ_FIRST(poola)) != NULL) {
500 		TAILQ_REMOVE(poola, mv_pool_pa, entries);
501 		TAILQ_INSERT_TAIL(poolb, mv_pool_pa, entries);
502 	}
503 }
504 
505 static void
506 pf_empty_kpool(struct pf_kpalist *poola)
507 {
508 	struct pf_kpooladdr *pa;
509 
510 	while ((pa = TAILQ_FIRST(poola)) != NULL) {
511 		switch (pa->addr.type) {
512 		case PF_ADDR_DYNIFTL:
513 			pfi_dynaddr_remove(pa->addr.p.dyn);
514 			break;
515 		case PF_ADDR_TABLE:
516 			/* XXX: this could be unfinished pooladdr on pabuf */
517 			if (pa->addr.p.tbl != NULL)
518 				pfr_detach_table(pa->addr.p.tbl);
519 			break;
520 		}
521 		if (pa->kif)
522 			pfi_kkif_unref(pa->kif);
523 		TAILQ_REMOVE(poola, pa, entries);
524 		free(pa, M_PFRULE);
525 	}
526 }
527 
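/*
 * Remove a rule from its queue and stage it on V_pf_unlinked_rules for
 * deferred destruction by the purge code.
 */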
528 static void
529 pf_unlink_rule_locked(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
530 {
531 
532 	PF_RULES_WASSERT();
533 	PF_UNLNKDRULES_ASSERT();
534 
535 	TAILQ_REMOVE(rulequeue, rule, entries);
536 
537 	rule->rule_ref |= PFRULE_REFS;
538 	TAILQ_INSERT_TAIL(&V_pf_unlinked_rules, rule, entries);
539 }
540 
541 static void
542 pf_unlink_rule(struct pf_krulequeue *rulequeue, struct pf_krule *rule)
543 {
544 
545 	PF_RULES_WASSERT();
546 
547 	PF_UNLNKDRULES_LOCK();
548 	pf_unlink_rule_locked(rulequeue, rule);
549 	PF_UNLNKDRULES_UNLOCK();
550 }
551 
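/*
 * Release the tag, queue, interface, and table references held by an
 * Ethernet rule and free the rule itself.
 */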
552 static void
553 pf_free_eth_rule(struct pf_keth_rule *rule)
554 {
555 	PF_RULES_WASSERT();
556 
557 	if (rule == NULL)
558 		return;
559 
560 	if (rule->tag)
561 		tag_unref(&V_pf_tags, rule->tag);
562 	if (rule->match_tag)
563 		tag_unref(&V_pf_tags, rule->match_tag);
564 #ifdef ALTQ
565 	pf_qid_unref(rule->qid);
566 #endif
567 
568 	if (rule->bridge_to)
569 		pfi_kkif_unref(rule->bridge_to);
570 	if (rule->kif)
571 		pfi_kkif_unref(rule->kif);
572 
573 	if (rule->ipsrc.addr.type == PF_ADDR_TABLE)
574 		pfr_detach_table(rule->ipsrc.addr.p.tbl);
575 	if (rule->ipdst.addr.type == PF_ADDR_TABLE)
576 		pfr_detach_table(rule->ipdst.addr.p.tbl);
577 
578 	counter_u64_free(rule->evaluations);
579 	for (int i = 0; i < 2; i++) {
580 		counter_u64_free(rule->packets[i]);
581 		counter_u64_free(rule->bytes[i]);
582 	}
583 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
584 	pf_keth_anchor_remove(rule);
585 
586 	free(rule, M_PFRULE);
587 }
588 
589 void
590 pf_free_rule(struct pf_krule *rule)
591 {
592 
593 	PF_RULES_WASSERT();
594 	PF_CONFIG_ASSERT();
595 
596 	if (rule->tag)
597 		tag_unref(&V_pf_tags, rule->tag);
598 	if (rule->match_tag)
599 		tag_unref(&V_pf_tags, rule->match_tag);
600 #ifdef ALTQ
601 	if (rule->pqid != rule->qid)
602 		pf_qid_unref(rule->pqid);
603 	pf_qid_unref(rule->qid);
604 #endif
605 	switch (rule->src.addr.type) {
606 	case PF_ADDR_DYNIFTL:
607 		pfi_dynaddr_remove(rule->src.addr.p.dyn);
608 		break;
609 	case PF_ADDR_TABLE:
610 		pfr_detach_table(rule->src.addr.p.tbl);
611 		break;
612 	}
613 	switch (rule->dst.addr.type) {
614 	case PF_ADDR_DYNIFTL:
615 		pfi_dynaddr_remove(rule->dst.addr.p.dyn);
616 		break;
617 	case PF_ADDR_TABLE:
618 		pfr_detach_table(rule->dst.addr.p.tbl);
619 		break;
620 	}
621 	if (rule->overload_tbl)
622 		pfr_detach_table(rule->overload_tbl);
623 	if (rule->kif)
624 		pfi_kkif_unref(rule->kif);
625 	if (rule->rcv_kif)
626 		pfi_kkif_unref(rule->rcv_kif);
627 	pf_remove_kanchor(rule);
628 	pf_empty_kpool(&rule->rdr.list);
629 	pf_empty_kpool(&rule->nat.list);
630 	pf_empty_kpool(&rule->route.list);
631 
632 	pf_krule_free(rule);
633 }
634 
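/*
 * Set up a tag set: size the name and tag hash tables from the tunable
 * (which must be a power of two, otherwise the default is used), seed the
 * name hash, and mark all tag IDs as available.
 */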
635 static void
636 pf_init_tagset(struct pf_tagset *ts, unsigned int *tunable_size,
637     unsigned int default_size)
638 {
639 	unsigned int i;
640 	unsigned int hashsize;
641 
642 	if (*tunable_size == 0 || !powerof2(*tunable_size))
643 		*tunable_size = default_size;
644 
645 	hashsize = *tunable_size;
646 	ts->namehash = mallocarray(hashsize, sizeof(*ts->namehash), M_PFHASH,
647 	    M_WAITOK);
648 	ts->taghash = mallocarray(hashsize, sizeof(*ts->taghash), M_PFHASH,
649 	    M_WAITOK);
650 	ts->mask = hashsize - 1;
651 	ts->seed = arc4random();
652 	for (i = 0; i < hashsize; i++) {
653 		TAILQ_INIT(&ts->namehash[i]);
654 		TAILQ_INIT(&ts->taghash[i]);
655 	}
656 	BIT_FILL(TAGID_MAX, &ts->avail);
657 }
658 
659 static void
660 pf_cleanup_tagset(struct pf_tagset *ts)
661 {
662 	unsigned int i;
663 	unsigned int hashsize;
664 	struct pf_tagname *t, *tmp;
665 
666 	/*
667 	 * Only need to clean up one of the hashes as each tag is hashed
668 	 * into each table.
669 	 */
670 	hashsize = ts->mask + 1;
671 	for (i = 0; i < hashsize; i++)
672 		TAILQ_FOREACH_SAFE(t, &ts->namehash[i], namehash_entries, tmp)
673 			uma_zfree(V_pf_tag_z, t);
674 
675 	free(ts->namehash, M_PFHASH);
676 	free(ts->taghash, M_PFHASH);
677 }
678 
679 static uint16_t
680 tagname2hashindex(const struct pf_tagset *ts, const char *tagname)
681 {
682 	size_t len;
683 
684 	len = strnlen(tagname, PF_TAG_NAME_SIZE - 1);
685 	return (murmur3_32_hash(tagname, len, ts->seed) & ts->mask);
686 }
687 
688 static uint16_t
689 tag2hashindex(const struct pf_tagset *ts, uint16_t tag)
690 {
691 
692 	return (tag & ts->mask);
693 }
694 
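/*
 * Look up (and reference) the tag ID for tagname under the read lock.  If
 * the name is unknown and add_new is true, retry under the write lock and
 * allocate the lowest free tag ID.  Returns 0 on failure.
 */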
695 static u_int16_t
696 tagname2tag(struct pf_tagset *ts, const char *tagname, bool add_new)
697 {
698 	struct pf_tagname	*tag;
699 	u_int32_t		 index;
700 	u_int16_t		 new_tagid;
701 
702 	PF_TAGS_RLOCK_TRACKER;
703 
704 	PF_TAGS_RLOCK();
705 
706 	index = tagname2hashindex(ts, tagname);
707 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
708 		if (strcmp(tagname, tag->name) == 0) {
709 			tag->ref++;
710 			new_tagid = tag->tag;
711 			PF_TAGS_RUNLOCK();
712 			return (new_tagid);
713 		}
714 
715 	/*
716 	 * When used for pfsync with queues, we must not create new entries.
717 	 * Pf tags can be created just fine by this function, but queues
718 	 * require additional configuration. If they are missing on the target
719 	 * system we just ignore them.
720 	 */
721 	if (add_new == false) {
722 		printf("%s: Not creating a new tag\n", __func__);
723 		PF_TAGS_RUNLOCK();
724 		return (0);
725 	}
726 
727 	/*
728 	 * If a new entry must be created do it under a write lock.
729 	 * But first search again, somebody could have created the tag
730 	 * between unlocking the read lock and locking the write lock.
731 	 */
732 	PF_TAGS_RUNLOCK();
733 	PF_TAGS_WLOCK();
734 	TAILQ_FOREACH(tag, &ts->namehash[index], namehash_entries)
735 		if (strcmp(tagname, tag->name) == 0) {
736 			tag->ref++;
737 			new_tagid = tag->tag;
738 			PF_TAGS_WUNLOCK();
739 			return (new_tagid);
740 		}
741 
742 	/*
743 	 * new entry
744 	 *
745 	 * to avoid fragmentation, we do a linear search from the beginning
746 	 * and take the first free slot we find.
747 	 */
748 	new_tagid = BIT_FFS(TAGID_MAX, &ts->avail);
749 	/*
750 	 * Tags are 1-based, with valid tags in the range [1..TAGID_MAX].
751 	 * BIT_FFS() returns a 1-based bit number, with 0 indicating no bits
752 	 * set.  It may also return a bit number greater than TAGID_MAX due
753 	 * to rounding of the number of bits in the vector up to a multiple
754 	 * of the vector word size at declaration/allocation time.
755 	 */
756 	if ((new_tagid == 0) || (new_tagid > TAGID_MAX)) {
757 		PF_TAGS_WUNLOCK();
758 		return (0);
759 	}
760 
761 	/* Mark the tag as in use.  Bits are 0-based for BIT_CLR() */
762 	BIT_CLR(TAGID_MAX, new_tagid - 1, &ts->avail);
763 
764 	/* allocate and fill new struct pf_tagname */
765 	tag = uma_zalloc(V_pf_tag_z, M_NOWAIT);
766 	if (tag == NULL) {
767 		PF_TAGS_WUNLOCK();
768 		return (0);
769 	}
770 	strlcpy(tag->name, tagname, sizeof(tag->name));
771 	tag->tag = new_tagid;
772 	tag->ref = 1;
773 
774 	/* Insert into namehash */
775 	TAILQ_INSERT_TAIL(&ts->namehash[index], tag, namehash_entries);
776 
777 	/* Insert into taghash */
778 	index = tag2hashindex(ts, new_tagid);
779 	TAILQ_INSERT_TAIL(&ts->taghash[index], tag, taghash_entries);
780 
781 	PF_TAGS_WUNLOCK();
782 	return (new_tagid);
783 }
784 
785 static char *
786 tag2tagname(struct pf_tagset *ts, u_int16_t tag)
787 {
788 	struct pf_tagname	*t;
789 	uint16_t		 index;
790 
791 	PF_TAGS_RLOCK_TRACKER;
792 
793 	PF_TAGS_RLOCK();
794 
795 	index = tag2hashindex(ts, tag);
796 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
797 		if (tag == t->tag) {
798 			PF_TAGS_RUNLOCK();
799 			return (t->name);
800 		}
801 
802 	PF_TAGS_RUNLOCK();
803 	return (NULL);
804 }
805 
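/*
 * Drop a reference on a tag; on the last reference, unhash it and return
 * its ID to the pool of available tags.
 */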
806 static void
807 tag_unref(struct pf_tagset *ts, u_int16_t tag)
808 {
809 	struct pf_tagname	*t;
810 	uint16_t		 index;
811 
812 	PF_TAGS_WLOCK();
813 
814 	index = tag2hashindex(ts, tag);
815 	TAILQ_FOREACH(t, &ts->taghash[index], taghash_entries)
816 		if (tag == t->tag) {
817 			if (--t->ref == 0) {
818 				TAILQ_REMOVE(&ts->taghash[index], t,
819 				    taghash_entries);
820 				index = tagname2hashindex(ts, t->name);
821 				TAILQ_REMOVE(&ts->namehash[index], t,
822 				    namehash_entries);
823 				/* Bits are 0-based for BIT_SET() */
824 				BIT_SET(TAGID_MAX, tag - 1, &ts->avail);
825 				uma_zfree(V_pf_tag_z, t);
826 			}
827 			break;
828 		}
829 
830 	PF_TAGS_WUNLOCK();
831 }
832 
833 uint16_t
834 pf_tagname2tag(const char *tagname)
835 {
836 	return (tagname2tag(&V_pf_tags, tagname, true));
837 }
838 
839 static const char *
840 pf_tag2tagname(uint16_t tag)
841 {
842 	return (tag2tagname(&V_pf_tags, tag));
843 }
844 
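/*
 * Open a transaction on the Ethernet ruleset of the given anchor: purge
 * leftover inactive rules and hand out a fresh ticket.
 */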
845 static int
846 pf_begin_eth(uint32_t *ticket, const char *anchor)
847 {
848 	struct pf_keth_rule *rule, *tmp;
849 	struct pf_keth_ruleset *rs;
850 
851 	PF_RULES_WASSERT();
852 
853 	rs = pf_find_or_create_keth_ruleset(anchor);
854 	if (rs == NULL)
855 		return (EINVAL);
856 
857 	/* Purge old inactive rules. */
858 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
859 	    tmp) {
860 		TAILQ_REMOVE(rs->inactive.rules, rule,
861 		    entries);
862 		pf_free_eth_rule(rule);
863 	}
864 
865 	*ticket = ++rs->inactive.ticket;
866 	rs->inactive.open = 1;
867 
868 	return (0);
869 }
870 
871 static int
872 pf_rollback_eth(uint32_t ticket, const char *anchor)
873 {
874 	struct pf_keth_rule *rule, *tmp;
875 	struct pf_keth_ruleset *rs;
876 
877 	PF_RULES_WASSERT();
878 
879 	rs = pf_find_keth_ruleset(anchor);
880 	if (rs == NULL)
881 		return (EINVAL);
882 
883 	if (!rs->inactive.open ||
884 	    ticket != rs->inactive.ticket)
885 		return (0);
886 
887 	/* Purge old inactive rules. */
888 	TAILQ_FOREACH_SAFE(rule, rs->inactive.rules, entries,
889 	    tmp) {
890 		TAILQ_REMOVE(rs->inactive.rules, rule, entries);
891 		pf_free_eth_rule(rule);
892 	}
893 
894 	rs->inactive.open = 0;
895 
896 	pf_remove_if_empty_keth_ruleset(rs);
897 
898 	return (0);
899 }
900 
901 #define	PF_SET_SKIP_STEPS(i)					\
902 	do {							\
903 		while (head[i] != cur) {			\
904 			head[i]->skip[i].ptr = cur;		\
905 			head[i] = TAILQ_NEXT(head[i], entries);	\
906 		}						\
907 	} while (0)
908 
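/*
 * Compute skip steps for the Ethernet ruleset: for each field, point a rule
 * at the next rule differing in that field, so evaluation can skip over
 * runs of rules that cannot match.
 */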
909 static void
910 pf_eth_calc_skip_steps(struct pf_keth_ruleq *rules)
911 {
912 	struct pf_keth_rule *cur, *prev, *head[PFE_SKIP_COUNT];
913 	int i;
914 
915 	cur = TAILQ_FIRST(rules);
916 	prev = cur;
917 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
918 		head[i] = cur;
919 	while (cur != NULL) {
920 		if (cur->kif != prev->kif || cur->ifnot != prev->ifnot)
921 			PF_SET_SKIP_STEPS(PFE_SKIP_IFP);
922 		if (cur->direction != prev->direction)
923 			PF_SET_SKIP_STEPS(PFE_SKIP_DIR);
924 		if (cur->proto != prev->proto)
925 			PF_SET_SKIP_STEPS(PFE_SKIP_PROTO);
926 		if (memcmp(&cur->src, &prev->src, sizeof(cur->src)) != 0)
927 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_ADDR);
928 		if (memcmp(&cur->dst, &prev->dst, sizeof(cur->dst)) != 0)
929 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_ADDR);
930 		if (cur->ipsrc.neg != prev->ipsrc.neg ||
931 		    pf_addr_wrap_neq(&cur->ipsrc.addr, &prev->ipsrc.addr))
932 			PF_SET_SKIP_STEPS(PFE_SKIP_SRC_IP_ADDR);
933 		if (cur->ipdst.neg != prev->ipdst.neg ||
934 		    pf_addr_wrap_neq(&cur->ipdst.addr, &prev->ipdst.addr))
935 			PF_SET_SKIP_STEPS(PFE_SKIP_DST_IP_ADDR);
936 
937 		prev = cur;
938 		cur = TAILQ_NEXT(cur, entries);
939 	}
940 	for (i = 0; i < PFE_SKIP_COUNT; ++i)
941 		PF_SET_SKIP_STEPS(i);
942 }
943 
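/*
 * Commit an Ethernet ruleset transaction: compute skip steps for the new
 * rules, swap the inactive and active queues, and purge the previously
 * active rules via pf_rollback_eth().
 */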
944 static int
945 pf_commit_eth(uint32_t ticket, const char *anchor)
946 {
947 	struct pf_keth_ruleq *rules;
948 	struct pf_keth_ruleset *rs;
949 
950 	rs = pf_find_keth_ruleset(anchor);
951 	if (rs == NULL) {
952 		return (EINVAL);
953 	}
954 
955 	if (!rs->inactive.open ||
956 	    ticket != rs->inactive.ticket)
957 		return (EBUSY);
958 
959 	PF_RULES_WASSERT();
960 
961 	pf_eth_calc_skip_steps(rs->inactive.rules);
962 
963 	rules = rs->active.rules;
964 	atomic_store_ptr(&rs->active.rules, rs->inactive.rules);
965 	rs->inactive.rules = rules;
966 	rs->inactive.ticket = rs->active.ticket;
967 
968 	return (pf_rollback_eth(rs->inactive.ticket,
969 	    rs->anchor ? rs->anchor->path : ""));
970 }
971 
972 #ifdef ALTQ
973 uint16_t
974 pf_qname2qid(const char *qname, bool add_new)
975 {
976 	return (tagname2tag(&V_pf_qids, qname, add_new));
977 }
978 
979 static void
980 pf_qid_unref(uint16_t qid)
981 {
982 	tag_unref(&V_pf_qids, qid);
983 }
984 
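/*
 * Open an ALTQ transaction: purge the stale inactive ALTQ lists, removing
 * any disciplines still attached, and hand out a fresh ticket.
 */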
985 static int
986 pf_begin_altq(u_int32_t *ticket)
987 {
988 	struct pf_altq	*altq, *tmp;
989 	int		 error = 0;
990 
991 	PF_RULES_WASSERT();
992 
993 	/* Purge the old altq lists */
994 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
995 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
996 			/* detach and destroy the discipline */
997 			error = altq_remove(altq);
998 		}
999 		free(altq, M_PFALTQ);
1000 	}
1001 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1002 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1003 		pf_qid_unref(altq->qid);
1004 		free(altq, M_PFALTQ);
1005 	}
1006 	TAILQ_INIT(V_pf_altqs_inactive);
1007 	if (error)
1008 		return (error);
1009 	*ticket = ++V_ticket_altqs_inactive;
1010 	V_altqs_inactive_open = 1;
1011 	return (0);
1012 }
1013 
1014 static int
1015 pf_rollback_altq(u_int32_t ticket)
1016 {
1017 	struct pf_altq	*altq, *tmp;
1018 	int		 error = 0;
1019 
1020 	PF_RULES_WASSERT();
1021 
1022 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
1023 		return (0);
1024 	/* Purge the old altq lists */
1025 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1026 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1027 			/* detach and destroy the discipline */
1028 			error = altq_remove(altq);
1029 		}
1030 		free(altq, M_PFALTQ);
1031 	}
1032 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1033 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1034 		pf_qid_unref(altq->qid);
1035 		free(altq, M_PFALTQ);
1036 	}
1037 	TAILQ_INIT(V_pf_altqs_inactive);
1038 	V_altqs_inactive_open = 0;
1039 	return (error);
1040 }
1041 
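/*
 * Commit an ALTQ transaction: swap the active and inactive lists, attach
 * and (if ALTQ is running) enable the new disciplines, then detach and free
 * the old ones.
 */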
1042 static int
1043 pf_commit_altq(u_int32_t ticket)
1044 {
1045 	struct pf_altqqueue	*old_altqs, *old_altq_ifs;
1046 	struct pf_altq		*altq, *tmp;
1047 	int			 err, error = 0;
1048 
1049 	PF_RULES_WASSERT();
1050 
1051 	if (!V_altqs_inactive_open || ticket != V_ticket_altqs_inactive)
1052 		return (EBUSY);
1053 
1054 	/* swap altqs, keep the old. */
1055 	old_altqs = V_pf_altqs_active;
1056 	old_altq_ifs = V_pf_altq_ifs_active;
1057 	V_pf_altqs_active = V_pf_altqs_inactive;
1058 	V_pf_altq_ifs_active = V_pf_altq_ifs_inactive;
1059 	V_pf_altqs_inactive = old_altqs;
1060 	V_pf_altq_ifs_inactive = old_altq_ifs;
1061 	V_ticket_altqs_active = V_ticket_altqs_inactive;
1062 
1063 	/* Attach new disciplines */
1064 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
1065 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1066 			/* attach the discipline */
1067 			error = altq_pfattach(altq);
1068 			if (error == 0 && V_pf_altq_running)
1069 				error = pf_enable_altq(altq);
1070 			if (error != 0)
1071 				return (error);
1072 		}
1073 	}
1074 
1075 	/* Purge the old altq lists */
1076 	TAILQ_FOREACH_SAFE(altq, V_pf_altq_ifs_inactive, entries, tmp) {
1077 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
1078 			/* detach and destroy the discipline */
1079 			if (V_pf_altq_running)
1080 				error = pf_disable_altq(altq);
1081 			err = altq_pfdetach(altq);
1082 			if (err != 0 && error == 0)
1083 				error = err;
1084 			err = altq_remove(altq);
1085 			if (err != 0 && error == 0)
1086 				error = err;
1087 		}
1088 		free(altq, M_PFALTQ);
1089 	}
1090 	TAILQ_INIT(V_pf_altq_ifs_inactive);
1091 	TAILQ_FOREACH_SAFE(altq, V_pf_altqs_inactive, entries, tmp) {
1092 		pf_qid_unref(altq->qid);
1093 		free(altq, M_PFALTQ);
1094 	}
1095 	TAILQ_INIT(V_pf_altqs_inactive);
1096 
1097 	V_altqs_inactive_open = 0;
1098 	return (error);
1099 }
1100 
1101 static int
1102 pf_enable_altq(struct pf_altq *altq)
1103 {
1104 	struct ifnet		*ifp;
1105 	struct tb_profile	 tb;
1106 	int			 error = 0;
1107 
1108 	if ((ifp = ifunit(altq->ifname)) == NULL)
1109 		return (EINVAL);
1110 
1111 	if (ifp->if_snd.altq_type != ALTQT_NONE)
1112 		error = altq_enable(&ifp->if_snd);
1113 
1114 	/* set tokenbucket regulator */
1115 	if (error == 0 && ifp != NULL && ALTQ_IS_ENABLED(&ifp->if_snd)) {
1116 		tb.rate = altq->ifbandwidth;
1117 		tb.depth = altq->tbrsize;
1118 		error = tbr_set(&ifp->if_snd, &tb);
1119 	}
1120 
1121 	return (error);
1122 }
1123 
1124 static int
1125 pf_disable_altq(struct pf_altq *altq)
1126 {
1127 	struct ifnet		*ifp;
1128 	struct tb_profile	 tb;
1129 	int			 error;
1130 
1131 	if ((ifp = ifunit(altq->ifname)) == NULL)
1132 		return (EINVAL);
1133 
1134 	/*
1135 	 * If the discipline is no longer referenced by the interface, it has
1136 	 * been overridden by a new one; just return in that case.
1137 	 */
1138 	if (altq->altq_disc != ifp->if_snd.altq_disc)
1139 		return (0);
1140 
1141 	error = altq_disable(&ifp->if_snd);
1142 
1143 	if (error == 0) {
1144 		/* clear tokenbucket regulator */
1145 		tb.rate = 0;
1146 		error = tbr_set(&ifp->if_snd, &tb);
1147 	}
1148 
1149 	return (error);
1150 }
1151 
1152 static int
1153 pf_altq_ifnet_event_add(struct ifnet *ifp, int remove, u_int32_t ticket,
1154     struct pf_altq *altq)
1155 {
1156 	struct ifnet	*ifp1;
1157 	int		 error = 0;
1158 
1159 	/* Deactivate the interface in question */
1160 	altq->local_flags &= ~PFALTQ_FLAG_IF_REMOVED;
1161 	if ((ifp1 = ifunit(altq->ifname)) == NULL ||
1162 	    (remove && ifp1 == ifp)) {
1163 		altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
1164 	} else {
1165 		error = altq_add(ifp1, altq);
1166 
1167 		if (ticket != V_ticket_altqs_inactive)
1168 			error = EBUSY;
1169 
1170 		if (error)
1171 			free(altq, M_PFALTQ);
1172 	}
1173 
1174 	return (error);
1175 }
1176 
1177 void
1178 pf_altq_ifnet_event(struct ifnet *ifp, int remove)
1179 {
1180 	struct pf_altq	*a1, *a2, *a3;
1181 	u_int32_t	 ticket;
1182 	int		 error = 0;
1183 
1184 	/*
1185 	 * No need to re-evaluate the configuration for events on interfaces
1186 	 * that do not support ALTQ, as it's not possible for such
1187 	 * interfaces to be part of the configuration.
1188 	 */
1189 	if (!ALTQ_IS_READY(&ifp->if_snd))
1190 		return;
1191 
1192 	/* Interrupt userland queue modifications */
1193 	if (V_altqs_inactive_open)
1194 		pf_rollback_altq(V_ticket_altqs_inactive);
1195 
1196 	/* Start new altq ruleset */
1197 	if (pf_begin_altq(&ticket))
1198 		return;
1199 
1200 	/* Copy the current active set */
1201 	TAILQ_FOREACH(a1, V_pf_altq_ifs_active, entries) {
1202 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1203 		if (a2 == NULL) {
1204 			error = ENOMEM;
1205 			break;
1206 		}
1207 		bcopy(a1, a2, sizeof(struct pf_altq));
1208 
1209 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1210 		if (error)
1211 			break;
1212 
1213 		TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, a2, entries);
1214 	}
1215 	if (error)
1216 		goto out;
1217 	TAILQ_FOREACH(a1, V_pf_altqs_active, entries) {
1218 		a2 = malloc(sizeof(*a2), M_PFALTQ, M_NOWAIT);
1219 		if (a2 == NULL) {
1220 			error = ENOMEM;
1221 			break;
1222 		}
1223 		bcopy(a1, a2, sizeof(struct pf_altq));
1224 
1225 		if ((a2->qid = pf_qname2qid(a2->qname, true)) == 0) {
1226 			error = EBUSY;
1227 			free(a2, M_PFALTQ);
1228 			break;
1229 		}
1230 		a2->altq_disc = NULL;
1231 		TAILQ_FOREACH(a3, V_pf_altq_ifs_inactive, entries) {
1232 			if (strncmp(a3->ifname, a2->ifname,
1233 				IFNAMSIZ) == 0) {
1234 				a2->altq_disc = a3->altq_disc;
1235 				break;
1236 			}
1237 		}
1238 		error = pf_altq_ifnet_event_add(ifp, remove, ticket, a2);
1239 		if (error)
1240 			break;
1241 
1242 		TAILQ_INSERT_TAIL(V_pf_altqs_inactive, a2, entries);
1243 	}
1244 
1245 out:
1246 	if (error != 0)
1247 		pf_rollback_altq(ticket);
1248 	else
1249 		pf_commit_altq(ticket);
1250 }
1251 #endif /* ALTQ */
1252 
1253 static struct pf_krule_global *
1254 pf_rule_tree_alloc(int flags)
1255 {
1256 	struct pf_krule_global *tree;
1257 
1258 	tree = malloc(sizeof(struct pf_krule_global), M_PF, flags);
1259 	if (tree == NULL)
1260 		return (NULL);
1261 	RB_INIT(tree);
1262 	return (tree);
1263 }
1264 
1265 void
1266 pf_rule_tree_free(struct pf_krule_global *tree)
1267 {
1268 
1269 	free(tree, M_PF);
1270 }
1271 
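/*
 * Open a ruleset transaction: install a fresh lookup tree for the inactive
 * ruleset, unlink any leftover inactive rules, and hand out a new ticket.
 */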
1272 static int
1273 pf_begin_rules(u_int32_t *ticket, int rs_num, const char *anchor)
1274 {
1275 	struct pf_krule_global *tree;
1276 	struct pf_kruleset	*rs;
1277 	struct pf_krule		*rule;
1278 
1279 	PF_RULES_WASSERT();
1280 
1281 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1282 		return (EINVAL);
1283 	tree = pf_rule_tree_alloc(M_NOWAIT);
1284 	if (tree == NULL)
1285 		return (ENOMEM);
1286 	rs = pf_find_or_create_kruleset(anchor);
1287 	if (rs == NULL) {
1288 		pf_rule_tree_free(tree);
1289 		return (EINVAL);
1290 	}
1291 	pf_rule_tree_free(rs->rules[rs_num].inactive.tree);
1292 	rs->rules[rs_num].inactive.tree = tree;
1293 
1294 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1295 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1296 		rs->rules[rs_num].inactive.rcount--;
1297 	}
1298 	*ticket = ++rs->rules[rs_num].inactive.ticket;
1299 	rs->rules[rs_num].inactive.open = 1;
1300 	return (0);
1301 }
1302 
1303 static int
1304 pf_rollback_rules(u_int32_t ticket, int rs_num, char *anchor)
1305 {
1306 	struct pf_kruleset	*rs;
1307 	struct pf_krule		*rule;
1308 
1309 	PF_RULES_WASSERT();
1310 
1311 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1312 		return (EINVAL);
1313 	rs = pf_find_kruleset(anchor);
1314 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1315 	    rs->rules[rs_num].inactive.ticket != ticket)
1316 		return (0);
1317 	while ((rule = TAILQ_FIRST(rs->rules[rs_num].inactive.ptr)) != NULL) {
1318 		pf_unlink_rule(rs->rules[rs_num].inactive.ptr, rule);
1319 		rs->rules[rs_num].inactive.rcount--;
1320 	}
1321 	rs->rules[rs_num].inactive.open = 0;
1322 
1323 	if (anchor[0])
1324 		return (0);
1325 
1326 	pf_statelim_rollback();
1327 	pf_sourcelim_rollback();
1328 	return (0);
1329 }
1330 
1331 #define PF_MD5_UPD(st, elm)						\
1332 		MD5Update(ctx, (u_int8_t *) &(st)->elm, sizeof((st)->elm))
1333 
1334 #define PF_MD5_UPD_STR(st, elm)						\
1335 		MD5Update(ctx, (u_int8_t *) (st)->elm, strlen((st)->elm))
1336 
1337 #define PF_MD5_UPD_HTONL(st, elm, stor) do {				\
1338 		(stor) = htonl((st)->elm);				\
1339 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int32_t));\
1340 } while (0)
1341 
1342 #define PF_MD5_UPD_HTONS(st, elm, stor) do {				\
1343 		(stor) = htons((st)->elm);				\
1344 		MD5Update(ctx, (u_int8_t *) &(stor), sizeof(u_int16_t));\
1345 } while (0)
1346 
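/*
 * Fold the address portion of a rule into the ruleset checksum; table
 * names starting with PF_OPTIMIZER_TABLE_PFX are excluded from the hash.
 */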
1347 static void
1348 pf_hash_rule_addr(MD5_CTX *ctx, struct pf_rule_addr *pfr)
1349 {
1350 	PF_MD5_UPD(pfr, addr.type);
1351 	switch (pfr->addr.type) {
1352 		case PF_ADDR_DYNIFTL:
1353 			PF_MD5_UPD(pfr, addr.v.ifname);
1354 			PF_MD5_UPD(pfr, addr.iflags);
1355 			break;
1356 		case PF_ADDR_TABLE:
1357 			if (strncmp(pfr->addr.v.tblname, PF_OPTIMIZER_TABLE_PFX,
1358 			    strlen(PF_OPTIMIZER_TABLE_PFX)))
1359 				PF_MD5_UPD(pfr, addr.v.tblname);
1360 			break;
1361 		case PF_ADDR_ADDRMASK:
1362 		case PF_ADDR_RANGE:
1363 			/* XXX ignore af? */
1364 			PF_MD5_UPD(pfr, addr.v.a.addr.addr32);
1365 			PF_MD5_UPD(pfr, addr.v.a.mask.addr32);
1366 			break;
1367 		case PF_ADDR_NONE:
1368 		case PF_ADDR_NOROUTE:
1369 		case PF_ADDR_URPFFAILED:
1370 			/* These do not use any address data. */
1371 			break;
1372 		default:
1373 			panic("Unknown address type %d", pfr->addr.type);
1374 	}
1375 
1376 	PF_MD5_UPD(pfr, port[0]);
1377 	PF_MD5_UPD(pfr, port[1]);
1378 	PF_MD5_UPD(pfr, neg);
1379 	PF_MD5_UPD(pfr, port_op);
1380 }
1381 
1382 static void
1383 pf_hash_pool(MD5_CTX *ctx, struct pf_kpool *pool)
1384 {
1385 	uint16_t x;
1386 	int y;
1387 
1388 	if (pool->cur) {
1389 		PF_MD5_UPD(pool, cur->addr);
1390 		PF_MD5_UPD_STR(pool, cur->ifname);
1391 		PF_MD5_UPD(pool, cur->af);
1392 	}
1393 	PF_MD5_UPD(pool, key);
1394 	PF_MD5_UPD(pool, counter);
1395 
1396 	PF_MD5_UPD(pool, mape.offset);
1397 	PF_MD5_UPD(pool, mape.psidlen);
1398 	PF_MD5_UPD_HTONS(pool, mape.psid, x);
1399 	PF_MD5_UPD_HTONL(pool, tblidx, y);
1400 	PF_MD5_UPD_HTONS(pool, proxy_port[0], x);
1401 	PF_MD5_UPD_HTONS(pool, proxy_port[1], x);
1402 	PF_MD5_UPD(pool, opts);
1403 	PF_MD5_UPD(pool, ipv6_nexthop_af);
1404 }
1405 
1406 static void
1407 pf_hash_rule_rolling(MD5_CTX *ctx, struct pf_krule *rule)
1408 {
1409 	u_int16_t x;
1410 	u_int32_t y;
1411 
1412 	pf_hash_rule_addr(ctx, &rule->src);
1413 	pf_hash_rule_addr(ctx, &rule->dst);
1414 	for (int i = 0; i < PF_RULE_MAX_LABEL_COUNT; i++)
1415 		PF_MD5_UPD_STR(rule, label[i]);
1416 	PF_MD5_UPD_HTONL(rule, ridentifier, y);
1417 	PF_MD5_UPD_STR(rule, ifname);
1418 	PF_MD5_UPD_STR(rule, rcv_ifname);
1419 	PF_MD5_UPD_STR(rule, qname);
1420 	PF_MD5_UPD_STR(rule, pqname);
1421 	PF_MD5_UPD_STR(rule, tagname);
1422 	PF_MD5_UPD_STR(rule, match_tagname);
1423 
1424 	PF_MD5_UPD_STR(rule, overload_tblname);
1425 
1426 	pf_hash_pool(ctx, &rule->nat);
1427 	pf_hash_pool(ctx, &rule->rdr);
1428 	pf_hash_pool(ctx, &rule->route);
1429 	PF_MD5_UPD_HTONL(rule, pktrate.limit, y);
1430 	PF_MD5_UPD_HTONL(rule, pktrate.seconds, y);
1431 
1432 	PF_MD5_UPD_HTONL(rule, os_fingerprint, y);
1433 
1434 	PF_MD5_UPD_HTONL(rule, rtableid, y);
1435 	for (int i = 0; i < PFTM_MAX; i++)
1436 		PF_MD5_UPD_HTONL(rule, timeout[i], y);
1437 	PF_MD5_UPD_HTONL(rule, max_states, y);
1438 	PF_MD5_UPD_HTONL(rule, max_src_nodes, y);
1439 	PF_MD5_UPD_HTONL(rule, max_src_states, y);
1440 	PF_MD5_UPD_HTONL(rule, max_src_conn, y);
1441 	PF_MD5_UPD_HTONL(rule, max_src_conn_rate.limit, y);
1442 	PF_MD5_UPD_HTONL(rule, max_src_conn_rate.seconds, y);
1443 	PF_MD5_UPD_HTONS(rule, max_pkt_size, x);
1444 	PF_MD5_UPD_HTONS(rule, qid, x);
1445 	PF_MD5_UPD_HTONS(rule, pqid, x);
1446 	PF_MD5_UPD_HTONS(rule, dnpipe, x);
1447 	PF_MD5_UPD_HTONS(rule, dnrpipe, x);
1448 	PF_MD5_UPD_HTONL(rule, free_flags, y);
1449 	PF_MD5_UPD_HTONL(rule, prob, y);
1450 
1451 	PF_MD5_UPD_HTONS(rule, return_icmp, x);
1452 	PF_MD5_UPD_HTONS(rule, return_icmp6, x);
1453 	PF_MD5_UPD_HTONS(rule, max_mss, x);
1454 	PF_MD5_UPD_HTONS(rule, tag, x); /* dup? */
1455 	PF_MD5_UPD_HTONS(rule, match_tag, x); /* dup? */
1456 	PF_MD5_UPD_HTONS(rule, scrub_flags, x);
1457 
1458 	PF_MD5_UPD(rule, uid.op);
1459 	PF_MD5_UPD_HTONL(rule, uid.uid[0], y);
1460 	PF_MD5_UPD_HTONL(rule, uid.uid[1], y);
1461 	PF_MD5_UPD(rule, gid.op);
1462 	PF_MD5_UPD_HTONL(rule, gid.gid[0], y);
1463 	PF_MD5_UPD_HTONL(rule, gid.gid[1], y);
1464 
1465 	PF_MD5_UPD_HTONL(rule, rule_flag, y);
1466 	PF_MD5_UPD_HTONL(rule, rule_ref, y);
1467 	PF_MD5_UPD(rule, action);
1468 	PF_MD5_UPD(rule, direction);
1469 	PF_MD5_UPD(rule, log);
1470 	PF_MD5_UPD(rule, logif);
1471 	PF_MD5_UPD(rule, quick);
1472 	PF_MD5_UPD(rule, ifnot);
1473 	PF_MD5_UPD(rule, match_tag_not);
1474 	PF_MD5_UPD(rule, natpass);
1475 
1476 	PF_MD5_UPD(rule, keep_state);
1477 	PF_MD5_UPD(rule, af);
1478 	PF_MD5_UPD(rule, proto);
1479 	PF_MD5_UPD_HTONS(rule, type, x);
1480 	PF_MD5_UPD_HTONS(rule, code, x);
1481 	PF_MD5_UPD(rule, flags);
1482 	PF_MD5_UPD(rule, flagset);
1483 	PF_MD5_UPD(rule, min_ttl);
1484 	PF_MD5_UPD(rule, allow_opts);
1485 	PF_MD5_UPD(rule, rt);
1486 	PF_MD5_UPD(rule, return_ttl);
1487 	PF_MD5_UPD(rule, tos);
1488 	PF_MD5_UPD(rule, set_tos);
1489 	PF_MD5_UPD(rule, anchor_relative);
1490 	PF_MD5_UPD(rule, anchor_wildcard);
1491 
1492 	PF_MD5_UPD(rule, flush);
1493 	PF_MD5_UPD(rule, prio);
1494 	PF_MD5_UPD(rule, set_prio[0]);
1495 	PF_MD5_UPD(rule, set_prio[1]);
1496 	PF_MD5_UPD(rule, naf);
1497 	PF_MD5_UPD(rule, rcvifnot);
1498 	PF_MD5_UPD(rule, statelim.id);
1499 	PF_MD5_UPD_HTONL(rule, statelim.limiter_action, y);
1500 	PF_MD5_UPD(rule, sourcelim.id);
1501 	PF_MD5_UPD_HTONL(rule, sourcelim.limiter_action, y);
1502 
1503 	PF_MD5_UPD(rule, divert.addr);
1504 	PF_MD5_UPD_HTONS(rule, divert.port, x);
1505 
1506 	if (rule->anchor != NULL)
1507 		PF_MD5_UPD_STR(rule, anchor->path);
1508 }
1509 
1510 static void
1511 pf_hash_rule(struct pf_krule *rule)
1512 {
1513 	MD5_CTX		ctx;
1514 
1515 	MD5Init(&ctx);
1516 	pf_hash_rule_rolling(&ctx, rule);
1517 	MD5Final(rule->md5sum, &ctx);
1518 }
1519 
1520 static int
1521 pf_krule_compare(struct pf_krule *a, struct pf_krule *b)
1522 {
1523 
1524 	return (memcmp(a->md5sum, b->md5sum, PF_MD5_DIGEST_LENGTH));
1525 }
1526 
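/*
 * Commit a ruleset transaction: swap the inactive ruleset in as active,
 * roll counters over from matching old rules (looked up by rule MD5 hash),
 * recompute skip steps, and purge the previously active rules.
 */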
1527 static int
1528 pf_commit_rules(u_int32_t ticket, int rs_num, char *anchor)
1529 {
1530 	struct pf_kruleset	*rs;
1531 	struct pf_krule		*rule, *old_rule;
1532 	struct pf_krulequeue	*old_rules;
1533 	struct pf_krule_global  *old_tree;
1534 	int			 error;
1535 	u_int32_t		 old_rcount;
1536 	bool			 is_main_ruleset = anchor[0] == '\0';
1537 
1538 	PF_RULES_WASSERT();
1539 
1540 	if (rs_num < 0 || rs_num >= PF_RULESET_MAX)
1541 		return (EINVAL);
1542 	rs = pf_find_kruleset(anchor);
1543 	if (rs == NULL || !rs->rules[rs_num].inactive.open ||
1544 	    ticket != rs->rules[rs_num].inactive.ticket)
1545 		return (EBUSY);
1546 
1547 	/* Calculate checksum for the main ruleset */
1548 	if (rs == &pf_main_ruleset) {
1549 		error = pf_sourcelim_check();
1550 		if (error != 0)
1551 			return (error);
1552 		error = pf_setup_pfsync_matching(rs);
1553 		if (error != 0)
1554 			return (error);
1555 	}
1556 
1557 	/* Swap rules, keep the old. */
1558 	old_rules = rs->rules[rs_num].active.ptr;
1559 	old_rcount = rs->rules[rs_num].active.rcount;
1560 	old_tree = rs->rules[rs_num].active.tree;
1561 
1562 	rs->rules[rs_num].active.ptr =
1563 	    rs->rules[rs_num].inactive.ptr;
1564 	rs->rules[rs_num].active.tree =
1565 	    rs->rules[rs_num].inactive.tree;
1566 	rs->rules[rs_num].active.rcount =
1567 	    rs->rules[rs_num].inactive.rcount;
1568 
1569 	/* Attempt to preserve counter information. */
1570 	if (V_pf_status.keep_counters && old_tree != NULL) {
1571 		TAILQ_FOREACH(rule, rs->rules[rs_num].active.ptr,
1572 		    entries) {
1573 			old_rule = RB_FIND(pf_krule_global, old_tree, rule);
1574 			if (old_rule == NULL) {
1575 				continue;
1576 			}
1577 			pf_counter_u64_critical_enter();
1578 			pf_counter_u64_rollup_protected(&rule->evaluations,
1579 			    pf_counter_u64_fetch(&old_rule->evaluations));
1580 			pf_counter_u64_rollup_protected(&rule->packets[0],
1581 			    pf_counter_u64_fetch(&old_rule->packets[0]));
1582 			pf_counter_u64_rollup_protected(&rule->packets[1],
1583 			    pf_counter_u64_fetch(&old_rule->packets[1]));
1584 			pf_counter_u64_rollup_protected(&rule->bytes[0],
1585 			    pf_counter_u64_fetch(&old_rule->bytes[0]));
1586 			pf_counter_u64_rollup_protected(&rule->bytes[1],
1587 			    pf_counter_u64_fetch(&old_rule->bytes[1]));
1588 			pf_counter_u64_critical_exit();
1589 		}
1590 	}
1591 
1592 	rs->rules[rs_num].inactive.ptr = old_rules;
1593 	rs->rules[rs_num].inactive.tree = NULL; /* important for pf_ioctl_addrule */
1594 	rs->rules[rs_num].inactive.rcount = old_rcount;
1595 
1596 	rs->rules[rs_num].active.ticket =
1597 	    rs->rules[rs_num].inactive.ticket;
1598 	pf_calc_skip_steps(rs->rules[rs_num].active.ptr);
1599 
1600 	/* Purge the old rule list. */
1601 	PF_UNLNKDRULES_LOCK();
1602 	while ((rule = TAILQ_FIRST(old_rules)) != NULL)
1603 		pf_unlink_rule_locked(old_rules, rule);
1604 	PF_UNLNKDRULES_UNLOCK();
1605 	rs->rules[rs_num].inactive.rcount = 0;
1606 	rs->rules[rs_num].inactive.open = 0;
1607 	pf_remove_if_empty_kruleset(rs);
1608 	pf_rule_tree_free(old_tree);
1609 
1610 	/* statelim/sourcelim/queue defs only in the main ruleset */
1611 	if (!is_main_ruleset || rs_num != PF_RULESET_FILTER)
1612 		return (0);
1613 
1614 	pf_statelim_commit();
1615 	pf_sourcelim_commit();
1616 
1617 	return (0);
1618 }
1619 
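/*
 * Compute an MD5 checksum over all inactive rulesets (scrub excluded) and
 * store it in pf_status, where pfsync can use it to check that peers run
 * matching rulesets.
 */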
1620 static int
1621 pf_setup_pfsync_matching(struct pf_kruleset *rs)
1622 {
1623 	MD5_CTX			 ctx;
1624 	struct pf_krule		*rule;
1625 	int			 rs_cnt;
1626 	u_int8_t		 digest[PF_MD5_DIGEST_LENGTH];
1627 
1628 	MD5Init(&ctx);
1629 	for (rs_cnt = 0; rs_cnt < PF_RULESET_MAX; rs_cnt++) {
1630 		/* XXX PF_RULESET_SCRUB as well? */
1631 		if (rs_cnt == PF_RULESET_SCRUB)
1632 			continue;
1633 
1634 		if (rs->rules[rs_cnt].inactive.rcount) {
1635 			TAILQ_FOREACH(rule, rs->rules[rs_cnt].inactive.ptr,
1636 			    entries) {
1637 				pf_hash_rule_rolling(&ctx, rule);
1638 			}
1639 		}
1640 	}
1641 
1642 	MD5Final(digest, &ctx);
1643 	memcpy(V_pf_status.pf_chksum, digest, sizeof(V_pf_status.pf_chksum));
1644 	return (0);
1645 }
1646 
1647 static int
1648 pf_eth_addr_setup(struct pf_keth_ruleset *ruleset, struct pf_addr_wrap *addr)
1649 {
1650 	int error = 0;
1651 
1652 	switch (addr->type) {
1653 	case PF_ADDR_TABLE:
1654 		addr->p.tbl = pfr_eth_attach_table(ruleset, addr->v.tblname);
1655 		if (addr->p.tbl == NULL)
1656 			error = ENOMEM;
1657 		break;
1658 	default:
1659 		error = EINVAL;
1660 	}
1661 
1662 	return (error);
1663 }
1664 
1665 static int
1666 pf_addr_setup(struct pf_kruleset *ruleset, struct pf_addr_wrap *addr,
1667     sa_family_t af)
1668 {
1669 	int error = 0;
1670 
1671 	switch (addr->type) {
1672 	case PF_ADDR_TABLE:
1673 		addr->p.tbl = pfr_attach_table(ruleset, addr->v.tblname);
1674 		if (addr->p.tbl == NULL)
1675 			error = ENOMEM;
1676 		break;
1677 	case PF_ADDR_DYNIFTL:
1678 		error = pfi_dynaddr_setup(addr, af);
1679 		break;
1680 	}
1681 
1682 	return (error);
1683 }
1684 
1685 void
1686 pf_addr_copyout(struct pf_addr_wrap *addr)
1687 {
1688 
1689 	switch (addr->type) {
1690 	case PF_ADDR_DYNIFTL:
1691 		pfi_dynaddr_copyout(addr);
1692 		break;
1693 	case PF_ADDR_TABLE:
1694 		pf_tbladdr_copyout(addr);
1695 		break;
1696 	}
1697 }
1698 
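/*
 * Validate a state limiter definition from userland and insert it into the
 * inactive trees and list; it takes effect when the ruleset transaction is
 * committed via pf_statelim_commit().
 */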
1699 int
1700 pf_statelim_add(const struct pfioc_statelim *ioc)
1701 {
1702 	struct pf_statelim	*pfstlim;
1703 	int			 error;
1704 	size_t			 namelen;
1705 
1706 	if (ioc->id < PF_STATELIM_ID_MIN ||
1707 	    ioc->id > PF_STATELIM_ID_MAX)
1708 		return (EINVAL);
1709 
1710 	if (ioc->limit < PF_STATELIM_LIMIT_MIN ||
1711 	    ioc->limit > PF_STATELIM_LIMIT_MAX)
1712 		return (EINVAL);
1713 
1714 	if ((ioc->rate.limit == 0) != (ioc->rate.seconds == 0))
1715 		return (EINVAL);
1716 
1717 	namelen = strnlen(ioc->name, sizeof(ioc->name));
1718 	/* is the name from userland nul-terminated? */
1719 	if (namelen == sizeof(ioc->name))
1720 		return (EINVAL);
1721 
1722 	pfstlim = malloc(sizeof(*pfstlim), M_PF_STATE_LIM, M_WAITOK | M_ZERO);
1723 	if (pfstlim == NULL)
1724 		return (ENOMEM);
1725 
1726 	pfstlim->pfstlim_id = ioc->id;
1727 	if (strlcpy(pfstlim->pfstlim_nm, ioc->name,
1728 	    sizeof(pfstlim->pfstlim_nm)) >= sizeof(pfstlim->pfstlim_nm)) {
1729 		error = EINVAL;
1730 		goto free;
1731 	}
1732 	pfstlim->pfstlim_limit = ioc->limit;
1733 	pfstlim->pfstlim_rate.limit = ioc->rate.limit;
1734 	pfstlim->pfstlim_rate.seconds = ioc->rate.seconds;
1735 
1736 	if (pfstlim->pfstlim_rate.limit) {
1737 		uint64_t bucket = SEC_TO_NSEC(pfstlim->pfstlim_rate.seconds);
1738 		struct timespec ts;
1739 
1740 		getnanouptime(&ts);
1741 
1742 		pfstlim->pfstlim_rate_ts = SEC_TO_NSEC(ts.tv_sec) + ts.tv_nsec -
1743 		    bucket;
1744 		pfstlim->pfstlim_rate_token = bucket /
1745 		    pfstlim->pfstlim_rate.limit;
1746 		pfstlim->pfstlim_rate_bucket = bucket;
1747 	}
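	/*
	 * e.g. a rate of 10 states per 5 seconds gives a 5,000,000,000ns
	 * bucket and one 500,000,000ns token per admitted state; rate_ts
	 * is backdated by a whole bucket so the limiter starts out full.
	 */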
1748 
1749 	TAILQ_INIT(&pfstlim->pfstlim_states);
1750 	mtx_init(&pfstlim->pfstlim_lock, "pf state limit", NULL, MTX_DEF);
1751 
1752 	PF_RULES_WLOCK();
1753 	if (ioc->ticket != pf_main_ruleset.rules[PF_RULESET_FILTER].inactive.ticket) {
1754 		error = EBUSY;
1755 		goto unlock;
1756 	}
1757 
1758 	if (RB_INSERT(pf_statelim_id_tree, &V_pf_statelim_id_tree_inactive,
1759 		pfstlim) != NULL) {
1760 		error = EBUSY;
1761 		goto unlock;
1762 	}
1763 
1764 	if (RB_INSERT(pf_statelim_nm_tree, &V_pf_statelim_nm_tree_inactive,
1765 		pfstlim) != NULL) {
1766 		RB_REMOVE(pf_statelim_id_tree, &V_pf_statelim_id_tree_inactive,
1767 		    pfstlim);
1768 		error = EBUSY;
1769 		goto unlock;
1770 	}
1771 
1772 	TAILQ_INSERT_HEAD(&V_pf_statelim_list_inactive, pfstlim, pfstlim_list);
1773 
1774 	PF_RULES_WUNLOCK();
1775 
1776 	return (0);
1777 
1778 unlock:
1779 	PF_RULES_WUNLOCK();
1780 
1781 free:
1782 	free(pfstlim, M_PF_STATE_LIM);
1783 
1784 	return (error);
1785 }
1786 
1787 static void
1788 pf_statelim_unlink(struct pf_statelim *pfstlim,
1789     struct pf_state_link_list *garbage)
1790 {
1791 	struct pf_state_link *pfl;
1792 
1793 
1794 	/* unwire the links */
1795 	TAILQ_FOREACH(pfl, &pfstlim->pfstlim_states, pfl_link) {
1796 		struct pf_kstate *s = pfl->pfl_state;
1797 
1798 		/* if !rmst */
1799 		PF_STATE_LOCK(s);
1800 		s->statelim = 0;
1801 		SLIST_REMOVE(&s->linkage, pfl, pf_state_link, pfl_linkage);
1802 		PF_STATE_UNLOCK(s);
1803 	}
1804 
1805 	/* take the list away */
1806 	TAILQ_CONCAT(garbage, &pfstlim->pfstlim_states, pfl_link);
1807 	pfstlim->pfstlim_inuse = 0;
1808 }
1809 
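/*
 * Make the pending statelim set the active one.  A statelim present in
 * both sets keeps its existing struct (and the states linked to it) and
 * only has its parameters overwritten; statelims dropped from the config
 * are unhooked from their states and freed.
 */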
1810 void
1811 pf_statelim_commit(void)
1812 {
1813 	struct pf_statelim *pfstlim, *npfstlim, *opfstlim;
1814 	struct pf_statelim_list l = TAILQ_HEAD_INITIALIZER(l);
1815 	struct pf_state_link_list garbage = TAILQ_HEAD_INITIALIZER(garbage);
1816 	struct pf_state_link *pfl, *npfl;
1817 
1818 	PF_RULES_WASSERT();
1819 
1820 	/* merge the new statelims into the current set */
1821 
1822 	/* start with an empty active list */
1823 	TAILQ_CONCAT(&l, &V_pf_statelim_list_active, pfstlim_list);
1824 
1825 	/* beware, the inactive bits get messed up here */
1826 
1827 	/* try putting pending statelims into the active tree */
1828 	TAILQ_FOREACH_SAFE(pfstlim, &V_pf_statelim_list_inactive, pfstlim_list,
1829 	    npfstlim) {
1830 		opfstlim = RB_INSERT(pf_statelim_id_tree,
1831 		    &V_pf_statelim_id_tree_active, pfstlim);
1832 		if (opfstlim != NULL) {
1833 			/* this statelim already exists, merge */
1834 			opfstlim->pfstlim_limit = pfstlim->pfstlim_limit;
1835 			opfstlim->pfstlim_rate.limit =
1836 			    pfstlim->pfstlim_rate.limit;
1837 			opfstlim->pfstlim_rate.seconds =
1838 			    pfstlim->pfstlim_rate.seconds;
1839 
1840 			opfstlim->pfstlim_rate_ts = pfstlim->pfstlim_rate_ts;
1841 			opfstlim->pfstlim_rate_token =
1842 			    pfstlim->pfstlim_rate_token;
1843 			opfstlim->pfstlim_rate_bucket =
1844 			    pfstlim->pfstlim_rate_bucket;
1845 
1846 			memcpy(opfstlim->pfstlim_nm, pfstlim->pfstlim_nm,
1847 			    sizeof(opfstlim->pfstlim_nm));
1848 
1849 			/* use the existing statelim instead */
1850 			free(pfstlim, M_PF_STATE_LIM);
1851 			TAILQ_REMOVE(&l, opfstlim, pfstlim_list);
1852 			pfstlim = opfstlim;
1853 		}
1854 
1855 		TAILQ_INSERT_TAIL(&V_pf_statelim_list_active, pfstlim,
1856 		    pfstlim_list);
1857 	}
1858 
1859 	/* clean up the now unused statelims from the old set */
1860 	TAILQ_FOREACH_SAFE(pfstlim, &l, pfstlim_list, npfstlim) {
1861 		pf_statelim_unlink(pfstlim, &garbage);
1862 
1863 		RB_REMOVE(pf_statelim_id_tree, &V_pf_statelim_id_tree_active,
1864 		    pfstlim);
1865 
1866 		free(pfstlim, M_PF_STATE_LIM);
1867 	}
1868 
1869 	/* fix up the inactive tree */
1870 	RB_INIT(&V_pf_statelim_id_tree_inactive);
1871 	RB_INIT(&V_pf_statelim_nm_tree_inactive);
1872 	TAILQ_INIT(&V_pf_statelim_list_inactive);
1873 
1874 	TAILQ_FOREACH_SAFE(pfl, &garbage, pfl_link, npfl)
1875 		free(pfl, M_PF_STATE_LINK);
1876 }
1877 
1878 static void
1879 pf_sourcelim_unlink(struct pf_sourcelim *pfsrlim,
1880     struct pf_state_link_list *garbage)
1881 {
1882 	extern struct pf_source_list pf_source_gc;
1883 	struct pf_source *pfsr;
1884 	struct pf_state_link *pfl;
1885 
1886 	PF_RULES_WASSERT();
1887 
1888 	while ((pfsr = RB_ROOT(&pfsrlim->pfsrlim_sources)) != NULL) {
1889 		RB_REMOVE(pf_source_tree, &pfsrlim->pfsrlim_sources, pfsr);
1890 		RB_REMOVE(pf_source_ioc_tree, &pfsrlim->pfsrlim_ioc_sources,
1891 		    pfsr);
1892 		if (pfsr->pfsr_inuse == 0)
1893 			TAILQ_REMOVE(&pf_source_gc, pfsr, pfsr_empty_gc);
1894 
1895 		/* unwire the links */
1896 		TAILQ_FOREACH(pfl, &pfsr->pfsr_states, pfl_link) {
1897 			struct pf_kstate *s = pfl->pfl_state;
1898 
1899 			PF_STATE_LOCK(s);
1900 			/* if !rmst */
1901 			s->sourcelim = 0;
1902 			SLIST_REMOVE(&s->linkage, pfl, pf_state_link,
1903 			    pfl_linkage);
1904 			PF_STATE_UNLOCK(s);
1905 		}
1906 
1907 		/* take the list away */
1908 		TAILQ_CONCAT(garbage, &pfsr->pfsr_states, pfl_link);
1909 
1910 		free(pfsr, M_PF_SOURCE_LIM);
1911 	}
1912 }
1913 
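/*
 * Refuse a commit that would change the overload table or the prefix
 * lengths of a sourcelim that is already tracking sources: the existing
 * pf_source entries would no longer match the new masks.
 */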
1914 int
1915 pf_sourcelim_check(void)
1916 {
1917 	struct pf_sourcelim *pfsrlim, *npfsrlim;
1918 
1919 	PF_RULES_WASSERT();
1920 
1921 	/* check if we can merge */
1922 
1923 	TAILQ_FOREACH(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list) {
1924 		npfsrlim = RB_FIND(pf_sourcelim_id_tree,
1925 		    &V_pf_sourcelim_id_tree_active, pfsrlim);
1926 
1927 		/* new config, no conflict */
1928 		if (npfsrlim == NULL)
1929 			continue;
1930 
1931 		/* nothing is tracked at the moment, no conflict */
1932 		if (RB_EMPTY(&npfsrlim->pfsrlim_sources))
1933 			continue;
1934 
1935 		if (strcmp(npfsrlim->pfsrlim_overload.name,
1936 		    pfsrlim->pfsrlim_overload.name) != 0)
1937 			return (EBUSY);
1938 
1939 		/*
1940 		 * we should allow the prefixlens to get shorter
1941 		 * and merge pf_source entries.
1942 		 */
1943 
1944 		if ((npfsrlim->pfsrlim_ipv4_prefix !=
1945 			pfsrlim->pfsrlim_ipv4_prefix) ||
1946 		    (npfsrlim->pfsrlim_ipv6_prefix !=
1947 			pfsrlim->pfsrlim_ipv6_prefix))
1948 			return (EBUSY);
1949 	}
1950 
1951 	return (0);
1952 }
1953 
1954 void
1955 pf_sourcelim_commit(void)
1956 {
1957 	struct pf_sourcelim *pfsrlim, *npfsrlim, *opfsrlim;
1958 	struct pf_sourcelim_list l = TAILQ_HEAD_INITIALIZER(l);
1959 	struct pf_state_link_list garbage = TAILQ_HEAD_INITIALIZER(garbage);
1960 	struct pf_state_link *pfl, *npfl;
1961 
1962 	PF_RULES_WASSERT();
1963 
1964 	/* merge the new sourcelims into the current set */
1965 
1966 	/* start with an empty active list */
1967 	TAILQ_CONCAT(&l, &V_pf_sourcelim_list_active, pfsrlim_list);
1968 
1969 	/* beware, the inactive bits get messed up here */
1970 
1971 	/* try putting pending sourcelims into the active tree */
1972 	TAILQ_FOREACH_SAFE(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list,
1973 	    npfsrlim) {
1974 		opfsrlim = RB_INSERT(pf_sourcelim_id_tree,
1975 		    &V_pf_sourcelim_id_tree_active, pfsrlim);
1976 		if (opfsrlim != NULL) {
1977 			/* this sourcelim already exists, merge */
1978 			opfsrlim->pfsrlim_entries = pfsrlim->pfsrlim_entries;
1979 			opfsrlim->pfsrlim_limit = pfsrlim->pfsrlim_limit;
1980 			opfsrlim->pfsrlim_ipv4_prefix =
1981 			    pfsrlim->pfsrlim_ipv4_prefix;
1982 			opfsrlim->pfsrlim_ipv6_prefix =
1983 			    pfsrlim->pfsrlim_ipv6_prefix;
1984 			opfsrlim->pfsrlim_rate.limit =
1985 			    pfsrlim->pfsrlim_rate.limit;
1986 			opfsrlim->pfsrlim_rate.seconds =
1987 			    pfsrlim->pfsrlim_rate.seconds;
1988 
1989 			opfsrlim->pfsrlim_ipv4_mask =
1990 			    pfsrlim->pfsrlim_ipv4_mask;
1991 			opfsrlim->pfsrlim_ipv6_mask =
1992 			    pfsrlim->pfsrlim_ipv6_mask;
1993 
1994 			/* keep the existing pfsrlim_rate_ts */
1995 
1996 			opfsrlim->pfsrlim_rate_token =
1997 			    pfsrlim->pfsrlim_rate_token;
1998 			opfsrlim->pfsrlim_rate_bucket =
1999 			    pfsrlim->pfsrlim_rate_bucket;
2000 
2001 			if (opfsrlim->pfsrlim_overload.table != NULL) {
2002 				pfr_detach_table(
2003 				    opfsrlim->pfsrlim_overload.table);
2004 			}
2005 
2006 			strlcpy(opfsrlim->pfsrlim_overload.name,
2007 			    pfsrlim->pfsrlim_overload.name,
2008 			    sizeof(opfsrlim->pfsrlim_overload.name));
2009 			opfsrlim->pfsrlim_overload.hwm =
2010 			    pfsrlim->pfsrlim_overload.hwm;
2011 			opfsrlim->pfsrlim_overload.lwm =
2012 			    pfsrlim->pfsrlim_overload.lwm;
2013 			opfsrlim->pfsrlim_overload.table =
2014 			    pfsrlim->pfsrlim_overload.table;
2015 
2016 			memcpy(opfsrlim->pfsrlim_nm, pfsrlim->pfsrlim_nm,
2017 			    sizeof(opfsrlim->pfsrlim_nm));
2018 
2019 			/* use the existing sourcelim instead */
2020 			free(pfsrlim, M_PF_SOURCE_LIM);
2021 			TAILQ_REMOVE(&l, opfsrlim, pfsrlim_list);
2022 			pfsrlim = opfsrlim;
2023 		}
2024 
2025 		TAILQ_INSERT_TAIL(&V_pf_sourcelim_list_active, pfsrlim,
2026 		    pfsrlim_list);
2027 	}
2028 
2029 	/* clean up the now unused sourcelims from the old set */
2030 	TAILQ_FOREACH_SAFE(pfsrlim, &l, pfsrlim_list, npfsrlim) {
2031 		pf_sourcelim_unlink(pfsrlim, &garbage);
2032 
2033 		RB_REMOVE(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_active,
2034 		    pfsrlim);
2035 
2036 		if (pfsrlim->pfsrlim_overload.table != NULL)
2037 			pfr_detach_table(pfsrlim->pfsrlim_overload.table);
2038 
2039 		free(pfsrlim, M_PF_SOURCE_LIM);
2040 	}
2041 
2042 	/* fix up the inactive tree */
2043 	RB_INIT(&V_pf_sourcelim_id_tree_inactive);
2044 	RB_INIT(&V_pf_sourcelim_nm_tree_inactive);
2045 	TAILQ_INIT(&V_pf_sourcelim_list_inactive);
2046 
2047 	TAILQ_FOREACH_SAFE(pfl, &garbage, pfl_link, npfl)
2048 		free(pfl, M_PF_STATE_LINK);
2049 }
2050 
2051 void
2052 pf_statelim_rollback(void)
2053 {
2054 	struct pf_statelim *pfstlim, *npfstlim;
2055 
2056 	PF_RULES_WASSERT();
2057 
2058 	TAILQ_FOREACH_SAFE(pfstlim, &V_pf_statelim_list_inactive, pfstlim_list,
2059 	    npfstlim)
2060 		free(pfstlim, M_PF_STATE_LIM);
2061 
2062 	TAILQ_INIT(&V_pf_statelim_list_inactive);
2063 	RB_INIT(&V_pf_statelim_id_tree_inactive);
2064 	RB_INIT(&V_pf_statelim_nm_tree_inactive);
2065 }
2066 
2067 struct pf_statelim *
2068 pf_statelim_rb_find(struct pf_statelim_id_tree *tree, struct pf_statelim *key)
2069 {
2070 	PF_RULES_ASSERT();
2071 
2072 	return (RB_FIND(pf_statelim_id_tree, tree, key));
2073 }
2074 
2075 struct pf_statelim *
2076 pf_statelim_rb_nfind(struct pf_statelim_id_tree *tree, struct pf_statelim *key)
2077 {
2078 	PF_RULES_ASSERT();
2079 
2080 	return (RB_NFIND(pf_statelim_id_tree, tree, key));
2081 }
2082 
2083 int
2084 pf_statelim_get(struct pfioc_statelim *ioc,
2085     struct pf_statelim *(*rbt_op)(struct pf_statelim_id_tree *,
2086     struct pf_statelim *))
2087 {
2088 	struct pf_statelim key = { .pfstlim_id = ioc->id };
2089 	struct pf_statelim *pfstlim;
2090 	int error = 0;
2091 	PF_RULES_RLOCK_TRACKER;
2092 
2093 	PF_RULES_RLOCK();
2094 
2095 	pfstlim = (*rbt_op)(&V_pf_statelim_id_tree_active, &key);
2096 	if (pfstlim == NULL) {
2097 		error = ENOENT;
2098 		goto unlock;
2099 	}
2100 
2101 	ioc->id = pfstlim->pfstlim_id;
2102 	ioc->limit = pfstlim->pfstlim_limit;
2103 	ioc->rate.limit = pfstlim->pfstlim_rate.limit;
2104 	ioc->rate.seconds = pfstlim->pfstlim_rate.seconds;
2105 	CTASSERT(sizeof(ioc->name) == sizeof(pfstlim->pfstlim_nm));
2106 	memcpy(ioc->name, pfstlim->pfstlim_nm, sizeof(ioc->name));
2107 
2108 	ioc->inuse = pfstlim->pfstlim_inuse;
2109 	ioc->admitted = pfstlim->pfstlim_counters.admitted;
2110 	ioc->hardlimited = pfstlim->pfstlim_counters.hardlimited;
2111 	ioc->ratelimited = pfstlim->pfstlim_counters.ratelimited;
2112 
2113 unlock:
2114 	PF_RULES_RUNLOCK();
2115 
2116 	return (error);
2117 }
2118 
2119 int
2120 pf_sourcelim_add(const struct pfioc_sourcelim *ioc)
2121 {
2122 	struct pf_sourcelim	*pfsrlim;
2123 	int			 error;
2124 	size_t			 namelen, tablelen;
2125 	unsigned int		 prefix;
2126 	size_t			 i;
2127 
2128 	if (ioc->id < PF_SOURCELIM_ID_MIN ||
2129 	    ioc->id > PF_SOURCELIM_ID_MAX)
2130 		return (EINVAL);
2131 
2132 	if (ioc->entries < 1)
2133 		return (EINVAL);
2134 
2135 	if (ioc->limit < 1)
2136 		return (EINVAL);
2137 
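	/* a rate needs both limit and seconds; zero both to disable it */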
2138 	if ((ioc->rate.limit == 0) != (ioc->rate.seconds == 0))
2139 		return (EINVAL);
2140 
2141 	if (ioc->inet_prefix > 32)
2142 		return (EINVAL);
2143 	if (ioc->inet6_prefix > 128)
2144 		return (EINVAL);
2145 
2146 	namelen = strnlen(ioc->name, sizeof(ioc->name));
2147 	/* is the name from userland NUL-terminated? */
2148 	if (namelen == sizeof(ioc->name))
2149 		return (EINVAL);
2150 
2151 	tablelen = strnlen(ioc->overload_tblname,
2152 	    sizeof(ioc->overload_tblname));
2153 	/* is the name from userland NUL-terminated? */
2154 	if (tablelen == sizeof(ioc->overload_tblname))
2155 		return (EINVAL);
2156 	if (tablelen != 0) {
2157 		if (ioc->overload_hwm == 0)
2158 			return (EINVAL);
2159 
2160 		if (ioc->overload_hwm < ioc->overload_lwm)
2161 			return (EINVAL);
2162 	}
2163 
2164 	pfsrlim = malloc(sizeof(*pfsrlim), M_PF_SOURCE_LIM, M_WAITOK | M_ZERO);
2165 	if (pfsrlim == NULL)
2166 		return (ENOMEM);
2167 
2168 	pfsrlim->pfsrlim_id = ioc->id;
2169 	pfsrlim->pfsrlim_entries = ioc->entries;
2170 	pfsrlim->pfsrlim_limit = ioc->limit;
2171 	pfsrlim->pfsrlim_ipv4_prefix = ioc->inet_prefix;
2172 	pfsrlim->pfsrlim_ipv6_prefix = ioc->inet6_prefix;
2173 	pfsrlim->pfsrlim_rate.limit = ioc->rate.limit;
2174 	pfsrlim->pfsrlim_rate.seconds = ioc->rate.seconds;
2175 	if (strlcpy(pfsrlim->pfsrlim_overload.name, ioc->overload_tblname,
2176 	    sizeof(pfsrlim->pfsrlim_overload.name)) >=
2177 	    sizeof(pfsrlim->pfsrlim_overload.name)) {
2178 		error = EINVAL;
2179 		goto free;
2180 	}
2181 	pfsrlim->pfsrlim_overload.hwm = ioc->overload_hwm;
2182 	pfsrlim->pfsrlim_overload.lwm = ioc->overload_lwm;
2183 	if (strlcpy(pfsrlim->pfsrlim_nm, ioc->name,
2184 	    sizeof(pfsrlim->pfsrlim_nm)) >= sizeof(pfsrlim->pfsrlim_nm)) {
2185 		error = EINVAL;
2186 		goto free;
2187 	}
2188 
2189 	if (pfsrlim->pfsrlim_rate.limit) {
2190 		uint64_t bucket = pfsrlim->pfsrlim_rate.seconds * 1000000000ULL;
2191 
2192 		pfsrlim->pfsrlim_rate_token = bucket /
2193 		    pfsrlim->pfsrlim_rate.limit;
2194 		pfsrlim->pfsrlim_rate_bucket = bucket;
2195 	}
2196 
2197 	pfsrlim->pfsrlim_ipv4_mask.v4.s_addr = pfsrlim->pfsrlim_ipv4_prefix ?
2198 	    htonl(0xffffffff << (32 - pfsrlim->pfsrlim_ipv4_prefix)) : 0;
2199 
2200 	prefix = pfsrlim->pfsrlim_ipv6_prefix;
2201 	for (i = 0; i < nitems(pfsrlim->pfsrlim_ipv6_mask.addr32); i++) {
2202 		if (prefix == 0) {
2203 			/* the memory is already zeroed */
2204 			break;
2205 		}
2206 		if (prefix < 32) {
2207 			pfsrlim->pfsrlim_ipv6_mask.addr32[i] = htonl(
2208 			    0xffffffff << (32 - prefix));
2209 			break;
2210 		}
2211 
2212 		pfsrlim->pfsrlim_ipv6_mask.addr32[i] = htonl(0xffffffff);
2213 		prefix -= 32;
2214 	}
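	/*
	 * e.g. a 56-bit prefix sets addr32[0] = htonl(0xffffffff) and
	 * addr32[1] = htonl(0xffffff00) and stops; the remaining words
	 * stay zero from M_ZERO, giving ffff:ffff:ffff:ff00::.
	 */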
2215 
2216 	RB_INIT(&pfsrlim->pfsrlim_sources);
2217 	mtx_init(&pfsrlim->pfsrlim_lock, "pf source limit", NULL, MTX_DEF);
2218 
2219 	PF_RULES_WLOCK();
2220 	if (ioc->ticket != pf_main_ruleset.rules[PF_RULESET_FILTER].inactive.ticket) {
2221 		error = EBUSY;
2222 		goto unlock;
2223 	}
2224 
2225 	if (pfsrlim->pfsrlim_overload.name[0] != '\0') {
2226 		pfsrlim->pfsrlim_overload.table = pfr_attach_table(
2227 		    &pf_main_ruleset, pfsrlim->pfsrlim_overload.name);
2228 		if (pfsrlim->pfsrlim_overload.table == NULL) {
2229 			error = EINVAL;
2230 			goto unlock;
2231 		}
2232 	}
2233 
2234 	if (RB_INSERT(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_inactive,
2235 		pfsrlim) != NULL) {
2236 		error = EBUSY;
2237 		goto unlock;
2238 	}
2239 
2240 	if (RB_INSERT(pf_sourcelim_nm_tree, &V_pf_sourcelim_nm_tree_inactive,
2241 		pfsrlim) != NULL) {
2242 		RB_REMOVE(pf_sourcelim_id_tree, &V_pf_sourcelim_id_tree_inactive,
2243 		    pfsrlim);
2244 		error = EBUSY;
2245 		goto unlock;
2246 	}
2247 
2248 	TAILQ_INSERT_HEAD(&V_pf_sourcelim_list_inactive, pfsrlim, pfsrlim_list);
2249 
2250 	PF_RULES_WUNLOCK();
2251 
2252 	return (0);
2253 
2254 unlock:
2255 	PF_RULES_WUNLOCK();
2256 
2257 free:
2258 	free(pfsrlim, M_PF_SOURCE_LIM);
2259 
2260 	return (error);
2261 }
2262 
2263 void
2264 pf_sourcelim_rollback(void)
2265 {
2266 	struct pf_sourcelim *pfsrlim, *npfsrlim;
2267 
2268 	PF_RULES_WASSERT();
2269 
2270 	TAILQ_FOREACH_SAFE(pfsrlim, &V_pf_sourcelim_list_inactive, pfsrlim_list,
2271 	    npfsrlim) {
2272 		if (pfsrlim->pfsrlim_overload.table != NULL)
2273 			pfr_detach_table(pfsrlim->pfsrlim_overload.table);
2274 
2275 		free(pfsrlim, M_PF_SOURCE_LIM);
2276 	}
2277 
2278 	TAILQ_INIT(&V_pf_sourcelim_list_inactive);
2279 	RB_INIT(&V_pf_sourcelim_id_tree_inactive);
2280 	RB_INIT(&V_pf_sourcelim_nm_tree_inactive);
2281 }
2282 
2283 struct pf_sourcelim *
2284 pf_sourcelim_rb_find(struct pf_sourcelim_id_tree *tree,
2285     struct pf_sourcelim *key)
2286 {
2287 	PF_RULES_ASSERT();
2288 	return (RB_FIND(pf_sourcelim_id_tree, tree, key));
2289 }
2290 
2291 struct pf_sourcelim *
2292 pf_sourcelim_rb_nfind(struct pf_sourcelim_id_tree *tree,
2293     struct pf_sourcelim *key)
2294 {
2295 	PF_RULES_ASSERT();
2296 	return (RB_NFIND(pf_sourcelim_id_tree, tree, key));
2297 }
2298 
2299 int
2300 pf_sourcelim_get(struct pfioc_sourcelim *ioc,
2301     struct pf_sourcelim *(*rbt_op)(struct pf_sourcelim_id_tree *,
2302     struct pf_sourcelim *))
2303 {
2304 	struct pf_sourcelim key = { .pfsrlim_id = ioc->id };
2305 	struct pf_sourcelim *pfsrlim;
2306 	int error = 0;
2307 	PF_RULES_RLOCK_TRACKER;
2308 
2309 	PF_RULES_RLOCK();
2310 
2311 	pfsrlim = (*rbt_op)(&V_pf_sourcelim_id_tree_active, &key);
2312 	if (pfsrlim == NULL) {
2313 		error = ESRCH;
2314 		goto unlock;
2315 	}
2316 
2317 	ioc->id = pfsrlim->pfsrlim_id;
2318 	ioc->entries = pfsrlim->pfsrlim_entries;
2319 	ioc->limit = pfsrlim->pfsrlim_limit;
2320 	ioc->inet_prefix = pfsrlim->pfsrlim_ipv4_prefix;
2321 	ioc->inet6_prefix = pfsrlim->pfsrlim_ipv6_prefix;
2322 	ioc->rate.limit = pfsrlim->pfsrlim_rate.limit;
2323 	ioc->rate.seconds = pfsrlim->pfsrlim_rate.seconds;
2324 
2325 	CTASSERT(sizeof(ioc->overload_tblname) ==
2326 	    sizeof(pfsrlim->pfsrlim_overload.name));
2327 	memcpy(ioc->overload_tblname, pfsrlim->pfsrlim_overload.name,
2328 	    sizeof(pfsrlim->pfsrlim_overload.name));
2329 	ioc->overload_hwm = pfsrlim->pfsrlim_overload.hwm;
2330 	ioc->overload_lwm = pfsrlim->pfsrlim_overload.lwm;
2331 
2332 	CTASSERT(sizeof(ioc->name) == sizeof(pfsrlim->pfsrlim_nm));
2333 	memcpy(ioc->name, pfsrlim->pfsrlim_nm, sizeof(ioc->name));
2334 	/* XXX overload table thing */
2335 
2336 	ioc->nentries = pfsrlim->pfsrlim_nsources;
2337 
2338 	ioc->inuse = pfsrlim->pfsrlim_counters.inuse;
2339 	ioc->addrallocs = pfsrlim->pfsrlim_counters.addrallocs;
2340 	ioc->addrnomem = pfsrlim->pfsrlim_counters.addrnomem;
2341 	ioc->admitted = pfsrlim->pfsrlim_counters.admitted;
2342 	ioc->addrlimited = pfsrlim->pfsrlim_counters.addrlimited;
2343 	ioc->hardlimited = pfsrlim->pfsrlim_counters.hardlimited;
2344 	ioc->ratelimited = pfsrlim->pfsrlim_counters.ratelimited;
2345 
2346 unlock:
2347 	PF_RULES_RUNLOCK();
2348 
2349 	return (error);
2350 }
2351 
2352 struct pf_source *
2353 pf_source_rb_find(struct pf_source_ioc_tree *tree,
2354     struct pf_source *key)
2355 {
2356 	PF_RULES_ASSERT();
2357 
2358 	return (RB_FIND(pf_source_ioc_tree, tree, key));
2359 }
2360 
2361 struct pf_source *
2362 pf_source_rb_nfind(struct pf_source_ioc_tree *tree,
2363     struct pf_source *key)
2364 {
2365 	PF_RULES_ASSERT();
2366 
2367 	return (RB_NFIND(pf_source_ioc_tree, tree, key));
2368 }
2369 
2370 int
2371 pf_source_clr(struct pfioc_source_kill *ioc)
2372 {
2373 	extern struct pf_source_list pf_source_gc;
2374 	struct pf_sourcelim plkey = {
2375 		.pfsrlim_id = ioc->id,
2376 	};
2377 	struct pf_source skey = {
2378 		.pfsr_af = ioc->af,
2379 		.pfsr_rdomain = ioc->rdomain,
2380 		.pfsr_addr = ioc->addr,
2381 	};
2382 	struct pf_sourcelim *pfsrlim;
2383 	struct pf_source *pfsr;
2384 	struct pf_state_link *pfl, *npfl;
2385 	int error = 0;
2386 	unsigned int gen;
2387 
2388 	if (ioc->rmstates) {
2389 		/* XXX userland wants the states removed too */
2390 		return (EOPNOTSUPP);
2391 	}
2392 
2393 	PF_RULES_WLOCK();
2394 
2395 	pfsrlim = pf_sourcelim_rb_find(&V_pf_sourcelim_id_tree_active, &plkey);
2396 	if (pfsrlim == NULL) {
2397 		error = ESRCH;
2398 		goto unlock;
2399 	}
2400 
2401 	pfsr = pf_source_rb_find(&pfsrlim->pfsrlim_ioc_sources, &skey);
2402 	if (pfsr == NULL) {
2403 		error = ENOENT;
2404 		goto unlock;
2405 	}
2406 
2407 	RB_REMOVE(pf_source_tree, &pfsrlim->pfsrlim_sources, pfsr);
2408 	RB_REMOVE(pf_source_ioc_tree, &pfsrlim->pfsrlim_ioc_sources, pfsr);
2409 	if (pfsr->pfsr_inuse == 0)
2410 		TAILQ_REMOVE(&pf_source_gc, pfsr, pfsr_empty_gc);
2411 
2412 	gen = pf_sourcelim_enter(pfsrlim);
2413 	pfsrlim->pfsrlim_nsources--;
2414 	pfsrlim->pfsrlim_counters.inuse -= pfsr->pfsr_inuse;
2415 	pf_sourcelim_leave(pfsrlim, gen);
2416 
2417 	/* unwire the links */
2418 	TAILQ_FOREACH(pfl, &pfsr->pfsr_states, pfl_link) {
2419 		struct pf_kstate *st = pfl->pfl_state;
2420 
2421 		/* if !rmst */
2422 		st->sourcelim = 0;
2423 		SLIST_REMOVE(&st->linkage, pfl, pf_state_link, pfl_linkage);
2424 	}
2425 
2426 	PF_RULES_WUNLOCK();
2427 
2428 	TAILQ_FOREACH_SAFE(pfl, &pfsr->pfsr_states, pfl_link, npfl)
2429 		free(pfl, M_PF_STATE_LINK);
2430 
2431 	free(pfsr, M_PF_SOURCE_LIM);
2432 
2433 	return (0);
2434 
2435 unlock:
2436 	PF_RULES_WUNLOCK();
2437 
2438 	return (error);
2439 }
2440 
2441 static void
2442 pf_src_node_copy(const struct pf_ksrc_node *in, struct pf_src_node *out)
2443 {
2444 	int	secs = time_uptime;
2445 
2446 	bzero(out, sizeof(struct pf_src_node));
2447 
2448 	bcopy(&in->addr, &out->addr, sizeof(struct pf_addr));
2449 	bcopy(&in->raddr, &out->raddr, sizeof(struct pf_addr));
2450 
2451 	if (in->rule != NULL)
2452 		out->rule.nr = in->rule->nr;
2453 
2454 	for (int i = 0; i < 2; i++) {
2455 		out->bytes[i] = counter_u64_fetch(in->bytes[i]);
2456 		out->packets[i] = counter_u64_fetch(in->packets[i]);
2457 	}
2458 
2459 	out->states = in->states;
2460 	out->conn = in->conn;
2461 	out->af = in->af;
2462 	out->ruletype = in->ruletype;
2463 
2464 	out->creation = secs - in->creation;
2465 	if (out->expire > secs)
2466 		out->expire -= secs;
2467 	else
2468 		out->expire = 0;
2469 
2470 	/* Adjust the connection rate estimate. */
2471 	out->conn_rate.limit = in->conn_rate.limit;
2472 	out->conn_rate.seconds = in->conn_rate.seconds;
2473 	/* If there's no limit there's no counter_rate. */
2474 	if (in->conn_rate.cr != NULL)
2475 		out->conn_rate.count = counter_rate_get(in->conn_rate.cr);
2476 }
2477 
2478 #ifdef ALTQ
2479 /*
2480  * Handle export of struct pf_kaltq to user binaries that may be using any
2481  * version of struct pf_altq.
2482  */
2483 static int
2484 pf_export_kaltq(struct pf_altq *q, struct pfioc_altq_v1 *pa, size_t ioc_size)
2485 {
2486 	u_int32_t version;
2487 
2488 	if (ioc_size == sizeof(struct pfioc_altq_v0))
2489 		version = 0;
2490 	else
2491 		version = pa->version;
2492 
2493 	if (version > PFIOC_ALTQ_VERSION)
2494 		return (EINVAL);
2495 
2496 #define ASSIGN(x) exported_q->x = q->x
2497 #define COPY(x) \
2498 	bcopy(&q->x, &exported_q->x, min(sizeof(q->x), sizeof(exported_q->x)))
2499 #define SATU16(x) (u_int32_t)uqmin((x), USHRT_MAX)
2500 #define SATU32(x) (u_int32_t)uqmin((x), UINT_MAX)
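/* e.g. SATU16(70000) == USHRT_MAX (65535); in-range values pass through */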
2501 
2502 	switch (version) {
2503 	case 0: {
2504 		struct pf_altq_v0 *exported_q =
2505 		    &((struct pfioc_altq_v0 *)pa)->altq;
2506 
2507 		COPY(ifname);
2508 
2509 		ASSIGN(scheduler);
2510 		ASSIGN(tbrsize);
2511 		exported_q->tbrsize = SATU16(q->tbrsize);
2512 		exported_q->ifbandwidth = SATU32(q->ifbandwidth);
2513 
2514 		COPY(qname);
2515 		COPY(parent);
2516 		ASSIGN(parent_qid);
2517 		exported_q->bandwidth = SATU32(q->bandwidth);
2518 		ASSIGN(priority);
2519 		ASSIGN(local_flags);
2520 
2521 		ASSIGN(qlimit);
2522 		ASSIGN(flags);
2523 
2524 		if (q->scheduler == ALTQT_HFSC) {
2525 #define ASSIGN_OPT(x) exported_q->pq_u.hfsc_opts.x = q->pq_u.hfsc_opts.x
2526 #define ASSIGN_OPT_SATU32(x) exported_q->pq_u.hfsc_opts.x = \
2527 			    SATU32(q->pq_u.hfsc_opts.x)
2528 
2529 			ASSIGN_OPT_SATU32(rtsc_m1);
2530 			ASSIGN_OPT(rtsc_d);
2531 			ASSIGN_OPT_SATU32(rtsc_m2);
2532 
2533 			ASSIGN_OPT_SATU32(lssc_m1);
2534 			ASSIGN_OPT(lssc_d);
2535 			ASSIGN_OPT_SATU32(lssc_m2);
2536 
2537 			ASSIGN_OPT_SATU32(ulsc_m1);
2538 			ASSIGN_OPT(ulsc_d);
2539 			ASSIGN_OPT_SATU32(ulsc_m2);
2540 
2541 			ASSIGN_OPT(flags);
2542 
2543 #undef ASSIGN_OPT
2544 #undef ASSIGN_OPT_SATU32
2545 		} else
2546 			COPY(pq_u);
2547 
2548 		ASSIGN(qid);
2549 		break;
2550 	}
2551 	case 1:	{
2552 		struct pf_altq_v1 *exported_q =
2553 		    &((struct pfioc_altq_v1 *)pa)->altq;
2554 
2555 		COPY(ifname);
2556 
2557 		ASSIGN(scheduler);
2558 		ASSIGN(tbrsize);
2559 		ASSIGN(ifbandwidth);
2560 
2561 		COPY(qname);
2562 		COPY(parent);
2563 		ASSIGN(parent_qid);
2564 		ASSIGN(bandwidth);
2565 		ASSIGN(priority);
2566 		ASSIGN(local_flags);
2567 
2568 		ASSIGN(qlimit);
2569 		ASSIGN(flags);
2570 		COPY(pq_u);
2571 
2572 		ASSIGN(qid);
2573 		break;
2574 	}
2575 	default:
2576 		panic("%s: unhandled struct pfioc_altq version", __func__);
2577 		break;
2578 	}
2579 
2580 #undef ASSIGN
2581 #undef COPY
2582 #undef SATU16
2583 #undef SATU32
2584 
2585 	return (0);
2586 }
2587 
2588 /*
2589  * Handle import to struct pf_kaltq of struct pf_altq from user binaries
2590  * that may be using any version of it.
2591  */
2592 static int
2593 pf_import_kaltq(struct pfioc_altq_v1 *pa, struct pf_altq *q, size_t ioc_size)
2594 {
2595 	u_int32_t version;
2596 
2597 	if (ioc_size == sizeof(struct pfioc_altq_v0))
2598 		version = 0;
2599 	else
2600 		version = pa->version;
2601 
2602 	if (version > PFIOC_ALTQ_VERSION)
2603 		return (EINVAL);
2604 
2605 #define ASSIGN(x) q->x = imported_q->x
2606 #define COPY(x) \
2607 	bcopy(&imported_q->x, &q->x, min(sizeof(imported_q->x), sizeof(q->x)))
2608 
2609 	switch (version) {
2610 	case 0: {
2611 		struct pf_altq_v0 *imported_q =
2612 		    &((struct pfioc_altq_v0 *)pa)->altq;
2613 
2614 		COPY(ifname);
2615 
2616 		ASSIGN(scheduler);
2617 		ASSIGN(tbrsize); /* 16-bit -> 32-bit */
2618 		ASSIGN(ifbandwidth); /* 32-bit -> 64-bit */
2619 
2620 		COPY(qname);
2621 		COPY(parent);
2622 		ASSIGN(parent_qid);
2623 		ASSIGN(bandwidth); /* 32-bit -> 64-bit */
2624 		ASSIGN(priority);
2625 		ASSIGN(local_flags);
2626 
2627 		ASSIGN(qlimit);
2628 		ASSIGN(flags);
2629 
2630 		if (imported_q->scheduler == ALTQT_HFSC) {
2631 #define ASSIGN_OPT(x) q->pq_u.hfsc_opts.x = imported_q->pq_u.hfsc_opts.x
2632 
2633 			/*
2634 			 * The m1 and m2 parameters are being copied from
2635 			 * 32-bit to 64-bit.
2636 			 */
2637 			ASSIGN_OPT(rtsc_m1);
2638 			ASSIGN_OPT(rtsc_d);
2639 			ASSIGN_OPT(rtsc_m2);
2640 
2641 			ASSIGN_OPT(lssc_m1);
2642 			ASSIGN_OPT(lssc_d);
2643 			ASSIGN_OPT(lssc_m2);
2644 
2645 			ASSIGN_OPT(ulsc_m1);
2646 			ASSIGN_OPT(ulsc_d);
2647 			ASSIGN_OPT(ulsc_m2);
2648 
2649 			ASSIGN_OPT(flags);
2650 
2651 #undef ASSIGN_OPT
2652 		} else
2653 			COPY(pq_u);
2654 
2655 		ASSIGN(qid);
2656 		break;
2657 	}
2658 	case 1: {
2659 		struct pf_altq_v1 *imported_q =
2660 		    &((struct pfioc_altq_v1 *)pa)->altq;
2661 
2662 		COPY(ifname);
2663 
2664 		ASSIGN(scheduler);
2665 		ASSIGN(tbrsize);
2666 		ASSIGN(ifbandwidth);
2667 
2668 		COPY(qname);
2669 		COPY(parent);
2670 		ASSIGN(parent_qid);
2671 		ASSIGN(bandwidth);
2672 		ASSIGN(priority);
2673 		ASSIGN(local_flags);
2674 
2675 		ASSIGN(qlimit);
2676 		ASSIGN(flags);
2677 		COPY(pq_u);
2678 
2679 		ASSIGN(qid);
2680 		break;
2681 	}
2682 	default:
2683 		panic("%s: unhandled struct pfioc_altq version", __func__);
2684 		break;
2685 	}
2686 
2687 #undef ASSIGN
2688 #undef COPY
2689 
2690 	return (0);
2691 }
2692 
2693 static struct pf_altq *
2694 pf_altq_get_nth_active(u_int32_t n)
2695 {
2696 	struct pf_altq		*altq;
2697 	u_int32_t		 nr;
2698 
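	/* treat the two active lists as one sequence: interfaces, then queues */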
2699 	nr = 0;
2700 	TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
2701 		if (nr == n)
2702 			return (altq);
2703 		nr++;
2704 	}
2705 
2706 	TAILQ_FOREACH(altq, V_pf_altqs_active, entries) {
2707 		if (nr == n)
2708 			return (altq);
2709 		nr++;
2710 	}
2711 
2712 	return (NULL);
2713 }
2714 #endif /* ALTQ */
2715 
2716 struct pf_krule *
2717 pf_krule_alloc(void)
2718 {
2719 	struct pf_krule *rule;
2720 
2721 	rule = malloc(sizeof(struct pf_krule), M_PFRULE, M_WAITOK | M_ZERO);
2722 	mtx_init(&rule->nat.mtx, "pf_krule_nat_pool", NULL, MTX_DEF);
2723 	mtx_init(&rule->rdr.mtx, "pf_krule_rdr_pool", NULL, MTX_DEF);
2724 	mtx_init(&rule->route.mtx, "pf_krule_route_pool", NULL, MTX_DEF);
2725 	rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
2726 	    M_WAITOK | M_ZERO);
2727 	return (rule);
2728 }
2729 
2730 void
2731 pf_krule_free(struct pf_krule *rule)
2732 {
2733 #ifdef PF_WANT_32_TO_64_COUNTER
2734 	bool wowned;
2735 #endif
2736 
2737 	if (rule == NULL)
2738 		return;
2739 
2740 #ifdef PF_WANT_32_TO_64_COUNTER
2741 	if (rule->allrulelinked) {
2742 		wowned = PF_RULES_WOWNED();
2743 		if (!wowned)
2744 			PF_RULES_WLOCK();
2745 		LIST_REMOVE(rule, allrulelist);
2746 		V_pf_allrulecount--;
2747 		if (!wowned)
2748 			PF_RULES_WUNLOCK();
2749 	}
2750 #endif
2751 
2752 	pf_counter_u64_deinit(&rule->evaluations);
2753 	for (int i = 0; i < 2; i++) {
2754 		pf_counter_u64_deinit(&rule->packets[i]);
2755 		pf_counter_u64_deinit(&rule->bytes[i]);
2756 	}
2757 	counter_u64_free(rule->states_cur);
2758 	counter_u64_free(rule->states_tot);
2759 	for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++)
2760 		counter_u64_free(rule->src_nodes[sn_type]);
2761 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, rule->timestamp);
2762 
2763 	mtx_destroy(&rule->nat.mtx);
2764 	mtx_destroy(&rule->rdr.mtx);
2765 	mtx_destroy(&rule->route.mtx);
2766 	free(rule, M_PFRULE);
2767 }
2768 
2769 void
2770 pf_krule_clear_counters(struct pf_krule *rule)
2771 {
2772 	pf_counter_u64_zero(&rule->evaluations);
2773 	for (int i = 0; i < 2; i++) {
2774 		pf_counter_u64_zero(&rule->packets[i]);
2775 		pf_counter_u64_zero(&rule->bytes[i]);
2776 	}
2777 	counter_u64_zero(rule->states_tot);
2778 }
2779 
2780 static void
2781 pf_kpooladdr_to_pooladdr(const struct pf_kpooladdr *kpool,
2782     struct pf_pooladdr *pool)
2783 {
2784 
2785 	bzero(pool, sizeof(*pool));
2786 	bcopy(&kpool->addr, &pool->addr, sizeof(pool->addr));
2787 	strlcpy(pool->ifname, kpool->ifname, sizeof(pool->ifname));
2788 }
2789 
2790 static int
2791 pf_pooladdr_to_kpooladdr(const struct pf_pooladdr *pool,
2792     struct pf_kpooladdr *kpool)
2793 {
2794 	int ret;
2795 
2796 	bzero(kpool, sizeof(*kpool));
2797 	bcopy(&pool->addr, &kpool->addr, sizeof(kpool->addr));
2798 	ret = pf_user_strcpy(kpool->ifname, pool->ifname,
2799 	    sizeof(kpool->ifname));
2800 	return (ret);
2801 }
2802 
2803 static void
2804 pf_pool_to_kpool(const struct pf_pool *pool, struct pf_kpool *kpool)
2805 {
2806 	_Static_assert(sizeof(pool->key) == sizeof(kpool->key), "");
2807 	_Static_assert(sizeof(pool->counter) == sizeof(kpool->counter), "");
2808 
2809 	bcopy(&pool->key, &kpool->key, sizeof(kpool->key));
2810 	bcopy(&pool->counter, &kpool->counter, sizeof(kpool->counter));
2811 
2812 	kpool->tblidx = pool->tblidx;
2813 	kpool->proxy_port[0] = pool->proxy_port[0];
2814 	kpool->proxy_port[1] = pool->proxy_port[1];
2815 	kpool->opts = pool->opts;
2816 }
2817 
2818 static int
2819 pf_rule_to_krule(const struct pf_rule *rule, struct pf_krule *krule)
2820 {
2821 	int ret;
2822 
2823 #ifndef INET
2824 	if (rule->af == AF_INET) {
2825 		return (EAFNOSUPPORT);
2826 	}
2827 #endif /* INET */
2828 #ifndef INET6
2829 	if (rule->af == AF_INET6) {
2830 		return (EAFNOSUPPORT);
2831 	}
2832 #endif /* INET6 */
2833 
2834 	ret = pf_check_rule_addr(&rule->src);
2835 	if (ret != 0)
2836 		return (ret);
2837 	ret = pf_check_rule_addr(&rule->dst);
2838 	if (ret != 0)
2839 		return (ret);
2840 
2841 	bcopy(&rule->src, &krule->src, sizeof(rule->src));
2842 	bcopy(&rule->dst, &krule->dst, sizeof(rule->dst));
2843 
2844 	ret = pf_user_strcpy(krule->label[0], rule->label, sizeof(rule->label));
2845 	if (ret != 0)
2846 		return (ret);
2847 	ret = pf_user_strcpy(krule->ifname, rule->ifname, sizeof(rule->ifname));
2848 	if (ret != 0)
2849 		return (ret);
2850 	ret = pf_user_strcpy(krule->qname, rule->qname, sizeof(rule->qname));
2851 	if (ret != 0)
2852 		return (ret);
2853 	ret = pf_user_strcpy(krule->pqname, rule->pqname, sizeof(rule->pqname));
2854 	if (ret != 0)
2855 		return (ret);
2856 	ret = pf_user_strcpy(krule->tagname, rule->tagname,
2857 	    sizeof(rule->tagname));
2858 	if (ret != 0)
2859 		return (ret);
2860 	ret = pf_user_strcpy(krule->match_tagname, rule->match_tagname,
2861 	    sizeof(rule->match_tagname));
2862 	if (ret != 0)
2863 		return (ret);
2864 	ret = pf_user_strcpy(krule->overload_tblname, rule->overload_tblname,
2865 	    sizeof(rule->overload_tblname));
2866 	if (ret != 0)
2867 		return (ret);
2868 
2869 	pf_pool_to_kpool(&rule->rpool, &krule->rdr);
2870 
2871 	/* Don't allow userspace to set evaluations, packets or bytes. */
2872 	/* kif, anchor, overload_tbl are not copied over. */
2873 
2874 	krule->os_fingerprint = rule->os_fingerprint;
2875 
2876 	krule->rtableid = rule->rtableid;
2877 	/* pf_rule->timeout is smaller than pf_krule->timeout */
2878 	bcopy(rule->timeout, krule->timeout, sizeof(rule->timeout));
2879 	krule->max_states = rule->max_states;
2880 	krule->max_src_nodes = rule->max_src_nodes;
2881 	krule->max_src_states = rule->max_src_states;
2882 	krule->max_src_conn = rule->max_src_conn;
2883 	krule->max_src_conn_rate.limit = rule->max_src_conn_rate.limit;
2884 	krule->max_src_conn_rate.seconds = rule->max_src_conn_rate.seconds;
2885 	krule->qid = rule->qid;
2886 	krule->pqid = rule->pqid;
2887 	krule->nr = rule->nr;
2888 	krule->prob = rule->prob;
2889 	krule->cuid = rule->cuid;
2890 	krule->cpid = rule->cpid;
2891 
2892 	krule->return_icmp = rule->return_icmp;
2893 	krule->return_icmp6 = rule->return_icmp6;
2894 	krule->max_mss = rule->max_mss;
2895 	krule->tag = rule->tag;
2896 	krule->match_tag = rule->match_tag;
2897 	krule->scrub_flags = rule->scrub_flags;
2898 
2899 	bcopy(&rule->uid, &krule->uid, sizeof(krule->uid));
2900 	bcopy(&rule->gid, &krule->gid, sizeof(krule->gid));
2901 
2902 	krule->rule_flag = rule->rule_flag;
2903 	krule->action = rule->action;
2904 	krule->direction = rule->direction;
2905 	krule->log = rule->log;
2906 	krule->logif = rule->logif;
2907 	krule->quick = rule->quick;
2908 	krule->ifnot = rule->ifnot;
2909 	krule->match_tag_not = rule->match_tag_not;
2910 	krule->natpass = rule->natpass;
2911 
2912 	krule->keep_state = rule->keep_state;
2913 	krule->af = rule->af;
2914 	krule->proto = rule->proto;
2915 	krule->type = rule->type;
2916 	krule->code = rule->code;
2917 	krule->flags = rule->flags;
2918 	krule->flagset = rule->flagset;
2919 	krule->min_ttl = rule->min_ttl;
2920 	krule->allow_opts = rule->allow_opts;
2921 	krule->rt = rule->rt;
2922 	krule->return_ttl = rule->return_ttl;
2923 	krule->tos = rule->tos;
2924 	krule->set_tos = rule->set_tos;
2925 
2926 	krule->flush = rule->flush;
2927 	krule->prio = rule->prio;
2928 	krule->set_prio[0] = rule->set_prio[0];
2929 	krule->set_prio[1] = rule->set_prio[1];
2930 
2931 	bcopy(&rule->divert, &krule->divert, sizeof(krule->divert));
2932 
2933 	return (0);
2934 }
2935 
2936 int
2937 pf_ioctl_getrules(struct pfioc_rule *pr)
2938 {
2939 	PF_RULES_RLOCK_TRACKER;
2940 	struct pf_kruleset	*ruleset;
2941 	struct pf_krule		*tail;
2942 	int			 rs_num;
2943 
2944 	PF_RULES_RLOCK();
2945 	ruleset = pf_find_kruleset(pr->anchor);
2946 	if (ruleset == NULL) {
2947 		PF_RULES_RUNLOCK();
2948 		return (EINVAL);
2949 	}
2950 	rs_num = pf_get_ruleset_number(pr->rule.action);
2951 	if (rs_num >= PF_RULESET_MAX) {
2952 		PF_RULES_RUNLOCK();
2953 		return (EINVAL);
2954 	}
2955 	tail = TAILQ_LAST(ruleset->rules[rs_num].active.ptr,
2956 	    pf_krulequeue);
2957 	if (tail)
2958 		pr->nr = tail->nr + 1;
2959 	else
2960 		pr->nr = 0;
2961 	pr->ticket = ruleset->rules[rs_num].active.ticket;
2962 	PF_RULES_RUNLOCK();
2963 
2964 	return (0);
2965 }
2966 
2967 static int
2968 pf_rule_checkaf(struct pf_krule *r)
2969 {
2970 	switch (r->af) {
2971 	case 0:
2972 		if (r->rule_flag & PFRULE_AFTO)
2973 			return (EPFNOSUPPORT);
2974 		break;
2975 	case AF_INET:
2976 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET6)
2977 			return (EPFNOSUPPORT);
2978 		break;
2979 #ifdef INET6
2980 	case AF_INET6:
2981 		if ((r->rule_flag & PFRULE_AFTO) && r->naf != AF_INET)
2982 			return (EPFNOSUPPORT);
2983 		break;
2984 #endif /* INET6 */
2985 	default:
2986 		return (EPFNOSUPPORT);
2987 	}
2988 
2989 	if ((r->rule_flag & PFRULE_AFTO) == 0 && r->naf != 0)
2990 		return (EPFNOSUPPORT);
2991 
2992 	return (0);
2993 }
2994 
2995 static int
2996 pf_validate_range(uint8_t op, uint16_t port[2])
2997 {
2998 	uint16_t a = ntohs(port[0]);
2999 	uint16_t b = ntohs(port[1]);
3000 
3001 	if ((op == PF_OP_RRG && a > b) ||  /* 34:12,  i.e. none */
3002 	    (op == PF_OP_IRG && a >= b) || /* 34><12, i.e. none */
3003 	    (op == PF_OP_XRG && a > b))	   /* 34<>22, i.e. all */
3004 		return 1;
3005 	return 0;
3006 }
3007 
3008 static int
3009 pf_chk_limiter_action(int limiter_action)
3010 {
3011 	int rv;
3012 
3013 	switch (limiter_action) {
3014 	case PF_LIMITER_NOMATCH:
3015 	case PF_LIMITER_BLOCK:
3016 		rv = 0;
3017 		break;
3018 	default:
3019 		rv = 1;
3020 	}
3021 
3022 	return (rv);
3023 }
3024 
3025 int
3026 pf_ioctl_addrule(struct pf_krule *rule, uint32_t ticket,
3027     uint32_t pool_ticket, const char *anchor, const char *anchor_call,
3028     uid_t uid, pid_t pid)
3029 {
3030 	struct pf_kruleset	*ruleset;
3031 	struct pf_krule		*tail;
3032 	struct pf_kpooladdr	*pa;
3033 	struct pfi_kkif		*kif = NULL, *rcv_kif = NULL;
3034 	int			 rs_num;
3035 	int			 error = 0;
3036 
3037 #define	ERROUT(x)		ERROUT_FUNCTION(errout, x)
3038 #define	ERROUT_UNLOCKED(x)	ERROUT_FUNCTION(errout_unlocked, x)
3039 
3040 	if ((rule->return_icmp >> 8) > ICMP_MAXTYPE)
3041 		ERROUT_UNLOCKED(EINVAL);
3042 
3043 	if ((error = pf_rule_checkaf(rule)))
3044 		ERROUT_UNLOCKED(error);
3045 	if (pf_validate_range(rule->src.port_op, rule->src.port))
3046 		ERROUT_UNLOCKED(EINVAL);
3047 	if (pf_validate_range(rule->dst.port_op, rule->dst.port))
3048 		ERROUT_UNLOCKED(EINVAL);
3049 	if (pf_chk_limiter_action(rule->statelim.limiter_action) ||
3050 	    pf_chk_limiter_action(rule->sourcelim.limiter_action))
3051 		ERROUT_UNLOCKED(EINVAL);
3052 
3053 	if (rule->ifname[0])
3054 		kif = pf_kkif_create(M_WAITOK);
3055 	if (rule->rcv_ifname[0])
3056 		rcv_kif = pf_kkif_create(M_WAITOK);
3057 	pf_counter_u64_init(&rule->evaluations, M_WAITOK);
3058 	for (int i = 0; i < 2; i++) {
3059 		pf_counter_u64_init(&rule->packets[i], M_WAITOK);
3060 		pf_counter_u64_init(&rule->bytes[i], M_WAITOK);
3061 	}
3062 	rule->states_cur = counter_u64_alloc(M_WAITOK);
3063 	rule->states_tot = counter_u64_alloc(M_WAITOK);
3064 	for (pf_sn_types_t sn_type=0; sn_type<PF_SN_MAX; sn_type++)
3065 		rule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
3066 	rule->cuid = uid;
3067 	rule->cpid = pid;
3068 	TAILQ_INIT(&rule->rdr.list);
3069 	TAILQ_INIT(&rule->nat.list);
3070 	TAILQ_INIT(&rule->route.list);
3071 
3072 	PF_CONFIG_LOCK();
3073 	PF_RULES_WLOCK();
3074 #ifdef PF_WANT_32_TO_64_COUNTER
3075 	LIST_INSERT_HEAD(&V_pf_allrulelist, rule, allrulelist);
3076 	MPASS(!rule->allrulelinked);
3077 	rule->allrulelinked = true;
3078 	V_pf_allrulecount++;
3079 #endif
3080 	ruleset = pf_find_kruleset(anchor);
3081 	if (ruleset == NULL)
3082 		ERROUT(EINVAL);
3083 	rs_num = pf_get_ruleset_number(rule->action);
3084 	if (rs_num >= PF_RULESET_MAX)
3085 		ERROUT(EINVAL);
3086 	if (ticket != ruleset->rules[rs_num].inactive.ticket) {
3087 		DPFPRINTF(PF_DEBUG_MISC,
3088 		    "ticket: %d != [%d]%d", ticket, rs_num,
3089 		    ruleset->rules[rs_num].inactive.ticket);
3090 		ERROUT(EBUSY);
3091 	}
3092 	if (pool_ticket != V_ticket_pabuf) {
3093 		DPFPRINTF(PF_DEBUG_MISC,
3094 		    "pool_ticket: %d != %d", pool_ticket,
3095 		    V_ticket_pabuf);
3096 		ERROUT(EBUSY);
3097 	}
3098 	/*
3099 	 * XXXMJG hack: there is no mechanism to ensure they started the
3100 	 * transaction. Ticket checked above may happen to match by accident,
3101 	 * even if nobody called DIOCXBEGIN, let alone this process.
3102 	 * Partially work around it by checking if the RB tree got allocated,
3103 	 * see pf_begin_rules.
3104 	 */
3105 	if (ruleset->rules[rs_num].inactive.tree == NULL) {
3106 		ERROUT(EINVAL);
3107 	}
3108 
3109 	tail = TAILQ_LAST(ruleset->rules[rs_num].inactive.ptr,
3110 	    pf_krulequeue);
3111 	if (tail)
3112 		rule->nr = tail->nr + 1;
3113 	else
3114 		rule->nr = 0;
3115 	if (rule->ifname[0]) {
3116 		rule->kif = pfi_kkif_attach(kif, rule->ifname);
3117 		kif = NULL;
3118 		pfi_kkif_ref(rule->kif);
3119 	} else
3120 		rule->kif = NULL;
3121 
3122 	if (rule->rcv_ifname[0]) {
3123 		rule->rcv_kif = pfi_kkif_attach(rcv_kif, rule->rcv_ifname);
3124 		rcv_kif = NULL;
3125 		pfi_kkif_ref(rule->rcv_kif);
3126 	} else
3127 		rule->rcv_kif = NULL;
3128 
3129 	if (rule->rtableid > 0 && rule->rtableid >= rt_numfibs)
3130 		ERROUT(EBUSY);
3131 #ifdef ALTQ
3132 	/* set queue IDs */
3133 	if (rule->qname[0] != 0) {
3134 		if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
3135 			ERROUT(EBUSY);
3136 		else if (rule->pqname[0] != 0) {
3137 			if ((rule->pqid =
3138 			    pf_qname2qid(rule->pqname, true)) == 0)
3139 				ERROUT(EBUSY);
3140 		} else
3141 			rule->pqid = rule->qid;
3142 	}
3143 #endif
3144 	if (rule->tagname[0])
3145 		if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
3146 			ERROUT(EBUSY);
3147 	if (rule->match_tagname[0])
3148 		if ((rule->match_tag =
3149 		    pf_tagname2tag(rule->match_tagname)) == 0)
3150 			ERROUT(EBUSY);
3151 	if (rule->rt && !rule->direction)
3152 		ERROUT(EINVAL);
3153 	if (!rule->log)
3154 		rule->logif = 0;
3155 	if (! pf_init_threshold(&rule->pktrate, rule->pktrate.limit,
3156 	   rule->pktrate.seconds))
3157 		ERROUT(ENOMEM);
3158 	if (pf_addr_setup(ruleset, &rule->src.addr, rule->af))
3159 		ERROUT(ENOMEM);
3160 	if (pf_addr_setup(ruleset, &rule->dst.addr, rule->af))
3161 		ERROUT(ENOMEM);
3162 	if (pf_kanchor_setup(rule, ruleset, anchor_call))
3163 		ERROUT(EINVAL);
3164 	if (rule->scrub_flags & PFSTATE_SETPRIO &&
3165 	    (rule->set_prio[0] > PF_PRIO_MAX ||
3166 	    rule->set_prio[1] > PF_PRIO_MAX))
3167 		ERROUT(EINVAL);
3168 	for (int i = 0; i < 3; i++) {
3169 		TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
3170 			if (pa->addr.type == PF_ADDR_TABLE) {
3171 				pa->addr.p.tbl = pfr_attach_table(ruleset,
3172 				    pa->addr.v.tblname);
3173 				if (pa->addr.p.tbl == NULL)
3174 					ERROUT(ENOMEM);
3175 			}
3176 	}
3177 
3178 	rule->overload_tbl = NULL;
3179 	if (rule->overload_tblname[0]) {
3180 		if ((rule->overload_tbl = pfr_attach_table(ruleset,
3181 		    rule->overload_tblname)) == NULL)
3182 			ERROUT(EINVAL);
3183 		else
3184 			rule->overload_tbl->pfrkt_flags |=
3185 			    PFR_TFLAG_ACTIVE;
3186 	}
3187 
3188 	pf_mv_kpool(&V_pf_pabuf[0], &rule->nat.list);
3189 
3190 	/*
3191 	 * Old versions of pfctl provided route redirection pools in the
3192 	 * single common redirection pool rdr.  New versions use rdr only
3193 	 * for rdr-to rules.
3194 	 */
3195 	if (rule->rt > PF_NOPFROUTE && TAILQ_EMPTY(&V_pf_pabuf[2])) {
3196 		pf_mv_kpool(&V_pf_pabuf[1], &rule->route.list);
3197 	} else {
3198 		pf_mv_kpool(&V_pf_pabuf[1], &rule->rdr.list);
3199 		pf_mv_kpool(&V_pf_pabuf[2], &rule->route.list);
3200 	}
3201 
3202 	if (((rule->action == PF_NAT) || (rule->action == PF_RDR) ||
3203 	    (rule->action == PF_BINAT))	&& rule->anchor == NULL &&
3204 	    TAILQ_FIRST(&rule->rdr.list) == NULL) {
3205 		ERROUT(EINVAL);
3206 	}
3207 
3208 	if (rule->rt > PF_NOPFROUTE && (TAILQ_FIRST(&rule->route.list) == NULL)) {
3209 		ERROUT(EINVAL);
3210 	}
3211 
3212 	if (rule->action == PF_PASS && (rule->rdr.opts & PF_POOL_STICKYADDR ||
3213 	    rule->nat.opts & PF_POOL_STICKYADDR) && !rule->keep_state) {
3214 		ERROUT(EINVAL);
3215 	}
3216 
3217 	MPASS(error == 0);
3218 
3219 	rule->nat.cur = TAILQ_FIRST(&rule->nat.list);
3220 	rule->rdr.cur = TAILQ_FIRST(&rule->rdr.list);
3221 	rule->route.cur = TAILQ_FIRST(&rule->route.list);
3222 	rule->route.ipv6_nexthop_af = AF_INET6;
3223 	TAILQ_INSERT_TAIL(ruleset->rules[rs_num].inactive.ptr,
3224 	    rule, entries);
3225 	ruleset->rules[rs_num].inactive.rcount++;
3226 
3227 	PF_RULES_WUNLOCK();
3228 	pf_hash_rule(rule);
3229 	if (RB_INSERT(pf_krule_global, ruleset->rules[rs_num].inactive.tree, rule) != NULL) {
3230 		PF_RULES_WLOCK();
3231 		TAILQ_REMOVE(ruleset->rules[rs_num].inactive.ptr, rule, entries);
3232 		ruleset->rules[rs_num].inactive.rcount--;
3233 		pf_free_rule(rule);
3234 		rule = NULL;
3235 		ERROUT(EEXIST);
3236 	}
3237 	PF_CONFIG_UNLOCK();
3238 
3239 	return (0);
3240 
3241 #undef ERROUT
3242 #undef ERROUT_UNLOCKED
3243 errout:
3244 	PF_RULES_WUNLOCK();
3245 	PF_CONFIG_UNLOCK();
3246 errout_unlocked:
3247 	pf_kkif_free(rcv_kif);
3248 	pf_kkif_free(kif);
3249 	pf_krule_free(rule);
3250 	return (error);
3251 }
3252 
3253 static bool
3254 pf_label_match(const struct pf_krule *rule, const char *label)
3255 {
3256 	int i = 0;
3257 
3258 	while (*rule->label[i]) {
3259 		if (strcmp(rule->label[i], label) == 0)
3260 			return (true);
3261 		i++;
3262 	}
3263 
3264 	return (false);
3265 }
3266 
3267 static unsigned int
3268 pf_kill_matching_state(struct pf_state_key_cmp *key, int dir)
3269 {
3270 	struct pf_kstate *s;
3271 	int more = 0;
3272 
3273 	s = pf_find_state_all(key, dir, &more);
3274 	if (s == NULL)
3275 		return (0);
3276 
3277 	if (more) {
3278 		PF_STATE_UNLOCK(s);
3279 		return (0);
3280 	}
3281 
3282 	pf_remove_state(s);
3283 	return (1);
3284 }
3285 
3286 static int
3287 pf_killstates_row(struct pf_kstate_kill *psk, struct pf_idhash *ih)
3288 {
3289 	struct pf_kstate	*s;
3290 	struct pf_state_key	*sk;
3291 	struct pf_addr		*srcaddr, *dstaddr;
3292 	struct pf_state_key_cmp	 match_key;
3293 	int			 idx, killed = 0;
3294 	unsigned int		 dir;
3295 	u_int16_t		 srcport, dstport;
3296 	struct pfi_kkif		*kif;
3297 
3298 relock_DIOCKILLSTATES:
3299 	PF_HASHROW_LOCK(ih);
3300 	LIST_FOREACH(s, &ih->states, entry) {
3301 		/* For floating states look at the original kif. */
3302 		kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
3303 
3304 		sk = s->key[psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE];
3305 		if (s->direction == PF_OUT) {
3306 			srcaddr = &sk->addr[1];
3307 			dstaddr = &sk->addr[0];
3308 			srcport = sk->port[1];
3309 			dstport = sk->port[0];
3310 		} else {
3311 			srcaddr = &sk->addr[0];
3312 			dstaddr = &sk->addr[1];
3313 			srcport = sk->port[0];
3314 			dstport = sk->port[1];
3315 		}
3316 
3317 		if (psk->psk_af && sk->af != psk->psk_af)
3318 			continue;
3319 
3320 		if (psk->psk_proto && psk->psk_proto != sk->proto)
3321 			continue;
3322 
3323 		if (! pf_match_addr(psk->psk_src.neg,
3324 		    &psk->psk_src.addr.v.a.addr,
3325 		    &psk->psk_src.addr.v.a.mask, srcaddr, sk->af))
3326 			continue;
3327 
3328 		if (! pf_match_addr(psk->psk_dst.neg,
3329 		    &psk->psk_dst.addr.v.a.addr,
3330 		    &psk->psk_dst.addr.v.a.mask, dstaddr, sk->af))
3331 			continue;
3332 
3333 		if (!  pf_match_addr(psk->psk_rt_addr.neg,
3334 		    &psk->psk_rt_addr.addr.v.a.addr,
3335 		    &psk->psk_rt_addr.addr.v.a.mask,
3336 		    &s->act.rt_addr, sk->af))
3337 			continue;
3338 
3339 		if (psk->psk_src.port_op != 0 &&
3340 		    ! pf_match_port(psk->psk_src.port_op,
3341 		    psk->psk_src.port[0], psk->psk_src.port[1], srcport))
3342 			continue;
3343 
3344 		if (psk->psk_dst.port_op != 0 &&
3345 		    ! pf_match_port(psk->psk_dst.port_op,
3346 		    psk->psk_dst.port[0], psk->psk_dst.port[1], dstport))
3347 			continue;
3348 
3349 		if (psk->psk_label[0] &&
3350 		    ! pf_label_match(s->rule, psk->psk_label))
3351 			continue;
3352 
3353 		if (psk->psk_ifname[0] && strcmp(psk->psk_ifname,
3354 		    kif->pfik_name))
3355 			continue;
3356 
3357 		if (psk->psk_kill_match) {
3358 			/* Create the key to find matching states, with lock
3359 			 * held. */
3360 
3361 			bzero(&match_key, sizeof(match_key));
3362 
3363 			if (s->direction == PF_OUT) {
3364 				dir = PF_IN;
3365 				idx = psk->psk_nat ? PF_SK_WIRE : PF_SK_STACK;
3366 			} else {
3367 				dir = PF_OUT;
3368 				idx = psk->psk_nat ? PF_SK_STACK : PF_SK_WIRE;
3369 			}
3370 
3371 			match_key.af = s->key[idx]->af;
3372 			match_key.proto = s->key[idx]->proto;
3373 			pf_addrcpy(&match_key.addr[0],
3374 			    &s->key[idx]->addr[1], match_key.af);
3375 			match_key.port[0] = s->key[idx]->port[1];
3376 			pf_addrcpy(&match_key.addr[1],
3377 			    &s->key[idx]->addr[0], match_key.af);
3378 			match_key.port[1] = s->key[idx]->port[0];
3379 		}
3380 
3381 		pf_remove_state(s);
3382 		killed++;
3383 
3384 		if (psk->psk_kill_match)
3385 			killed += pf_kill_matching_state(&match_key, dir);
3386 
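		/*
		 * pf_remove_state() leaves the hash row unlocked, so the
		 * LIST_FOREACH is no longer valid; relock and rescan.
		 */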
3387 		goto relock_DIOCKILLSTATES;
3388 	}
3389 	PF_HASHROW_UNLOCK(ih);
3390 
3391 	return (killed);
3392 }
3393 
3394 int
3395 pf_start(void)
3396 {
3397 	int error = 0;
3398 
3399 	sx_xlock(&V_pf_ioctl_lock);
3400 	if (V_pf_status.running)
3401 		error = EEXIST;
3402 	else {
3403 		hook_pf();
3404 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
3405 			hook_pf_eth();
3406 		V_pf_status.running = 1;
3407 		V_pf_status.since = time_uptime;
3408 		new_unrhdr64(&V_pf_stateid, time_second);
3409 
3410 		DPFPRINTF(PF_DEBUG_MISC, "pf: started");
3411 	}
3412 	sx_xunlock(&V_pf_ioctl_lock);
3413 
3414 	return (error);
3415 }
3416 
3417 int
3418 pf_stop(void)
3419 {
3420 	int error = 0;
3421 
3422 	sx_xlock(&V_pf_ioctl_lock);
3423 	if (!V_pf_status.running)
3424 		error = ENOENT;
3425 	else {
3426 		V_pf_status.running = 0;
3427 		dehook_pf();
3428 		dehook_pf_eth();
3429 		V_pf_status.since = time_uptime;
3430 		DPFPRINTF(PF_DEBUG_MISC, "pf: stopped");
3431 	}
3432 	sx_xunlock(&V_pf_ioctl_lock);
3433 
3434 	return (error);
3435 }
3436 
3437 void
3438 pf_ioctl_clear_status(void)
3439 {
3440 	PF_RULES_WLOCK();
3441 	for (int i = 0; i < PFRES_MAX; i++)
3442 		counter_u64_zero(V_pf_status.counters[i]);
3443 	for (int i = 0; i < FCNT_MAX; i++)
3444 		pf_counter_u64_zero(&V_pf_status.fcounters[i]);
3445 	for (int i = 0; i < SCNT_MAX; i++)
3446 		counter_u64_zero(V_pf_status.scounters[i]);
3447 	for (int i = 0; i < NCNT_MAX; i++)
3448 		counter_u64_zero(V_pf_status.ncounters[i]);
3449 	for (int i = 0; i < KLCNT_MAX; i++)
3450 		counter_u64_zero(V_pf_status.lcounters[i]);
3451 	V_pf_status.since = time_uptime;
3452 	if (*V_pf_status.ifname)
3453 		pfi_update_status(V_pf_status.ifname, NULL);
3454 	PF_RULES_WUNLOCK();
3455 }
3456 
3457 int
3458 pf_ioctl_set_timeout(int timeout, int seconds, int *prev_seconds)
3459 {
3460 	uint32_t old;
3461 
3462 	if (timeout < 0 || timeout >= PFTM_MAX ||
3463 	    seconds < 0)
3464 		return (EINVAL);
3465 
3466 	PF_RULES_WLOCK();
3467 	old = V_pf_default_rule.timeout[timeout];
3468 	if (timeout == PFTM_INTERVAL && seconds == 0)
3469 		seconds = 1;
3470 	V_pf_default_rule.timeout[timeout] = seconds;
3471 	if (timeout == PFTM_INTERVAL && seconds < old)
3472 		wakeup(pf_purge_thread);
3473 
3474 	if (prev_seconds != NULL)
3475 		*prev_seconds = old;
3476 
3477 	PF_RULES_WUNLOCK();
3478 
3479 	return (0);
3480 }
3481 
3482 int
3483 pf_ioctl_get_timeout(int timeout, int *seconds)
3484 {
3485 	PF_RULES_RLOCK_TRACKER;
3486 
3487 	if (timeout < 0 || timeout >= PFTM_MAX)
3488 		return (EINVAL);
3489 
3490 	PF_RULES_RLOCK();
3491 	*seconds = V_pf_default_rule.timeout[timeout];
3492 	PF_RULES_RUNLOCK();
3493 
3494 	return (0);
3495 }
3496 
3497 int
3498 pf_ioctl_set_limit(int index, unsigned int limit, unsigned int *old_limit)
3499 {
3500 
3501 	PF_RULES_WLOCK();
3502 	if (index < 0 || index >= PF_LIMIT_MAX ||
3503 	    V_pf_limits[index].zone == NULL) {
3504 		PF_RULES_WUNLOCK();
3505 		return (EINVAL);
3506 	}
3507 	uma_zone_set_max(V_pf_limits[index].zone,
3508 	    limit == 0 ? INT_MAX : limit);
3509 	if (old_limit != NULL)
3510 		*old_limit = V_pf_limits[index].limit;
3511 	V_pf_limits[index].limit = limit;
3512 	PF_RULES_WUNLOCK();
3513 
3514 	return (0);
3515 }
3516 
3517 int
3518 pf_ioctl_get_limit(int index, unsigned int *limit)
3519 {
3520 	PF_RULES_RLOCK_TRACKER;
3521 
3522 	if (index < 0 || index >= PF_LIMIT_MAX)
3523 		return (EINVAL);
3524 
3525 	PF_RULES_RLOCK();
3526 	*limit = V_pf_limits[index].limit;
3527 	PF_RULES_RUNLOCK();
3528 
3529 	return (0);
3530 }
3531 
3532 int
3533 pf_ioctl_begin_addrs(uint32_t *ticket)
3534 {
3535 	PF_RULES_WLOCK();
3536 	pf_empty_kpool(&V_pf_pabuf[0]);
3537 	pf_empty_kpool(&V_pf_pabuf[1]);
3538 	pf_empty_kpool(&V_pf_pabuf[2]);
3539 	*ticket = ++V_ticket_pabuf;
3540 	PF_RULES_WUNLOCK();
3541 
3542 	return (0);
3543 }
3544 
3545 int
3546 pf_ioctl_add_addr(struct pf_nl_pooladdr *pp)
3547 {
3548 	struct pf_kpooladdr	*pa = NULL;
3549 	struct pfi_kkif		*kif = NULL;
3550 	int error;
3551 
3552 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
3553 	    pp->which != PF_RT)
3554 		return (EINVAL);
3555 
3556 	switch (pp->af) {
3557 #ifdef INET
3558 	case AF_INET:
3559 		/* FALLTHROUGH */
3560 #endif /* INET */
3561 #ifdef INET6
3562 	case AF_INET6:
3563 		/* FALLTHROUGH */
3564 #endif /* INET6 */
3565 	case AF_UNSPEC:
3566 		break;
3567 	default:
3568 		return (EAFNOSUPPORT);
3569 	}
3570 
3571 	if (pp->addr.addr.type != PF_ADDR_ADDRMASK &&
3572 	    pp->addr.addr.type != PF_ADDR_DYNIFTL &&
3573 	    pp->addr.addr.type != PF_ADDR_TABLE)
3574 		return (EINVAL);
3575 
3576 	if (pp->addr.addr.p.dyn != NULL)
3577 		return (EINVAL);
3578 
3579 	pa = malloc(sizeof(*pa), M_PFRULE, M_WAITOK);
3580 	error = pf_pooladdr_to_kpooladdr(&pp->addr, pa);
3581 	if (error != 0)
3582 		goto out;
3583 	if (pa->ifname[0])
3584 		kif = pf_kkif_create(M_WAITOK);
3585 	PF_RULES_WLOCK();
3586 	if (pp->ticket != V_ticket_pabuf) {
3587 		PF_RULES_WUNLOCK();
3588 		if (pa->ifname[0])
3589 			pf_kkif_free(kif);
3590 		error = EBUSY;
3591 		goto out;
3592 	}
3593 	if (pa->ifname[0]) {
3594 		pa->kif = pfi_kkif_attach(kif, pa->ifname);
3595 		kif = NULL;
3596 		pfi_kkif_ref(pa->kif);
3597 	} else
3598 		pa->kif = NULL;
3599 	if (pa->addr.type == PF_ADDR_DYNIFTL && ((error =
3600 	    pfi_dynaddr_setup(&pa->addr, pp->af)) != 0)) {
3601 		if (pa->ifname[0])
3602 			pfi_kkif_unref(pa->kif);
3603 		PF_RULES_WUNLOCK();
3604 		goto out;
3605 	}
3606 	pa->af = pp->af;
3607 	switch (pp->which) {
3608 	case PF_NAT:
3609 		TAILQ_INSERT_TAIL(&V_pf_pabuf[0], pa, entries);
3610 		break;
3611 	case PF_RDR:
3612 		TAILQ_INSERT_TAIL(&V_pf_pabuf[1], pa, entries);
3613 		break;
3614 	case PF_RT:
3615 		TAILQ_INSERT_TAIL(&V_pf_pabuf[2], pa, entries);
3616 		break;
3617 	}
3618 	PF_RULES_WUNLOCK();
3619 
3620 	return (0);
3621 
3622 out:
3623 	free(pa, M_PFRULE);
3624 	return (error);
3625 }
3626 
3627 int
3628 pf_ioctl_get_addrs(struct pf_nl_pooladdr *pp)
3629 {
3630 	struct pf_kpool		*pool;
3631 	struct pf_kpooladdr	*pa;
3632 
3633 	PF_RULES_RLOCK_TRACKER;
3634 
3635 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
3636 	    pp->which != PF_RT)
3637 		return (EINVAL);
3638 
3639 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3640 	pp->nr = 0;
3641 
3642 	PF_RULES_RLOCK();
3643 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3644 	    pp->r_num, 0, 1, 0, pp->which);
3645 	if (pool == NULL) {
3646 		PF_RULES_RUNLOCK();
3647 		return (EBUSY);
3648 	}
3649 	TAILQ_FOREACH(pa, &pool->list, entries)
3650 		pp->nr++;
3651 	PF_RULES_RUNLOCK();
3652 
3653 	return (0);
3654 }
3655 
3656 int
3657 pf_ioctl_get_addr(struct pf_nl_pooladdr *pp)
3658 {
3659 	struct pf_kpool		*pool;
3660 	struct pf_kpooladdr	*pa;
3661 	u_int32_t		 nr = 0;
3662 
3663 	if (pp->which != PF_RDR && pp->which != PF_NAT &&
3664 	    pp->which != PF_RT)
3665 		return (EINVAL);
3666 
3667 	PF_RULES_RLOCK_TRACKER;
3668 
3669 	pp->anchor[sizeof(pp->anchor) - 1] = '\0';
3670 
3671 	PF_RULES_RLOCK();
3672 	pool = pf_get_kpool(pp->anchor, pp->ticket, pp->r_action,
3673 	    pp->r_num, 0, 1, 1, pp->which);
3674 	if (pool == NULL) {
3675 		PF_RULES_RUNLOCK();
3676 		return (EBUSY);
3677 	}
3678 	pa = TAILQ_FIRST(&pool->list);
3679 	while ((pa != NULL) && (nr < pp->nr)) {
3680 		pa = TAILQ_NEXT(pa, entries);
3681 		nr++;
3682 	}
3683 	if (pa == NULL) {
3684 		PF_RULES_RUNLOCK();
3685 		return (EBUSY);
3686 	}
3687 	pf_kpooladdr_to_pooladdr(pa, &pp->addr);
3688 	pp->af = pa->af;
3689 	pf_addr_copyout(&pp->addr.addr);
3690 	PF_RULES_RUNLOCK();
3691 
3692 	return (0);
3693 }
3694 
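/*
 * Count the child anchors of the given ruleset path.  The main ruleset
 * has no anchor of its own, so its children are found by scanning the
 * global anchor tree for parentless entries.
 */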
3695 int
3696 pf_ioctl_get_rulesets(struct pfioc_ruleset *pr)
3697 {
3698 	struct pf_kruleset	*ruleset;
3699 	struct pf_kanchor	*anchor;
3700 
3701 	PF_RULES_RLOCK_TRACKER;
3702 
3703 	pr->path[sizeof(pr->path) - 1] = '\0';
3704 
3705 	PF_RULES_RLOCK();
3706 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
3707 		PF_RULES_RUNLOCK();
3708 		return (ENOENT);
3709 	}
3710 	pr->nr = 0;
3711 	if (ruleset == &pf_main_ruleset) {
3712 		/* XXX kludge for pf_main_ruleset */
3713 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
3714 			if (anchor->parent == NULL)
3715 				pr->nr++;
3716 	} else {
3717 		RB_FOREACH(anchor, pf_kanchor_node,
3718 		    &ruleset->anchor->children)
3719 			pr->nr++;
3720 	}
3721 	PF_RULES_RUNLOCK();
3722 
3723 	return (0);
3724 }
3725 
3726 int
3727 pf_ioctl_get_ruleset(struct pfioc_ruleset *pr)
3728 {
3729 	struct pf_kruleset	*ruleset;
3730 	struct pf_kanchor	*anchor;
3731 	u_int32_t		 nr = 0;
3732 	int			 error = 0;
3733 
3734 	PF_RULES_RLOCK_TRACKER;
3735 
3736 	PF_RULES_RLOCK();
3737 	if ((ruleset = pf_find_kruleset(pr->path)) == NULL) {
3738 		PF_RULES_RUNLOCK();
3739 		return (ENOENT);
3740 	}
3741 
3742 	pr->name[0] = '\0';
3743 	if (ruleset == &pf_main_ruleset) {
3744 		/* XXX kludge for pf_main_ruleset */
3745 		RB_FOREACH(anchor, pf_kanchor_global, &V_pf_anchors)
3746 			if (anchor->parent == NULL && nr++ == pr->nr) {
3747 				strlcpy(pr->name, anchor->name,
3748 				    sizeof(pr->name));
3749 				break;
3750 			}
3751 	} else {
3752 		RB_FOREACH(anchor, pf_kanchor_node,
3753 		    &ruleset->anchor->children)
3754 			if (nr++ == pr->nr) {
3755 				strlcpy(pr->name, anchor->name,
3756 				    sizeof(pr->name));
3757 				break;
3758 			}
3759 	}
3760 	if (!pr->name[0])
3761 		error = EBUSY;
3762 	PF_RULES_RUNLOCK();
3763 
3764 	return (error);
3765 }
3766 
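/*
 * Look up the translated addresses for an active state (DIOCNATLOOK).
 * The caller passes the pre-translation tuple; for inbound lookups the
 * state key stores src/dst from the opposite perspective, hence the
 * sidx/didx swap below.  E2BIG is returned when the tuple is ambiguous,
 * i.e. it matches more than one state.
 *
 * Hypothetical userland sketch (pf_fd is an open /dev/pf descriptor and
 * saddr/daddr/sport/dport are the original flow tuple):
 *
 *	struct pfioc_natlook pnl = { 0 };
 *	pnl.af = AF_INET;
 *	pnl.proto = IPPROTO_TCP;
 *	pnl.direction = PF_OUT;
 *	pnl.saddr = saddr; pnl.daddr = daddr;
 *	pnl.sport = sport; pnl.dport = dport;
 *	if (ioctl(pf_fd, DIOCNATLOOK, &pnl) == 0)
 *		use pnl.rsaddr:rsport and pnl.rdaddr:rdport
 *
 * On success rsaddr/rsport and rdaddr/rdport hold the addresses and
 * ports as the state sees them after translation.
 */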
3767 int
3768 pf_ioctl_natlook(struct pfioc_natlook *pnl)
3769 {
3770 	struct pf_state_key	*sk;
3771 	struct pf_kstate	*state;
3772 	struct pf_state_key_cmp	 key;
3773 	int			 m = 0, direction = pnl->direction;
3774 	int			 sidx, didx;
3775 
3776 	/* NATLOOK src and dst are reversed, so reverse sidx/didx */
3777 	sidx = (direction == PF_IN) ? 1 : 0;
3778 	didx = (direction == PF_IN) ? 0 : 1;
3779 
3780 	if (!pnl->proto ||
3781 	    PF_AZERO(&pnl->saddr, pnl->af) ||
3782 	    PF_AZERO(&pnl->daddr, pnl->af) ||
3783 	    ((pnl->proto == IPPROTO_TCP ||
3784 	    pnl->proto == IPPROTO_UDP) &&
3785 	    (!pnl->dport || !pnl->sport)))
3786 		return (EINVAL);
3787 
3788 	switch (pnl->direction) {
3789 	case PF_IN:
3790 	case PF_OUT:
3791 	case PF_INOUT:
3792 		break;
3793 	default:
3794 		return (EINVAL);
3795 	}
3796 
3797 	switch (pnl->af) {
3798 #ifdef INET
3799 	case AF_INET:
3800 		break;
3801 #endif /* INET */
3802 #ifdef INET6
3803 	case AF_INET6:
3804 		break;
3805 #endif /* INET6 */
3806 	default:
3807 		return (EAFNOSUPPORT);
3808 	}
3809 
3810 	bzero(&key, sizeof(key));
3811 	key.af = pnl->af;
3812 	key.proto = pnl->proto;
3813 	pf_addrcpy(&key.addr[sidx], &pnl->saddr, pnl->af);
3814 	key.port[sidx] = pnl->sport;
3815 	pf_addrcpy(&key.addr[didx], &pnl->daddr, pnl->af);
3816 	key.port[didx] = pnl->dport;
3817 
3818 	state = pf_find_state_all(&key, direction, &m);
3819 	if (state == NULL)
3820 		return (ENOENT);
3821 
3822 	if (m > 1) {
3823 		PF_STATE_UNLOCK(state);
3824 		return (E2BIG);	/* more than one state */
3825 	}
3826 
3827 	sk = state->key[sidx];
3828 	pf_addrcpy(&pnl->rsaddr,
3829 	    &sk->addr[sidx], sk->af);
3830 	pnl->rsport = sk->port[sidx];
3831 	pf_addrcpy(&pnl->rdaddr,
3832 	    &sk->addr[didx], sk->af);
3833 	pnl->rdport = sk->port[didx];
3834 	PF_STATE_UNLOCK(state);
3835 
3836 	return (0);
3837 }
3838 
3839 static int
3840 pfioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
3841 {
3842 	int			 error = 0;
3843 	PF_RULES_RLOCK_TRACKER;
3844 
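	/*
	 * ERROUT_IOCTL fires the pf:ioctl:ioctl:error SDT probe with the
	 * command, the errno and the source line, then jumps to the
	 * per-command error label.
	 */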
3845 #define	ERROUT_IOCTL(target, x)					\
3846     do {								\
3847 	    error = (x);						\
3848 	    SDT_PROBE3(pf, ioctl, ioctl, error, cmd, error, __LINE__);	\
3849 	    goto target;						\
3850     } while (0)
3851 
3852 
3853 	/* XXX keep in sync with switch() below */
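	/*
	 * When the securelevel exceeds 2, only the query ioctls listed
	 * here may pass; the table commands below them are also let
	 * through if PFR_FLAG_DUMMY makes them no-ops.
	 */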
3854 	if (securelevel_gt(td->td_ucred, 2))
3855 		switch (cmd) {
3856 		case DIOCGETRULES:
3857 		case DIOCGETRULENV:
3858 		case DIOCGETADDRS:
3859 		case DIOCGETADDR:
3860 		case DIOCGETSTATE:
3861 		case DIOCGETSTATENV:
3862 		case DIOCSETSTATUSIF:
3863 		case DIOCGETSTATUSNV:
3864 		case DIOCCLRSTATUS:
3865 		case DIOCNATLOOK:
3866 		case DIOCSETDEBUG:
3867 #ifdef COMPAT_FREEBSD14
3868 		case DIOCGETSTATES:
3869 		case DIOCGETSTATESV2:
3870 #endif
3871 		case DIOCGETTIMEOUT:
3872 		case DIOCCLRRULECTRS:
3873 		case DIOCGETLIMIT:
3874 		case DIOCGETALTQSV0:
3875 		case DIOCGETALTQSV1:
3876 		case DIOCGETALTQV0:
3877 		case DIOCGETALTQV1:
3878 		case DIOCGETQSTATSV0:
3879 		case DIOCGETQSTATSV1:
3880 		case DIOCGETRULESETS:
3881 		case DIOCGETRULESET:
3882 		case DIOCRGETTABLES:
3883 		case DIOCRGETTSTATS:
3884 		case DIOCRCLRTSTATS:
3885 		case DIOCRCLRADDRS:
3886 		case DIOCRADDADDRS:
3887 		case DIOCRDELADDRS:
3888 		case DIOCRSETADDRS:
3889 		case DIOCRGETADDRS:
3890 		case DIOCRGETASTATS:
3891 		case DIOCRCLRASTATS:
3892 		case DIOCRTSTADDRS:
3893 		case DIOCOSFPGET:
3894 		case DIOCGETSRCNODES:
3895 		case DIOCCLRSRCNODES:
3896 		case DIOCGETSYNCOOKIES:
3897 		case DIOCIGETIFACES:
3898 		case DIOCGIFSPEEDV0:
3899 		case DIOCGIFSPEEDV1:
3900 		case DIOCSETIFFLAG:
3901 		case DIOCCLRIFFLAG:
3902 		case DIOCGETETHRULES:
3903 		case DIOCGETETHRULE:
3904 		case DIOCGETETHRULESETS:
3905 		case DIOCGETETHRULESET:
3906 			break;
3907 		case DIOCRCLRTABLES:
3908 		case DIOCRADDTABLES:
3909 		case DIOCRDELTABLES:
3910 		case DIOCRSETTFLAGS:
3911 			if (((struct pfioc_table *)addr)->pfrio_flags &
3912 			    PFR_FLAG_DUMMY)
3913 				break; /* dummy operation ok */
3914 			return (EPERM);
3915 		default:
3916 			return (EPERM);
3917 		}
3918 
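	/*
	 * A descriptor opened without FWRITE may only issue get/query
	 * ioctls; dummy table operations are allowed through and are
	 * granted a temporary FWRITE so they take the write-lock path
	 * below.
	 */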
3919 	if (!(flags & FWRITE))
3920 		switch (cmd) {
3921 		case DIOCGETRULES:
3922 		case DIOCGETADDRS:
3923 		case DIOCGETADDR:
3924 		case DIOCGETSTATE:
3925 		case DIOCGETSTATENV:
3926 		case DIOCGETSTATUSNV:
3927 #ifdef COMPAT_FREEBSD14
3928 		case DIOCGETSTATES:
3929 		case DIOCGETSTATESV2:
3930 #endif
3931 		case DIOCGETTIMEOUT:
3932 		case DIOCGETLIMIT:
3933 		case DIOCGETALTQSV0:
3934 		case DIOCGETALTQSV1:
3935 		case DIOCGETALTQV0:
3936 		case DIOCGETALTQV1:
3937 		case DIOCGETQSTATSV0:
3938 		case DIOCGETQSTATSV1:
3939 		case DIOCGETRULESETS:
3940 		case DIOCGETRULESET:
3941 		case DIOCNATLOOK:
3942 		case DIOCRGETTABLES:
3943 		case DIOCRGETTSTATS:
3944 		case DIOCRGETADDRS:
3945 		case DIOCRGETASTATS:
3946 		case DIOCRTSTADDRS:
3947 		case DIOCOSFPGET:
3948 		case DIOCGETSRCNODES:
3949 		case DIOCGETSYNCOOKIES:
3950 		case DIOCIGETIFACES:
3951 		case DIOCGIFSPEEDV1:
3952 		case DIOCGIFSPEEDV0:
3953 		case DIOCGETRULENV:
3954 		case DIOCGETETHRULES:
3955 		case DIOCGETETHRULE:
3956 		case DIOCGETETHRULESETS:
3957 		case DIOCGETETHRULESET:
3958 			break;
3959 		case DIOCRCLRTABLES:
3960 		case DIOCRADDTABLES:
3961 		case DIOCRDELTABLES:
3962 		case DIOCRCLRTSTATS:
3963 		case DIOCRCLRADDRS:
3964 		case DIOCRADDADDRS:
3965 		case DIOCRDELADDRS:
3966 		case DIOCRSETADDRS:
3967 		case DIOCRSETTFLAGS:
3968 			if (((struct pfioc_table *)addr)->pfrio_flags &
3969 			    PFR_FLAG_DUMMY) {
3970 				flags |= FWRITE; /* need write lock for dummy */
3971 				break; /* dummy operation ok */
3972 			}
3973 			return (EACCES);
3974 		default:
3975 			return (EACCES);
3976 		}
3977 
3978 	CURVNET_SET(TD_TO_VNET(td));
3979 
3980 	switch (cmd) {
3981 #ifdef COMPAT_FREEBSD14
3982 	case DIOCSTART:
3983 		error = pf_start();
3984 		break;
3985 
3986 	case DIOCSTOP:
3987 		error = pf_stop();
3988 		break;
3989 #endif
3990 
3991 	case DIOCGETETHRULES: {
3992 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
3993 		nvlist_t		*nvl;
3994 		void			*packed;
3995 		struct pf_keth_rule	*tail;
3996 		struct pf_keth_ruleset	*rs;
3997 		u_int32_t		 ticket, nr;
3998 		const char		*anchor = "";
3999 
4000 		nvl = NULL;
4001 		packed = NULL;
4002 
4003 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULES_error, x)
4004 
4005 		if (nv->len > pf_ioctl_maxcount)
4006 			ERROUT(ENOMEM);
4007 
4008 		/* Copy the request in */
4009 		packed = malloc(nv->len, M_NVLIST, M_WAITOK);
4010 		error = copyin(nv->data, packed, nv->len);
4011 		if (error)
4012 			ERROUT(error);
4013 
4014 		nvl = nvlist_unpack(packed, nv->len, 0);
4015 		if (nvl == NULL)
4016 			ERROUT(EBADMSG);
4017 
4018 		if (! nvlist_exists_string(nvl, "anchor"))
4019 			ERROUT(EBADMSG);
4020 
4021 		anchor = nvlist_get_string(nvl, "anchor");
4022 
4023 		rs = pf_find_keth_ruleset(anchor);
4024 
4025 		nvlist_destroy(nvl);
4026 		nvl = NULL;
4027 		free(packed, M_NVLIST);
4028 		packed = NULL;
4029 
4030 		if (rs == NULL)
4031 			ERROUT(ENOENT);
4032 
4033 		/* Reply */
4034 		nvl = nvlist_create(0);
4035 		if (nvl == NULL)
4036 			ERROUT(ENOMEM);
4037 
4038 		PF_RULES_RLOCK();
4039 
4040 		ticket = rs->active.ticket;
4041 		tail = TAILQ_LAST(rs->active.rules, pf_keth_ruleq);
4042 		if (tail)
4043 			nr = tail->nr + 1;
4044 		else
4045 			nr = 0;
4046 
4047 		PF_RULES_RUNLOCK();
4048 
4049 		nvlist_add_number(nvl, "ticket", ticket);
4050 		nvlist_add_number(nvl, "nr", nr);
4051 
4052 		packed = nvlist_pack(nvl, &nv->len);
4053 		if (packed == NULL)
4054 			ERROUT(ENOMEM);
4055 
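		/*
		 * A zero-sized reply buffer is a length probe: nv->len
		 * already holds the required size, so return success
		 * without a copyout.
		 */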
4056 		if (nv->size == 0)
4057 			ERROUT(0);
4058 		else if (nv->size < nv->len)
4059 			ERROUT(ENOSPC);
4060 
4061 		error = copyout(packed, nv->data, nv->len);
4062 
4063 #undef ERROUT
4064 DIOCGETETHRULES_error:
4065 		free(packed, M_NVLIST);
4066 		nvlist_destroy(nvl);
4067 		break;
4068 	}
4069 
4070 	case DIOCGETETHRULE: {
4071 		struct epoch_tracker	 et;
4072 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
4073 		nvlist_t		*nvl = NULL;
4074 		void			*nvlpacked = NULL;
4075 		struct pf_keth_rule	*rule = NULL;
4076 		struct pf_keth_ruleset	*rs;
4077 		u_int32_t		 ticket, nr;
4078 		bool			 clear = false;
4079 		const char		*anchor;
4080 
4081 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULE_error, x)
4082 
4083 		if (nv->len > pf_ioctl_maxcount)
4084 			ERROUT(ENOMEM);
4085 
4086 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
4087 		error = copyin(nv->data, nvlpacked, nv->len);
4088 		if (error)
4089 			ERROUT(error);
4090 
4091 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
4092 		if (nvl == NULL)
4093 			ERROUT(EBADMSG);
4094 		if (! nvlist_exists_number(nvl, "ticket"))
4095 			ERROUT(EBADMSG);
4096 		ticket = nvlist_get_number(nvl, "ticket");
4097 		if (! nvlist_exists_string(nvl, "anchor"))
4098 			ERROUT(EBADMSG);
4099 		anchor = nvlist_get_string(nvl, "anchor");
4100 
4101 		if (nvlist_exists_bool(nvl, "clear"))
4102 			clear = nvlist_get_bool(nvl, "clear");
4103 
4104 		if (clear && !(flags & FWRITE))
4105 			ERROUT(EACCES);
4106 
4107 		if (! nvlist_exists_number(nvl, "nr"))
4108 			ERROUT(EBADMSG);
4109 		nr = nvlist_get_number(nvl, "nr");
4110 
4111 		PF_RULES_RLOCK();
4112 		rs = pf_find_keth_ruleset(anchor);
4113 		if (rs == NULL) {
4114 			PF_RULES_RUNLOCK();
4115 			ERROUT(ENOENT);
4116 		}
4117 		if (ticket != rs->active.ticket) {
4118 			PF_RULES_RUNLOCK();
4119 			ERROUT(EBUSY);
4120 		}
4121 
4122 		nvlist_destroy(nvl);
4123 		nvl = NULL;
4124 		free(nvlpacked, M_NVLIST);
4125 		nvlpacked = NULL;
4126 
4127 		rule = TAILQ_FIRST(rs->active.rules);
4128 		while ((rule != NULL) && (rule->nr != nr))
4129 			rule = TAILQ_NEXT(rule, entries);
4130 		if (rule == NULL) {
4131 			PF_RULES_RUNLOCK();
4132 			ERROUT(ENOENT);
4133 		}
4134 		/* Make sure rule can't go away. */
4135 		NET_EPOCH_ENTER(et);
4136 		PF_RULES_RUNLOCK();
4137 		nvl = pf_keth_rule_to_nveth_rule(rule);
4138 		if (pf_keth_anchor_nvcopyout(rs, rule, nvl)) {
4139 			NET_EPOCH_EXIT(et);
4140 			ERROUT(EBUSY);
4141 		}
4142 		NET_EPOCH_EXIT(et);
4143 		if (nvl == NULL)
4144 			ERROUT(ENOMEM);
4145 
4146 		nvlpacked = nvlist_pack(nvl, &nv->len);
4147 		if (nvlpacked == NULL)
4148 			ERROUT(ENOMEM);
4149 
4150 		if (nv->size == 0)
4151 			ERROUT(0);
4152 		else if (nv->size < nv->len)
4153 			ERROUT(ENOSPC);
4154 
4155 		error = copyout(nvlpacked, nv->data, nv->len);
4156 		if (error == 0 && clear) {
4157 			counter_u64_zero(rule->evaluations);
4158 			for (int i = 0; i < 2; i++) {
4159 				counter_u64_zero(rule->packets[i]);
4160 				counter_u64_zero(rule->bytes[i]);
4161 			}
4162 		}
4163 
4164 #undef ERROUT
4165 DIOCGETETHRULE_error:
4166 		free(nvlpacked, M_NVLIST);
4167 		nvlist_destroy(nvl);
4168 		break;
4169 	}
4170 
4171 	case DIOCADDETHRULE: {
4172 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
4173 		nvlist_t		*nvl = NULL;
4174 		void			*nvlpacked = NULL;
4175 		struct pf_keth_rule	*rule = NULL, *tail = NULL;
4176 		struct pf_keth_ruleset	*ruleset = NULL;
4177 		struct pfi_kkif		*kif = NULL, *bridge_to_kif = NULL;
4178 		const char		*anchor = "", *anchor_call = "";
4179 
4180 #define ERROUT(x)	ERROUT_IOCTL(DIOCADDETHRULE_error, x)
4181 
4182 		if (nv->len > pf_ioctl_maxcount)
4183 			ERROUT(ENOMEM);
4184 
4185 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
4186 		error = copyin(nv->data, nvlpacked, nv->len);
4187 		if (error)
4188 			ERROUT(error);
4189 
4190 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
4191 		if (nvl == NULL)
4192 			ERROUT(EBADMSG);
4193 
4194 		if (! nvlist_exists_number(nvl, "ticket"))
4195 			ERROUT(EBADMSG);
4196 
4197 		if (nvlist_exists_string(nvl, "anchor"))
4198 			anchor = nvlist_get_string(nvl, "anchor");
4199 		if (nvlist_exists_string(nvl, "anchor_call"))
4200 			anchor_call = nvlist_get_string(nvl, "anchor_call");
4201 
4202 		ruleset = pf_find_keth_ruleset(anchor);
4203 		if (ruleset == NULL)
4204 			ERROUT(EINVAL);
4205 
4206 		if (nvlist_get_number(nvl, "ticket") !=
4207 		    ruleset->inactive.ticket) {
4208 			DPFPRINTF(PF_DEBUG_MISC,
4209 			    "ticket: %d != %d",
4210 			    (u_int32_t)nvlist_get_number(nvl, "ticket"),
4211 			    ruleset->inactive.ticket);
4212 			ERROUT(EBUSY);
4213 		}
4214 
4215 		rule = malloc(sizeof(*rule), M_PFRULE, M_WAITOK);
4216 		rule->timestamp = NULL;
4217 
4218 		error = pf_nveth_rule_to_keth_rule(nvl, rule);
4219 		if (error != 0)
4220 			ERROUT(error);
4221 
4222 		if (rule->ifname[0])
4223 			kif = pf_kkif_create(M_WAITOK);
4224 		if (rule->bridge_to_name[0])
4225 			bridge_to_kif = pf_kkif_create(M_WAITOK);
4226 		rule->evaluations = counter_u64_alloc(M_WAITOK);
4227 		for (int i = 0; i < 2; i++) {
4228 			rule->packets[i] = counter_u64_alloc(M_WAITOK);
4229 			rule->bytes[i] = counter_u64_alloc(M_WAITOK);
4230 		}
4231 		rule->timestamp = uma_zalloc_pcpu(pf_timestamp_pcpu_zone,
4232 		    M_WAITOK | M_ZERO);
4233 
4234 		PF_RULES_WLOCK();
4235 
4236 		if (rule->ifname[0]) {
4237 			rule->kif = pfi_kkif_attach(kif, rule->ifname);
4238 			pfi_kkif_ref(rule->kif);
4239 		} else
4240 			rule->kif = NULL;
4241 		if (rule->bridge_to_name[0]) {
4242 			rule->bridge_to = pfi_kkif_attach(bridge_to_kif,
4243 			    rule->bridge_to_name);
4244 			pfi_kkif_ref(rule->bridge_to);
4245 		} else
4246 			rule->bridge_to = NULL;
4247 
4248 #ifdef ALTQ
4249 		/* set queue IDs */
4250 		if (rule->qname[0] != 0) {
4251 			if ((rule->qid = pf_qname2qid(rule->qname, true)) == 0)
4252 				error = EBUSY;
4253 		}
4256 #endif
4257 		if (rule->tagname[0])
4258 			if ((rule->tag = pf_tagname2tag(rule->tagname)) == 0)
4259 				error = EBUSY;
4260 		if (rule->match_tagname[0])
4261 			if ((rule->match_tag = pf_tagname2tag(
4262 			    rule->match_tagname)) == 0)
4263 				error = EBUSY;
4264 
4265 		if (error == 0 && rule->ipdst.addr.type == PF_ADDR_TABLE)
4266 			error = pf_eth_addr_setup(ruleset, &rule->ipdst.addr);
4267 		if (error == 0 && rule->ipsrc.addr.type == PF_ADDR_TABLE)
4268 			error = pf_eth_addr_setup(ruleset, &rule->ipsrc.addr);
4269 
4270 		if (error) {
4271 			pf_free_eth_rule(rule);
4272 			PF_RULES_WUNLOCK();
4273 			ERROUT(error);
4274 		}
4275 
4276 		if (pf_keth_anchor_setup(rule, ruleset, anchor_call)) {
4277 			pf_free_eth_rule(rule);
4278 			PF_RULES_WUNLOCK();
4279 			ERROUT(EINVAL);
4280 		}
4281 
4282 		tail = TAILQ_LAST(ruleset->inactive.rules, pf_keth_ruleq);
4283 		if (tail)
4284 			rule->nr = tail->nr + 1;
4285 		else
4286 			rule->nr = 0;
4287 
4288 		TAILQ_INSERT_TAIL(ruleset->inactive.rules, rule, entries);
4289 
4290 		PF_RULES_WUNLOCK();
4291 
4292 #undef ERROUT
4293 DIOCADDETHRULE_error:
4294 		nvlist_destroy(nvl);
4295 		free(nvlpacked, M_NVLIST);
4296 		break;
4297 	}
4298 
4299 	case DIOCGETETHRULESETS: {
4300 		struct epoch_tracker	 et;
4301 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
4302 		nvlist_t		*nvl = NULL;
4303 		void			*nvlpacked = NULL;
4304 		struct pf_keth_ruleset	*ruleset;
4305 		struct pf_keth_anchor	*anchor;
4306 		int			 nr = 0;
4307 
4308 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESETS_error, x)
4309 
4310 		if (nv->len > pf_ioctl_maxcount)
4311 			ERROUT(ENOMEM);
4312 
4313 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
4314 		error = copyin(nv->data, nvlpacked, nv->len);
4315 		if (error)
4316 			ERROUT(error);
4317 
4318 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
4319 		if (nvl == NULL)
4320 			ERROUT(EBADMSG);
4321 		if (! nvlist_exists_string(nvl, "path"))
4322 			ERROUT(EBADMSG);
4323 
4324 		NET_EPOCH_ENTER(et);
4325 
4326 		if ((ruleset = pf_find_keth_ruleset(
4327 		    nvlist_get_string(nvl, "path"))) == NULL) {
4328 			NET_EPOCH_EXIT(et);
4329 			ERROUT(ENOENT);
4330 		}
4331 
4332 		if (ruleset->anchor == NULL) {
4333 			RB_FOREACH(anchor, pf_keth_anchor_global, &V_pf_keth_anchors)
4334 				if (anchor->parent == NULL)
4335 					nr++;
4336 		} else {
4337 			RB_FOREACH(anchor, pf_keth_anchor_node,
4338 			    &ruleset->anchor->children)
4339 				nr++;
4340 		}
4341 
4342 		NET_EPOCH_EXIT(et);
4343 
4344 		nvlist_destroy(nvl);
4345 		nvl = NULL;
4346 		free(nvlpacked, M_NVLIST);
4347 		nvlpacked = NULL;
4348 
4349 		nvl = nvlist_create(0);
4350 		if (nvl == NULL)
4351 			ERROUT(ENOMEM);
4352 
4353 		nvlist_add_number(nvl, "nr", nr);
4354 
4355 		nvlpacked = nvlist_pack(nvl, &nv->len);
4356 		if (nvlpacked == NULL)
4357 			ERROUT(ENOMEM);
4358 
4359 		if (nv->size == 0)
4360 			ERROUT(0);
4361 		else if (nv->size < nv->len)
4362 			ERROUT(ENOSPC);
4363 
4364 		error = copyout(nvlpacked, nv->data, nv->len);
4365 
4366 #undef ERROUT
4367 DIOCGETETHRULESETS_error:
4368 		free(nvlpacked, M_NVLIST);
4369 		nvlist_destroy(nvl);
4370 		break;
4371 	}
4372 
4373 	case DIOCGETETHRULESET: {
4374 		struct epoch_tracker	 et;
4375 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
4376 		nvlist_t		*nvl = NULL;
4377 		void			*nvlpacked = NULL;
4378 		struct pf_keth_ruleset	*ruleset;
4379 		struct pf_keth_anchor	*anchor;
4380 		int			 nr = 0, req_nr = 0;
4381 		bool			 found = false;
4382 
4383 #define ERROUT(x)	ERROUT_IOCTL(DIOCGETETHRULESET_error, x)
4384 
4385 		if (nv->len > pf_ioctl_maxcount)
4386 			ERROUT(ENOMEM);
4387 
4388 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
4389 		error = copyin(nv->data, nvlpacked, nv->len);
4390 		if (error)
4391 			ERROUT(error);
4392 
4393 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
4394 		if (nvl == NULL)
4395 			ERROUT(EBADMSG);
4396 		if (! nvlist_exists_string(nvl, "path"))
4397 			ERROUT(EBADMSG);
4398 		if (! nvlist_exists_number(nvl, "nr"))
4399 			ERROUT(EBADMSG);
4400 
4401 		req_nr = nvlist_get_number(nvl, "nr");
4402 
4403 		NET_EPOCH_ENTER(et);
4404 
4405 		if ((ruleset = pf_find_keth_ruleset(
4406 		    nvlist_get_string(nvl, "path"))) == NULL) {
4407 			NET_EPOCH_EXIT(et);
4408 			ERROUT(ENOENT);
4409 		}
4410 
4411 		nvlist_destroy(nvl);
4412 		nvl = NULL;
4413 		free(nvlpacked, M_NVLIST);
4414 		nvlpacked = NULL;
4415 
4416 		nvl = nvlist_create(0);
4417 		if (nvl == NULL) {
4418 			NET_EPOCH_EXIT(et);
4419 			ERROUT(ENOMEM);
4420 		}
4421 
4422 		if (ruleset->anchor == NULL) {
4423 			RB_FOREACH(anchor, pf_keth_anchor_global,
4424 			    &V_pf_keth_anchors) {
4425 				if (anchor->parent == NULL && nr++ == req_nr) {
4426 					found = true;
4427 					break;
4428 				}
4429 			}
4430 		} else {
4431 			RB_FOREACH(anchor, pf_keth_anchor_node,
4432 			     &ruleset->anchor->children) {
4433 				if (nr++ == req_nr) {
4434 					found = true;
4435 					break;
4436 				}
4437 			}
4438 		}
4439 
4440 		NET_EPOCH_EXIT(et);
4441 		if (found) {
4442 			nvlist_add_number(nvl, "nr", nr);
4443 			nvlist_add_string(nvl, "name", anchor->name);
4444 			if (ruleset->anchor)
4445 				nvlist_add_string(nvl, "path",
4446 				    ruleset->anchor->path);
4447 			else
4448 				nvlist_add_string(nvl, "path", "");
4449 		} else {
4450 			ERROUT(EBUSY);
4451 		}
4452 
4453 		nvlpacked = nvlist_pack(nvl, &nv->len);
4454 		if (nvlpacked == NULL)
4455 			ERROUT(ENOMEM);
4456 
4457 		if (nv->size == 0)
4458 			ERROUT(0);
4459 		else if (nv->size < nv->len)
4460 			ERROUT(ENOSPC);
4461 
4462 		error = copyout(nvlpacked, nv->data, nv->len);
4463 
4464 #undef ERROUT
4465 DIOCGETETHRULESET_error:
4466 		free(nvlpacked, M_NVLIST);
4467 		nvlist_destroy(nvl);
4468 		break;
4469 	}
4470 
4471 	case DIOCADDRULENV: {
4472 		struct pfioc_nv	*nv = (struct pfioc_nv *)addr;
4473 		nvlist_t	*nvl = NULL;
4474 		void		*nvlpacked = NULL;
4475 		struct pf_krule	*rule = NULL;
4476 		const char	*anchor = "", *anchor_call = "";
4477 		uint32_t	 ticket = 0, pool_ticket = 0;
4478 
4479 #define	ERROUT(x)	ERROUT_IOCTL(DIOCADDRULENV_error, x)
4480 
4481 		if (nv->len > pf_ioctl_maxcount)
4482 			ERROUT(ENOMEM);
4483 
4484 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
4485 		error = copyin(nv->data, nvlpacked, nv->len);
4486 		if (error)
4487 			ERROUT(error);
4488 
4489 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
4490 		if (nvl == NULL)
4491 			ERROUT(EBADMSG);
4492 
4493 		if (! nvlist_exists_number(nvl, "ticket"))
4494 			ERROUT(EINVAL);
4495 		ticket = nvlist_get_number(nvl, "ticket");
4496 
4497 		if (! nvlist_exists_number(nvl, "pool_ticket"))
4498 			ERROUT(EINVAL);
4499 		pool_ticket = nvlist_get_number(nvl, "pool_ticket");
4500 
4501 		if (! nvlist_exists_nvlist(nvl, "rule"))
4502 			ERROUT(EINVAL);
4503 
4504 		rule = pf_krule_alloc();
4505 		error = pf_nvrule_to_krule(nvlist_get_nvlist(nvl, "rule"),
4506 		    rule);
4507 		if (error)
4508 			ERROUT(error);
4509 
4510 		if (nvlist_exists_string(nvl, "anchor"))
4511 			anchor = nvlist_get_string(nvl, "anchor");
4512 		if (nvlist_exists_string(nvl, "anchor_call"))
4513 			anchor_call = nvlist_get_string(nvl, "anchor_call");
4514 
4515 		if ((error = nvlist_error(nvl)))
4516 			ERROUT(error);
4517 
4518 		/* Frees rule on error */
4519 		error = pf_ioctl_addrule(rule, ticket, pool_ticket, anchor,
4520 		    anchor_call, td->td_ucred->cr_ruid,
4521 		    td->td_proc ? td->td_proc->p_pid : 0);
4522 
4523 		nvlist_destroy(nvl);
4524 		free(nvlpacked, M_NVLIST);
4525 		break;
4526 #undef ERROUT
4527 DIOCADDRULENV_error:
4528 		pf_krule_free(rule);
4529 		nvlist_destroy(nvl);
4530 		free(nvlpacked, M_NVLIST);
4531 
4532 		break;
4533 	}
4534 	case DIOCADDRULE: {
4535 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
4536 		struct pf_krule		*rule;
4537 
4538 		rule = pf_krule_alloc();
4539 		error = pf_rule_to_krule(&pr->rule, rule);
4540 		if (error != 0) {
4541 			pf_krule_free(rule);
4542 			goto fail;
4543 		}
4544 
4545 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
4546 
4547 		/* Frees rule on error */
4548 		error = pf_ioctl_addrule(rule, pr->ticket, pr->pool_ticket,
4549 		    pr->anchor, pr->anchor_call, td->td_ucred->cr_ruid,
4550 		    td->td_proc ? td->td_proc->p_pid : 0);
4551 		break;
4552 	}
4553 
4554 	case DIOCGETRULES: {
4555 		struct pfioc_rule	*pr = (struct pfioc_rule *)addr;
4556 
4557 		pr->anchor[sizeof(pr->anchor) - 1] = '\0';
4558 
4559 		error = pf_ioctl_getrules(pr);
4560 
4561 		break;
4562 	}
4563 
4564 	case DIOCGETRULENV: {
4565 		PF_RULES_RLOCK_TRACKER;
4566 		struct pfioc_nv		*nv = (struct pfioc_nv *)addr;
4567 		nvlist_t		*nvrule = NULL;
4568 		nvlist_t		*nvl = NULL;
4569 		struct pf_kruleset	*ruleset;
4570 		struct pf_krule		*rule;
4571 		void			*nvlpacked = NULL;
4572 		int			 rs_num, nr;
4573 		bool			 clear_counter = false;
4574 
4575 #define	ERROUT(x)	ERROUT_IOCTL(DIOCGETRULENV_error, x)
4576 #define	ERROUT_LOCKED(x) do {			\
4577 	if (clear_counter)			\
4578 		PF_RULES_WUNLOCK();		\
4579 	else					\
4580 		PF_RULES_RUNLOCK();		\
4581 	ERROUT(x);				\
4582 } while (0)
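		/*
		 * ERROUT_LOCKED drops whichever rules lock was taken
		 * (write when counters are being cleared, read otherwise)
		 * before bailing out.
		 */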
4583 
4584 		if (nv->len > pf_ioctl_maxcount)
4585 			ERROUT(ENOMEM);
4586 
4587 		/* Copy the request in */
4588 		nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
4589 		error = copyin(nv->data, nvlpacked, nv->len);
4590 		if (error)
4591 			ERROUT(error);
4592 
4593 		nvl = nvlist_unpack(nvlpacked, nv->len, 0);
4594 		if (nvl == NULL)
4595 			ERROUT(EBADMSG);
4596 
4597 		if (! nvlist_exists_string(nvl, "anchor"))
4598 			ERROUT(EBADMSG);
4599 		if (! nvlist_exists_number(nvl, "ruleset"))
4600 			ERROUT(EBADMSG);
4601 		if (! nvlist_exists_number(nvl, "ticket"))
4602 			ERROUT(EBADMSG);
4603 		if (! nvlist_exists_number(nvl, "nr"))
4604 			ERROUT(EBADMSG);
4605 
4606 		if (nvlist_exists_bool(nvl, "clear_counter"))
4607 			clear_counter = nvlist_get_bool(nvl, "clear_counter");
4608 
4609 		if (clear_counter && !(flags & FWRITE))
4610 			ERROUT(EACCES);
4611 
4612 		nr = nvlist_get_number(nvl, "nr");
4613 
4614 		if (clear_counter)
4615 			PF_RULES_WLOCK();
4616 		else
4617 			PF_RULES_RLOCK();
4618 		ruleset = pf_find_kruleset(nvlist_get_string(nvl, "anchor"));
4619 		if (ruleset == NULL)
4620 			ERROUT_LOCKED(ENOENT);
4621 
4622 		rs_num = pf_get_ruleset_number(nvlist_get_number(nvl, "ruleset"));
4623 		if (rs_num >= PF_RULESET_MAX)
4624 			ERROUT_LOCKED(EINVAL);
4625 
4626 		if (nvlist_get_number(nvl, "ticket") !=
4627 		    ruleset->rules[rs_num].active.ticket)
4628 			ERROUT_LOCKED(EBUSY);
4629 
4630 		if ((error = nvlist_error(nvl)))
4631 			ERROUT_LOCKED(error);
4632 
4633 		rule = TAILQ_FIRST(ruleset->rules[rs_num].active.ptr);
4634 		while ((rule != NULL) && (rule->nr != nr))
4635 			rule = TAILQ_NEXT(rule, entries);
4636 		if (rule == NULL)
4637 			ERROUT_LOCKED(EBUSY);
4638 
4639 		nvrule = pf_krule_to_nvrule(rule);
4640 
4641 		nvlist_destroy(nvl);
4642 		nvl = nvlist_create(0);
4643 		if (nvl == NULL)
4644 			ERROUT_LOCKED(ENOMEM);
4645 		nvlist_add_number(nvl, "nr", nr);
4646 		nvlist_add_nvlist(nvl, "rule", nvrule);
4647 		nvlist_destroy(nvrule);
4648 		nvrule = NULL;
4649 		if (pf_kanchor_nvcopyout(ruleset, rule, nvl))
4650 			ERROUT_LOCKED(EBUSY);
4651 
4652 		free(nvlpacked, M_NVLIST);
4653 		nvlpacked = nvlist_pack(nvl, &nv->len);
4654 		if (nvlpacked == NULL)
4655 			ERROUT_LOCKED(ENOMEM);
4656 
4657 		if (nv->size == 0)
4658 			ERROUT_LOCKED(0);
4659 		else if (nv->size < nv->len)
4660 			ERROUT_LOCKED(ENOSPC);
4661 
4662 		if (clear_counter) {
4663 			pf_krule_clear_counters(rule);
4664 			PF_RULES_WUNLOCK();
4665 		} else {
4666 			PF_RULES_RUNLOCK();
4667 		}
4668 
4669 		error = copyout(nvlpacked, nv->data, nv->len);
4670 
4671 #undef ERROUT_LOCKED
4672 #undef ERROUT
4673 DIOCGETRULENV_error:
4674 		free(nvlpacked, M_NVLIST);
4675 		nvlist_destroy(nvrule);
4676 		nvlist_destroy(nvl);
4677 
4678 		break;
4679 	}
4680 
4681 	case DIOCCHANGERULE: {
4682 		struct pfioc_rule	*pcr = (struct pfioc_rule *)addr;
4683 		struct pf_kruleset	*ruleset;
4684 		struct pf_krule		*oldrule = NULL, *newrule = NULL;
4685 		struct pfi_kkif		*kif = NULL;
4686 		struct pf_kpooladdr	*pa;
4687 		u_int32_t		 nr = 0;
4688 		int			 rs_num;
4689 
4690 		pcr->anchor[sizeof(pcr->anchor) - 1] = '\0';
4691 
4692 		if (pcr->action < PF_CHANGE_ADD_HEAD ||
4693 		    pcr->action > PF_CHANGE_GET_TICKET) {
4694 			error = EINVAL;
4695 			goto fail;
4696 		}
4697 		if (pcr->rule.return_icmp >> 8 > ICMP_MAXTYPE) {
4698 			error = EINVAL;
4699 			goto fail;
4700 		}
4701 
4702 		if (pcr->action != PF_CHANGE_REMOVE) {
4703 			newrule = pf_krule_alloc();
4704 			error = pf_rule_to_krule(&pcr->rule, newrule);
4705 			if (error != 0) {
4706 				pf_krule_free(newrule);
4707 				goto fail;
4708 			}
4709 
4710 			if ((error = pf_rule_checkaf(newrule))) {
4711 				pf_krule_free(newrule);
4712 				goto fail;
4713 			}
4714 			if (newrule->ifname[0])
4715 				kif = pf_kkif_create(M_WAITOK);
4716 			pf_counter_u64_init(&newrule->evaluations, M_WAITOK);
4717 			for (int i = 0; i < 2; i++) {
4718 				pf_counter_u64_init(&newrule->packets[i], M_WAITOK);
4719 				pf_counter_u64_init(&newrule->bytes[i], M_WAITOK);
4720 			}
4721 			newrule->states_cur = counter_u64_alloc(M_WAITOK);
4722 			newrule->states_tot = counter_u64_alloc(M_WAITOK);
4723 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
4724 				newrule->src_nodes[sn_type] = counter_u64_alloc(M_WAITOK);
4725 			newrule->cuid = td->td_ucred->cr_ruid;
4726 			newrule->cpid = td->td_proc ? td->td_proc->p_pid : 0;
4727 			TAILQ_INIT(&newrule->nat.list);
4728 			TAILQ_INIT(&newrule->rdr.list);
4729 			TAILQ_INIT(&newrule->route.list);
4730 		}
4731 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGERULE_error, x)
4732 
4733 		PF_CONFIG_LOCK();
4734 		PF_RULES_WLOCK();
4735 #ifdef PF_WANT_32_TO_64_COUNTER
4736 		if (newrule != NULL) {
4737 			LIST_INSERT_HEAD(&V_pf_allrulelist, newrule, allrulelist);
4738 			newrule->allrulelinked = true;
4739 			V_pf_allrulecount++;
4740 		}
4741 #endif
4742 
4743 		if (!(pcr->action == PF_CHANGE_REMOVE ||
4744 		    pcr->action == PF_CHANGE_GET_TICKET) &&
4745 		    pcr->pool_ticket != V_ticket_pabuf)
4746 			ERROUT(EBUSY);
4747 
4748 		ruleset = pf_find_kruleset(pcr->anchor);
4749 		if (ruleset == NULL)
4750 			ERROUT(EINVAL);
4751 
4752 		rs_num = pf_get_ruleset_number(pcr->rule.action);
4753 		if (rs_num >= PF_RULESET_MAX)
4754 			ERROUT(EINVAL);
4755 
4756 		/*
4757 		 * XXXMJG: there is no guarantee that the ruleset was
4758 		 * created by the usual route of calling DIOCXBEGIN.
4759 		 * As a result it is possible the rule tree will not
4760 		 * be allocated yet. Hack around it by doing it here.
4761 		 * Note it is fine to let the tree persist in case of
4762 		 * error as it will be freed down the road on future
4763 		 * updates (if need be).
4764 		 */
4765 		if (ruleset->rules[rs_num].active.tree == NULL) {
4766 			ruleset->rules[rs_num].active.tree = pf_rule_tree_alloc(M_NOWAIT);
4767 			if (ruleset->rules[rs_num].active.tree == NULL) {
4768 				ERROUT(ENOMEM);
4769 			}
4770 		}
4771 
4772 		if (pcr->action == PF_CHANGE_GET_TICKET) {
4773 			pcr->ticket = ++ruleset->rules[rs_num].active.ticket;
4774 			ERROUT(0);
4775 		} else if (pcr->ticket !=
4776 			    ruleset->rules[rs_num].active.ticket)
4777 				ERROUT(EINVAL);
4778 
4779 		if (pcr->action != PF_CHANGE_REMOVE) {
4780 			if (newrule->ifname[0]) {
4781 				newrule->kif = pfi_kkif_attach(kif,
4782 				    newrule->ifname);
4783 				kif = NULL;
4784 				pfi_kkif_ref(newrule->kif);
4785 			} else
4786 				newrule->kif = NULL;
4787 
4788 			if (newrule->rtableid > 0 &&
4789 			    newrule->rtableid >= rt_numfibs)
4790 				error = EBUSY;
4791 
4792 #ifdef ALTQ
4793 			/* set queue IDs */
4794 			if (newrule->qname[0] != 0) {
4795 				if ((newrule->qid =
4796 				    pf_qname2qid(newrule->qname, true)) == 0)
4797 					error = EBUSY;
4798 				else if (newrule->pqname[0] != 0) {
4799 					if ((newrule->pqid =
4800 					    pf_qname2qid(newrule->pqname, true)) == 0)
4801 						error = EBUSY;
4802 				} else
4803 					newrule->pqid = newrule->qid;
4804 			}
4805 #endif /* ALTQ */
4806 			if (newrule->tagname[0])
4807 				if ((newrule->tag =
4808 				    pf_tagname2tag(newrule->tagname)) == 0)
4809 					error = EBUSY;
4810 			if (newrule->match_tagname[0])
4811 				if ((newrule->match_tag = pf_tagname2tag(
4812 				    newrule->match_tagname)) == 0)
4813 					error = EBUSY;
4814 			if (newrule->rt && !newrule->direction)
4815 				error = EINVAL;
4816 			if (!newrule->log)
4817 				newrule->logif = 0;
4818 			if (pf_addr_setup(ruleset, &newrule->src.addr, newrule->af))
4819 				error = ENOMEM;
4820 			if (pf_addr_setup(ruleset, &newrule->dst.addr, newrule->af))
4821 				error = ENOMEM;
4822 			if (pf_kanchor_setup(newrule, ruleset, pcr->anchor_call))
4823 				error = EINVAL;
4824 			for (int i = 0; i < 3; i++) {
4825 				TAILQ_FOREACH(pa, &V_pf_pabuf[i], entries)
4826 					if (pa->addr.type == PF_ADDR_TABLE) {
4827 						pa->addr.p.tbl =
4828 						    pfr_attach_table(ruleset,
4829 						    pa->addr.v.tblname);
4830 						if (pa->addr.p.tbl == NULL)
4831 							error = ENOMEM;
4832 					}
4833 			}
4834 
4835 			newrule->overload_tbl = NULL;
4836 			if (newrule->overload_tblname[0]) {
4837 				if ((newrule->overload_tbl = pfr_attach_table(
4838 				    ruleset, newrule->overload_tblname)) ==
4839 				    NULL)
4840 					error = EINVAL;
4841 				else
4842 					newrule->overload_tbl->pfrkt_flags |=
4843 					    PFR_TFLAG_ACTIVE;
4844 			}
4845 
4846 			pf_mv_kpool(&V_pf_pabuf[0], &newrule->nat.list);
4847 			pf_mv_kpool(&V_pf_pabuf[1], &newrule->rdr.list);
4848 			pf_mv_kpool(&V_pf_pabuf[2], &newrule->route.list);
4849 			if ((newrule->action == PF_NAT ||
4850 			    newrule->action == PF_RDR ||
4851 			    newrule->action == PF_BINAT ||
4852 			    newrule->rt > PF_NOPFROUTE) &&
4853 			    !newrule->anchor &&
4854 			    TAILQ_FIRST(&newrule->rdr.list) == NULL)
4855 				error = EINVAL;
4856 
4857 			if (error) {
4858 				pf_free_rule(newrule);
4859 				PF_RULES_WUNLOCK();
4860 				PF_CONFIG_UNLOCK();
4861 				goto fail;
4862 			}
4863 
4864 			newrule->nat.cur = TAILQ_FIRST(&newrule->nat.list);
4865 			newrule->rdr.cur = TAILQ_FIRST(&newrule->rdr.list);
4866 		}
4867 		pf_empty_kpool(&V_pf_pabuf[0]);
4868 		pf_empty_kpool(&V_pf_pabuf[1]);
4869 		pf_empty_kpool(&V_pf_pabuf[2]);
4870 
4871 		if (pcr->action == PF_CHANGE_ADD_HEAD)
4872 			oldrule = TAILQ_FIRST(
4873 			    ruleset->rules[rs_num].active.ptr);
4874 		else if (pcr->action == PF_CHANGE_ADD_TAIL)
4875 			oldrule = TAILQ_LAST(
4876 			    ruleset->rules[rs_num].active.ptr, pf_krulequeue);
4877 		else {
4878 			oldrule = TAILQ_FIRST(
4879 			    ruleset->rules[rs_num].active.ptr);
4880 			while ((oldrule != NULL) && (oldrule->nr != pcr->nr))
4881 				oldrule = TAILQ_NEXT(oldrule, entries);
4882 			if (oldrule == NULL) {
4883 				if (newrule != NULL)
4884 					pf_free_rule(newrule);
4885 				PF_RULES_WUNLOCK();
4886 				PF_CONFIG_UNLOCK();
4887 				error = EINVAL;
4888 				goto fail;
4889 			}
4890 		}
4891 
4892 		if (pcr->action == PF_CHANGE_REMOVE) {
4893 			pf_unlink_rule(ruleset->rules[rs_num].active.ptr,
4894 			    oldrule);
4895 			RB_REMOVE(pf_krule_global,
4896 			    ruleset->rules[rs_num].active.tree, oldrule);
4897 			ruleset->rules[rs_num].active.rcount--;
4898 		} else {
4899 			pf_hash_rule(newrule);
4900 			if (RB_INSERT(pf_krule_global,
4901 			    ruleset->rules[rs_num].active.tree, newrule) != NULL) {
4902 				pf_free_rule(newrule);
4903 				PF_RULES_WUNLOCK();
4904 				PF_CONFIG_UNLOCK();
4905 				error = EEXIST;
4906 				goto fail;
4907 			}
4908 
4909 			if (oldrule == NULL)
4910 				TAILQ_INSERT_TAIL(
4911 				    ruleset->rules[rs_num].active.ptr,
4912 				    newrule, entries);
4913 			else if (pcr->action == PF_CHANGE_ADD_HEAD ||
4914 			    pcr->action == PF_CHANGE_ADD_BEFORE)
4915 				TAILQ_INSERT_BEFORE(oldrule, newrule, entries);
4916 			else
4917 				TAILQ_INSERT_AFTER(
4918 				    ruleset->rules[rs_num].active.ptr,
4919 				    oldrule, newrule, entries);
4920 			ruleset->rules[rs_num].active.rcount++;
4921 		}
4922 
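		/*
		 * Renumber the ruleset and bump its ticket so tickets
		 * obtained before this change are now stale.
		 */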
4923 		nr = 0;
4924 		TAILQ_FOREACH(oldrule,
4925 		    ruleset->rules[rs_num].active.ptr, entries)
4926 			oldrule->nr = nr++;
4927 
4928 		ruleset->rules[rs_num].active.ticket++;
4929 
4930 		pf_calc_skip_steps(ruleset->rules[rs_num].active.ptr);
4931 		pf_remove_if_empty_kruleset(ruleset);
4932 
4933 		PF_RULES_WUNLOCK();
4934 		PF_CONFIG_UNLOCK();
4935 		break;
4936 
4937 #undef ERROUT
4938 DIOCCHANGERULE_error:
4939 		PF_RULES_WUNLOCK();
4940 		PF_CONFIG_UNLOCK();
4941 		pf_krule_free(newrule);
4942 		pf_kkif_free(kif);
4943 		break;
4944 	}
4945 
4946 	case DIOCCLRSTATESNV: {
4947 		error = pf_clearstates_nv((struct pfioc_nv *)addr);
4948 		break;
4949 	}
4950 
4951 	case DIOCKILLSTATESNV: {
4952 		error = pf_killstates_nv((struct pfioc_nv *)addr);
4953 		break;
4954 	}
4955 
4956 	case DIOCADDSTATE: {
4957 		struct pfioc_state		*ps = (struct pfioc_state *)addr;
4958 		struct pfsync_state_1301	*sp = &ps->state;
4959 
4960 		if (sp->timeout >= PFTM_MAX) {
4961 			error = EINVAL;
4962 			goto fail;
4963 		}
4964 		if (V_pfsync_state_import_ptr != NULL) {
4965 			PF_RULES_RLOCK();
4966 			error = V_pfsync_state_import_ptr(
4967 			    (union pfsync_state_union *)sp, PFSYNC_SI_IOCTL,
4968 			    PFSYNC_MSG_VERSION_1301);
4969 			PF_RULES_RUNLOCK();
4970 		} else
4971 			error = EOPNOTSUPP;
4972 		break;
4973 	}
4974 
4975 	case DIOCGETSTATE: {
4976 		struct pfioc_state	*ps = (struct pfioc_state *)addr;
4977 		struct pf_kstate	*s;
4978 
4979 		s = pf_find_state_byid(ps->state.id, ps->state.creatorid);
4980 		if (s == NULL) {
4981 			error = ENOENT;
4982 			goto fail;
4983 		}
4984 
4985 		pfsync_state_export_1301(&ps->state, s);
4986 		PF_STATE_UNLOCK(s);
4987 		break;
4988 	}
4989 
4990 	case DIOCGETSTATENV: {
4991 		error = pf_getstate((struct pfioc_nv *)addr);
4992 		break;
4993 	}
4994 
4995 #ifdef COMPAT_FREEBSD14
4996 	case DIOCGETSTATES: {
4997 		struct pfioc_states	*ps = (struct pfioc_states *)addr;
4998 		struct pf_kstate	*s;
4999 		struct pfsync_state_1301	*pstore, *p;
5000 		int			 i, nr;
5001 		size_t			 slice_count = 16, count;
5002 		void			*out;
5003 
5004 		if (ps->ps_len <= 0) {
5005 			nr = uma_zone_get_cur(V_pf_state_z);
5006 			ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
5007 			break;
5008 		}
5009 
5010 		out = ps->ps_states;
5011 		pstore = mallocarray(slice_count,
5012 		    sizeof(struct pfsync_state_1301), M_PF, M_WAITOK | M_ZERO);
5013 		nr = 0;
5014 
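		/*
		 * Export states one hash row at a time so the row lock is
		 * never held across copyout: count the row, double the
		 * staging buffer and retry if the row outgrew it, snapshot
		 * under the lock, then copy out after unlocking.
		 */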
5015 		for (i = 0; i <= V_pf_hashmask; i++) {
5016 			struct pf_idhash *ih = &V_pf_idhash[i];
5017 
5018 DIOCGETSTATES_retry:
5019 			p = pstore;
5020 
5021 			if (LIST_EMPTY(&ih->states))
5022 				continue;
5023 
5024 			PF_HASHROW_LOCK(ih);
5025 			count = 0;
5026 			LIST_FOREACH(s, &ih->states, entry) {
5027 				if (s->timeout == PFTM_UNLINKED)
5028 					continue;
5029 				count++;
5030 			}
5031 
5032 			if (count > slice_count) {
5033 				PF_HASHROW_UNLOCK(ih);
5034 				free(pstore, M_PF);
5035 				slice_count = count * 2;
5036 				pstore = mallocarray(slice_count,
5037 				    sizeof(struct pfsync_state_1301), M_PF,
5038 				    M_WAITOK | M_ZERO);
5039 				goto DIOCGETSTATES_retry;
5040 			}
5041 
5042 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
5043 				PF_HASHROW_UNLOCK(ih);
5044 				goto DIOCGETSTATES_full;
5045 			}
5046 
5047 			LIST_FOREACH(s, &ih->states, entry) {
5048 				if (s->timeout == PFTM_UNLINKED)
5049 					continue;
5050 
5051 				pfsync_state_export_1301(p, s);
5052 				p++;
5053 				nr++;
5054 			}
5055 			PF_HASHROW_UNLOCK(ih);
5056 			error = copyout(pstore, out,
5057 			    sizeof(struct pfsync_state_1301) * count);
5058 			if (error) {
5059 				free(pstore, M_PF);
5060 				goto fail;
5061 			}
5062 			out = ps->ps_states + nr;
5063 		}
5064 DIOCGETSTATES_full:
5065 		ps->ps_len = sizeof(struct pfsync_state_1301) * nr;
5066 		free(pstore, M_PF);
5067 
5068 		break;
5069 	}
5070 
5071 	case DIOCGETSTATESV2: {
5072 		struct pfioc_states_v2	*ps = (struct pfioc_states_v2 *)addr;
5073 		struct pf_kstate	*s;
5074 		struct pf_state_export	*pstore, *p;
5075 		int i, nr;
5076 		size_t slice_count = 16, count;
5077 		void *out;
5078 
5079 		if (ps->ps_req_version > PF_STATE_VERSION) {
5080 			error = ENOTSUP;
5081 			goto fail;
5082 		}
5083 
5084 		if (ps->ps_len <= 0) {
5085 			nr = uma_zone_get_cur(V_pf_state_z);
5086 			ps->ps_len = sizeof(struct pf_state_export) * nr;
5087 			break;
5088 		}
5089 
5090 		out = ps->ps_states;
5091 		pstore = mallocarray(slice_count,
5092 		    sizeof(struct pf_state_export), M_PF, M_WAITOK | M_ZERO);
5093 		nr = 0;
5094 
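		/*
		 * Same row-by-row export scheme as DIOCGETSTATES above,
		 * using the newer pf_state_export layout.
		 */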
5095 		for (i = 0; i <= V_pf_hashmask; i++) {
5096 			struct pf_idhash *ih = &V_pf_idhash[i];
5097 
5098 DIOCGETSTATESV2_retry:
5099 			p = pstore;
5100 
5101 			if (LIST_EMPTY(&ih->states))
5102 				continue;
5103 
5104 			PF_HASHROW_LOCK(ih);
5105 			count = 0;
5106 			LIST_FOREACH(s, &ih->states, entry) {
5107 				if (s->timeout == PFTM_UNLINKED)
5108 					continue;
5109 				count++;
5110 			}
5111 
5112 			if (count > slice_count) {
5113 				PF_HASHROW_UNLOCK(ih);
5114 				free(pstore, M_PF);
5115 				slice_count = count * 2;
5116 				pstore = mallocarray(slice_count,
5117 				    sizeof(struct pf_state_export), M_PF,
5118 				    M_WAITOK | M_ZERO);
5119 				goto DIOCGETSTATESV2_retry;
5120 			}
5121 
5122 			if ((nr + count) * sizeof(*p) > ps->ps_len) {
5123 				PF_HASHROW_UNLOCK(ih);
5124 				goto DIOCGETSTATESV2_full;
5125 			}
5126 
5127 			LIST_FOREACH(s, &ih->states, entry) {
5128 				if (s->timeout == PFTM_UNLINKED)
5129 					continue;
5130 
5131 				pf_state_export(p, s);
5132 				p++;
5133 				nr++;
5134 			}
5135 			PF_HASHROW_UNLOCK(ih);
5136 			error = copyout(pstore, out,
5137 			    sizeof(struct pf_state_export) * count);
5138 			if (error) {
5139 				free(pstore, M_PF);
5140 				goto fail;
5141 			}
5142 			out = ps->ps_states + nr;
5143 		}
5144 DIOCGETSTATESV2_full:
5145 		ps->ps_len = nr * sizeof(struct pf_state_export);
5146 		free(pstore, M_PF);
5147 
5148 		break;
5149 	}
5150 #endif
5151 	case DIOCGETSTATUSNV: {
5152 		error = pf_getstatus((struct pfioc_nv *)addr);
5153 		break;
5154 	}
5155 
5156 	case DIOCSETSTATUSIF: {
5157 		struct pfioc_if	*pi = (struct pfioc_if *)addr;
5158 
5159 		if (pi->ifname[0] == 0) {
5160 			bzero(V_pf_status.ifname, IFNAMSIZ);
5161 			break;
5162 		}
5163 		PF_RULES_WLOCK();
5164 		error = pf_user_strcpy(V_pf_status.ifname, pi->ifname, IFNAMSIZ);
5165 		PF_RULES_WUNLOCK();
5166 		break;
5167 	}
5168 
5169 	case DIOCCLRSTATUS: {
5170 		pf_ioctl_clear_status();
5171 		break;
5172 	}
5173 
5174 	case DIOCNATLOOK: {
5175 		struct pfioc_natlook	*pnl = (struct pfioc_natlook *)addr;
5176 
5177 		error = pf_ioctl_natlook(pnl);
5178 		break;
5179 	}
5180 
5181 	case DIOCSETTIMEOUT: {
5182 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
5183 
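		/*
		 * pt->seconds doubles as input and output: on success it
		 * is overwritten with the previous timeout value.
		 */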
5184 		error = pf_ioctl_set_timeout(pt->timeout, pt->seconds,
5185 		    &pt->seconds);
5186 		break;
5187 	}
5188 
5189 	case DIOCGETTIMEOUT: {
5190 		struct pfioc_tm	*pt = (struct pfioc_tm *)addr;
5191 
5192 		error = pf_ioctl_get_timeout(pt->timeout, &pt->seconds);
5193 		break;
5194 	}
5195 
5196 	case DIOCGETLIMIT: {
5197 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
5198 
5199 		error = pf_ioctl_get_limit(pl->index, &pl->limit);
5200 		break;
5201 	}
5202 
5203 	case DIOCSETLIMIT: {
5204 		struct pfioc_limit	*pl = (struct pfioc_limit *)addr;
5205 		unsigned int old_limit;
5206 
5207 		error = pf_ioctl_set_limit(pl->index, pl->limit, &old_limit);
5208 		pl->limit = old_limit;
5209 		break;
5210 	}
5211 
5212 	case DIOCSETDEBUG: {
5213 		u_int32_t	*level = (u_int32_t *)addr;
5214 
5215 		PF_RULES_WLOCK();
5216 		V_pf_status.debug = *level;
5217 		PF_RULES_WUNLOCK();
5218 		break;
5219 	}
5220 
5221 	case DIOCCLRRULECTRS: {
5222 		/* obsoleted by DIOCGETRULE with action=PF_GET_CLR_CNTR */
5223 		struct pf_kruleset	*ruleset = &pf_main_ruleset;
5224 		struct pf_krule		*rule;
5225 
5226 		PF_RULES_WLOCK();
5227 		TAILQ_FOREACH(rule,
5228 		    ruleset->rules[PF_RULESET_FILTER].active.ptr, entries) {
5229 			pf_counter_u64_zero(&rule->evaluations);
5230 			for (int i = 0; i < 2; i++) {
5231 				pf_counter_u64_zero(&rule->packets[i]);
5232 				pf_counter_u64_zero(&rule->bytes[i]);
5233 			}
5234 		}
5235 		PF_RULES_WUNLOCK();
5236 		break;
5237 	}
5238 
5239 	case DIOCGIFSPEEDV0:
5240 	case DIOCGIFSPEEDV1: {
5241 		struct pf_ifspeed_v1	*psp = (struct pf_ifspeed_v1 *)addr;
5242 		struct pf_ifspeed_v1	ps;
5243 		struct ifnet		*ifp;
5244 
5245 		if (psp->ifname[0] == '\0') {
5246 			error = EINVAL;
5247 			goto fail;
5248 		}
5249 
5250 		error = pf_user_strcpy(ps.ifname, psp->ifname, IFNAMSIZ);
5251 		if (error != 0)
5252 			goto fail;
5253 		ifp = ifunit(ps.ifname);
5254 		if (ifp != NULL) {
5255 			psp->baudrate32 =
5256 			    (u_int32_t)uqmin(ifp->if_baudrate, UINT_MAX);
5257 			if (cmd == DIOCGIFSPEEDV1)
5258 				psp->baudrate = ifp->if_baudrate;
5259 		} else {
5260 			error = EINVAL;
5261 		}
5262 		break;
5263 	}
5264 
5265 #ifdef ALTQ
5266 	case DIOCSTARTALTQ: {
5267 		struct pf_altq		*altq;
5268 
5269 		PF_RULES_WLOCK();
5270 		/* enable all altq interfaces on active list */
5271 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
5272 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
5273 				error = pf_enable_altq(altq);
5274 				if (error != 0)
5275 					break;
5276 			}
5277 		}
5278 		if (error == 0)
5279 			V_pf_altq_running = 1;
5280 		PF_RULES_WUNLOCK();
5281 		DPFPRINTF(PF_DEBUG_MISC, "altq: started");
5282 		break;
5283 	}
5284 
5285 	case DIOCSTOPALTQ: {
5286 		struct pf_altq		*altq;
5287 
5288 		PF_RULES_WLOCK();
5289 		/* disable all altq interfaces on active list */
5290 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries) {
5291 			if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) == 0) {
5292 				error = pf_disable_altq(altq);
5293 				if (error != 0)
5294 					break;
5295 			}
5296 		}
5297 		if (error == 0)
5298 			V_pf_altq_running = 0;
5299 		PF_RULES_WUNLOCK();
5300 		DPFPRINTF(PF_DEBUG_MISC, "altq: stopped");
5301 		break;
5302 	}
5303 
5304 	case DIOCADDALTQV0:
5305 	case DIOCADDALTQV1: {
5306 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
5307 		struct pf_altq		*altq, *a;
5308 		struct ifnet		*ifp;
5309 
5310 		altq = malloc(sizeof(*altq), M_PFALTQ, M_WAITOK | M_ZERO);
5311 		error = pf_import_kaltq(pa, altq, IOCPARM_LEN(cmd));
5312 		if (error)
5313 			goto fail;
5314 		altq->local_flags = 0;
5315 
5316 		PF_RULES_WLOCK();
5317 		if (pa->ticket != V_ticket_altqs_inactive) {
5318 			PF_RULES_WUNLOCK();
5319 			free(altq, M_PFALTQ);
5320 			error = EBUSY;
5321 			goto fail;
5322 		}
5323 
5324 		/*
5325 		 * if this is for a queue, find the discipline and
5326 		 * copy the necessary fields
5327 		 */
5328 		if (altq->qname[0] != 0) {
5329 			if ((altq->qid = pf_qname2qid(altq->qname, true)) == 0) {
5330 				PF_RULES_WUNLOCK();
5331 				error = EBUSY;
5332 				free(altq, M_PFALTQ);
5333 				goto fail;
5334 			}
5335 			altq->altq_disc = NULL;
5336 			TAILQ_FOREACH(a, V_pf_altq_ifs_inactive, entries) {
5337 				if (strncmp(a->ifname, altq->ifname,
5338 				    IFNAMSIZ) == 0) {
5339 					altq->altq_disc = a->altq_disc;
5340 					break;
5341 				}
5342 			}
5343 		}
5344 
5345 		if ((ifp = ifunit(altq->ifname)) == NULL)
5346 			altq->local_flags |= PFALTQ_FLAG_IF_REMOVED;
5347 		else
5348 			error = altq_add(ifp, altq);
5349 
5350 		if (error) {
5351 			PF_RULES_WUNLOCK();
5352 			free(altq, M_PFALTQ);
5353 			goto fail;
5354 		}
5355 
5356 		if (altq->qname[0] != 0)
5357 			TAILQ_INSERT_TAIL(V_pf_altqs_inactive, altq, entries);
5358 		else
5359 			TAILQ_INSERT_TAIL(V_pf_altq_ifs_inactive, altq, entries);
5360 		/* version error check done on import above */
5361 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
5362 		PF_RULES_WUNLOCK();
5363 		break;
5364 	}
5365 
5366 	case DIOCGETALTQSV0:
5367 	case DIOCGETALTQSV1: {
5368 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
5369 		struct pf_altq		*altq;
5370 
5371 		PF_RULES_RLOCK();
5372 		pa->nr = 0;
5373 		TAILQ_FOREACH(altq, V_pf_altq_ifs_active, entries)
5374 			pa->nr++;
5375 		TAILQ_FOREACH(altq, V_pf_altqs_active, entries)
5376 			pa->nr++;
5377 		pa->ticket = V_ticket_altqs_active;
5378 		PF_RULES_RUNLOCK();
5379 		break;
5380 	}
5381 
5382 	case DIOCGETALTQV0:
5383 	case DIOCGETALTQV1: {
5384 		struct pfioc_altq_v1	*pa = (struct pfioc_altq_v1 *)addr;
5385 		struct pf_altq		*altq;
5386 
5387 		PF_RULES_RLOCK();
5388 		if (pa->ticket != V_ticket_altqs_active) {
5389 			PF_RULES_RUNLOCK();
5390 			error = EBUSY;
5391 			goto fail;
5392 		}
5393 		altq = pf_altq_get_nth_active(pa->nr);
5394 		if (altq == NULL) {
5395 			PF_RULES_RUNLOCK();
5396 			error = EBUSY;
5397 			goto fail;
5398 		}
5399 		pf_export_kaltq(altq, pa, IOCPARM_LEN(cmd));
5400 		PF_RULES_RUNLOCK();
5401 		break;
5402 	}
5403 
5404 	case DIOCCHANGEALTQV0:
5405 	case DIOCCHANGEALTQV1:
5406 		/* CHANGEALTQ not supported yet! */
5407 		error = ENODEV;
5408 		break;
5409 
5410 	case DIOCGETQSTATSV0:
5411 	case DIOCGETQSTATSV1: {
5412 		struct pfioc_qstats_v1	*pq = (struct pfioc_qstats_v1 *)addr;
5413 		struct pf_altq		*altq;
5414 		int			 nbytes;
5415 		u_int32_t		 version;
5416 
5417 		PF_RULES_RLOCK();
5418 		if (pq->ticket != V_ticket_altqs_active) {
5419 			PF_RULES_RUNLOCK();
5420 			error = EBUSY;
5421 			goto fail;
5422 		}
5423 		nbytes = pq->nbytes;
5424 		altq = pf_altq_get_nth_active(pq->nr);
5425 		if (altq == NULL) {
5426 			PF_RULES_RUNLOCK();
5427 			error = EBUSY;
5428 			goto fail;
5429 		}
5430 
5431 		if ((altq->local_flags & PFALTQ_FLAG_IF_REMOVED) != 0) {
5432 			PF_RULES_RUNLOCK();
5433 			error = ENXIO;
5434 			goto fail;
5435 		}
5436 		PF_RULES_RUNLOCK();
5437 		if (cmd == DIOCGETQSTATSV0)
5438 			version = 0;  /* DIOCGETQSTATSV0 means stats struct v0 */
5439 		else
5440 			version = pq->version;
5441 		error = altq_getqstats(altq, pq->buf, &nbytes, version);
5442 		if (error == 0) {
5443 			pq->scheduler = altq->scheduler;
5444 			pq->nbytes = nbytes;
5445 		}
5446 		break;
5447 	}
5448 #endif /* ALTQ */
5449 
5450 	case DIOCBEGINADDRS: {
5451 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
5452 
5453 		error = pf_ioctl_begin_addrs(&pp->ticket);
5454 		break;
5455 	}
5456 
5457 	case DIOCADDADDR: {
5458 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
5459 		struct pf_nl_pooladdr npp = {};
5460 
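		/*
		 * struct pf_nl_pooladdr extends the legacy pfioc_pooladdr,
		 * so copying sizeof(*pp) bytes fills only the common prefix
		 * and leaves the preset 'which' selector intact.
		 */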
5461 		npp.which = PF_RDR;
5462 		memcpy(&npp, pp, sizeof(*pp));
5463 		error = pf_ioctl_add_addr(&npp);
5464 		break;
5465 	}
5466 
5467 	case DIOCGETADDRS: {
5468 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
5469 		struct pf_nl_pooladdr npp = {};
5470 
5471 		npp.which = PF_RDR;
5472 		memcpy(&npp, pp, sizeof(*pp));
5473 		error = pf_ioctl_get_addrs(&npp);
5474 		memcpy(pp, &npp, sizeof(*pp));
5475 
5476 		break;
5477 	}
5478 
5479 	case DIOCGETADDR: {
5480 		struct pfioc_pooladdr	*pp = (struct pfioc_pooladdr *)addr;
5481 		struct pf_nl_pooladdr npp = {};
5482 
5483 		npp.which = PF_RDR;
5484 		memcpy(&npp, pp, sizeof(*pp));
5485 		error = pf_ioctl_get_addr(&npp);
5486 		memcpy(pp, &npp, sizeof(*pp));
5487 
5488 		break;
5489 	}
5490 
5491 	case DIOCCHANGEADDR: {
5492 		struct pfioc_pooladdr	*pca = (struct pfioc_pooladdr *)addr;
5493 		struct pf_kpool		*pool;
5494 		struct pf_kpooladdr	*oldpa = NULL, *newpa = NULL;
5495 		struct pf_kruleset	*ruleset;
5496 		struct pfi_kkif		*kif = NULL;
5497 
5498 		pca->anchor[sizeof(pca->anchor) - 1] = '\0';
5499 
5500 		if (pca->action < PF_CHANGE_ADD_HEAD ||
5501 		    pca->action > PF_CHANGE_REMOVE) {
5502 			error = EINVAL;
5503 			goto fail;
5504 		}
5505 		if (pca->addr.addr.type != PF_ADDR_ADDRMASK &&
5506 		    pca->addr.addr.type != PF_ADDR_DYNIFTL &&
5507 		    pca->addr.addr.type != PF_ADDR_TABLE) {
5508 			error = EINVAL;
5509 			goto fail;
5510 		}
5511 		if (pca->addr.addr.p.dyn != NULL) {
5512 			error = EINVAL;
5513 			goto fail;
5514 		}
5515 
5516 		if (pca->action != PF_CHANGE_REMOVE) {
5517 #ifndef INET
5518 			if (pca->af == AF_INET) {
5519 				error = EAFNOSUPPORT;
5520 				goto fail;
5521 			}
5522 #endif /* INET */
5523 #ifndef INET6
5524 			if (pca->af == AF_INET6) {
5525 				error = EAFNOSUPPORT;
5526 				goto fail;
5527 			}
5528 #endif /* INET6 */
5529 			newpa = malloc(sizeof(*newpa), M_PFRULE, M_WAITOK);
5530 			bcopy(&pca->addr, newpa, sizeof(struct pf_pooladdr));
5531 			if (newpa->ifname[0])
5532 				kif = pf_kkif_create(M_WAITOK);
5533 			newpa->kif = NULL;
5534 		}
5535 #define	ERROUT(x)	ERROUT_IOCTL(DIOCCHANGEADDR_error, x)
5536 		PF_RULES_WLOCK();
5537 		ruleset = pf_find_kruleset(pca->anchor);
5538 		if (ruleset == NULL)
5539 			ERROUT(EBUSY);
5540 
5541 		pool = pf_get_kpool(pca->anchor, pca->ticket, pca->r_action,
5542 		    pca->r_num, pca->r_last, 1, 1, PF_RDR);
5543 		if (pool == NULL)
5544 			ERROUT(EBUSY);
5545 
5546 		if (pca->action != PF_CHANGE_REMOVE) {
5547 			if (newpa->ifname[0]) {
5548 				newpa->kif = pfi_kkif_attach(kif, newpa->ifname);
5549 				pfi_kkif_ref(newpa->kif);
5550 				kif = NULL;
5551 			}
5552 
5553 			switch (newpa->addr.type) {
5554 			case PF_ADDR_DYNIFTL:
5555 				error = pfi_dynaddr_setup(&newpa->addr,
5556 				    pca->af);
5557 				break;
5558 			case PF_ADDR_TABLE:
5559 				newpa->addr.p.tbl = pfr_attach_table(ruleset,
5560 				    newpa->addr.v.tblname);
5561 				if (newpa->addr.p.tbl == NULL)
5562 					error = ENOMEM;
5563 				break;
5564 			}
5565 			if (error)
5566 				goto DIOCCHANGEADDR_error;
5567 		}
5568 
5569 		switch (pca->action) {
5570 		case PF_CHANGE_ADD_HEAD:
5571 			oldpa = TAILQ_FIRST(&pool->list);
5572 			break;
5573 		case PF_CHANGE_ADD_TAIL:
5574 			oldpa = TAILQ_LAST(&pool->list, pf_kpalist);
5575 			break;
5576 		default:
5577 			oldpa = TAILQ_FIRST(&pool->list);
5578 			for (int i = 0; oldpa && i < pca->nr; i++)
5579 				oldpa = TAILQ_NEXT(oldpa, entries);
5580 
5581 			if (oldpa == NULL)
5582 				ERROUT(EINVAL);
5583 		}
5584 
5585 		if (pca->action == PF_CHANGE_REMOVE) {
5586 			TAILQ_REMOVE(&pool->list, oldpa, entries);
5587 			switch (oldpa->addr.type) {
5588 			case PF_ADDR_DYNIFTL:
5589 				pfi_dynaddr_remove(oldpa->addr.p.dyn);
5590 				break;
5591 			case PF_ADDR_TABLE:
5592 				pfr_detach_table(oldpa->addr.p.tbl);
5593 				break;
5594 			}
5595 			if (oldpa->kif)
5596 				pfi_kkif_unref(oldpa->kif);
5597 			free(oldpa, M_PFRULE);
5598 		} else {
5599 			if (oldpa == NULL)
5600 				TAILQ_INSERT_TAIL(&pool->list, newpa, entries);
5601 			else if (pca->action == PF_CHANGE_ADD_HEAD ||
5602 			    pca->action == PF_CHANGE_ADD_BEFORE)
5603 				TAILQ_INSERT_BEFORE(oldpa, newpa, entries);
5604 			else
5605 				TAILQ_INSERT_AFTER(&pool->list, oldpa,
5606 				    newpa, entries);
5607 		}
5608 
5609 		pool->cur = TAILQ_FIRST(&pool->list);
5610 		pf_addrcpy(&pool->counter, &pool->cur->addr.v.a.addr, pca->af);
5611 		PF_RULES_WUNLOCK();
5612 		break;
5613 
5614 #undef ERROUT
5615 DIOCCHANGEADDR_error:
5616 		if (newpa != NULL) {
5617 			if (newpa->kif)
5618 				pfi_kkif_unref(newpa->kif);
5619 			free(newpa, M_PFRULE);
5620 		}
5621 		PF_RULES_WUNLOCK();
5622 		pf_kkif_free(kif);
5623 		break;
5624 	}
5625 
5626 	case DIOCGETRULESETS: {
5627 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
5628 
5629 		pr->path[sizeof(pr->path) - 1] = '\0';
5630 
5631 		error = pf_ioctl_get_rulesets(pr);
5632 		break;
5633 	}
5634 
5635 	case DIOCGETRULESET: {
5636 		struct pfioc_ruleset	*pr = (struct pfioc_ruleset *)addr;
5637 
5638 		pr->path[sizeof(pr->path) - 1] = '\0';
5639 
5640 		error = pf_ioctl_get_ruleset(pr);
5641 		break;
5642 	}
5643 
5644 	case DIOCRCLRTABLES: {
5645 		struct pfioc_table *io = (struct pfioc_table *)addr;
5646 
5647 		if (io->pfrio_esize != 0) {
5648 			error = ENODEV;
5649 			goto fail;
5650 		}
5651 		if (strnlen(io->pfrio_table.pfrt_anchor, MAXPATHLEN)
5652 		    == MAXPATHLEN) {
5653 			error = EINVAL;
5654 			goto fail;
5655 		}
5656 		if (strnlen(io->pfrio_table.pfrt_name, PF_TABLE_NAME_SIZE)
5657 		    == PF_TABLE_NAME_SIZE) {
5658 			error = EINVAL;
5659 			goto fail;
5660 		}
5661 
5662 		PF_RULES_WLOCK();
5663 		error = pfr_clr_tables(&io->pfrio_table, &io->pfrio_ndel,
5664 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
5665 		PF_RULES_WUNLOCK();
5666 		break;
5667 	}
5668 
5669 	case DIOCRADDTABLES: {
5670 		struct pfioc_table *io = (struct pfioc_table *)addr;
5671 		struct pfr_table *pfrts;
5672 		size_t totlen;
5673 
5674 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
5675 			error = ENODEV;
5676 			goto fail;
5677 		}
5678 
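		/*
		 * Bound the user-supplied element count before the
		 * mallocarray() below: reject negative sizes, counts above
		 * pf_ioctl_maxcount, and counts whose byte total would
		 * overflow, so userland cannot force an oversized or
		 * wrapped allocation. The same idiom guards the other
		 * table/address ioctls in this switch.
		 */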
5679 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
5680 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
5681 			error = ENOMEM;
5682 			goto fail;
5683 		}
5684 
5685 		totlen = io->pfrio_size * sizeof(struct pfr_table);
5686 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
5687 		    M_PF, M_WAITOK);
5688 		error = copyin(io->pfrio_buffer, pfrts, totlen);
5689 		if (error) {
5690 			free(pfrts, M_PF);
5691 			goto fail;
5692 		}
5693 		PF_RULES_WLOCK();
5694 		error = pfr_add_tables(pfrts, io->pfrio_size,
5695 		    &io->pfrio_nadd, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5696 		PF_RULES_WUNLOCK();
5697 		free(pfrts, M_PF);
5698 		break;
5699 	}
5700 
5701 	case DIOCRDELTABLES: {
5702 		struct pfioc_table *io = (struct pfioc_table *)addr;
5703 		struct pfr_table *pfrts;
5704 		size_t totlen;
5705 
5706 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
5707 			error = ENODEV;
5708 			goto fail;
5709 		}
5710 
5711 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
5712 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
5713 			error = ENOMEM;
5714 			goto fail;
5715 		}
5716 
5717 		totlen = io->pfrio_size * sizeof(struct pfr_table);
5718 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
5719 		    M_PF, M_WAITOK);
5720 		error = copyin(io->pfrio_buffer, pfrts, totlen);
5721 		if (error) {
5722 			free(pfrts, M_PF);
5723 			goto fail;
5724 		}
5725 		PF_RULES_WLOCK();
5726 		error = pfr_del_tables(pfrts, io->pfrio_size,
5727 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5728 		PF_RULES_WUNLOCK();
5729 		free(pfrts, M_PF);
5730 		break;
5731 	}
5732 
5733 	case DIOCRGETTABLES: {
5734 		struct pfioc_table *io = (struct pfioc_table *)addr;
5735 		struct pfr_table *pfrts;
5736 		size_t totlen;
5737 		int n;
5738 
5739 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
5740 			error = ENODEV;
5741 			goto fail;
5742 		}
5743 		PF_RULES_RLOCK();
5744 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
5745 		if (n < 0) {
5746 			PF_RULES_RUNLOCK();
5747 			error = EINVAL;
5748 			goto fail;
5749 		}
5750 		io->pfrio_size = min(io->pfrio_size, n);
5751 
5752 		totlen = io->pfrio_size * sizeof(struct pfr_table);
5753 
5754 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
5755 		    M_PF, M_NOWAIT | M_ZERO);
5756 		if (pfrts == NULL) {
5757 			error = ENOMEM;
5758 			PF_RULES_RUNLOCK();
5759 			goto fail;
5760 		}
5761 		error = pfr_get_tables(&io->pfrio_table, pfrts,
5762 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5763 		PF_RULES_RUNLOCK();
5764 		if (error == 0)
5765 			error = copyout(pfrts, io->pfrio_buffer, totlen);
5766 		free(pfrts, M_PF);
5767 		break;
5768 	}
5769 
5770 	case DIOCRGETTSTATS: {
5771 		struct pfioc_table *io = (struct pfioc_table *)addr;
5772 		struct pfr_tstats *pfrtstats;
5773 		size_t totlen;
5774 		int n;
5775 
5776 		if (io->pfrio_esize != sizeof(struct pfr_tstats)) {
5777 			error = ENODEV;
5778 			goto fail;
5779 		}
5780 		PF_TABLE_STATS_LOCK();
5781 		PF_RULES_RLOCK();
5782 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
5783 		if (n < 0) {
5784 			PF_RULES_RUNLOCK();
5785 			PF_TABLE_STATS_UNLOCK();
5786 			error = EINVAL;
5787 			goto fail;
5788 		}
5789 		io->pfrio_size = min(io->pfrio_size, n);
5790 
5791 		totlen = io->pfrio_size * sizeof(struct pfr_tstats);
5792 		pfrtstats = mallocarray(io->pfrio_size,
5793 		    sizeof(struct pfr_tstats), M_PF, M_NOWAIT | M_ZERO);
5794 		if (pfrtstats == NULL) {
5795 			error = ENOMEM;
5796 			PF_RULES_RUNLOCK();
5797 			PF_TABLE_STATS_UNLOCK();
5798 			goto fail;
5799 		}
5800 		error = pfr_get_tstats(&io->pfrio_table, pfrtstats,
5801 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5802 		PF_RULES_RUNLOCK();
5803 		PF_TABLE_STATS_UNLOCK();
5804 		if (error == 0)
5805 			error = copyout(pfrtstats, io->pfrio_buffer, totlen);
5806 		free(pfrtstats, M_PF);
5807 		break;
5808 	}
5809 
5810 	case DIOCRCLRTSTATS: {
5811 		struct pfioc_table *io = (struct pfioc_table *)addr;
5812 		struct pfr_table *pfrts;
5813 		size_t totlen;
5814 
5815 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
5816 			error = ENODEV;
5817 			goto fail;
5818 		}
5819 
5820 		if (io->pfrio_size < 0 || io->pfrio_size > pf_ioctl_maxcount ||
5821 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_table))) {
5822 			/* We used to count tables and use the minimum required
5823 			 * size, so we didn't fail on overly large requests.
5824 			 * Keep doing so. */
5825 			io->pfrio_size = pf_ioctl_maxcount;
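			/* error is still 0 here, so this "fail" returns
			 * success with the clamped size reported back. */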
5826 			goto fail;
5827 		}
5828 
5829 		totlen = io->pfrio_size * sizeof(struct pfr_table);
5830 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
5831 		    M_PF, M_WAITOK);
5832 		error = copyin(io->pfrio_buffer, pfrts, totlen);
5833 		if (error) {
5834 			free(pfrts, M_PF);
5835 			goto fail;
5836 		}
5837 
5838 		PF_TABLE_STATS_LOCK();
5839 		PF_RULES_RLOCK();
5840 		error = pfr_clr_tstats(pfrts, io->pfrio_size,
5841 		    &io->pfrio_nzero, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5842 		PF_RULES_RUNLOCK();
5843 		PF_TABLE_STATS_UNLOCK();
5844 		free(pfrts, M_PF);
5845 		break;
5846 	}
5847 
5848 	case DIOCRSETTFLAGS: {
5849 		struct pfioc_table *io = (struct pfioc_table *)addr;
5850 		struct pfr_table *pfrts;
5851 		size_t totlen;
5852 		int n;
5853 
5854 		if (io->pfrio_esize != sizeof(struct pfr_table)) {
5855 			error = ENODEV;
5856 			goto fail;
5857 		}
5858 
5859 		PF_RULES_RLOCK();
5860 		n = pfr_table_count(&io->pfrio_table, io->pfrio_flags);
5861 		if (n < 0) {
5862 			PF_RULES_RUNLOCK();
5863 			error = EINVAL;
5864 			goto fail;
5865 		}
5866 
5867 		io->pfrio_size = min(io->pfrio_size, n);
5868 		PF_RULES_RUNLOCK();
5869 
5870 		totlen = io->pfrio_size * sizeof(struct pfr_table);
5871 		pfrts = mallocarray(io->pfrio_size, sizeof(struct pfr_table),
5872 		    M_PF, M_WAITOK);
5873 		error = copyin(io->pfrio_buffer, pfrts, totlen);
5874 		if (error) {
5875 			free(pfrts, M_PF);
5876 			goto fail;
5877 		}
5878 		PF_RULES_WLOCK();
5879 		error = pfr_set_tflags(pfrts, io->pfrio_size,
5880 		    io->pfrio_setflag, io->pfrio_clrflag, &io->pfrio_nchange,
5881 		    &io->pfrio_ndel, io->pfrio_flags | PFR_FLAG_USERIOCTL);
5882 		PF_RULES_WUNLOCK();
5883 		free(pfrts, M_PF);
5884 		break;
5885 	}
5886 
5887 	case DIOCRCLRADDRS: {
5888 		struct pfioc_table *io = (struct pfioc_table *)addr;
5889 
5890 		if (io->pfrio_esize != 0) {
5891 			error = ENODEV;
5892 			goto fail;
5893 		}
5894 		PF_RULES_WLOCK();
5895 		error = pfr_clr_addrs(&io->pfrio_table, &io->pfrio_ndel,
5896 		    io->pfrio_flags | PFR_FLAG_USERIOCTL);
5897 		PF_RULES_WUNLOCK();
5898 		break;
5899 	}
5900 
5901 	case DIOCRADDADDRS: {
5902 		struct pfioc_table *io = (struct pfioc_table *)addr;
5903 		struct pfr_addr *pfras;
5904 		size_t totlen;
5905 
5906 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5907 			error = ENODEV;
5908 			goto fail;
5909 		}
5910 		if (io->pfrio_size < 0 ||
5911 		    io->pfrio_size > pf_ioctl_maxcount ||
5912 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5913 			error = EINVAL;
5914 			goto fail;
5915 		}
5916 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5917 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5918 		    M_PF, M_WAITOK);
5919 		error = copyin(io->pfrio_buffer, pfras, totlen);
5920 		if (error) {
5921 			free(pfras, M_PF);
5922 			goto fail;
5923 		}
5924 		PF_RULES_WLOCK();
5925 		io->pfrio_nadd = 0;
5926 		error = pfr_add_addrs(&io->pfrio_table, pfras,
5927 		    io->pfrio_size, &io->pfrio_nadd, io->pfrio_flags |
5928 		    PFR_FLAG_USERIOCTL);
5929 		PF_RULES_WUNLOCK();
5930 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5931 			error = copyout(pfras, io->pfrio_buffer, totlen);
5932 		free(pfras, M_PF);
5933 		break;
5934 	}
5935 
5936 	case DIOCRDELADDRS: {
5937 		struct pfioc_table *io = (struct pfioc_table *)addr;
5938 		struct pfr_addr *pfras;
5939 		size_t totlen;
5940 
5941 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5942 			error = ENODEV;
5943 			goto fail;
5944 		}
5945 		if (io->pfrio_size < 0 ||
5946 		    io->pfrio_size > pf_ioctl_maxcount ||
5947 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
5948 			error = EINVAL;
5949 			goto fail;
5950 		}
5951 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
5952 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
5953 		    M_PF, M_WAITOK);
5954 		error = copyin(io->pfrio_buffer, pfras, totlen);
5955 		if (error) {
5956 			free(pfras, M_PF);
5957 			goto fail;
5958 		}
5959 		PF_RULES_WLOCK();
5960 		error = pfr_del_addrs(&io->pfrio_table, pfras,
5961 		    io->pfrio_size, &io->pfrio_ndel, io->pfrio_flags |
5962 		    PFR_FLAG_USERIOCTL);
5963 		PF_RULES_WUNLOCK();
5964 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
5965 			error = copyout(pfras, io->pfrio_buffer, totlen);
5966 		free(pfras, M_PF);
5967 		break;
5968 	}
5969 
5970 	case DIOCRSETADDRS: {
5971 		struct pfioc_table *io = (struct pfioc_table *)addr;
5972 		struct pfr_addr *pfras;
5973 		size_t totlen, count;
5974 
5975 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
5976 			error = ENODEV;
5977 			goto fail;
5978 		}
5979 		if (io->pfrio_size < 0 || io->pfrio_size2 < 0) {
5980 			error = EINVAL;
5981 			goto fail;
5982 		}
5983 		count = max(io->pfrio_size, io->pfrio_size2);
5984 		if (count > pf_ioctl_maxcount ||
5985 		    WOULD_OVERFLOW(count, sizeof(struct pfr_addr))) {
5986 			error = EINVAL;
5987 			goto fail;
5988 		}
5989 		totlen = count * sizeof(struct pfr_addr);
5990 		pfras = mallocarray(count, sizeof(struct pfr_addr), M_PF,
5991 		    M_WAITOK);
5992 		error = copyin(io->pfrio_buffer, pfras, totlen);
5993 		if (error) {
5994 			free(pfras, M_PF);
5995 			goto fail;
5996 		}
5997 		PF_RULES_WLOCK();
5998 		error = pfr_set_addrs(&io->pfrio_table, pfras,
5999 		    io->pfrio_size, &io->pfrio_size2, &io->pfrio_nadd,
6000 		    &io->pfrio_ndel, &io->pfrio_nchange, io->pfrio_flags |
6001 		    PFR_FLAG_START | PFR_FLAG_DONE | PFR_FLAG_USERIOCTL, 0);
6002 		PF_RULES_WUNLOCK();
6003 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
6004 			error = copyout(pfras, io->pfrio_buffer, totlen);
6005 		free(pfras, M_PF);
6006 		break;
6007 	}
6008 
6009 	case DIOCRGETADDRS: {
6010 		struct pfioc_table *io = (struct pfioc_table *)addr;
6011 		struct pfr_addr *pfras;
6012 		size_t totlen;
6013 
6014 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
6015 			error = ENODEV;
6016 			goto fail;
6017 		}
6018 		if (io->pfrio_size < 0 ||
6019 		    io->pfrio_size > pf_ioctl_maxcount ||
6020 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
6021 			error = EINVAL;
6022 			goto fail;
6023 		}
6024 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
6025 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
6026 		    M_PF, M_WAITOK | M_ZERO);
6027 		PF_RULES_RLOCK();
6028 		error = pfr_get_addrs(&io->pfrio_table, pfras,
6029 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
6030 		PF_RULES_RUNLOCK();
6031 		if (error == 0)
6032 			error = copyout(pfras, io->pfrio_buffer, totlen);
6033 		free(pfras, M_PF);
6034 		break;
6035 	}
6036 
6037 	case DIOCRGETASTATS: {
6038 		struct pfioc_table *io = (struct pfioc_table *)addr;
6039 		struct pfr_astats *pfrastats;
6040 		size_t totlen;
6041 
6042 		if (io->pfrio_esize != sizeof(struct pfr_astats)) {
6043 			error = ENODEV;
6044 			goto fail;
6045 		}
6046 		if (io->pfrio_size < 0 ||
6047 		    io->pfrio_size > pf_ioctl_maxcount ||
6048 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_astats))) {
6049 			error = EINVAL;
6050 			goto fail;
6051 		}
6052 		totlen = io->pfrio_size * sizeof(struct pfr_astats);
6053 		pfrastats = mallocarray(io->pfrio_size,
6054 		    sizeof(struct pfr_astats), M_PF, M_WAITOK | M_ZERO);
6055 		PF_RULES_RLOCK();
6056 		error = pfr_get_astats(&io->pfrio_table, pfrastats,
6057 		    &io->pfrio_size, io->pfrio_flags | PFR_FLAG_USERIOCTL);
6058 		PF_RULES_RUNLOCK();
6059 		if (error == 0)
6060 			error = copyout(pfrastats, io->pfrio_buffer, totlen);
6061 		free(pfrastats, M_PF);
6062 		break;
6063 	}
6064 
6065 	case DIOCRCLRASTATS: {
6066 		struct pfioc_table *io = (struct pfioc_table *)addr;
6067 		struct pfr_addr *pfras;
6068 		size_t totlen;
6069 
6070 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
6071 			error = ENODEV;
6072 			goto fail;
6073 		}
6074 		if (io->pfrio_size < 0 ||
6075 		    io->pfrio_size > pf_ioctl_maxcount ||
6076 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
6077 			error = EINVAL;
6078 			goto fail;
6079 		}
6080 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
6081 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
6082 		    M_PF, M_WAITOK);
6083 		error = copyin(io->pfrio_buffer, pfras, totlen);
6084 		if (error) {
6085 			free(pfras, M_PF);
6086 			goto fail;
6087 		}
6088 		PF_RULES_WLOCK();
6089 		error = pfr_clr_astats(&io->pfrio_table, pfras,
6090 		    io->pfrio_size, &io->pfrio_nzero, io->pfrio_flags |
6091 		    PFR_FLAG_USERIOCTL);
6092 		PF_RULES_WUNLOCK();
6093 		if (error == 0 && io->pfrio_flags & PFR_FLAG_FEEDBACK)
6094 			error = copyout(pfras, io->pfrio_buffer, totlen);
6095 		free(pfras, M_PF);
6096 		break;
6097 	}
6098 
6099 	case DIOCRTSTADDRS: {
6100 		struct pfioc_table *io = (struct pfioc_table *)addr;
6101 		struct pfr_addr *pfras;
6102 		size_t totlen;
6103 
6104 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
6105 			error = ENODEV;
6106 			goto fail;
6107 		}
6108 		if (io->pfrio_size < 0 ||
6109 		    io->pfrio_size > pf_ioctl_maxcount ||
6110 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
6111 			error = EINVAL;
6112 			goto fail;
6113 		}
6114 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
6115 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
6116 		    M_PF, M_WAITOK);
6117 		error = copyin(io->pfrio_buffer, pfras, totlen);
6118 		if (error) {
6119 			free(pfras, M_PF);
6120 			goto fail;
6121 		}
6122 		PF_RULES_RLOCK();
6123 		error = pfr_tst_addrs(&io->pfrio_table, pfras,
6124 		    io->pfrio_size, &io->pfrio_nmatch, io->pfrio_flags |
6125 		    PFR_FLAG_USERIOCTL);
6126 		PF_RULES_RUNLOCK();
6127 		if (error == 0)
6128 			error = copyout(pfras, io->pfrio_buffer, totlen);
6129 		free(pfras, M_PF);
6130 		break;
6131 	}
6132 
6133 	case DIOCRINADEFINE: {
6134 		struct pfioc_table *io = (struct pfioc_table *)addr;
6135 		struct pfr_addr *pfras;
6136 		size_t totlen;
6137 
6138 		if (io->pfrio_esize != sizeof(struct pfr_addr)) {
6139 			error = ENODEV;
6140 			goto fail;
6141 		}
6142 		if (io->pfrio_size < 0 ||
6143 		    io->pfrio_size > pf_ioctl_maxcount ||
6144 		    WOULD_OVERFLOW(io->pfrio_size, sizeof(struct pfr_addr))) {
6145 			error = EINVAL;
6146 			goto fail;
6147 		}
6148 		totlen = io->pfrio_size * sizeof(struct pfr_addr);
6149 		pfras = mallocarray(io->pfrio_size, sizeof(struct pfr_addr),
6150 		    M_PF, M_WAITOK);
6151 		error = copyin(io->pfrio_buffer, pfras, totlen);
6152 		if (error) {
6153 			free(pfras, M_PF);
6154 			goto fail;
6155 		}
6156 		PF_RULES_WLOCK();
6157 		error = pfr_ina_define(&io->pfrio_table, pfras,
6158 		    io->pfrio_size, &io->pfrio_nadd, &io->pfrio_naddr,
6159 		    io->pfrio_ticket, io->pfrio_flags | PFR_FLAG_USERIOCTL);
6160 		PF_RULES_WUNLOCK();
6161 		free(pfras, M_PF);
6162 		break;
6163 	}
6164 
6165 	case DIOCOSFPADD: {
6166 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
6167 		PF_RULES_WLOCK();
6168 		error = pf_osfp_add(io);
6169 		PF_RULES_WUNLOCK();
6170 		break;
6171 	}
6172 
6173 	case DIOCOSFPGET: {
6174 		struct pf_osfp_ioctl *io = (struct pf_osfp_ioctl *)addr;
6175 		PF_RULES_RLOCK();
6176 		error = pf_osfp_get(io);
6177 		PF_RULES_RUNLOCK();
6178 		break;
6179 	}
6180 
6181 	case DIOCXBEGIN: {
6182 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
6183 		struct pfioc_trans_e	*ioes, *ioe;
6184 		size_t			 totlen;
6185 		int			 i;
6186 
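		/*
		 * DIOCXBEGIN opens an inactive ruleset per transaction
		 * element and hands back a ticket for each; DIOCXCOMMIT
		 * later validates those tickets and atomically swaps the
		 * inactive rulesets in, while DIOCXROLLBACK discards them.
		 */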
6187 		if (io->esize != sizeof(*ioe)) {
6188 			error = ENODEV;
6189 			goto fail;
6190 		}
6191 		if (io->size < 0 ||
6192 		    io->size > pf_ioctl_maxcount ||
6193 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
6194 			error = EINVAL;
6195 			goto fail;
6196 		}
6197 		totlen = sizeof(struct pfioc_trans_e) * io->size;
6198 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
6199 		    M_PF, M_WAITOK);
6200 		error = copyin(io->array, ioes, totlen);
6201 		if (error) {
6202 			free(ioes, M_PF);
6203 			goto fail;
6204 		}
6205 		PF_RULES_WLOCK();
6206 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
6207 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
6208 			switch (ioe->rs_num) {
6209 			case PF_RULESET_ETH:
6210 				if ((error = pf_begin_eth(&ioe->ticket, ioe->anchor))) {
6211 					PF_RULES_WUNLOCK();
6212 					free(ioes, M_PF);
6213 					goto fail;
6214 				}
6215 				break;
6216 #ifdef ALTQ
6217 			case PF_RULESET_ALTQ:
6218 				if (ioe->anchor[0]) {
6219 					PF_RULES_WUNLOCK();
6220 					free(ioes, M_PF);
6221 					error = EINVAL;
6222 					goto fail;
6223 				}
6224 				if ((error = pf_begin_altq(&ioe->ticket))) {
6225 					PF_RULES_WUNLOCK();
6226 					free(ioes, M_PF);
6227 					goto fail;
6228 				}
6229 				break;
6230 #endif /* ALTQ */
6231 			case PF_RULESET_TABLE:
6232 			    {
6233 				struct pfr_table table;
6234 
6235 				bzero(&table, sizeof(table));
6236 				strlcpy(table.pfrt_anchor, ioe->anchor,
6237 				    sizeof(table.pfrt_anchor));
6238 				if ((error = pfr_ina_begin(&table,
6239 				    &ioe->ticket, NULL, 0))) {
6240 					PF_RULES_WUNLOCK();
6241 					free(ioes, M_PF);
6242 					goto fail;
6243 				}
6244 				break;
6245 			    }
6246 			default:
6247 				if ((error = pf_begin_rules(&ioe->ticket,
6248 				    ioe->rs_num, ioe->anchor))) {
6249 					PF_RULES_WUNLOCK();
6250 					free(ioes, M_PF);
6251 					goto fail;
6252 				}
6253 				break;
6254 			}
6255 		}
6256 		PF_RULES_WUNLOCK();
6257 		error = copyout(ioes, io->array, totlen);
6258 		free(ioes, M_PF);
6259 		break;
6260 	}
6261 
6262 	case DIOCXROLLBACK: {
6263 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
6264 		struct pfioc_trans_e	*ioe, *ioes;
6265 		size_t			 totlen;
6266 		int			 i;
6267 
6268 		if (io->esize != sizeof(*ioe)) {
6269 			error = ENODEV;
6270 			goto fail;
6271 		}
6272 		if (io->size < 0 ||
6273 		    io->size > pf_ioctl_maxcount ||
6274 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
6275 			error = EINVAL;
6276 			goto fail;
6277 		}
6278 		totlen = sizeof(struct pfioc_trans_e) * io->size;
6279 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
6280 		    M_PF, M_WAITOK);
6281 		error = copyin(io->array, ioes, totlen);
6282 		if (error) {
6283 			free(ioes, M_PF);
6284 			goto fail;
6285 		}
6286 		PF_RULES_WLOCK();
6287 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
6288 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
6289 			switch (ioe->rs_num) {
6290 			case PF_RULESET_ETH:
6291 				if ((error = pf_rollback_eth(ioe->ticket,
6292 				    ioe->anchor))) {
6293 					PF_RULES_WUNLOCK();
6294 					free(ioes, M_PF);
6295 					goto fail; /* really bad */
6296 				}
6297 				break;
6298 #ifdef ALTQ
6299 			case PF_RULESET_ALTQ:
6300 				if (ioe->anchor[0]) {
6301 					PF_RULES_WUNLOCK();
6302 					free(ioes, M_PF);
6303 					error = EINVAL;
6304 					goto fail;
6305 				}
6306 				if ((error = pf_rollback_altq(ioe->ticket))) {
6307 					PF_RULES_WUNLOCK();
6308 					free(ioes, M_PF);
6309 					goto fail; /* really bad */
6310 				}
6311 				break;
6312 #endif /* ALTQ */
6313 			case PF_RULESET_TABLE:
6314 			    {
6315 				struct pfr_table table;
6316 
6317 				bzero(&table, sizeof(table));
6318 				strlcpy(table.pfrt_anchor, ioe->anchor,
6319 				    sizeof(table.pfrt_anchor));
6320 				if ((error = pfr_ina_rollback(&table,
6321 				    ioe->ticket, NULL, 0))) {
6322 					PF_RULES_WUNLOCK();
6323 					free(ioes, M_PF);
6324 					goto fail; /* really bad */
6325 				}
6326 				break;
6327 			    }
6328 			default:
6329 				if ((error = pf_rollback_rules(ioe->ticket,
6330 				    ioe->rs_num, ioe->anchor))) {
6331 					PF_RULES_WUNLOCK();
6332 					free(ioes, M_PF);
6333 					goto fail; /* really bad */
6334 				}
6335 				break;
6336 			}
6337 		}
6338 		PF_RULES_WUNLOCK();
6339 		free(ioes, M_PF);
6340 		break;
6341 	}
6342 
6343 	case DIOCXCOMMIT: {
6344 		struct pfioc_trans	*io = (struct pfioc_trans *)addr;
6345 		struct pfioc_trans_e	*ioe, *ioes;
6346 		struct pf_kruleset	*rs;
6347 		struct pf_keth_ruleset	*ers;
6348 		size_t			 totlen;
6349 		int			 i;
6350 
6351 		if (io->esize != sizeof(*ioe)) {
6352 			error = ENODEV;
6353 			goto fail;
6354 		}
6355 
6356 		if (io->size < 0 ||
6357 		    io->size > pf_ioctl_maxcount ||
6358 		    WOULD_OVERFLOW(io->size, sizeof(struct pfioc_trans_e))) {
6359 			error = EINVAL;
6360 			goto fail;
6361 		}
6362 
6363 		totlen = sizeof(struct pfioc_trans_e) * io->size;
6364 		ioes = mallocarray(io->size, sizeof(struct pfioc_trans_e),
6365 		    M_PF, M_WAITOK);
6366 		error = copyin(io->array, ioes, totlen);
6367 		if (error) {
6368 			free(ioes, M_PF);
6369 			goto fail;
6370 		}
6371 		PF_RULES_WLOCK();
6372 		/* First pass: make sure everything will succeed. */
6373 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
6374 			ioe->anchor[sizeof(ioe->anchor) - 1] = '\0';
6375 			switch (ioe->rs_num) {
6376 			case PF_RULESET_ETH:
6377 				ers = pf_find_keth_ruleset(ioe->anchor);
6378 				if (ers == NULL || ioe->ticket == 0 ||
6379 				    ioe->ticket != ers->inactive.ticket) {
6380 					PF_RULES_WUNLOCK();
6381 					free(ioes, M_PF);
6382 					error = EINVAL;
6383 					goto fail;
6384 				}
6385 				break;
6386 #ifdef ALTQ
6387 			case PF_RULESET_ALTQ:
6388 				if (ioe->anchor[0]) {
6389 					PF_RULES_WUNLOCK();
6390 					free(ioes, M_PF);
6391 					error = EINVAL;
6392 					goto fail;
6393 				}
6394 				if (!V_altqs_inactive_open || ioe->ticket !=
6395 				    V_ticket_altqs_inactive) {
6396 					PF_RULES_WUNLOCK();
6397 					free(ioes, M_PF);
6398 					error = EBUSY;
6399 					goto fail;
6400 				}
6401 				break;
6402 #endif /* ALTQ */
6403 			case PF_RULESET_TABLE:
6404 				rs = pf_find_kruleset(ioe->anchor);
6405 				if (rs == NULL || !rs->topen || ioe->ticket !=
6406 				    rs->tticket) {
6407 					PF_RULES_WUNLOCK();
6408 					free(ioes, M_PF);
6409 					error = EBUSY;
6410 					goto fail;
6411 				}
6412 				break;
6413 			default:
6414 				if (ioe->rs_num < 0 || ioe->rs_num >=
6415 				    PF_RULESET_MAX) {
6416 					PF_RULES_WUNLOCK();
6417 					free(ioes, M_PF);
6418 					error = EINVAL;
6419 					goto fail;
6420 				}
6421 				rs = pf_find_kruleset(ioe->anchor);
6422 				if (rs == NULL ||
6423 				    !rs->rules[ioe->rs_num].inactive.open ||
6424 				    rs->rules[ioe->rs_num].inactive.ticket !=
6425 				    ioe->ticket) {
6426 					PF_RULES_WUNLOCK();
6427 					free(ioes, M_PF);
6428 					error = EBUSY;
6429 					goto fail;
6430 				}
6431 				break;
6432 			}
6433 		}
6434 		/* Now do the commit - no errors should happen here. */
6435 		for (i = 0, ioe = ioes; i < io->size; i++, ioe++) {
6436 			switch (ioe->rs_num) {
6437 			case PF_RULESET_ETH:
6438 				if ((error = pf_commit_eth(ioe->ticket, ioe->anchor))) {
6439 					PF_RULES_WUNLOCK();
6440 					free(ioes, M_PF);
6441 					goto fail; /* really bad */
6442 				}
6443 				break;
6444 #ifdef ALTQ
6445 			case PF_RULESET_ALTQ:
6446 				if ((error = pf_commit_altq(ioe->ticket))) {
6447 					PF_RULES_WUNLOCK();
6448 					free(ioes, M_PF);
6449 					goto fail; /* really bad */
6450 				}
6451 				break;
6452 #endif /* ALTQ */
6453 			case PF_RULESET_TABLE:
6454 			    {
6455 				struct pfr_table table;
6456 
6457 				bzero(&table, sizeof(table));
6458 				(void)strlcpy(table.pfrt_anchor, ioe->anchor,
6459 				    sizeof(table.pfrt_anchor));
6460 				if ((error = pfr_ina_commit(&table,
6461 				    ioe->ticket, NULL, NULL, 0))) {
6462 					PF_RULES_WUNLOCK();
6463 					free(ioes, M_PF);
6464 					goto fail; /* really bad */
6465 				}
6466 				break;
6467 			    }
6468 			default:
6469 				if ((error = pf_commit_rules(ioe->ticket,
6470 				    ioe->rs_num, ioe->anchor))) {
6471 					PF_RULES_WUNLOCK();
6472 					free(ioes, M_PF);
6473 					goto fail; /* really bad */
6474 				}
6475 				break;
6476 			}
6477 		}
6478 		PF_RULES_WUNLOCK();
6479 
6480 		/* Only hook into Ethernet traffic if we've got rules for it. */
6481 		if (! TAILQ_EMPTY(V_pf_keth->active.rules))
6482 			hook_pf_eth();
6483 		else
6484 			dehook_pf_eth();
6485 
6486 		free(ioes, M_PF);
6487 		break;
6488 	}
6489 
6490 	case DIOCGETSRCNODES: {
6491 		struct pfioc_src_nodes	*psn = (struct pfioc_src_nodes *)addr;
6492 		struct pf_srchash	*sh;
6493 		struct pf_ksrc_node	*n;
6494 		struct pf_src_node	*p, *pstore;
6495 		uint32_t		 i, nr = 0;
6496 
6497 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
6498 				i++, sh++) {
6499 			PF_HASHROW_LOCK(sh);
6500 			LIST_FOREACH(n, &sh->nodes, entry)
6501 				nr++;
6502 			PF_HASHROW_UNLOCK(sh);
6503 		}
6504 
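		/*
		 * A zero-length buffer is a size probe: report how many
		 * bytes a full dump currently requires and return.
		 */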
6505 		psn->psn_len = min(psn->psn_len,
6506 		    sizeof(struct pf_src_node) * nr);
6507 
6508 		if (psn->psn_len == 0) {
6509 			psn->psn_len = sizeof(struct pf_src_node) * nr;
6510 			goto fail;
6511 		}
6512 
6513 		nr = 0;
6514 
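		/*
		 * Nodes may have been added since the counting pass above,
		 * so the copy loop re-checks each entry against the
		 * caller-supplied buffer length.
		 */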
6515 		p = pstore = malloc(psn->psn_len, M_PF, M_WAITOK | M_ZERO);
6516 		for (i = 0, sh = V_pf_srchash; i <= V_pf_srchashmask;
6517 		    i++, sh++) {
6518 		    PF_HASHROW_LOCK(sh);
6519 		    LIST_FOREACH(n, &sh->nodes, entry) {
6520 
6521 			if ((nr + 1) * sizeof(*p) > (unsigned)psn->psn_len)
6522 				break;
6523 
6524 			pf_src_node_copy(n, p);
6525 
6526 			p++;
6527 			nr++;
6528 		    }
6529 		    PF_HASHROW_UNLOCK(sh);
6530 		}
6531 		error = copyout(pstore, psn->psn_src_nodes,
6532 		    sizeof(struct pf_src_node) * nr);
6533 		if (error) {
6534 			free(pstore, M_PF);
6535 			goto fail;
6536 		}
6537 		psn->psn_len = sizeof(struct pf_src_node) * nr;
6538 		free(pstore, M_PF);
6539 		break;
6540 	}
6541 
6542 	case DIOCCLRSRCNODES: {
6543 		pf_kill_srcnodes(NULL);
6544 		break;
6545 	}
6546 
6547 	case DIOCKILLSRCNODES:
6548 		pf_kill_srcnodes((struct pfioc_src_node_kill *)addr);
6549 		break;
6550 
6551 #ifdef COMPAT_FREEBSD13
6552 	case DIOCKEEPCOUNTERS_FREEBSD13:
6553 #endif
6554 	case DIOCKEEPCOUNTERS:
6555 		error = pf_keepcounters((struct pfioc_nv *)addr);
6556 		break;
6557 
6558 	case DIOCGETSYNCOOKIES:
6559 		error = pf_get_syncookies((struct pfioc_nv *)addr);
6560 		break;
6561 
6562 	case DIOCSETSYNCOOKIES:
6563 		error = pf_set_syncookies((struct pfioc_nv *)addr);
6564 		break;
6565 
6566 	case DIOCSETHOSTID: {
6567 		u_int32_t	*hostid = (u_int32_t *)addr;
6568 
6569 		PF_RULES_WLOCK();
6570 		if (*hostid == 0)
6571 			V_pf_status.hostid = arc4random();
6572 		else
6573 			V_pf_status.hostid = *hostid;
6574 		PF_RULES_WUNLOCK();
6575 		break;
6576 	}
6577 
6578 	case DIOCOSFPFLUSH:
6579 		PF_RULES_WLOCK();
6580 		pf_osfp_flush();
6581 		PF_RULES_WUNLOCK();
6582 		break;
6583 
6584 	case DIOCIGETIFACES: {
6585 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
6586 		struct pfi_kif *ifstore;
6587 		size_t bufsiz;
6588 
6589 		if (io->pfiio_esize != sizeof(struct pfi_kif)) {
6590 			error = ENODEV;
6591 			goto fail;
6592 		}
6593 
6594 		if (io->pfiio_size < 0 ||
6595 		    io->pfiio_size > pf_ioctl_maxcount ||
6596 		    WOULD_OVERFLOW(io->pfiio_size, sizeof(struct pfi_kif))) {
6597 			error = EINVAL;
6598 			goto fail;
6599 		}
6600 
6601 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
6602 
6603 		bufsiz = io->pfiio_size * sizeof(struct pfi_kif);
6604 		ifstore = mallocarray(io->pfiio_size, sizeof(struct pfi_kif),
6605 		    M_PF, M_WAITOK | M_ZERO);
6606 
6607 		PF_RULES_RLOCK();
6608 		pfi_get_ifaces(io->pfiio_name, ifstore, &io->pfiio_size);
6609 		PF_RULES_RUNLOCK();
6610 		error = copyout(ifstore, io->pfiio_buffer, bufsiz);
6611 		free(ifstore, M_PF);
6612 		break;
6613 	}
6614 
6615 	case DIOCSETIFFLAG: {
6616 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
6617 
6618 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
6619 
6620 		PF_RULES_WLOCK();
6621 		error = pfi_set_flags(io->pfiio_name, io->pfiio_flags);
6622 		PF_RULES_WUNLOCK();
6623 		break;
6624 	}
6625 
6626 	case DIOCCLRIFFLAG: {
6627 		struct pfioc_iface *io = (struct pfioc_iface *)addr;
6628 
6629 		io->pfiio_name[sizeof(io->pfiio_name) - 1] = '\0';
6630 
6631 		PF_RULES_WLOCK();
6632 		error = pfi_clear_flags(io->pfiio_name, io->pfiio_flags);
6633 		PF_RULES_WUNLOCK();
6634 		break;
6635 	}
6636 
6637 	case DIOCSETREASS: {
6638 		u_int32_t	*reass = (u_int32_t *)addr;
6639 
6640 		V_pf_status.reass = *reass & (PF_REASS_ENABLED|PF_REASS_NODF);
6641 		/* Clearing the DF flag without reassembly enabled is not a
6642 		 * valid combination, so disable reassembly in that case. */
6643 		if (!(V_pf_status.reass & PF_REASS_ENABLED))
6644 			V_pf_status.reass = 0;
6645 		break;
6646 	}
6647 
6648 	default:
6649 		error = ENODEV;
6650 		break;
6651 	}
6652 fail:
6653 	CURVNET_RESTORE();
6654 
6655 #undef ERROUT_IOCTL
6656 
6657 	return (error);
6658 }
6659 
6660 static void
6661 pfsync_state_export(union pfsync_state_union *sp, struct pf_kstate *st, int msg_version)
6662 {
6663 	const char	*tagname;
6664 
6665 	/* copy from state key */
6666 	sp->pfs_1301.key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
6667 	sp->pfs_1301.key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
6668 	sp->pfs_1301.key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
6669 	sp->pfs_1301.key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
6670 	sp->pfs_1301.key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
6671 	sp->pfs_1301.key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
6672 	sp->pfs_1301.key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
6673 	sp->pfs_1301.key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
6674 
6675 	/* copy from state */
6676 	strlcpy(sp->pfs_1301.ifname, st->kif->pfik_name, sizeof(sp->pfs_1301.ifname));
6677 	bcopy(&st->act.rt_addr, &sp->pfs_1301.rt_addr, sizeof(sp->pfs_1301.rt_addr));
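	/* st->creation is kept in milliseconds; export the state's age in
	 * seconds, in network byte order. */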
6678 	sp->pfs_1301.creation = htonl(time_uptime - (st->creation / 1000));
6679 	sp->pfs_1301.expire = pf_state_expires(st);
6680 	if (sp->pfs_1301.expire <= time_uptime)
6681 		sp->pfs_1301.expire = htonl(0);
6682 	else
6683 		sp->pfs_1301.expire = htonl(sp->pfs_1301.expire - time_uptime);
6684 
6685 	switch (msg_version) {
6686 		case PFSYNC_MSG_VERSION_1301:
6687 			sp->pfs_1301.state_flags = st->state_flags;
6688 			sp->pfs_1301.direction = st->direction;
6689 			sp->pfs_1301.log = st->act.log;
6690 			sp->pfs_1301.timeout = st->timeout;
6691 			sp->pfs_1301.proto = st->key[PF_SK_WIRE]->proto;
6692 			sp->pfs_1301.af = st->key[PF_SK_WIRE]->af;
6693 			/*
6694 			 * XXX Why do we bother pfsyncing source node information if source
6695 			 * nodes are not synced? Showing users that there is source tracking
6696 			 * when there is none seems useless.
6697 			 */
6698 			if (st->sns[PF_SN_LIMIT] != NULL)
6699 				sp->pfs_1301.sync_flags |= PFSYNC_FLAG_SRCNODE;
6700 			if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
6701 				sp->pfs_1301.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
6702 			break;
6703 		case PFSYNC_MSG_VERSION_1400:
6704 			sp->pfs_1400.state_flags = htons(st->state_flags);
6705 			sp->pfs_1400.direction = st->direction;
6706 			sp->pfs_1400.log = st->act.log;
6707 			sp->pfs_1400.timeout = st->timeout;
6708 			sp->pfs_1400.proto = st->key[PF_SK_WIRE]->proto;
6709 			sp->pfs_1400.af = st->key[PF_SK_WIRE]->af;
6710 			sp->pfs_1400.qid = htons(st->act.qid);
6711 			sp->pfs_1400.pqid = htons(st->act.pqid);
6712 			sp->pfs_1400.dnpipe = htons(st->act.dnpipe);
6713 			sp->pfs_1400.dnrpipe = htons(st->act.dnrpipe);
6714 			sp->pfs_1400.rtableid = htonl(st->act.rtableid);
6715 			sp->pfs_1400.min_ttl = st->act.min_ttl;
6716 			sp->pfs_1400.set_tos = st->act.set_tos;
6717 			sp->pfs_1400.max_mss = htons(st->act.max_mss);
6718 			sp->pfs_1400.set_prio[0] = st->act.set_prio[0];
6719 			sp->pfs_1400.set_prio[1] = st->act.set_prio[1];
6720 			sp->pfs_1400.rt = st->act.rt;
6721 			if (st->act.rt_kif)
6722 				strlcpy(sp->pfs_1400.rt_ifname,
6723 				    st->act.rt_kif->pfik_name,
6724 				    sizeof(sp->pfs_1400.rt_ifname));
6725 			/*
6726 			 * XXX Why do we bother pfsyncing source node information if source
6727 			 * nodes are not synced? Showing users that there is source tracking
6728 			 * when there is none seems useless.
6729 			 */
6730 			if (st->sns[PF_SN_LIMIT] != NULL)
6731 				sp->pfs_1400.sync_flags |= PFSYNC_FLAG_SRCNODE;
6732 			if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE])
6733 				sp->pfs_1400.sync_flags |= PFSYNC_FLAG_NATSRCNODE;
6734 			break;
6735 		case PFSYNC_MSG_VERSION_1500:
6736 			sp->pfs_1500.state_flags = htons(st->state_flags);
6737 			sp->pfs_1500.direction = st->direction;
6738 			sp->pfs_1500.log = st->act.log;
6739 			sp->pfs_1500.timeout = st->timeout;
6740 			sp->pfs_1500.wire_proto = st->key[PF_SK_WIRE]->proto;
6741 			sp->pfs_1500.wire_af = st->key[PF_SK_WIRE]->af;
6742 			sp->pfs_1500.stack_proto = st->key[PF_SK_STACK]->proto;
6743 			sp->pfs_1500.stack_af = st->key[PF_SK_STACK]->af;
6744 			sp->pfs_1500.qid = htons(st->act.qid);
6745 			sp->pfs_1500.pqid = htons(st->act.pqid);
6746 			sp->pfs_1500.dnpipe = htons(st->act.dnpipe);
6747 			sp->pfs_1500.dnrpipe = htons(st->act.dnrpipe);
6748 			sp->pfs_1500.rtableid = htonl(st->act.rtableid);
6749 			sp->pfs_1500.min_ttl = st->act.min_ttl;
6750 			sp->pfs_1500.set_tos = st->act.set_tos;
6751 			sp->pfs_1500.max_mss = htons(st->act.max_mss);
6752 			sp->pfs_1500.set_prio[0] = st->act.set_prio[0];
6753 			sp->pfs_1500.set_prio[1] = st->act.set_prio[1];
6754 			sp->pfs_1500.rt = st->act.rt;
6755 			sp->pfs_1500.rt_af = st->act.rt_af;
6756 			if (st->act.rt_kif)
6757 				strlcpy(sp->pfs_1500.rt_ifname,
6758 				    st->act.rt_kif->pfik_name,
6759 				    sizeof(sp->pfs_1500.rt_ifname));
6760 			strlcpy(sp->pfs_1500.orig_ifname,
6761 			    st->orig_kif->pfik_name,
6762 			    sizeof(sp->pfs_1500.orig_ifname));
6763 			if ((tagname = pf_tag2tagname(st->tag)) != NULL)
6764 				strlcpy(sp->pfs_1500.tagname, tagname,
6765 				    sizeof(sp->pfs_1500.tagname));
6766 			break;
6767 		default:
6768 			panic("%s: Unsupported pfsync_msg_version %d",
6769 			    __func__, msg_version);
6770 	}
6771 
6772 	sp->pfs_1301.id = st->id;
6773 	sp->pfs_1301.creatorid = st->creatorid;
6774 	pf_state_peer_hton(&st->src, &sp->pfs_1301.src);
6775 	pf_state_peer_hton(&st->dst, &sp->pfs_1301.dst);
6776 
6777 	if (st->rule == NULL)
6778 		sp->pfs_1301.rule = htonl(-1);
6779 	else
6780 		sp->pfs_1301.rule = htonl(st->rule->nr);
6781 	if (st->anchor == NULL)
6782 		sp->pfs_1301.anchor = htonl(-1);
6783 	else
6784 		sp->pfs_1301.anchor = htonl(st->anchor->nr);
6785 	if (st->nat_rule == NULL)
6786 		sp->pfs_1301.nat_rule = htonl(-1);
6787 	else
6788 		sp->pfs_1301.nat_rule = htonl(st->nat_rule->nr);
6789 
6790 	pf_state_counter_hton(st->packets[0], sp->pfs_1301.packets[0]);
6791 	pf_state_counter_hton(st->packets[1], sp->pfs_1301.packets[1]);
6792 	pf_state_counter_hton(st->bytes[0], sp->pfs_1301.bytes[0]);
6793 	pf_state_counter_hton(st->bytes[1], sp->pfs_1301.bytes[1]);
6794 }
6795 
6796 void
6797 pfsync_state_export_1301(struct pfsync_state_1301 *sp, struct pf_kstate *st)
6798 {
6799 	bzero(sp, sizeof(*sp));
6800 	pfsync_state_export((union pfsync_state_union *)sp, st,
6801 	    PFSYNC_MSG_VERSION_1301);
6802 }
6803 
6804 void
6805 pfsync_state_export_1400(struct pfsync_state_1400 *sp, struct pf_kstate *st)
6806 {
6807 	bzero(sp, sizeof(*sp));
6808 	pfsync_state_export((union pfsync_state_union *)sp, st,
6809 	    PFSYNC_MSG_VERSION_1400);
6810 }
6811 
6812 void
6813 pfsync_state_export_1500(struct pfsync_state_1500 *sp, struct pf_kstate *st)
6814 {
6815 	bzero(sp, sizeof(*sp));
6816 	pfsync_state_export((union pfsync_state_union *)sp, st,
6817 	    PFSYNC_MSG_VERSION_1500);
6818 }
6819 
6820 void
6821 pf_state_export(struct pf_state_export *sp, struct pf_kstate *st)
6822 {
6823 	bzero(sp, sizeof(*sp));
6824 
6825 	sp->version = PF_STATE_VERSION;
6826 
6827 	/* copy from state key */
6828 	sp->key[PF_SK_WIRE].addr[0] = st->key[PF_SK_WIRE]->addr[0];
6829 	sp->key[PF_SK_WIRE].addr[1] = st->key[PF_SK_WIRE]->addr[1];
6830 	sp->key[PF_SK_WIRE].port[0] = st->key[PF_SK_WIRE]->port[0];
6831 	sp->key[PF_SK_WIRE].port[1] = st->key[PF_SK_WIRE]->port[1];
6832 	sp->key[PF_SK_STACK].addr[0] = st->key[PF_SK_STACK]->addr[0];
6833 	sp->key[PF_SK_STACK].addr[1] = st->key[PF_SK_STACK]->addr[1];
6834 	sp->key[PF_SK_STACK].port[0] = st->key[PF_SK_STACK]->port[0];
6835 	sp->key[PF_SK_STACK].port[1] = st->key[PF_SK_STACK]->port[1];
6836 	sp->proto = st->key[PF_SK_WIRE]->proto;
6837 	sp->af = st->key[PF_SK_WIRE]->af;
6838 
6839 	/* copy from state */
6840 	strlcpy(sp->ifname, st->kif->pfik_name, sizeof(sp->ifname));
6841 	strlcpy(sp->orig_ifname, st->orig_kif->pfik_name,
6842 	    sizeof(sp->orig_ifname));
6843 	memcpy(&sp->rt_addr, &st->act.rt_addr, sizeof(sp->rt_addr));
6844 	sp->creation = htonl(time_uptime - (st->creation / 1000));
6845 	sp->expire = pf_state_expires(st);
6846 	if (sp->expire <= time_uptime)
6847 		sp->expire = htonl(0);
6848 	else
6849 		sp->expire = htonl(sp->expire - time_uptime);
6850 
6851 	sp->direction = st->direction;
6852 	sp->log = st->act.log;
6853 	sp->timeout = st->timeout;
6854 	/* 8 bits for the old libpfctl, 16 bits for the new libpfctl */
6855 	sp->state_flags_compat = st->state_flags;
6856 	sp->state_flags = htons(st->state_flags);
6857 	if (st->sns[PF_SN_LIMIT] != NULL)
6858 		sp->sync_flags |= PFSYNC_FLAG_SRCNODE;
6859 	if (st->sns[PF_SN_NAT] != NULL || st->sns[PF_SN_ROUTE] != NULL)
6860 		sp->sync_flags |= PFSYNC_FLAG_NATSRCNODE;
6861 	sp->id = st->id;
6862 	sp->creatorid = st->creatorid;
6863 	pf_state_peer_hton(&st->src, &sp->src);
6864 	pf_state_peer_hton(&st->dst, &sp->dst);
6865 
6866 	if (st->rule == NULL)
6867 		sp->rule = htonl(-1);
6868 	else
6869 		sp->rule = htonl(st->rule->nr);
6870 	if (st->anchor == NULL)
6871 		sp->anchor = htonl(-1);
6872 	else
6873 		sp->anchor = htonl(st->anchor->nr);
6874 	if (st->nat_rule == NULL)
6875 		sp->nat_rule = htonl(-1);
6876 	else
6877 		sp->nat_rule = htonl(st->nat_rule->nr);
6878 
6879 	sp->packets[0] = st->packets[0];
6880 	sp->packets[1] = st->packets[1];
6881 	sp->bytes[0] = st->bytes[0];
6882 	sp->bytes[1] = st->bytes[1];
6883 
6884 	sp->qid = htons(st->act.qid);
6885 	sp->pqid = htons(st->act.pqid);
6886 	sp->dnpipe = htons(st->act.dnpipe);
6887 	sp->dnrpipe = htons(st->act.dnrpipe);
6888 	sp->rtableid = htonl(st->act.rtableid);
6889 	sp->min_ttl = st->act.min_ttl;
6890 	sp->set_tos = st->act.set_tos;
6891 	sp->max_mss = htons(st->act.max_mss);
6892 	sp->rt = st->act.rt;
6893 	if (st->act.rt_kif)
6894 		strlcpy(sp->rt_ifname, st->act.rt_kif->pfik_name,
6895 		    sizeof(sp->rt_ifname));
6896 	sp->set_prio[0] = st->act.set_prio[0];
6897 	sp->set_prio[1] = st->act.set_prio[1];
6898 
6899 }
6900 
6901 static void
6902 pf_tbladdr_copyout(struct pf_addr_wrap *aw)
6903 {
6904 	struct pfr_ktable *kt;
6905 
6906 	KASSERT(aw->type == PF_ADDR_TABLE, ("%s: type %u", __func__, aw->type));
6907 
6908 	kt = aw->p.tbl;
6909 	if (!(kt->pfrkt_flags & PFR_TFLAG_ACTIVE) && kt->pfrkt_root != NULL)
6910 		kt = kt->pfrkt_root;
6911 	aw->p.tbl = NULL;
6912 	aw->p.tblcnt = (kt->pfrkt_flags & PFR_TFLAG_ACTIVE) ?
6913 		kt->pfrkt_cnt : -1;
6914 }
6915 
6916 static int
6917 pf_add_status_counters(nvlist_t *nvl, const char *name, counter_u64_t *counters,
6918     size_t number, char **names)
6919 {
6920 	nvlist_t        *nvc;
6921 
6922 	nvc = nvlist_create(0);
6923 	if (nvc == NULL)
6924 		return (ENOMEM);
6925 
6926 	for (int i = 0; i < number; i++) {
6927 		nvlist_append_number_array(nvc, "counters",
6928 		    counter_u64_fetch(counters[i]));
6929 		nvlist_append_string_array(nvc, "names",
6930 		    names[i]);
6931 		nvlist_append_number_array(nvc, "ids",
6932 		    i);
6933 	}
6934 	nvlist_add_nvlist(nvl, name, nvc);
6935 	nvlist_destroy(nvc);
6936 
6937 	return (0);
6938 }
6939 
6940 static int
6941 pf_getstatus(struct pfioc_nv *nv)
6942 {
6943 	nvlist_t        *nvl = NULL, *nvc = NULL;
6944 	void            *nvlpacked = NULL;
6945 	int              error;
6946 	struct pf_status s;
6947 	char *pf_reasons[PFRES_MAX+1] = PFRES_NAMES;
6948 	char *pf_lcounter[KLCNT_MAX+1] = KLCNT_NAMES;
6949 	char *pf_fcounter[FCNT_MAX+1] = FCNT_NAMES;
6950 	time_t since;
6951 
6952 	PF_RULES_RLOCK_TRACKER;
6953 
6954 #define ERROUT(x)      ERROUT_FUNCTION(errout, x)
6955 
6956 	PF_RULES_RLOCK();
6957 
6958 	nvl = nvlist_create(0);
6959 	if (nvl == NULL)
6960 		ERROUT(ENOMEM);
6961 
6962 	since = time_second - (time_uptime - V_pf_status.since);
6963 
6964 	nvlist_add_bool(nvl, "running", V_pf_status.running);
6965 	nvlist_add_number(nvl, "since", since);
6966 	nvlist_add_number(nvl, "debug", V_pf_status.debug);
6967 	nvlist_add_number(nvl, "hostid", V_pf_status.hostid);
6968 	nvlist_add_number(nvl, "states", V_pf_status.states);
6969 	nvlist_add_number(nvl, "src_nodes", V_pf_status.src_nodes);
6970 	nvlist_add_number(nvl, "reass", V_pf_status.reass);
6971 	nvlist_add_bool(nvl, "syncookies_active",
6972 	    V_pf_status.syncookies_active);
6973 	nvlist_add_number(nvl, "halfopen_states", V_pf_status.states_halfopen);
6974 
6975 	/* counters */
6976 	error = pf_add_status_counters(nvl, "counters", V_pf_status.counters,
6977 	    PFRES_MAX, pf_reasons);
6978 	if (error != 0)
6979 		ERROUT(error);
6980 
6981 	/* lcounters */
6982 	error = pf_add_status_counters(nvl, "lcounters", V_pf_status.lcounters,
6983 	    KLCNT_MAX, pf_lcounter);
6984 	if (error != 0)
6985 		ERROUT(error);
6986 
6987 	/* fcounters */
6988 	nvc = nvlist_create(0);
6989 	if (nvc == NULL)
6990 		ERROUT(ENOMEM);
6991 
6992 	for (int i = 0; i < FCNT_MAX; i++) {
6993 		nvlist_append_number_array(nvc, "counters",
6994 		    pf_counter_u64_fetch(&V_pf_status.fcounters[i]));
6995 		nvlist_append_string_array(nvc, "names",
6996 		    pf_fcounter[i]);
6997 		nvlist_append_number_array(nvc, "ids",
6998 		    i);
6999 	}
7000 	nvlist_add_nvlist(nvl, "fcounters", nvc);
7001 	nvlist_destroy(nvc);
7002 	nvc = NULL;
7003 
7004 	/* scounters */
7005 	error = pf_add_status_counters(nvl, "scounters", V_pf_status.scounters,
7006 	    SCNT_MAX, pf_fcounter);
7007 	if (error != 0)
7008 		ERROUT(error);
7009 
7010 	nvlist_add_string(nvl, "ifname", V_pf_status.ifname);
7011 	nvlist_add_binary(nvl, "chksum", V_pf_status.pf_chksum,
7012 	    PF_MD5_DIGEST_LENGTH);
7013 
7014 	pfi_update_status(V_pf_status.ifname, &s);
7015 
7016 	/* pcounters / bcounters */
7017 	for (int i = 0; i < 2; i++) {
7018 		for (int j = 0; j < 2; j++) {
7019 			for (int k = 0; k < 2; k++) {
7020 				nvlist_append_number_array(nvl, "pcounters",
7021 				    s.pcounters[i][j][k]);
7022 			}
7023 			nvlist_append_number_array(nvl, "bcounters",
7024 			    s.bcounters[i][j]);
7025 		}
7026 	}
7027 
7028 	nvlpacked = nvlist_pack(nvl, &nv->len);
7029 	if (nvlpacked == NULL)
7030 		ERROUT(ENOMEM);
7031 
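	/*
	 * Userland typically fetches the status in two steps (a sketch of
	 * the pattern, not the literal libpfctl code):
	 *
	 *	nv.data = NULL; nv.size = 0;
	 *	ioctl(dev, DIOCGETSTATUSNV, &nv);	// sets nv.len
	 *	nv.data = malloc(nv.len); nv.size = nv.len;
	 *	ioctl(dev, DIOCGETSTATUSNV, &nv);	// copies nvlist out
	 */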
7032 	if (nv->size == 0)
7033 		ERROUT(0);
7034 	else if (nv->size < nv->len)
7035 		ERROUT(ENOSPC);
7036 
7037 	PF_RULES_RUNLOCK();
7038 	error = copyout(nvlpacked, nv->data, nv->len);
7039 	goto done;
7040 
7041 #undef ERROUT
7042 errout:
7043 	PF_RULES_RUNLOCK();
7044 done:
7045 	free(nvlpacked, M_NVLIST);
7046 	nvlist_destroy(nvc);
7047 	nvlist_destroy(nvl);
7048 
7049 	return (error);
7050 }
7051 
7052 /*
7053  * XXX - Check for version mismatch!!!
7054  */
7055 static void
7056 pf_clear_all_states(void)
7057 {
7058 	struct epoch_tracker	 et;
7059 	struct pf_kstate	*s;
7060 	u_int i;
7061 
7062 	NET_EPOCH_ENTER(et);
7063 	for (i = 0; i <= V_pf_hashmask; i++) {
7064 		struct pf_idhash *ih = &V_pf_idhash[i];
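		/*
		 * pf_remove_state() drops the hash-row lock, so the list
		 * walk must restart from the top after each removal.
		 */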
7065 relock:
7066 		PF_HASHROW_LOCK(ih);
7067 		LIST_FOREACH(s, &ih->states, entry) {
7068 			s->timeout = PFTM_PURGE;
7069 			/* Don't send out individual delete messages. */
7070 			s->state_flags |= PFSTATE_NOSYNC;
7071 			pf_remove_state(s);
7072 			goto relock;
7073 		}
7074 		PF_HASHROW_UNLOCK(ih);
7075 	}
7076 	NET_EPOCH_EXIT(et);
7077 }
7078 
7079 static int
7080 pf_clear_tables(void)
7081 {
7082 	struct pfioc_table io;
7083 	int error;
7084 
7085 	bzero(&io, sizeof(io));
7086 	io.pfrio_flags |= PFR_FLAG_ALLRSETS;
7087 
7088 	error = pfr_clr_tables(&io.pfrio_table, &io.pfrio_ndel,
7089 	    io.pfrio_flags);
7090 
7091 	return (error);
7092 }
7093 
7094 static void
7095 pf_kill_srcnodes(struct pfioc_src_node_kill *psnk)
7096 {
7097 	struct pf_ksrc_node_list	 kill;
7098 	u_int 				 killed;
7099 
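	/*
	 * Pass 1: unlink every matching source node from its hash row,
	 * collect it on a local kill list, and mark it with expire == 1
	 * so pass 2 can recognize it.
	 */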
7100 	LIST_INIT(&kill);
7101 	for (int i = 0; i <= V_pf_srchashmask; i++) {
7102 		struct pf_srchash *sh = &V_pf_srchash[i];
7103 		struct pf_ksrc_node *sn, *tmp;
7104 
7105 		PF_HASHROW_LOCK(sh);
7106 		LIST_FOREACH_SAFE(sn, &sh->nodes, entry, tmp)
7107 			if (psnk == NULL ||
7108 			    (pf_match_addr(psnk->psnk_src.neg,
7109 			      &psnk->psnk_src.addr.v.a.addr,
7110 			      &psnk->psnk_src.addr.v.a.mask,
7111 			      &sn->addr, sn->af) &&
7112 			    pf_match_addr(psnk->psnk_dst.neg,
7113 			      &psnk->psnk_dst.addr.v.a.addr,
7114 			      &psnk->psnk_dst.addr.v.a.mask,
7115 			      &sn->raddr, sn->af))) {
7116 				pf_unlink_src_node(sn);
7117 				LIST_INSERT_HEAD(&kill, sn, entry);
7118 				sn->expire = 1;
7119 			}
7120 		PF_HASHROW_UNLOCK(sh);
7121 	}
7122 
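	/*
	 * Pass 2: clear dangling pointers from states that still reference
	 * the nodes unlinked above, before those nodes are freed.
	 */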
7123 	for (int i = 0; i <= V_pf_hashmask; i++) {
7124 		struct pf_idhash *ih = &V_pf_idhash[i];
7125 		struct pf_kstate *s;
7126 
7127 		PF_HASHROW_LOCK(ih);
7128 		LIST_FOREACH(s, &ih->states, entry) {
7129 			for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX;
7130 			    sn_type++) {
7131 				if (s->sns[sn_type] &&
7132 				    s->sns[sn_type]->expire == 1) {
7133 					s->sns[sn_type] = NULL;
7134 				}
7135 			}
7136 		}
7137 		PF_HASHROW_UNLOCK(ih);
7138 	}
7139 
7140 	killed = pf_free_src_nodes(&kill);
7141 
7142 	if (psnk != NULL)
7143 		psnk->psnk_killed = killed;
7144 }
7145 
7146 static int
7147 pf_keepcounters(struct pfioc_nv *nv)
7148 {
7149 	nvlist_t	*nvl = NULL;
7150 	void		*nvlpacked = NULL;
7151 	int		 error = 0;
7152 
7153 #define	ERROUT(x)	ERROUT_FUNCTION(on_error, x)
7154 
7155 	if (nv->len > pf_ioctl_maxcount)
7156 		ERROUT(ENOMEM);
7157 
7158 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
7159 	error = copyin(nv->data, nvlpacked, nv->len);
7160 	if (error)
7161 		ERROUT(error);
7162 
7163 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
7164 	if (nvl == NULL)
7165 		ERROUT(EBADMSG);
7166 
7167 	if (! nvlist_exists_bool(nvl, "keep_counters"))
7168 		ERROUT(EBADMSG);
7169 
7170 	V_pf_status.keep_counters = nvlist_get_bool(nvl, "keep_counters");
7171 
7172 on_error:
7173 	nvlist_destroy(nvl);
7174 	free(nvlpacked, M_NVLIST);
7175 	return (error);
7176 }
7177 
7178 unsigned int
7179 pf_clear_states(const struct pf_kstate_kill *kill)
7180 {
7181 	struct pf_state_key_cmp	 match_key;
7182 	struct pf_kstate	*s;
7183 	struct pfi_kkif	*kif;
7184 	int		 idx;
7185 	unsigned int	 killed = 0, dir;
7186 
7187 	NET_EPOCH_ASSERT();
7188 
7189 	for (unsigned int i = 0; i <= V_pf_hashmask; i++) {
7190 		struct pf_idhash *ih = &V_pf_idhash[i];
7191 
7192 relock_DIOCCLRSTATES:
7193 		PF_HASHROW_LOCK(ih);
7194 		LIST_FOREACH(s, &ih->states, entry) {
7195 			/* For floating states look at the original kif. */
7196 			kif = s->kif == V_pfi_all ? s->orig_kif : s->kif;
7197 
7198 			if (kill->psk_ifname[0] &&
7199 			    strcmp(kill->psk_ifname,
7200 			    kif->pfik_name))
7201 				continue;
7202 
7203 			if (kill->psk_kill_match) {
7204 				bzero(&match_key, sizeof(match_key));
7205 
7206 				if (s->direction == PF_OUT) {
7207 					dir = PF_IN;
7208 					idx = PF_SK_STACK;
7209 				} else {
7210 					dir = PF_OUT;
7211 					idx = PF_SK_WIRE;
7212 				}
7213 
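				/*
				 * Build the reversed key (addresses and
				 * ports swapped) so states created in the
				 * opposite direction are killed as well.
				 */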
7214 				match_key.af = s->key[idx]->af;
7215 				match_key.proto = s->key[idx]->proto;
7216 				pf_addrcpy(&match_key.addr[0],
7217 				    &s->key[idx]->addr[1], match_key.af);
7218 				match_key.port[0] = s->key[idx]->port[1];
7219 				pf_addrcpy(&match_key.addr[1],
7220 				    &s->key[idx]->addr[0], match_key.af);
7221 				match_key.port[1] = s->key[idx]->port[0];
7222 			}
7223 
7224 			/*
7225 			 * Don't send out individual
7226 			 * delete messages.
7227 			 */
7228 			s->state_flags |= PFSTATE_NOSYNC;
7229 			pf_remove_state(s);
7230 			killed++;
7231 
7232 			if (kill->psk_kill_match)
7233 				killed += pf_kill_matching_state(&match_key,
7234 				    dir);
7235 
7236 			goto relock_DIOCCLRSTATES;
7237 		}
7238 		PF_HASHROW_UNLOCK(ih);
7239 	}
7240 
7241 	if (V_pfsync_clear_states_ptr != NULL)
7242 		V_pfsync_clear_states_ptr(V_pf_status.hostid, kill->psk_ifname);
7243 
7244 	return (killed);
7245 }
7246 
7247 void
7248 pf_killstates(struct pf_kstate_kill *kill, unsigned int *killed)
7249 {
7250 	struct pf_kstate	*s;
7251 
7252 	NET_EPOCH_ASSERT();
7253 	if (kill->psk_pfcmp.id) {
7254 		if (kill->psk_pfcmp.creatorid == 0)
7255 			kill->psk_pfcmp.creatorid = V_pf_status.hostid;
7256 		if ((s = pf_find_state_byid(kill->psk_pfcmp.id,
7257 		    kill->psk_pfcmp.creatorid))) {
7258 			pf_remove_state(s);
7259 			*killed = 1;
7260 		}
7261 		return;
7262 	}
7263 
7264 	for (unsigned int i = 0; i <= V_pf_hashmask; i++)
7265 		*killed += pf_killstates_row(kill, &V_pf_idhash[i]);
7266 }
7267 
7268 static int
7269 pf_killstates_nv(struct pfioc_nv *nv)
7270 {
7271 	struct pf_kstate_kill	 kill;
7272 	struct epoch_tracker	 et;
7273 	nvlist_t		*nvl = NULL;
7274 	void			*nvlpacked = NULL;
7275 	int			 error = 0;
7276 	unsigned int		 killed = 0;
7277 
7278 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
7279 
7280 	if (nv->len > pf_ioctl_maxcount)
7281 		ERROUT(ENOMEM);
7282 
7283 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
7284 	error = copyin(nv->data, nvlpacked, nv->len);
7285 	if (error)
7286 		ERROUT(error);
7287 
7288 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
7289 	if (nvl == NULL)
7290 		ERROUT(EBADMSG);
7291 
7292 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
7293 	if (error)
7294 		ERROUT(error);
7295 
7296 	NET_EPOCH_ENTER(et);
7297 	pf_killstates(&kill, &killed);
7298 	NET_EPOCH_EXIT(et);
7299 
7300 	free(nvlpacked, M_NVLIST);
7301 	nvlpacked = NULL;
7302 	nvlist_destroy(nvl);
7303 	nvl = nvlist_create(0);
7304 	if (nvl == NULL)
7305 		ERROUT(ENOMEM);
7306 
7307 	nvlist_add_number(nvl, "killed", killed);
7308 
7309 	nvlpacked = nvlist_pack(nvl, &nv->len);
7310 	if (nvlpacked == NULL)
7311 		ERROUT(ENOMEM);
7312 
7313 	if (nv->size == 0)
7314 		ERROUT(0);
7315 	else if (nv->size < nv->len)
7316 		ERROUT(ENOSPC);
7317 
7318 	error = copyout(nvlpacked, nv->data, nv->len);
7319 
7320 on_error:
7321 	nvlist_destroy(nvl);
7322 	free(nvlpacked, M_NVLIST);
7323 	return (error);
7324 }
7325 
7326 static int
7327 pf_clearstates_nv(struct pfioc_nv *nv)
7328 {
7329 	struct pf_kstate_kill	 kill;
7330 	struct epoch_tracker	 et;
7331 	nvlist_t		*nvl = NULL;
7332 	void			*nvlpacked = NULL;
7333 	int			 error = 0;
7334 	unsigned int		 killed;
7335 
7336 #define ERROUT(x)	ERROUT_FUNCTION(on_error, x)
7337 
7338 	if (nv->len > pf_ioctl_maxcount)
7339 		ERROUT(ENOMEM);
7340 
7341 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
7342 	error = copyin(nv->data, nvlpacked, nv->len);
7343 	if (error)
7344 		ERROUT(error);
7345 
7346 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
7347 	if (nvl == NULL)
7348 		ERROUT(EBADMSG);
7349 
7350 	error = pf_nvstate_kill_to_kstate_kill(nvl, &kill);
7351 	if (error)
7352 		ERROUT(error);
7353 
7354 	NET_EPOCH_ENTER(et);
7355 	killed = pf_clear_states(&kill);
7356 	NET_EPOCH_EXIT(et);
7357 
7358 	free(nvlpacked, M_NVLIST);
7359 	nvlpacked = NULL;
7360 	nvlist_destroy(nvl);
7361 	nvl = nvlist_create(0);
7362 	if (nvl == NULL)
7363 		ERROUT(ENOMEM);
7364 
7365 	nvlist_add_number(nvl, "killed", killed);
7366 
7367 	nvlpacked = nvlist_pack(nvl, &nv->len);
7368 	if (nvlpacked == NULL)
7369 		ERROUT(ENOMEM);
7370 
7371 	if (nv->size == 0)
7372 		ERROUT(0);
7373 	else if (nv->size < nv->len)
7374 		ERROUT(ENOSPC);
7375 
7376 	error = copyout(nvlpacked, nv->data, nv->len);
7377 
7378 #undef ERROUT
7379 on_error:
7380 	nvlist_destroy(nvl);
7381 	free(nvlpacked, M_NVLIST);
7382 	return (error);
7383 }
7384 
7385 static int
7386 pf_getstate(struct pfioc_nv *nv)
7387 {
7388 	nvlist_t		*nvl = NULL, *nvls;
7389 	void			*nvlpacked = NULL;
7390 	struct pf_kstate	*s = NULL;
7391 	int			 error = 0;
7392 	uint64_t		 id, creatorid;
7393 
7394 #define ERROUT(x)	ERROUT_FUNCTION(errout, x)
7395 
7396 	if (nv->len > pf_ioctl_maxcount)
7397 		ERROUT(ENOMEM);
7398 
7399 	nvlpacked = malloc(nv->len, M_NVLIST, M_WAITOK);
7400 	error = copyin(nv->data, nvlpacked, nv->len);
7401 	if (error)
7402 		ERROUT(error);
7403 
7404 	nvl = nvlist_unpack(nvlpacked, nv->len, 0);
7405 	if (nvl == NULL)
7406 		ERROUT(EBADMSG);
7407 
7408 	PFNV_CHK(pf_nvuint64(nvl, "id", &id));
7409 	PFNV_CHK(pf_nvuint64(nvl, "creatorid", &creatorid));
7410 
7411 	s = pf_find_state_byid(id, creatorid);
7412 	if (s == NULL)
7413 		ERROUT(ENOENT);
7414 
7415 	free(nvlpacked, M_NVLIST);
7416 	nvlpacked = NULL;
7417 	nvlist_destroy(nvl);
7418 	nvl = nvlist_create(0);
7419 	if (nvl == NULL)
7420 		ERROUT(ENOMEM);
7421 
7422 	nvls = pf_state_to_nvstate(s);
7423 	if (nvls == NULL)
7424 		ERROUT(ENOMEM);
7425 
7426 	nvlist_add_nvlist(nvl, "state", nvls);
7427 	nvlist_destroy(nvls);
7428 
7429 	nvlpacked = nvlist_pack(nvl, &nv->len);
7430 	if (nvlpacked == NULL)
7431 		ERROUT(ENOMEM);
7432 
7433 	if (nv->size == 0)
7434 		ERROUT(0);
7435 	else if (nv->size < nv->len)
7436 		ERROUT(ENOSPC);
7437 
7438 	error = copyout(nvlpacked, nv->data, nv->len);
7439 
7440 #undef ERROUT
7441 errout:
7442 	if (s != NULL)
7443 		PF_STATE_UNLOCK(s);
7444 	free(nvlpacked, M_NVLIST);
7445 	nvlist_destroy(nvl);
7446 	return (error);
7447 }
7448 
7449 /*
7450  * XXX - Check for version mismatch!!!
7451  */
7452 
7453 /*
7454  * Duplicate pfctl -Fa operation to get rid of as much as we can.
7455  */
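/*
 * Each pf_begin_rules() call opens a transaction: it returns a ticket
 * for a fresh, empty inactive ruleset, and the matching
 * pf_commit_rules() swaps that empty set in for the active one,
 * freeing the old rules.  This begin/commit pair is how every ruleset
 * below is flushed without a dedicated "delete all rules" primitive.
 */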
7456 static int
7457 shutdown_pf(void)
7458 {
7459 	int error = 0;
7460 	u_int32_t t[5];
7461 	char nn = '\0';
7462 	struct pf_kanchor *anchor, *tmp_anchor;
7463 	struct pf_keth_anchor *eth_anchor, *tmp_eth_anchor;
7464 	int rs_num;
7465 
7466 	do {
7467 		/* Unlink rules of all user-defined anchors */
7468 		RB_FOREACH_SAFE(anchor, pf_kanchor_global, &V_pf_anchors,
7469 		    tmp_anchor) {
7470 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
7471 				if ((error = pf_begin_rules(&t[rs_num], rs_num,
7472 				    anchor->path)) != 0) {
7473 					DPFPRINTF(PF_DEBUG_MISC, "%s: "
7474 					    "anchor.path=%s rs_num=%d",
7475 					    __func__, anchor->path, rs_num);
7476 					goto error;	/* XXX: rollback? */
7477 				}
7478 			}
7479 			for (rs_num = 0; rs_num < PF_RULESET_MAX; ++rs_num) {
7480 				error = pf_commit_rules(t[rs_num], rs_num,
7481 				    anchor->path);
7482 				MPASS(error == 0);
7483 			}
7484 		}
7485 
7486 		/* Unlink rules of all user-defined ether anchors */
7487 		RB_FOREACH_SAFE(eth_anchor, pf_keth_anchor_global,
7488 		    &V_pf_keth_anchors, tmp_eth_anchor) {
7489 			if ((error = pf_begin_eth(&t[0], eth_anchor->path))
7490 			    != 0) {
7491 				DPFPRINTF(PF_DEBUG_MISC, "%s: eth "
7492 				    "anchor.path=%s", __func__,
7493 				    eth_anchor->path);
7494 				goto error;
7495 			}
7496 			error = pf_commit_eth(t[0], eth_anchor->path);
7497 			MPASS(error == 0);
7498 		}
7499 
7500 		if ((error = pf_begin_rules(&t[0], PF_RULESET_SCRUB, &nn))
7501 		    != 0) {
7502 			DPFPRINTF(PF_DEBUG_MISC, "%s: SCRUB", __func__);
7503 			break;
7504 		}
7505 		if ((error = pf_begin_rules(&t[1], PF_RULESET_FILTER, &nn))
7506 		    != 0) {
7507 			DPFPRINTF(PF_DEBUG_MISC, "%s: FILTER", __func__);
7508 			break;		/* XXX: rollback? */
7509 		}
7510 		if ((error = pf_begin_rules(&t[2], PF_RULESET_NAT, &nn))
7511 		    != 0) {
7512 			DPFPRINTF(PF_DEBUG_MISC, "%s: NAT", __func__);
7513 			break;		/* XXX: rollback? */
7514 		}
7515 		if ((error = pf_begin_rules(&t[3], PF_RULESET_BINAT, &nn))
7516 		    != 0) {
7517 			DPFPRINTF(PF_DEBUG_MISC, "%s: BINAT", __func__);
7518 			break;		/* XXX: rollback? */
7519 		}
7520 		if ((error = pf_begin_rules(&t[4], PF_RULESET_RDR, &nn))
7521 		    != 0) {
7522 			DPFPRINTF(PF_DEBUG_MISC, "%s: RDR", __func__);
7523 			break;		/* XXX: rollback? */
7524 		}
7525 
7526 		error = pf_commit_rules(t[0], PF_RULESET_SCRUB, &nn);
7527 		MPASS(error == 0);
7528 		error = pf_commit_rules(t[1], PF_RULESET_FILTER, &nn);
7529 		MPASS(error == 0);
7530 		error = pf_commit_rules(t[2], PF_RULESET_NAT, &nn);
7531 		MPASS(error == 0);
7532 		error = pf_commit_rules(t[3], PF_RULESET_BINAT, &nn);
7533 		MPASS(error == 0);
7534 		error = pf_commit_rules(t[4], PF_RULESET_RDR, &nn);
7535 		MPASS(error == 0);
7536 
7537 		if ((error = pf_clear_tables()) != 0)
7538 			break;
7539 
7540 		if ((error = pf_begin_eth(&t[0], &nn)) != 0) {
7541 			DPFPRINTF(PF_DEBUG_MISC, "%s: eth", __func__);
7542 			break;
7543 		}
7544 		error = pf_commit_eth(t[0], &nn);
7545 		MPASS(error == 0);
7546 
7547 #ifdef ALTQ
7548 		if ((error = pf_begin_altq(&t[0])) != 0) {
7549 			DPFPRINTF(PF_DEBUG_MISC, "%s: ALTQ", __func__);
7550 			break;
7551 		}
7552 		pf_commit_altq(t[0]);
7553 #endif
7554 
7555 		pf_clear_all_states();
7556 
7557 		pf_kill_srcnodes(NULL);
7558 
7559 		for (int i = 0; i < PF_RULESET_MAX; i++) {
7560 			pf_rule_tree_free(pf_main_ruleset.rules[i].active.tree);
7561 			pf_rule_tree_free(pf_main_ruleset.rules[i].inactive.tree);
7562 		}
7563 
7564 		/* status does not use malloc'ed memory, so no cleanup is needed */
7565 		/* fingerprints and interfaces have their own cleanup code */
7566 	} while (0);
7567 
7568 error:
7569 	return (error);
7570 }
7571 
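/*
 * Map a pf verdict onto a pfil(9) return value.  PF_PASS with the mbuf
 * pointer cleared means pf took ownership of the packet (e.g. it was
 * queued or re-routed), so it is reported as consumed; any other
 * verdict frees the packet and reports it dropped.
 */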
7572 static pfil_return_t
7573 pf_check_return(int chk, struct mbuf **m)
7574 {
7575 
7576 	switch (chk) {
7577 	case PF_PASS:
7578 		if (*m == NULL)
7579 			return (PFIL_CONSUMED);
7580 		else
7581 			return (PFIL_PASS);
7582 		break;
7583 	default:
7584 		if (*m != NULL) {
7585 			m_freem(*m);
7586 			*m = NULL;
7587 		}
7588 		return (PFIL_DROPPED);
7589 	}
7590 }
7591 
7592 static pfil_return_t
7593 pf_eth_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
7594     void *ruleset __unused, struct inpcb *inp)
7595 {
7596 	int chk;
7597 
7598 	CURVNET_ASSERT_SET();
7599 
7600 	chk = pf_test_eth(PF_IN, flags, ifp, m, inp);
7601 
7602 	return (pf_check_return(chk, m));
7603 }
7604 
7605 static pfil_return_t
7606 pf_eth_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
7607     void *ruleset __unused, struct inpcb *inp)
7608 {
7609 	int chk;
7610 
7611 	CURVNET_ASSERT_SET();
7612 
7613 	chk = pf_test_eth(PF_OUT, flags, ifp, m, inp);
7614 
7615 	return (pf_check_return(chk, m));
7616 }
7617 
7618 #ifdef INET
7619 static pfil_return_t
7620 pf_check_in(struct mbuf **m, struct ifnet *ifp, int flags,
7621     void *ruleset __unused, struct inpcb *inp)
7622 {
7623 	int chk;
7624 
7625 	CURVNET_ASSERT_SET();
7626 
7627 	chk = pf_test(AF_INET, PF_IN, flags, ifp, m, inp, NULL);
7628 
7629 	return (pf_check_return(chk, m));
7630 }
7631 
7632 static pfil_return_t
7633 pf_check_out(struct mbuf **m, struct ifnet *ifp, int flags,
7634     void *ruleset __unused, struct inpcb *inp)
7635 {
7636 	int chk;
7637 
7638 	CURVNET_ASSERT_SET();
7639 
7640 	chk = pf_test(AF_INET, PF_OUT, flags, ifp, m, inp, NULL);
7641 
7642 	return (pf_check_return(chk, m));
7643 }
7644 #endif
7645 
7646 #ifdef INET6
7647 static pfil_return_t
7648 pf_check6_in(struct mbuf **m, struct ifnet *ifp, int flags,
7649     void *ruleset __unused, struct inpcb *inp)
7650 {
7651 	int chk;
7652 
7653 	CURVNET_ASSERT_SET();
7654 
7655 	/*
7656 	 * For loopback traffic, IPv6 uses the real interface in order to
7657 	 * support scoped addresses.  To support stateful filtering we have
7658 	 * to change this to lo0, as is the case for IPv4.
7659 	 */
7660 	chk = pf_test(AF_INET6, PF_IN, flags, (*m)->m_flags & M_LOOP ? V_loif : ifp,
7661 	    m, inp, NULL);
7662 
7663 	return (pf_check_return(chk, m));
7664 }
7665 
7666 static pfil_return_t
7667 pf_check6_out(struct mbuf **m, struct ifnet *ifp, int flags,
7668     void *ruleset __unused, struct inpcb *inp)
7669 {
7670 	int chk;
7671 
7672 	CURVNET_ASSERT_SET();
7673 
7674 	chk = pf_test(AF_INET6, PF_OUT, flags, ifp, m, inp, NULL);
7675 
7676 	return (pf_check_return(chk, m));
7677 }
7678 #endif /* INET6 */
7679 
7680 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_in_hook);
7681 VNET_DEFINE_STATIC(pfil_hook_t, pf_eth_out_hook);
7682 #define	V_pf_eth_in_hook	VNET(pf_eth_in_hook)
7683 #define	V_pf_eth_out_hook	VNET(pf_eth_out_hook)
7684 
7685 #ifdef INET
7686 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_in_hook);
7687 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip4_out_hook);
7688 #define	V_pf_ip4_in_hook	VNET(pf_ip4_in_hook)
7689 #define	V_pf_ip4_out_hook	VNET(pf_ip4_out_hook)
7690 #endif
7691 #ifdef INET6
7692 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_in_hook);
7693 VNET_DEFINE_STATIC(pfil_hook_t, pf_ip6_out_hook);
7694 #define	V_pf_ip6_in_hook	VNET(pf_ip6_in_hook)
7695 #define	V_pf_ip6_out_hook	VNET(pf_ip6_out_hook)
7696 #endif
7697 
7698 static void
7699 hook_pf_eth(void)
7700 {
7701 	struct pfil_hook_args pha = {
7702 		.pa_version = PFIL_VERSION,
7703 		.pa_modname = "pf",
7704 		.pa_type = PFIL_TYPE_ETHERNET,
7705 	};
7706 	struct pfil_link_args pla = {
7707 		.pa_version = PFIL_VERSION,
7708 	};
7709 	int ret __diagused;
7710 
7711 	if (atomic_load_bool(&V_pf_pfil_eth_hooked))
7712 		return;
7713 
7714 	pha.pa_mbuf_chk = pf_eth_check_in;
7715 	pha.pa_flags = PFIL_IN;
7716 	pha.pa_rulname = "eth-in";
7717 	V_pf_eth_in_hook = pfil_add_hook(&pha);
7718 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
7719 	pla.pa_head = V_link_pfil_head;
7720 	pla.pa_hook = V_pf_eth_in_hook;
7721 	ret = pfil_link(&pla);
7722 	MPASS(ret == 0);
7723 	pha.pa_mbuf_chk = pf_eth_check_out;
7724 	pha.pa_flags = PFIL_OUT;
7725 	pha.pa_rulname = "eth-out";
7726 	V_pf_eth_out_hook = pfil_add_hook(&pha);
7727 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
7728 	pla.pa_head = V_link_pfil_head;
7729 	pla.pa_hook = V_pf_eth_out_hook;
7730 	ret = pfil_link(&pla);
7731 	MPASS(ret == 0);
7732 
7733 	atomic_store_bool(&V_pf_pfil_eth_hooked, true);
7734 }
7735 
7736 static void
7737 hook_pf(void)
7738 {
7739 	struct pfil_hook_args pha = {
7740 		.pa_version = PFIL_VERSION,
7741 		.pa_modname = "pf",
7742 	};
7743 	struct pfil_link_args pla = {
7744 		.pa_version = PFIL_VERSION,
7745 	};
7746 	int ret __diagused;
7747 
7748 	if (atomic_load_bool(&V_pf_pfil_hooked))
7749 		return;
7750 
7751 #ifdef INET
7752 	pha.pa_type = PFIL_TYPE_IP4;
7753 	pha.pa_mbuf_chk = pf_check_in;
7754 	pha.pa_flags = PFIL_IN;
7755 	pha.pa_rulname = "default-in";
7756 	V_pf_ip4_in_hook = pfil_add_hook(&pha);
7757 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
7758 	pla.pa_head = V_inet_pfil_head;
7759 	pla.pa_hook = V_pf_ip4_in_hook;
7760 	ret = pfil_link(&pla);
7761 	MPASS(ret == 0);
7762 	pha.pa_mbuf_chk = pf_check_out;
7763 	pha.pa_flags = PFIL_OUT;
7764 	pha.pa_rulname = "default-out";
7765 	V_pf_ip4_out_hook = pfil_add_hook(&pha);
7766 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
7767 	pla.pa_head = V_inet_pfil_head;
7768 	pla.pa_hook = V_pf_ip4_out_hook;
7769 	ret = pfil_link(&pla);
7770 	MPASS(ret == 0);
7771 	if (V_pf_filter_local) {
7772 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
7773 		pla.pa_head = V_inet_local_pfil_head;
7774 		pla.pa_hook = V_pf_ip4_out_hook;
7775 		ret = pfil_link(&pla);
7776 		MPASS(ret == 0);
7777 	}
7778 #endif
7779 #ifdef INET6
7780 	pha.pa_type = PFIL_TYPE_IP6;
7781 	pha.pa_mbuf_chk = pf_check6_in;
7782 	pha.pa_flags = PFIL_IN;
7783 	pha.pa_rulname = "default-in6";
7784 	V_pf_ip6_in_hook = pfil_add_hook(&pha);
7785 	pla.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR;
7786 	pla.pa_head = V_inet6_pfil_head;
7787 	pla.pa_hook = V_pf_ip6_in_hook;
7788 	ret = pfil_link(&pla);
7789 	MPASS(ret == 0);
7790 	pha.pa_mbuf_chk = pf_check6_out;
7791 	pha.pa_rulname = "default-out6";
7792 	pha.pa_flags = PFIL_OUT;
7793 	V_pf_ip6_out_hook = pfil_add_hook(&pha);
7794 	pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
7795 	pla.pa_head = V_inet6_pfil_head;
7796 	pla.pa_hook = V_pf_ip6_out_hook;
7797 	ret = pfil_link(&pla);
7798 	MPASS(ret == 0);
7799 	if (V_pf_filter_local) {
7800 		pla.pa_flags = PFIL_OUT | PFIL_HEADPTR | PFIL_HOOKPTR;
7801 		pla.pa_head = V_inet6_local_pfil_head;
7802 		pla.pa_hook = V_pf_ip6_out_hook;
7803 		ret = pfil_link(&pla);
7804 		MPASS(ret == 0);
7805 	}
7806 #endif
7807 
7808 	atomic_store_bool(&V_pf_pfil_hooked, true);
7809 }
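
/*
 * Hypothetical sketch (not compiled): the same pfil_add_hook() and
 * pfil_link() pattern used above, as a third-party module might use it
 * to attach an IPv4 input filter.  "example_chk", "example_hook" and
 * the "example-in" ruleset name are invented.
 */
#if 0
static pfil_return_t
example_chk(struct mbuf **m, struct ifnet *ifp, int flags,
    void *ruleset __unused, struct inpcb *inp)
{
	/* Inspect *m here; return PFIL_DROPPED to drop the packet. */
	return (PFIL_PASS);
}

static void
example_hook(void)
{
	struct pfil_hook_args pha = {
		.pa_version = PFIL_VERSION,
		.pa_modname = "example",
		.pa_type = PFIL_TYPE_IP4,
		.pa_flags = PFIL_IN,
		.pa_rulname = "example-in",
		.pa_mbuf_chk = example_chk,
	};
	struct pfil_link_args pla = {
		.pa_version = PFIL_VERSION,
		.pa_flags = PFIL_IN | PFIL_HEADPTR | PFIL_HOOKPTR,
		.pa_head = V_inet_pfil_head,
	};

	pla.pa_hook = pfil_add_hook(&pha);
	(void)pfil_link(&pla);
}
#endif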
7810 
7811 static void
7812 dehook_pf_eth(void)
7813 {
7814 
7815 	if (!atomic_load_bool(&V_pf_pfil_eth_hooked))
7816 		return;
7817 
7818 	pfil_remove_hook(V_pf_eth_in_hook);
7819 	pfil_remove_hook(V_pf_eth_out_hook);
7820 
7821 	atomic_store_bool(&V_pf_pfil_eth_hooked, false);
7822 }
7823 
7824 static void
7825 dehook_pf(void)
7826 {
7827 
7828 	if (!atomic_load_bool(&V_pf_pfil_hooked))
7829 		return;
7830 
7831 #ifdef INET
7832 	pfil_remove_hook(V_pf_ip4_in_hook);
7833 	pfil_remove_hook(V_pf_ip4_out_hook);
7834 #endif
7835 #ifdef INET6
7836 	pfil_remove_hook(V_pf_ip6_in_hook);
7837 	pfil_remove_hook(V_pf_ip6_out_hook);
7838 #endif
7839 
7840 	atomic_store_bool(&V_pf_pfil_hooked, false);
7841 }
7842 
7843 static void
7844 pf_load_vnet(void)
7845 {
7846 	V_pf_tag_z = uma_zcreate("pf tags", sizeof(struct pf_tagname),
7847 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
7848 
7849 	rm_init_flags(&V_pf_rules_lock, "pf rulesets", RM_RECURSE);
7850 	rm_init_flags(&V_pf_tags_lock, "pf tags and queues", RM_RECURSE);
7851 	sx_init(&V_pf_ioctl_lock, "pf ioctl");
7852 
7853 	pf_init_tagset(&V_pf_tags, &pf_rule_tag_hashsize,
7854 	    PF_RULE_TAG_HASH_SIZE_DEFAULT);
7855 #ifdef ALTQ
7856 	pf_init_tagset(&V_pf_qids, &pf_queue_tag_hashsize,
7857 	    PF_QUEUE_TAG_HASH_SIZE_DEFAULT);
7858 #endif
7859 
7860 	V_pf_keth = &V_pf_main_keth_anchor.ruleset;
7861 
7862 	pfattach_vnet();
7863 	V_pf_vnet_active = 1;
7864 }
7865 
7866 static int
7867 pf_load(void)
7868 {
7869 	int error;
7870 
7871 	sx_init(&pf_end_lock, "pf end thread");
7872 
7873 	pf_mtag_initialize();
7874 
7875 	pf_dev = make_dev(&pf_cdevsw, 0, UID_ROOT, GID_WHEEL, 0600, PF_NAME);
7876 	if (pf_dev == NULL)
7877 		return (ENOMEM);
7878 
7879 	pf_end_threads = 0;
7880 	error = kproc_create(pf_purge_thread, NULL, &pf_purge_proc, 0, 0, "pf purge");
7881 	if (error != 0)
7882 		return (error);
7883 
7884 	pfi_initialize();
7885 
7886 	return (0);
7887 }
7888 
7889 static void
7890 pf_unload_vnet(void)
7891 {
7892 	int ret __diagused;
7893 
7894 	V_pf_vnet_active = 0;
7895 	V_pf_status.running = 0;
7896 	dehook_pf();
7897 	dehook_pf_eth();
7898 
7899 	PF_RULES_WLOCK();
7900 	pf_syncookies_cleanup();
7901 	shutdown_pf();
7902 	PF_RULES_WUNLOCK();
7903 
7904 	ret = swi_remove(V_pf_swi_cookie);
7905 	MPASS(ret == 0);
7906 	ret = intr_event_destroy(V_pf_swi_ie);
7907 	MPASS(ret == 0);
7908 
7909 	pf_unload_vnet_purge();
7910 
7911 	pf_normalize_cleanup();
7912 	PF_RULES_WLOCK();
7913 	pfi_cleanup_vnet();
7914 	PF_RULES_WUNLOCK();
7915 	pfr_cleanup();
7916 	pf_osfp_flush();
7917 	pf_cleanup();
7918 	if (IS_DEFAULT_VNET(curvnet))
7919 		pf_mtag_cleanup();
7920 
7921 	pf_cleanup_tagset(&V_pf_tags);
7922 #ifdef ALTQ
7923 	pf_cleanup_tagset(&V_pf_qids);
7924 #endif
7925 	uma_zdestroy(V_pf_tag_z);
7926 
7927 #ifdef PF_WANT_32_TO_64_COUNTER
7928 	PF_RULES_WLOCK();
7929 	LIST_REMOVE(V_pf_kifmarker, pfik_allkiflist);
7930 
7931 	MPASS(LIST_EMPTY(&V_pf_allkiflist));
7932 	MPASS(V_pf_allkifcount == 0);
7933 
7934 	LIST_REMOVE(&V_pf_default_rule, allrulelist);
7935 	V_pf_allrulecount--;
7936 	LIST_REMOVE(V_pf_rulemarker, allrulelist);
7937 
7938 	MPASS(LIST_EMPTY(&V_pf_allrulelist));
7939 	MPASS(V_pf_allrulecount == 0);
7940 
7941 	PF_RULES_WUNLOCK();
7942 
7943 	free(V_pf_kifmarker, PFI_MTYPE);
7944 	free(V_pf_rulemarker, M_PFRULE);
7945 #endif
7946 
7947 	/* Free counters last as we updated them during shutdown. */
7948 	pf_counter_u64_deinit(&V_pf_default_rule.evaluations);
7949 	for (int i = 0; i < 2; i++) {
7950 		pf_counter_u64_deinit(&V_pf_default_rule.packets[i]);
7951 		pf_counter_u64_deinit(&V_pf_default_rule.bytes[i]);
7952 	}
7953 	counter_u64_free(V_pf_default_rule.states_cur);
7954 	counter_u64_free(V_pf_default_rule.states_tot);
7955 	for (pf_sn_types_t sn_type = 0; sn_type < PF_SN_MAX; sn_type++)
7956 		counter_u64_free(V_pf_default_rule.src_nodes[sn_type]);
7957 	uma_zfree_pcpu(pf_timestamp_pcpu_zone, V_pf_default_rule.timestamp);
7958 
7959 	for (int i = 0; i < PFRES_MAX; i++)
7960 		counter_u64_free(V_pf_status.counters[i]);
7961 	for (int i = 0; i < KLCNT_MAX; i++)
7962 		counter_u64_free(V_pf_status.lcounters[i]);
7963 	for (int i = 0; i < FCNT_MAX; i++)
7964 		pf_counter_u64_deinit(&V_pf_status.fcounters[i]);
7965 	for (int i = 0; i < SCNT_MAX; i++)
7966 		counter_u64_free(V_pf_status.scounters[i]);
7967 	for (int i = 0; i < NCNT_MAX; i++)
7968 		counter_u64_free(V_pf_status.ncounters[i]);
7969 
7970 	rm_destroy(&V_pf_rules_lock);
7971 	sx_destroy(&V_pf_ioctl_lock);
7972 }
7973 
7974 static void
7975 pf_unload(void *dummy __unused)
7976 {
7977 
7978 	sx_xlock(&pf_end_lock);
7979 	pf_end_threads = 1;
7980 	while (pf_end_threads < 2) {
7981 		wakeup_one(pf_purge_thread);
7982 		sx_sleep(pf_purge_proc, &pf_end_lock, 0, "pftmo", 0);
7983 	}
7984 	sx_xunlock(&pf_end_lock);
7985 
7986 	pf_nl_unregister();
7987 
7988 	if (pf_dev != NULL)
7989 		destroy_dev(pf_dev);
7990 
7991 	pfi_cleanup();
7992 
7993 	sx_destroy(&pf_end_lock);
7994 }
7995 
7996 static void
7997 vnet_pf_init(void *unused __unused)
7998 {
7999 
8000 	pf_load_vnet();
8001 }
8002 VNET_SYSINIT(vnet_pf_init, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
8003     vnet_pf_init, NULL);
8004 
8005 static void
8006 vnet_pf_uninit(const void *unused __unused)
8007 {
8008 
8009 	pf_unload_vnet();
8010 }
8011 SYSUNINIT(pf_unload, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND, pf_unload, NULL);
8012 VNET_SYSUNINIT(vnet_pf_uninit, SI_SUB_PROTO_FIREWALL, SI_ORDER_THIRD,
8013     vnet_pf_uninit, NULL);
8014 
8015 static int
8016 pf_modevent(module_t mod, int type, void *data)
8017 {
8018 	int error = 0;
8019 
8020 	switch (type) {
8021 	case MOD_LOAD:
8022 		error = pf_load();
8023 		pf_nl_register();
8024 		break;
8025 	case MOD_UNLOAD:
8026 		/* Handled in SYSUNINIT(pf_unload) to ensure it's done after
8027 		 * the vnet_pf_uninit()s */
8028 		break;
8029 	default:
8030 		error = EINVAL;
8031 		break;
8032 	}
8033 
8034 	return (error);
8035 }
8036 
8037 static moduledata_t pf_mod = {
8038 	"pf",
8039 	pf_modevent,
8040 	0
8041 };
8042 
8043 DECLARE_MODULE(pf, pf_mod, SI_SUB_PROTO_FIREWALL, SI_ORDER_SECOND);
8044 MODULE_DEPEND(pf, netlink, 1, 1, 1);
8045 MODULE_DEPEND(pf, crypto, 1, 1, 1);
8046 MODULE_VERSION(pf, PF_MODVER);
8047