xref: /src/sys/netipsec/ipsec_offload.c (revision fb65357d8707219c98db2216622057133f42fa3d)
1 /*-
2  * Copyright (c) 2021,2022 NVIDIA CORPORATION & AFFILIATES. ALL RIGHTS RESERVED.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions
6  * are met:
7  * 1. Redistributions of source code must retain the above copyright
8  *    notice, this list of conditions and the following disclaimer.
9  * 2. Redistributions in binary form must reproduce the above copyright
10  *    notice, this list of conditions and the following disclaimer in the
11  *    documentation and/or other materials provided with the distribution.
12  *
13  * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
14  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
16  * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
17  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
18  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
19  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
20  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
21  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
22  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
23  * SUCH DAMAGE.
24  */
25 
26 #include "opt_inet.h"
27 #include "opt_inet6.h"
28 #include "opt_ipsec.h"
29 
30 #include <sys/param.h>
31 #include <sys/systm.h>
32 #include <sys/ck.h>
33 #include <sys/eventhandler.h>
34 #include <sys/kernel.h>
35 #include <sys/mbuf.h>
36 #include <sys/pctrie.h>
37 #include <sys/proc.h>
38 #include <sys/socket.h>
39 #include <sys/sysctl.h>
40 #include <sys/protosw.h>
41 #include <sys/stdarg.h>
42 #include <sys/taskqueue.h>
43 
44 #include <net/if.h>
45 #include <net/if_var.h>
46 #include <net/if_private.h>
47 #include <net/vnet.h>
48 #include <netinet/in.h>
49 #include <netinet/ip.h>
50 #include <netinet/ip_var.h>
51 #include <netinet/ip6.h>
52 #include <netinet6/ip6_var.h>
53 #include <netinet/in_pcb.h>
54 #include <netinet/tcp_var.h>
55 
56 #include <netipsec/key.h>
57 #include <netipsec/keydb.h>
58 #include <netipsec/key_debug.h>
59 #include <netipsec/xform.h>
60 #include <netipsec/ipsec.h>
61 #include <netipsec/ipsec_offload.h>
62 #include <netipsec/ah_var.h>
63 #include <netipsec/esp.h>
64 #include <netipsec/esp_var.h>
65 #include <netipsec/ipcomp_var.h>
66 
67 #ifdef IPSEC_OFFLOAD
68 
69 static struct mtx ipsec_accel_sav_tmp;
70 static struct unrhdr *drv_spi_unr;
71 static struct mtx ipsec_accel_cnt_lock;
72 static struct taskqueue *ipsec_accel_tq;
73 
74 struct ipsec_accel_install_newkey_tq {
75 	struct secasvar *sav;
76 	struct vnet *install_vnet;
77 	struct task install_task;
78 };
79 
80 struct ipsec_accel_forget_tq {
81 	struct vnet *forget_vnet;
82 	struct task forget_task;
83 	struct secasvar *sav;
84 };
85 
86 struct ifp_handle_sav {
87 	CK_LIST_ENTRY(ifp_handle_sav) sav_link;
88 	CK_LIST_ENTRY(ifp_handle_sav) sav_allh_link;
89 	struct secasvar *sav;
90 	struct ifnet *ifp;
91 	void *ifdata;
92 	uint64_t drv_spi;
93 	uint32_t flags;
94 	size_t hdr_ext_size;
95 	uint64_t cnt_octets;
96 	uint64_t cnt_allocs;
97 	struct xform_history xfh;
98 };
99 
100 #define	IFP_HS_HANDLED	0x00000001
101 #define	IFP_HS_REJECTED	0x00000002
102 #define	IFP_HS_MARKER	0x00000010
103 
104 static CK_LIST_HEAD(, ifp_handle_sav) ipsec_accel_all_sav_handles;
105 
106 struct ifp_handle_sp {
107 	CK_LIST_ENTRY(ifp_handle_sp) sp_link;
108 	CK_LIST_ENTRY(ifp_handle_sp) sp_allh_link;
109 	struct secpolicy *sp;
110 	struct ifnet *ifp;
111 	void *ifdata;
112 	uint32_t flags;
113 };
114 
115 #define	IFP_HP_HANDLED	0x00000001
116 #define	IFP_HP_REJECTED	0x00000002
117 #define	IFP_HP_MARKER	0x00000004
118 
119 static CK_LIST_HEAD(, ifp_handle_sp) ipsec_accel_all_sp_handles;
120 
121 static void *
122 drvspi_sa_trie_alloc(struct pctrie *ptree)
123 {
124 	void *res;
125 
126 	res = malloc(pctrie_node_size(), M_IPSEC_MISC, M_ZERO | M_NOWAIT);
127 	if (res != NULL)
128 		pctrie_zone_init(res, 0, 0);
129 	return (res);
130 }
131 
132 static void
133 drvspi_sa_trie_free(struct pctrie *ptree, void *node)
134 {
135 	free(node, M_IPSEC_MISC);
136 }
137 
138 PCTRIE_DEFINE(DRVSPI_SA, ifp_handle_sav, drv_spi,
139     drvspi_sa_trie_alloc, drvspi_sa_trie_free);
140 static struct pctrie drv_spi_pctrie;
141 
142 static eventhandler_tag ipsec_accel_ifdetach_event_tag;
143 
144 static void ipsec_accel_sa_newkey_impl(struct secasvar *sav);
145 static int ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
146     u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires);
147 static void ipsec_accel_forget_sav_clear(struct secasvar *sav);
148 static struct ifp_handle_sav *ipsec_accel_is_accel_sav_ptr(struct secasvar *sav,
149     struct ifnet *ifp);
150 static int ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
151     struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
152     struct rm_priotracker *sahtree_trackerp);
153 static void ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m);
154 static void ipsec_accel_sync_imp(void);
155 static bool ipsec_accel_is_accel_sav_impl(struct secasvar *sav);
156 static struct mbuf *ipsec_accel_key_setaccelif_impl(struct secasvar *sav);
157 static void ipsec_accel_on_ifdown_impl(struct ifnet *ifp);
158 static void ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav,
159     if_t ifp, u_int drv_spi, uint64_t octets, uint64_t allocs);
160 static int ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
161     if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs);
162 static void ipsec_accel_ifdetach_event(void *arg, struct ifnet *ifp);
163 
164 static void
165 ipsec_accel_init(void *arg)
166 {
167 	mtx_init(&ipsec_accel_sav_tmp, "ipasat", MTX_DEF, 0);
168 	mtx_init(&ipsec_accel_cnt_lock, "ipascn", MTX_DEF, 0);
169 	drv_spi_unr = new_unrhdr(IPSEC_ACCEL_DRV_SPI_MIN,
170 	    IPSEC_ACCEL_DRV_SPI_MAX, &ipsec_accel_sav_tmp);
171 	ipsec_accel_tq = taskqueue_create("ipsec_offload", M_WAITOK,
172 	    taskqueue_thread_enqueue, &ipsec_accel_tq);
173 	(void)taskqueue_start_threads(&ipsec_accel_tq,
174 	    1 /* Must be single-threaded */, PWAIT,
175 	    "ipsec_offload");
176 	ipsec_accel_sa_newkey_p = ipsec_accel_sa_newkey_impl;
177 	ipsec_accel_forget_sav_p = ipsec_accel_forget_sav_impl;
178 	ipsec_accel_spdadd_p = ipsec_accel_spdadd_impl;
179 	ipsec_accel_spddel_p = ipsec_accel_spddel_impl;
180 	ipsec_accel_sa_lifetime_op_p = ipsec_accel_sa_lifetime_op_impl;
181 	ipsec_accel_sync_p = ipsec_accel_sync_imp;
182 	ipsec_accel_is_accel_sav_p = ipsec_accel_is_accel_sav_impl;
183 	ipsec_accel_key_setaccelif_p = ipsec_accel_key_setaccelif_impl;
184 	ipsec_accel_on_ifdown_p = ipsec_accel_on_ifdown_impl;
185 	ipsec_accel_drv_sa_lifetime_update_p =
186 	    ipsec_accel_drv_sa_lifetime_update_impl;
187 	ipsec_accel_drv_sa_lifetime_fetch_p =
188 	    ipsec_accel_drv_sa_lifetime_fetch_impl;
189 	pctrie_init(&drv_spi_pctrie);
190 	ipsec_accel_ifdetach_event_tag = EVENTHANDLER_REGISTER(
191 	    ifnet_departure_event, ipsec_accel_ifdetach_event, NULL,
192 	    EVENTHANDLER_PRI_ANY);
193 }
194 SYSINIT(ipsec_accel_init, SI_SUB_VNET_DONE, SI_ORDER_ANY,
195     ipsec_accel_init, NULL);
196 
/*
 * Module teardown, the mirror image of ipsec_accel_init().  Order
 * matters: first stop new work from arriving (event handler and hook
 * pointers), then drain outstanding taskqueue work, then free the
 * shared resources.
 */
static void
ipsec_accel_fini(void *arg)
{
	EVENTHANDLER_DEREGISTER(ifnet_departure_event,
	    ipsec_accel_ifdetach_event_tag);
	/* Unhook from the ipsec(4) core so no new requests come in. */
	ipsec_accel_sa_newkey_p = NULL;
	ipsec_accel_forget_sav_p = NULL;
	ipsec_accel_spdadd_p = NULL;
	ipsec_accel_spddel_p = NULL;
	ipsec_accel_sa_lifetime_op_p = NULL;
	ipsec_accel_sync_p = NULL;
	ipsec_accel_is_accel_sav_p = NULL;
	ipsec_accel_key_setaccelif_p = NULL;
	ipsec_accel_on_ifdown_p = NULL;
	ipsec_accel_drv_sa_lifetime_update_p = NULL;
	ipsec_accel_drv_sa_lifetime_fetch_p = NULL;
	/* Wait for already-queued install/uninstall tasks to finish. */
	ipsec_accel_sync_imp();
	clean_unrhdr(drv_spi_unr);	/* avoid panic, should go later */
	clear_unrhdr(drv_spi_unr);
	delete_unrhdr(drv_spi_unr);
	taskqueue_drain_all(ipsec_accel_tq);
	taskqueue_free(ipsec_accel_tq);
	mtx_destroy(&ipsec_accel_sav_tmp);
	mtx_destroy(&ipsec_accel_cnt_lock);
}
222 SYSUNINIT(ipsec_accel_fini, SI_SUB_VNET_DONE, SI_ORDER_ANY,
223     ipsec_accel_fini, NULL);
224 
225 SYSCTL_NODE(_net_inet_ipsec, OID_AUTO, offload, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
226     "");
227 
228 static bool ipsec_offload_verbose = false;
229 SYSCTL_BOOL(_net_inet_ipsec_offload, OID_AUTO, verbose, CTLFLAG_RW,
230     &ipsec_offload_verbose, 0,
231     "Verbose SA/SP offload install and deinstall");
232 
233 static void
234 dprintf(const char *fmt, ...)
235 {
236 	va_list ap;
237 
238 	if (!ipsec_offload_verbose)
239 		return;
240 
241 	va_start(ap, fmt);
242 	vprintf(fmt, ap);
243 	va_end(ap);
244 }
245 
246 static void
247 ipsec_accel_alloc_forget_tq(struct secasvar *sav)
248 {
249 	void *ftq;
250 
251 	if (sav->accel_forget_tq != 0)
252 		return;
253 
254 	ftq = malloc(sizeof(struct ipsec_accel_forget_tq), M_TEMP, M_WAITOK);
255 	if (!atomic_cmpset_ptr(&sav->accel_forget_tq, 0, (uintptr_t)ftq))
256 		free(ftq, M_TEMP);
257 }
258 
259 static bool
260 ipsec_accel_sa_install_match(if_t ifp, void *arg)
261 {
262 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
263 		return (false);
264 	if (ifp->if_ipsec_accel_m->if_sa_newkey == NULL) {
265 		dprintf("driver bug ifp %s if_sa_newkey NULL\n",
266 		    if_name(ifp));
267 		return (false);
268 	}
269 	return (true);
270 }
271 
272 static int
273 ipsec_accel_sa_newkey_cb(if_t ifp, void *arg)
274 {
275 	struct ipsec_accel_install_newkey_tq *tq;
276 	void *priv;
277 	u_int drv_spi;
278 	int error;
279 
280 	tq = arg;
281 
282 	dprintf("ipsec_accel_sa_newkey_act: ifp %s h %p spi %#x "
283 	    "flags %#x seq %d\n",
284 	    if_name(ifp), ifp->if_ipsec_accel_m->if_sa_newkey,
285 	    be32toh(tq->sav->spi), tq->sav->flags, tq->sav->seq);
286 	priv = NULL;
287 	drv_spi = alloc_unr(drv_spi_unr);
288 	if (tq->sav->accel_ifname != NULL &&
289 	    strcmp(tq->sav->accel_ifname, if_name(ifp)) != 0) {
290 		error = ipsec_accel_handle_sav(tq->sav,
291 		    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
292 		goto out;
293 	}
294 	if (drv_spi == -1) {
295 		/* XXXKIB */
296 		dprintf("ipsec_accel_sa_install_newkey: cannot alloc "
297 		    "drv_spi if %s spi %#x\n", if_name(ifp),
298 		    be32toh(tq->sav->spi));
299 		return (ENOMEM);
300 	}
301 	error = ifp->if_ipsec_accel_m->if_sa_newkey(ifp, tq->sav,
302 	    drv_spi, &priv);
303 	if (error != 0) {
304 		if (error == EOPNOTSUPP) {
305 			dprintf("ipsec_accel_sa_newkey: driver "
306 			    "refused sa if %s spi %#x\n",
307 			    if_name(ifp), be32toh(tq->sav->spi));
308 			error = ipsec_accel_handle_sav(tq->sav,
309 			    ifp, drv_spi, priv, IFP_HS_REJECTED, NULL);
310 			/* XXXKIB */
311 		} else {
312 			dprintf("ipsec_accel_sa_newkey: driver "
313 			    "error %d if %s spi %#x\n",
314 			    error, if_name(ifp), be32toh(tq->sav->spi));
315 			/* XXXKIB */
316 		}
317 	} else {
318 		error = ipsec_accel_handle_sav(tq->sav, ifp,
319 		    drv_spi, priv, IFP_HS_HANDLED, NULL);
320 		if (error != 0) {
321 			/* XXXKIB */
322 			dprintf("ipsec_accel_sa_newkey: handle_sav "
323 			    "err %d if %s spi %#x\n", error,
324 			    if_name(ifp), be32toh(tq->sav->spi));
325 		}
326 	}
327 out:
328 	return (error);
329 }
330 
/*
 * Taskqueue handler for SA install.  Marks the SA as installed,
 * offers it to every capable interface, and pre-allocates the forget
 * task context.  Also resolves the race with a concurrent
 * ipsec_accel_forget_sav_impl() call.
 */
static void
ipsec_accel_sa_newkey_act(void *context, int pending)
{
	struct ipsec_accel_install_newkey_tq *tq;
	void *tqf;
	struct secasvar *sav;

	tq = context;
	tqf = NULL;
	sav = tq->sav;
	CURVNET_SET(tq->install_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	/*
	 * Install only once, only for mature SAs, and not when a
	 * deinstall request already arrived.
	 */
	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
	    SADB_KEY_ACCEL_DEINST)) == 0 &&
	    sav->state == SADB_SASTATE_MATURE) {
		sav->accel_flags |= SADB_KEY_ACCEL_INST;
		mtx_unlock(&ipsec_accel_sav_tmp);
		/* May sleep; the mutex is dropped around the iteration. */
		if_foreach_sleep(ipsec_accel_sa_install_match, context,
		    ipsec_accel_sa_newkey_cb, context);
		ipsec_accel_alloc_forget_tq(sav);
		mtx_lock(&ipsec_accel_sav_tmp);

		/*
		 * If ipsec_accel_forget_sav() raced with us and set
		 * the flag, do its work.  Its task cannot execute in
		 * parallel since ipsec_accel taskqueue is single-threaded.
		 */
		if ((sav->accel_flags & SADB_KEY_ACCEL_DEINST) != 0) {
			tqf = (void *)sav->accel_forget_tq;
			sav->accel_forget_tq = 0;
			ipsec_accel_forget_sav_clear(sav);
		}
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesav(&tq->sav);	/* drop the reference taken at enqueue */
	CURVNET_RESTORE();
	free(tq, M_TEMP);
	free(tqf, M_TEMP);	/* free(NULL) is a no-op */
}
370 
371 static void
372 ipsec_accel_sa_newkey_impl(struct secasvar *sav)
373 {
374 	struct ipsec_accel_install_newkey_tq *tq;
375 
376 	if ((sav->accel_flags & (SADB_KEY_ACCEL_INST |
377 	    SADB_KEY_ACCEL_DEINST)) != 0)
378 		return;
379 
380 	dprintf(
381 	    "ipsec_accel_sa_install_newkey: spi %#x flags %#x seq %d\n",
382 	    be32toh(sav->spi), sav->flags, sav->seq);
383 
384 	tq = malloc(sizeof(*tq), M_TEMP, M_NOWAIT);
385 	if (tq == NULL) {
386 		dprintf("ipsec_accel_sa_install_newkey: no memory for tq, "
387 		    "spi %#x\n", be32toh(sav->spi));
388 		/* XXXKIB */
389 		return;
390 	}
391 
392 	refcount_acquire(&sav->refcnt);
393 
394 	TASK_INIT(&tq->install_task, 0, ipsec_accel_sa_newkey_act, tq);
395 	tq->sav = sav;
396 	tq->install_vnet = curthread->td_vnet;
397 	taskqueue_enqueue(ipsec_accel_tq, &tq->install_task);
398 }
399 
/*
 * Record an interface's answer for an SA: allocate an ifp_handle_sav
 * and link it into the per-SA list, the global handle list, and the
 * drv_spi pctrie.  'flags' must contain exactly one of IFP_HS_HANDLED
 * or IFP_HS_REJECTED; rejected outcomes are recorded too.  On success
 * the handle owns a reference on 'ifp'.  Returns 0, EALREADY when
 * this interface already has a handle for the SA, or a pctrie
 * insertion error.
 */
static int
ipsec_accel_handle_sav(struct secasvar *sav, struct ifnet *ifp,
    u_int drv_spi, void *priv, uint32_t flags, struct ifp_handle_sav **ires)
{
	struct ifp_handle_sav *ihs, *i;
	int error;

	MPASS(__bitcount(flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) == 1);

	ihs = malloc(sizeof(*ihs), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	ihs->ifp = ifp;
	ihs->sav = sav;
	ihs->drv_spi = drv_spi;
	ihs->ifdata = priv;
	ihs->flags = flags;
	/* ESP header growth estimate, used for MTU checks on output. */
	ihs->hdr_ext_size = esp_hdrsiz(sav);
	/* Cache the xform history for ipsec_accel_fill_xh(). */
	memcpy(&ihs->xfh.dst, &sav->sah->saidx.dst, sizeof(ihs->xfh.dst));
	ihs->xfh.spi = sav->spi;
	ihs->xfh.proto = sav->sah->saidx.proto;
	ihs->xfh.mode = sav->sah->saidx.mode;
	mtx_lock(&ipsec_accel_sav_tmp);
	/* Refuse a second handle for the same interface. */
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp) {
			error = EALREADY;
			goto errout;
		}
	}
	error = DRVSPI_SA_PCTRIE_INSERT(&drv_spi_pctrie, ihs);
	if (error != 0)
		goto errout;
	if_ref(ihs->ifp);
	CK_LIST_INSERT_HEAD(&sav->accel_ifps, ihs, sav_link);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, ihs, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	if (ires != NULL)
		*ires = ihs;
	return (0);
errout:
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(ihs, M_IPSEC_MISC);
	if (ires != NULL)
		*ires = NULL;
	return (error);
}
444 
/*
 * Unlink one interface handle and destroy it.  Called, and returns,
 * with ipsec_accel_sav_tmp held; the lock is dropped internally so the
 * function can wait for an epoch grace period and invoke the driver's
 * if_sa_deinstall method outside the mutex.  When 'freesav' is true
 * the handle's SA reference is dropped as well.
 */
static void
ipsec_accel_forget_handle_sav(struct ifp_handle_sav *i, bool freesav)
{
	struct ifnet *ifp;
	struct secasvar *sav;

	mtx_assert(&ipsec_accel_sav_tmp, MA_OWNED);

	CK_LIST_REMOVE(i, sav_link);
	CK_LIST_REMOVE(i, sav_allh_link);
	DRVSPI_SA_PCTRIE_REMOVE(&drv_spi_pctrie, i->drv_spi);
	mtx_unlock(&ipsec_accel_sav_tmp);
	/* Let epoch readers still holding the handle drain. */
	NET_EPOCH_WAIT();
	ifp = i->ifp;
	sav = i->sav;
	/* Only handles the driver accepted need a driver deinstall. */
	if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
	    IFP_HS_HANDLED) {
		dprintf("sa deinstall %s %p spi %#x ifl %#x\n",
		    if_name(ifp), sav, be32toh(sav->spi), i->flags);
		ifp->if_ipsec_accel_m->if_sa_deinstall(ifp,
		    i->drv_spi, i->ifdata);
	}
	if_rele(ifp);
	free_unr(drv_spi_unr, i->drv_spi);
	free(i, M_IPSEC_MISC);
	if (freesav)
		key_freesav(&sav);
	mtx_lock(&ipsec_accel_sav_tmp);
}
474 
475 static void
476 ipsec_accel_forget_sav_clear(struct secasvar *sav)
477 {
478 	struct ifp_handle_sav *i;
479 
480 	for (;;) {
481 		i = CK_LIST_FIRST(&sav->accel_ifps);
482 		if (i == NULL)
483 			break;
484 		ipsec_accel_forget_handle_sav(i, false);
485 	}
486 }
487 
488 static void
489 ipsec_accel_forget_sav_act(void *arg, int pending)
490 {
491 	struct ipsec_accel_forget_tq *tq;
492 	struct secasvar *sav;
493 
494 	tq = arg;
495 	sav = tq->sav;
496 	CURVNET_SET(tq->forget_vnet);
497 	mtx_lock(&ipsec_accel_sav_tmp);
498 	ipsec_accel_forget_sav_clear(sav);
499 	mtx_unlock(&ipsec_accel_sav_tmp);
500 	key_freesav(&sav);
501 	CURVNET_RESTORE();
502 	free(tq, M_TEMP);
503 }
504 
/*
 * Request deinstall of all offloaded state for an SA.  Sets
 * SADB_KEY_ACCEL_DEINST and tries to claim the pre-allocated forget
 * task context.  If the context is absent or another claimant won,
 * only the flag is left set; ipsec_accel_sa_newkey_act() observes it
 * and performs the cleanup itself.
 */
void
ipsec_accel_forget_sav_impl(struct secasvar *sav)
{
	struct ipsec_accel_forget_tq *tq;

	mtx_lock(&ipsec_accel_sav_tmp);
	sav->accel_flags |= SADB_KEY_ACCEL_DEINST;
	/* Atomically take ownership of the forget task context. */
	tq = (void *)atomic_load_ptr(&sav->accel_forget_tq);
	if (tq == NULL || !atomic_cmpset_ptr(&sav->accel_forget_tq,
	    (uintptr_t)tq, 0)) {
		mtx_unlock(&ipsec_accel_sav_tmp);
		return;
	}
	mtx_unlock(&ipsec_accel_sav_tmp);

	/* The task owns this reference; dropped in the task handler. */
	refcount_acquire(&sav->refcnt);
	TASK_INIT(&tq->forget_task, 0, ipsec_accel_forget_sav_act, tq);
	tq->forget_vnet = curthread->td_vnet;
	tq->sav = sav;
	taskqueue_enqueue(ipsec_accel_tq, &tq->forget_task);
}
526 
/*
 * Deinstall every SA handle bound to 'ifp'.  A marker element is
 * threaded through the global handle list so the scan can resume
 * after ipsec_accel_forget_handle_sav() drops the mutex while
 * destroying an entry.
 */
static void
ipsec_accel_on_ifdown_sav(struct ifnet *ifp)
{
	struct ifp_handle_sav *i, *marker;

	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
	marker->flags = IFP_HS_MARKER;

	mtx_lock(&ipsec_accel_sav_tmp);
	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sav_handles, marker,
	    sav_allh_link);
	for (;;) {
		i = CK_LIST_NEXT(marker, sav_allh_link);
		if (i == NULL)
			break;
		/* Move the marker past the element being examined. */
		CK_LIST_REMOVE(marker, sav_allh_link);
		CK_LIST_INSERT_AFTER(i, marker, sav_allh_link);
		if (i->ifp == ifp) {
			refcount_acquire(&i->sav->refcnt); /* XXXKIB wrap ? */
			ipsec_accel_forget_handle_sav(i, true);
		}
	}
	CK_LIST_REMOVE(marker, sav_allh_link);
	mtx_unlock(&ipsec_accel_sav_tmp);
	free(marker, M_IPSEC_MISC);
}
553 
554 static struct ifp_handle_sav *
555 ipsec_accel_is_accel_sav_ptr_raw(struct secasvar *sav, struct ifnet *ifp)
556 {
557 	struct ifp_handle_sav *i;
558 
559 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0)
560 		return (NULL);
561 	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
562 		if (i->ifp == ifp)
563 			return (i);
564 	}
565 	return (NULL);
566 }
567 
/*
 * Epoch-checked wrapper around the raw handle lookup; the handle list
 * walk relies on net epoch protection.
 */
static struct ifp_handle_sav *
ipsec_accel_is_accel_sav_ptr(struct secasvar *sav, struct ifnet *ifp)
{
	NET_EPOCH_ASSERT();
	return (ipsec_accel_is_accel_sav_ptr_raw(sav, ifp));
}
574 
575 static bool
576 ipsec_accel_is_accel_sav_impl(struct secasvar *sav)
577 {
578 	return (!CK_LIST_EMPTY(&sav->accel_ifps));
579 }
580 
581 static struct secasvar *
582 ipsec_accel_drvspi_to_sa(u_int drv_spi)
583 {
584 	struct ifp_handle_sav *i;
585 
586 	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
587 	if (i == NULL)
588 		return (NULL);
589 	return (i->sav);
590 }
591 
592 static struct ifp_handle_sp *
593 ipsec_accel_find_accel_sp(struct secpolicy *sp, if_t ifp)
594 {
595 	struct ifp_handle_sp *i;
596 
597 	CK_LIST_FOREACH(i, &sp->accel_ifps, sp_link) {
598 		if (i->ifp == ifp)
599 			return (i);
600 	}
601 	return (NULL);
602 }
603 
604 static bool
605 ipsec_accel_is_accel_sp(struct secpolicy *sp, if_t ifp)
606 {
607 	return (ipsec_accel_find_accel_sp(sp, ifp) != NULL);
608 }
609 
610 static int
611 ipsec_accel_remember_sp(struct secpolicy *sp, if_t ifp,
612     struct ifp_handle_sp **ip)
613 {
614 	struct ifp_handle_sp *i;
615 
616 	i = malloc(sizeof(*i), M_IPSEC_MISC, M_WAITOK | M_ZERO);
617 	i->sp = sp;
618 	i->ifp = ifp;
619 	if_ref(ifp);
620 	i->flags = IFP_HP_HANDLED;
621 	mtx_lock(&ipsec_accel_sav_tmp);
622 	CK_LIST_INSERT_HEAD(&sp->accel_ifps, i, sp_link);
623 	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, i, sp_allh_link);
624 	mtx_unlock(&ipsec_accel_sav_tmp);
625 	*ip = i;
626 	return (0);
627 }
628 
629 static bool
630 ipsec_accel_spdadd_match(if_t ifp, void *arg)
631 {
632 	struct secpolicy *sp;
633 
634 	if ((ifp->if_capenable2 & IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD)) == 0 ||
635 	    ifp->if_ipsec_accel_m->if_spdadd == NULL)
636 		return (false);
637 	sp = arg;
638 	if (sp->accel_ifname != NULL &&
639 	    strcmp(sp->accel_ifname, if_name(ifp)) != 0)
640 		return (false);
641 	if (ipsec_accel_is_accel_sp(sp, ifp))
642 		return (false);
643 	return (true);
644 }
645 
646 static int
647 ipsec_accel_spdadd_cb(if_t ifp, void *arg)
648 {
649 	struct secpolicy *sp;
650 	struct inpcb *inp;
651 	struct ifp_handle_sp *i;
652 	int error;
653 
654 	sp = arg;
655 	inp = sp->ipsec_accel_add_sp_inp;
656 	dprintf("ipsec_accel_spdadd_cb: ifp %s m %p sp %p inp %p\n",
657 	    if_name(ifp), ifp->if_ipsec_accel_m->if_spdadd, sp, inp);
658 	error = ipsec_accel_remember_sp(sp, ifp, &i);
659 	if (error != 0) {
660 		dprintf("ipsec_accel_spdadd: %s if_spdadd %p remember res %d\n",
661 		    if_name(ifp), sp, error);
662 		return (error);
663 	}
664 	error = ifp->if_ipsec_accel_m->if_spdadd(ifp, sp, inp, &i->ifdata);
665 	if (error != 0) {
666 		i->flags |= IFP_HP_REJECTED;
667 		dprintf("ipsec_accel_spdadd: %s if_spdadd %p res %d\n",
668 		    if_name(ifp), sp, error);
669 	}
670 	return (error);
671 }
672 
/*
 * Taskqueue handler for policy install: offer the policy to every
 * matching interface, then drop the inpcb and policy references taken
 * by ipsec_accel_spdadd_impl().
 */
static void
ipsec_accel_spdadd_act(void *arg, int pending)
{
	struct secpolicy *sp;
	struct inpcb *inp;

	sp = arg;
	CURVNET_SET(sp->accel_add_tq.adddel_vnet);
	if_foreach_sleep(ipsec_accel_spdadd_match, arg,
	    ipsec_accel_spdadd_cb, arg);
	inp = sp->ipsec_accel_add_sp_inp;
	if (inp != NULL) {
		INP_WLOCK(inp);
		/*
		 * in_pcbrele_wlocked() returns true when it freed the
		 * pcb, in which case it must not be unlocked here.
		 */
		if (!in_pcbrele_wlocked(inp))
			INP_WUNLOCK(inp);
		sp->ipsec_accel_add_sp_inp = NULL;
	}
	CURVNET_RESTORE();
	key_freesp(&sp);
}
693 
694 void
695 ipsec_accel_spdadd_impl(struct secpolicy *sp, struct inpcb *inp)
696 {
697 	struct ipsec_accel_adddel_sp_tq *tq;
698 
699 	if (sp == NULL)
700 		return;
701 	if (sp->tcount == 0 && inp == NULL)
702 		return;
703 	tq = &sp->accel_add_tq;
704 	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
705 		return;
706 	tq->adddel_vnet = curthread->td_vnet;
707 	sp->ipsec_accel_add_sp_inp = inp;
708 	if (inp != NULL)
709 		in_pcbref(inp);
710 	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spdadd_act, sp);
711 	key_addref(sp);
712 	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
713 }
714 
/*
 * Taskqueue handler for policy removal: detach every interface handle
 * from the policy and call the driver's if_spddel for handles that
 * were accepted.  The mutex is dropped around the epoch wait and the
 * driver call for each handle.
 */
static void
ipsec_accel_spddel_act(void *arg, int pending)
{
	struct ifp_handle_sp *i;
	struct secpolicy *sp;
	int error;

	sp = arg;
	CURVNET_SET(sp->accel_del_tq.adddel_vnet);
	mtx_lock(&ipsec_accel_sav_tmp);
	for (;;) {
		i = CK_LIST_FIRST(&sp->accel_ifps);
		if (i == NULL)
			break;
		CK_LIST_REMOVE(i, sp_link);
		CK_LIST_REMOVE(i, sp_allh_link);
		mtx_unlock(&ipsec_accel_sav_tmp);
		/* Wait out epoch readers that may still see the handle. */
		NET_EPOCH_WAIT();
		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
		    IFP_HP_HANDLED) {
			dprintf("spd deinstall %s %p\n", if_name(i->ifp), sp);
			error = i->ifp->if_ipsec_accel_m->if_spddel(i->ifp,
			    sp, i->ifdata);
			if (error != 0) {
				dprintf(
		    "ipsec_accel_spddel: %s if_spddel %p res %d\n",
				    if_name(i->ifp), sp, error);
			}
		}
		if_rele(i->ifp);
		free(i, M_IPSEC_MISC);
		mtx_lock(&ipsec_accel_sav_tmp);
	}
	mtx_unlock(&ipsec_accel_sav_tmp);
	key_freesp(&sp);
	CURVNET_RESTORE();
}
752 
753 void
754 ipsec_accel_spddel_impl(struct secpolicy *sp)
755 {
756 	struct ipsec_accel_adddel_sp_tq *tq;
757 
758 	if (sp == NULL)
759 		return;
760 
761 	tq = &sp->accel_del_tq;
762 	if (atomic_cmpset_int(&tq->adddel_scheduled, 0, 1) == 0)
763 		return;
764 	tq->adddel_vnet = curthread->td_vnet;
765 	TASK_INIT(&tq->adddel_task, 0, ipsec_accel_spddel_act, sp);
766 	key_addref(sp);
767 	taskqueue_enqueue(ipsec_accel_tq, &tq->adddel_task);
768 }
769 
770 static void
771 ipsec_accel_on_ifdown_sp(struct ifnet *ifp)
772 {
773 	struct ifp_handle_sp *i, *marker;
774 	struct secpolicy *sp;
775 	int error;
776 
777 	marker = malloc(sizeof(*marker), M_IPSEC_MISC, M_WAITOK | M_ZERO);
778 	marker->flags = IFP_HS_MARKER;
779 
780 	mtx_lock(&ipsec_accel_sav_tmp);
781 	CK_LIST_INSERT_HEAD(&ipsec_accel_all_sp_handles, marker,
782 	    sp_allh_link);
783 	for (;;) {
784 		i = CK_LIST_NEXT(marker, sp_allh_link);
785 		if (i == NULL)
786 			break;
787 		CK_LIST_REMOVE(marker, sp_allh_link);
788 		CK_LIST_INSERT_AFTER(i, marker, sp_allh_link);
789 		if (i->ifp != ifp)
790 			continue;
791 
792 		sp = i->sp;
793 		key_addref(sp);
794 		CK_LIST_REMOVE(i, sp_link);
795 		CK_LIST_REMOVE(i, sp_allh_link);
796 		mtx_unlock(&ipsec_accel_sav_tmp);
797 		NET_EPOCH_WAIT();
798 		if ((i->flags & (IFP_HP_HANDLED | IFP_HP_REJECTED)) ==
799 		    IFP_HP_HANDLED) {
800 			dprintf("spd deinstall %s %p\n", if_name(ifp), sp);
801 			error = ifp->if_ipsec_accel_m->if_spddel(ifp,
802 			    sp, i->ifdata);
803 		}
804 		if (error != 0) {
805 			dprintf(
806 		    "ipsec_accel_on_ifdown_sp: %s if_spddel %p res %d\n",
807 			    if_name(ifp), sp, error);
808 		}
809 		key_freesp(&sp);
810 		if_rele(ifp);
811 		free(i, M_IPSEC_MISC);
812 		mtx_lock(&ipsec_accel_sav_tmp);
813 	}
814 	CK_LIST_REMOVE(marker, sp_allh_link);
815 	mtx_unlock(&ipsec_accel_sav_tmp);
816 	free(marker, M_IPSEC_MISC);
817 }
818 
/*
 * Drop all offloaded state for an interface going down: policies
 * first, then SAs.
 */
static void
ipsec_accel_on_ifdown_impl(struct ifnet *ifp)
{
	ipsec_accel_on_ifdown_sp(ifp);
	ipsec_accel_on_ifdown_sav(ifp);
}
825 
826 static void
827 ipsec_accel_ifdetach_event(void *arg __unused, struct ifnet *ifp)
828 {
829 	if ((ifp->if_flags & IFF_RENAMING) != 0)
830 		return;
831 	ipsec_accel_on_ifdown_impl(ifp);
832 }
833 
/*
 * Predict whether the packet still fits into 'mtu' after ESP
 * encapsulation.  'skip' is the length of the network headers that
 * stay in front of the ESP header; the rest of the packet is the
 * payload to be encrypted.
 */
static bool
ipsec_accel_output_pad(struct mbuf *m, struct secasvar *sav, int skip, int mtu)
{
	int alen, blks, hlen, padding, rlen;

	/* Payload length after the outer headers. */
	rlen = m->m_pkthdr.len - skip;
	/* ESP header (old or new format) plus the IV. */
	hlen = ((sav->flags & SADB_X_EXT_OLD) != 0 ? sizeof(struct esp) :
	    sizeof(struct newesp)) + sav->ivlen;
	/* Cipher block size to pad to, at least 4. */
	blks = MAX(4, SAV_ISCTR(sav) && VNET(esp_ctr_compatibility) ?
	    sav->tdb_encalgxform->native_blocksize :
	    sav->tdb_encalgxform->blocksize);
	/* The +2 accounts for the pad-length and next-header bytes. */
	padding = ((blks - ((rlen + 2) % blks)) % blks) + 2;
	/* ICV length of the authentication algorithm, if any. */
	alen = xform_ah_authsize(sav->tdb_authalgxform);

	return (skip + hlen + rlen + padding + alen <= mtu);
}
850 
851 static bool
852 ipsec_accel_output_tag(struct mbuf *m, u_int drv_spi)
853 {
854 	struct ipsec_accel_out_tag *tag;
855 
856 	tag = (struct ipsec_accel_out_tag *)m_tag_get(
857 	    PACKET_TAG_IPSEC_ACCEL_OUT, sizeof(*tag), M_NOWAIT);
858 	if (tag == NULL)
859 		return (false);
860 	tag->drv_spi = drv_spi;
861 	m_tag_prepend(m, &tag->tag);
862 	return (true);
863 }
864 
865 bool
866 ipsec_accel_output(struct ifnet *ifp, struct mbuf *m, struct inpcb *inp,
867     struct secpolicy *sp, struct secasvar *sav, int af, int mtu, int *hwassist)
868 {
869 	struct ifp_handle_sav *i;
870 	struct ip *ip;
871 	struct tcpcb *tp;
872 	u_long ip_len, skip;
873 	bool res;
874 
875 	*hwassist = 0;
876 	res = false;
877 	if (ifp == NULL)
878 		return (res);
879 
880 	M_ASSERTPKTHDR(m);
881 	NET_EPOCH_ASSERT();
882 
883 	if (sav == NULL) {
884 		res = ipsec_accel_output_tag(m, IPSEC_ACCEL_DRV_SPI_BYPASS);
885 		goto out;
886 	}
887 
888 	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
889 	if (i == NULL || (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
890 	    IFP_HS_HANDLED)
891 		goto out;
892 
893 	if ((m->m_pkthdr.csum_flags & CSUM_TSO) == 0) {
894 		ip_len = m->m_pkthdr.len;
895 		if (ip_len + i->hdr_ext_size > mtu)
896 			goto out;
897 		switch (af) {
898 		case AF_INET:
899 			ip = mtod(m, struct ip *);
900 			skip = ip->ip_hl << 2;
901 			break;
902 		case AF_INET6:
903 			skip = sizeof(struct ip6_hdr);
904 			break;
905 		default:
906 			__unreachable();
907 		}
908 		if (!ipsec_accel_output_pad(m, sav, skip, mtu))
909 			goto out;
910 	}
911 
912 	if (!ipsec_accel_output_tag(m, i->drv_spi))
913 		goto out;
914 
915 	ipsec_accel_sa_recordxfer(sav, m);
916 	key_freesav(&sav);
917 	if (sp != NULL)
918 		key_freesp(&sp);
919 
920 	*hwassist = ifp->if_ipsec_accel_m->if_hwassist(ifp, sav,
921 	    i->drv_spi, i->ifdata);
922 	res = true;
923 out:
924 	if (inp != NULL && inp->inp_pcbinfo == &V_tcbinfo) {
925 		INP_WLOCK_ASSERT(inp);
926 		tp = (struct tcpcb *)inp;
927 		if (res && (*hwassist & (CSUM_TSO | CSUM_IP6_TSO)) != 0) {
928 			tp->t_flags2 |= TF2_IPSEC_TSO;
929 		} else {
930 			tp->t_flags2 &= ~TF2_IPSEC_TSO;
931 		}
932 	}
933 	return (res);
934 }
935 
936 struct ipsec_accel_in_tag *
937 ipsec_accel_input_tag_lookup(const struct mbuf *m)
938 {
939 	struct ipsec_accel_in_tag *tag;
940 	struct m_tag *xtag;
941 
942 	xtag = m_tag_find(__DECONST(struct mbuf *, m),
943 	    PACKET_TAG_IPSEC_ACCEL_IN, NULL);
944 	if (xtag == NULL)
945 		return (NULL);
946 	tag = __containerof(xtag, struct ipsec_accel_in_tag, tag);
947 	return (tag);
948 }
949 
950 int
951 ipsec_accel_input(struct mbuf *m, int offset, int proto)
952 {
953 	struct secasvar *sav;
954 	struct ipsec_accel_in_tag *tag;
955 
956 	tag = ipsec_accel_input_tag_lookup(m);
957 	if (tag == NULL)
958 		return (ENXIO);
959 
960 	if (tag->drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
961 	    tag->drv_spi > IPSEC_ACCEL_DRV_SPI_MAX) {
962 		dprintf("if %s mbuf %p drv_spi %d invalid, packet dropped\n",
963 		    (m->m_flags & M_PKTHDR) != 0 ? if_name(m->m_pkthdr.rcvif) :
964 		    "<unknwn>", m, tag->drv_spi);
965 		m_freem(m);
966 		return (EINPROGRESS);
967 	}
968 
969 	sav = ipsec_accel_drvspi_to_sa(tag->drv_spi);
970 	if (sav != NULL)
971 		ipsec_accel_sa_recordxfer(sav, m);
972 	return (0);
973 }
974 
975 static void
976 ipsec_accel_sa_recordxfer(struct secasvar *sav, struct mbuf *m)
977 {
978 	counter_u64_add(sav->accel_lft_sw, 1);
979 	counter_u64_add(sav->accel_lft_sw + 1, m->m_pkthdr.len);
980 	if (sav->accel_firstused == 0)
981 		sav->accel_firstused = time_second;
982 }
983 
984 static void
985 ipsec_accel_sa_lifetime_update(struct seclifetime *lft_c,
986     const struct seclifetime *lft_l)
987 {
988 	lft_c->allocations += lft_l->allocations;
989 	lft_c->bytes += lft_l->bytes;
990 	lft_c->usetime = min(lft_c->usetime, lft_l->usetime);
991 }
992 
/*
 * Driver callback: fold absolute hardware counters reported for
 * (ifp, drv_spi) into the SA.  'octets' and 'allocs' are running
 * totals; only the delta since the previous report is added to the
 * SA-wide counters, while the handle stores the latest snapshot.
 */
static void
ipsec_accel_drv_sa_lifetime_update_impl(struct secasvar *sav, if_t ifp,
    u_int drv_spi, uint64_t octets, uint64_t allocs)
{
	struct epoch_tracker et;
	struct ifp_handle_sav *i;
	uint64_t odiff, adiff;

	NET_EPOCH_ENTER(et);
	mtx_lock(&ipsec_accel_cnt_lock);

	/* Any hardware activity marks the SA as used. */
	if (allocs != 0) {
		if (sav->firstused == 0)
			sav->firstused = time_second;
		if (sav->accel_firstused == 0)
			sav->accel_firstused = time_second;
	}

	/* Locate the handle this report belongs to. */
	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
		if (i->ifp == ifp && i->drv_spi == drv_spi)
			break;
	}
	if (i == NULL)
		goto out;

	/* Deltas relative to the snapshot stored in the handle. */
	odiff = octets - i->cnt_octets;
	adiff = allocs - i->cnt_allocs;

	if (sav->lft_c != NULL) {
		counter_u64_add(sav->lft_c_bytes, odiff);
		counter_u64_add(sav->lft_c_allocations, adiff);
	}

	i->cnt_octets = octets;
	i->cnt_allocs = allocs;
	sav->accel_hw_octets += odiff;
	sav->accel_hw_allocs += adiff;

out:
	mtx_unlock(&ipsec_accel_cnt_lock);
	NET_EPOCH_EXIT(et);
}
1035 
1036 static int
1037 ipsec_accel_drv_sa_lifetime_fetch_impl(struct secasvar *sav,
1038     if_t ifp, u_int drv_spi, uint64_t *octets, uint64_t *allocs)
1039 {
1040 	struct ifp_handle_sav *i;
1041 	int error;
1042 
1043 	NET_EPOCH_ASSERT();
1044 	error = 0;
1045 
1046 	mtx_lock(&ipsec_accel_cnt_lock);
1047 	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1048 		if (i->ifp == ifp && i->drv_spi == drv_spi) {
1049 			*octets = i->cnt_octets;
1050 			*allocs = i->cnt_allocs;
1051 			break;
1052 		}
1053 	}
1054 	if (i == NULL)
1055 		error = ENOENT;
1056 	mtx_unlock(&ipsec_accel_cnt_lock);
1057 	return (error);
1058 }
1059 
1060 static void
1061 ipsec_accel_sa_lifetime_hw(struct secasvar *sav, if_t ifp,
1062     struct seclifetime *lft)
1063 {
1064 	struct ifp_handle_sav *i;
1065 	if_sa_cnt_fn_t p;
1066 
1067 	IFNET_RLOCK_ASSERT();
1068 
1069 	i = ipsec_accel_is_accel_sav_ptr(sav, ifp);
1070 	if (i != NULL && (i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
1071 	    IFP_HS_HANDLED) {
1072 		p = ifp->if_ipsec_accel_m->if_sa_cnt;
1073 		if (p != NULL)
1074 			p(ifp, sav, i->drv_spi, i->ifdata, lft);
1075 	}
1076 }
1077 
/*
 * Collect SA lifetime counters.  'op' selects the source:
 *  IF_SA_CNT_IFP_HW_VAL   - hardware counters of the single 'ifp';
 *  IF_SA_CNT_TOTAL_SW_VAL - software counters from
 *                           ipsec_accel_sa_recordxfer();
 *  IF_SA_CNT_TOTAL_HW_VAL - sum over all interfaces that accepted
 *                           the SA.
 * With IF_SA_CNT_UPD the result is added to *lft_c, otherwise *lft_c
 * is overwritten.  When 'sahtree_trackerp' is given, the sahtree read
 * lock is dropped around driver calls.
 */
static int
ipsec_accel_sa_lifetime_op_impl(struct secasvar *sav,
    struct seclifetime *lft_c, if_t ifp, enum IF_SA_CNT_WHICH op,
    struct rm_priotracker *sahtree_trackerp)
{
	struct seclifetime lft_l, lft_s;
	struct ifp_handle_sav *i;
	if_t ifp1;
	if_sa_cnt_fn_t p;
	int error;

	error = 0;
	memset(&lft_l, 0, sizeof(lft_l));
	memset(&lft_s, 0, sizeof(lft_s));

	switch (op & ~IF_SA_CNT_UPD) {
	case IF_SA_CNT_IFP_HW_VAL:
		ipsec_accel_sa_lifetime_hw(sav, ifp, &lft_l);
		ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		break;

	case IF_SA_CNT_TOTAL_SW_VAL:
		/* accel_lft_sw[0] holds packets, [1] holds bytes. */
		lft_l.allocations = (uint32_t)counter_u64_fetch(
		    sav->accel_lft_sw);
		lft_l.bytes = counter_u64_fetch(sav->accel_lft_sw + 1);
		lft_l.usetime = sav->accel_firstused;
		break;

	case IF_SA_CNT_TOTAL_HW_VAL:
		IFNET_RLOCK_ASSERT();
		CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
			/* Skip handles the driver rejected. */
			if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) !=
			    IFP_HS_HANDLED)
				continue;
			ifp1 = i->ifp;
			p = ifp1->if_ipsec_accel_m->if_sa_cnt;
			if (p == NULL)
				continue;
			memset(&lft_s, 0, sizeof(lft_s));
			/* The driver call must not run under sahtree. */
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_runlock(sahtree_trackerp);
			error = p(ifp1, sav, i->drv_spi, i->ifdata, &lft_s);
			if (sahtree_trackerp != NULL)
				ipsec_sahtree_rlock(sahtree_trackerp);
			if (error == 0)
				ipsec_accel_sa_lifetime_update(&lft_l, &lft_s);
		}
		break;
	}

	if (error == 0) {
		if ((op & IF_SA_CNT_UPD) == 0)
			memset(lft_c, 0, sizeof(*lft_c));
		ipsec_accel_sa_lifetime_update(lft_c, &lft_l);
	}

	return (error);
}
1136 
1137 static void
1138 ipsec_accel_sync_imp(void)
1139 {
1140 	taskqueue_drain_all(ipsec_accel_tq);
1141 }
1142 
1143 static struct mbuf *
1144 ipsec_accel_key_setaccelif_impl(struct secasvar *sav)
1145 {
1146 	struct mbuf *m, *m1;
1147 	struct ifp_handle_sav *i;
1148 	struct epoch_tracker et;
1149 
1150 	if (sav->accel_ifname != NULL)
1151 		return (key_setaccelif(sav->accel_ifname));
1152 
1153 	m = m1 = NULL;
1154 
1155 	NET_EPOCH_ENTER(et);
1156 	CK_LIST_FOREACH(i, &sav->accel_ifps, sav_link) {
1157 		if ((i->flags & (IFP_HS_HANDLED | IFP_HS_REJECTED)) ==
1158 		    IFP_HS_HANDLED) {
1159 			m1 = key_setaccelif(if_name(i->ifp));
1160 			if (m == NULL)
1161 				m = m1;
1162 			else if (m1 != NULL)
1163 				m_cat(m, m1);
1164 		}
1165 	}
1166 	NET_EPOCH_EXIT(et);
1167 	return (m);
1168 }
1169 
1170 bool
1171 ipsec_accel_fill_xh(if_t ifp, uint32_t drv_spi, struct xform_history *xh)
1172 {
1173 	struct ifp_handle_sav *i;
1174 
1175 	if (drv_spi < IPSEC_ACCEL_DRV_SPI_MIN ||
1176 	    drv_spi > IPSEC_ACCEL_DRV_SPI_MAX)
1177 		return (false);
1178 
1179 	i = DRVSPI_SA_PCTRIE_LOOKUP(&drv_spi_pctrie, drv_spi);
1180 	if (i == NULL)
1181 		return (false);
1182 	memcpy(xh, &i->xfh, sizeof(*xh));
1183 	return (true);
1184 }
1185 
1186 #endif	/* IPSEC_OFFLOAD */
1187