1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2018 Chelsio Communications, Inc.
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 */
28 #include <sys/cdefs.h>
29 #include "opt_inet.h"
30 #include "opt_inet6.h"
31
32 #include <sys/param.h>
33 #include <sys/eventhandler.h>
34 #include <sys/fnv_hash.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/module.h>
38 #include <sys/bus.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/rwlock.h>
42 #include <sys/socket.h>
43 #include <sys/sbuf.h>
44 #include <netinet/in.h>
45
46 #include "common/common.h"
47 #include "common/t4_msg.h"
48 #include "common/t4_regs.h"
49 #include "common/t4_regs_values.h"
50 #include "common/t4_tcb.h"
51 #include "t4_l2t.h"
52 #include "t4_smt.h"
53
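/*
 * A filter entry describes either a TCAM filter (kept in the ftid/hpftid
 * tables) or a hashfilter (linked into the hftid hash tables via link_4t
 * and link_tid).
 */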
54 struct filter_entry {
55 LIST_ENTRY(filter_entry) link_4t;
56 LIST_ENTRY(filter_entry) link_tid;
57
58 uint32_t valid:1; /* filter allocated and valid */
59 uint32_t locked:1; /* filter is administratively locked or busy */
60 uint32_t pending:1; /* filter action is pending firmware reply */
61 int tid; /* tid of the filter TCB */
62 struct l2t_entry *l2te; /* L2 table entry for DMAC rewrite */
63 struct smt_entry *smt; /* SMT entry for SMAC rewrite */
64
65 struct t4_filter_specification fs;
66 };
67
68 static void free_filter_resources(struct filter_entry *);
69 static int get_tcamfilter(struct adapter *, struct t4_filter *);
70 static int get_hashfilter(struct adapter *, struct t4_filter *);
71 static int set_hashfilter(struct adapter *, struct t4_filter *, uint64_t,
72 struct l2t_entry *, struct smt_entry *);
73 static int del_hashfilter(struct adapter *, struct t4_filter *);
74 static int configure_hashfilter_tcb(struct adapter *, struct filter_entry *);
75
76 static inline bool
77 separate_hpfilter_region(struct adapter *sc)
78 {
79
80 return (chip_id(sc) >= CHELSIO_T6);
81 }
82
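/*
 * 32-bit FNV-1 hash of a hashfilter's 4-tuple (addresses and ports), used to
 * pick a bucket in the driver's 4-tuple hash table.
 */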
83 static inline uint32_t
84 hf_hashfn_4t(struct t4_filter_specification *fs)
85 {
86 struct t4_filter_tuple *ft = &fs->val;
87 uint32_t hash;
88
89 if (fs->type) {
90 /* IPv6 */
91 hash = fnv_32_buf(&ft->sip[0], 16, FNV1_32_INIT);
92 hash = fnv_32_buf(&ft->dip[0], 16, hash);
93 } else {
94 hash = fnv_32_buf(&ft->sip[0], 4, FNV1_32_INIT);
95 hash = fnv_32_buf(&ft->dip[0], 4, hash);
96 }
97 hash = fnv_32_buf(&ft->sport, sizeof(ft->sport), hash);
98 hash = fnv_32_buf(&ft->dport, sizeof(ft->dport), hash);
99
100 return (hash);
101 }
102
103 static inline uint32_t
104 hf_hashfn_tid(int tid)
105 {
106
107 return (fnv_32_buf(&tid, sizeof(tid), FNV1_32_INIT));
108 }
109
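/*
 * Allocate the two hashfilter lookup tables (one keyed by 4-tuple, one by
 * tid) along with the lock and condition variable that protect them.
 */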
110 static int
111 alloc_hftid_hash(struct tid_info *t, int flags)
112 {
113 int n;
114
115 MPASS(t->ntids > 0);
116 MPASS(t->hftid_hash_4t == NULL);
117 MPASS(t->hftid_hash_tid == NULL);
118
119 n = max(t->ntids / 1024, 16);
120 t->hftid_hash_4t = hashinit_flags(n, M_CXGBE, &t->hftid_4t_mask, flags);
121 if (t->hftid_hash_4t == NULL)
122 return (ENOMEM);
123 t->hftid_hash_tid = hashinit_flags(n, M_CXGBE, &t->hftid_tid_mask,
124 flags);
125 if (t->hftid_hash_tid == NULL) {
126 hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
127 t->hftid_hash_4t = NULL;
128 return (ENOMEM);
129 }
130
131 mtx_init(&t->hftid_lock, "T4 hashfilters", 0, MTX_DEF);
132 cv_init(&t->hftid_cv, "t4hfcv");
133
134 return (0);
135 }
136
137 void
138 free_hftid_hash(struct tid_info *t)
139 {
140 struct filter_entry *f, *ftmp;
141 LIST_HEAD(, filter_entry) *head;
142 int i;
143 #ifdef INVARIANTS
144 int n = 0;
145 #endif
146
147 if (t->tids_in_use > 0) {
148 /* Remove everything from the tid hash. */
149 head = t->hftid_hash_tid;
150 for (i = 0; i <= t->hftid_tid_mask; i++) {
151 LIST_FOREACH_SAFE(f, &head[i], link_tid, ftmp) {
152 LIST_REMOVE(f, link_tid);
153 }
154 }
155
156 /* Remove and then free each filter in the 4t hash. */
157 head = t->hftid_hash_4t;
158 for (i = 0; i <= t->hftid_4t_mask; i++) {
159 LIST_FOREACH_SAFE(f, &head[i], link_4t, ftmp) {
160 #ifdef INVARIANTS
161 n += f->fs.type ? 2 : 1;
162 #endif
163 LIST_REMOVE(f, link_4t);
164 free(f, M_CXGBE);
165 }
166 }
167 MPASS(t->tids_in_use == n);
168 t->tids_in_use = 0;
169 }
170
171 if (t->hftid_hash_4t) {
172 hashdestroy(t->hftid_hash_4t, M_CXGBE, t->hftid_4t_mask);
173 t->hftid_hash_4t = NULL;
174 }
175 if (t->hftid_hash_tid) {
176 hashdestroy(t->hftid_hash_tid, M_CXGBE, t->hftid_tid_mask);
177 t->hftid_hash_tid = NULL;
178 }
179 if (mtx_initialized(&t->hftid_lock)) {
180 mtx_destroy(&t->hftid_lock);
181 cv_destroy(&t->hftid_cv);
182 }
183 }
184
185 static void
186 insert_hf(struct adapter *sc, struct filter_entry *f, uint32_t hash)
187 {
188 struct tid_info *t = &sc->tids;
189 LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
190
191 MPASS(head != NULL);
192 if (hash == 0)
193 hash = hf_hashfn_4t(&f->fs);
194 LIST_INSERT_HEAD(&head[hash & t->hftid_4t_mask], f, link_4t);
195 atomic_add_int(&t->tids_in_use, f->fs.type ? 2 : 1);
196 }
197
198 static void
199 insert_hftid(struct adapter *sc, struct filter_entry *f)
200 {
201 struct tid_info *t = &sc->tids;
202 LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
203 uint32_t hash;
204
205 MPASS(f->tid >= t->tid_base);
206 MPASS(f->tid - t->tid_base < t->ntids);
207 mtx_assert(&t->hftid_lock, MA_OWNED);
208
209 hash = hf_hashfn_tid(f->tid);
210 LIST_INSERT_HEAD(&head[hash & t->hftid_tid_mask], f, link_tid);
211 }
212
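/*
 * Returns true if the two hashfilter specifications would match exactly the
 * same traffic.  Beyond the 4-tuple, a field is compared only if its mask is
 * set in fs1.
 */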
213 static bool
214 filter_eq(struct t4_filter_specification *fs1,
215 struct t4_filter_specification *fs2)
216 {
217 int n;
218
219 MPASS(fs1->hash && fs2->hash);
220
221 if (fs1->type != fs2->type)
222 return (false);
223
224 n = fs1->type ? 16 : 4;
225 if (bcmp(&fs1->val.sip[0], &fs2->val.sip[0], n) ||
226 bcmp(&fs1->val.dip[0], &fs2->val.dip[0], n) ||
227 fs1->val.sport != fs2->val.sport ||
228 fs1->val.dport != fs2->val.dport)
229 return (false);
230
231 /*
232 * We know the masks are the same because all hashfilters conform to the
233 * global tp->filter_mask and the driver has verified that already.
234 */
235
236 if ((fs1->mask.pfvf_vld || fs1->mask.ovlan_vld) &&
237 fs1->val.vnic != fs2->val.vnic)
238 return (false);
239 if (fs1->mask.vlan_vld && fs1->val.vlan != fs2->val.vlan)
240 return (false);
241 if (fs1->mask.macidx && fs1->val.macidx != fs2->val.macidx)
242 return (false);
243 if (fs1->mask.frag && fs1->val.frag != fs2->val.frag)
244 return (false);
245 if (fs1->mask.matchtype && fs1->val.matchtype != fs2->val.matchtype)
246 return (false);
247 if (fs1->mask.iport && fs1->val.iport != fs2->val.iport)
248 return (false);
249 if (fs1->mask.fcoe && fs1->val.fcoe != fs2->val.fcoe)
250 return (false);
251 if (fs1->mask.proto && fs1->val.proto != fs2->val.proto)
252 return (false);
253 if (fs1->mask.tos && fs1->val.tos != fs2->val.tos)
254 return (false);
255 if (fs1->mask.ethtype && fs1->val.ethtype != fs2->val.ethtype)
256 return (false);
257
258 return (true);
259 }
260
261 static struct filter_entry *
262 lookup_hf(struct adapter *sc, struct t4_filter_specification *fs, uint32_t hash)
263 {
264 struct tid_info *t = &sc->tids;
265 LIST_HEAD(, filter_entry) *head = t->hftid_hash_4t;
266 struct filter_entry *f;
267
268 mtx_assert(&t->hftid_lock, MA_OWNED);
269 MPASS(head != NULL);
270
271 if (hash == 0)
272 hash = hf_hashfn_4t(fs);
273
274 LIST_FOREACH(f, &head[hash & t->hftid_4t_mask], link_4t) {
275 if (filter_eq(&f->fs, fs))
276 return (f);
277 }
278
279 return (NULL);
280 }
281
282 static struct filter_entry *
283 lookup_hftid(struct adapter *sc, int tid)
284 {
285 struct tid_info *t = &sc->tids;
286 LIST_HEAD(, filter_entry) *head = t->hftid_hash_tid;
287 struct filter_entry *f;
288 uint32_t hash;
289
290 mtx_assert(&t->hftid_lock, MA_OWNED);
291 MPASS(head != NULL);
292
293 hash = hf_hashfn_tid(tid);
294 LIST_FOREACH(f, &head[hash & t->hftid_tid_mask], link_tid) {
295 if (f->tid == tid)
296 return (f);
297 }
298
299 return (NULL);
300 }
301
302 static void
303 remove_hf(struct adapter *sc, struct filter_entry *f)
304 {
305 struct tid_info *t = &sc->tids;
306
307 mtx_assert(&t->hftid_lock, MA_OWNED);
308
309 LIST_REMOVE(f, link_4t);
310 atomic_subtract_int(&t->tids_in_use, f->fs.type ? 2 : 1);
311 }
312
313 static void
314 remove_hftid(struct adapter *sc, struct filter_entry *f)
315 {
316 #ifdef INVARIANTS
317 struct tid_info *t = &sc->tids;
318
319 mtx_assert(&t->hftid_lock, MA_OWNED);
320 #endif
321
322 LIST_REMOVE(f, link_tid);
323 }
324
325 static uint16_t
326 mode_to_fconf_t4(uint32_t mode)
327 {
328 uint32_t fconf = 0;
329
330 if (mode & T4_FILTER_IP_FRAGMENT)
331 fconf |= F_FRAGMENTATION;
332 if (mode & T4_FILTER_MPS_HIT_TYPE)
333 fconf |= F_MPSHITTYPE;
334 if (mode & T4_FILTER_MAC_IDX)
335 fconf |= F_MACMATCH;
336 if (mode & T4_FILTER_ETH_TYPE)
337 fconf |= F_ETHERTYPE;
338 if (mode & T4_FILTER_IP_PROTO)
339 fconf |= F_PROTOCOL;
340 if (mode & T4_FILTER_IP_TOS)
341 fconf |= F_TOS;
342 if (mode & T4_FILTER_VLAN)
343 fconf |= F_VLAN;
344 if (mode & T4_FILTER_VNIC)
345 fconf |= F_VNIC_ID;
346 if (mode & T4_FILTER_PORT)
347 fconf |= F_PORT;
348 if (mode & T4_FILTER_FCoE)
349 fconf |= F_FCOE;
350
351 return (fconf);
352 }
353
354 static uint16_t
355 mode_to_fconf_t7(uint32_t mode)
356 {
357 uint32_t fconf = 0;
358
359 if (mode & T4_FILTER_TCPFLAGS)
360 fconf |= F_TCPFLAGS;
361 if (mode & T4_FILTER_SYNONLY)
362 fconf |= F_SYNONLY;
363 if (mode & T4_FILTER_ROCE)
364 fconf |= F_ROCE;
365 if (mode & T4_FILTER_IP_FRAGMENT)
366 fconf |= F_T7_FRAGMENTATION;
367 if (mode & T4_FILTER_MPS_HIT_TYPE)
368 fconf |= F_T7_MPSHITTYPE;
369 if (mode & T4_FILTER_MAC_IDX)
370 fconf |= F_T7_MACMATCH;
371 if (mode & T4_FILTER_ETH_TYPE)
372 fconf |= F_T7_ETHERTYPE;
373 if (mode & T4_FILTER_IP_PROTO)
374 fconf |= F_T7_PROTOCOL;
375 if (mode & T4_FILTER_IP_TOS)
376 fconf |= F_T7_TOS;
377 if (mode & T4_FILTER_VLAN)
378 fconf |= F_T7_VLAN;
379 if (mode & T4_FILTER_VNIC)
380 fconf |= F_T7_VNIC_ID;
381 if (mode & T4_FILTER_PORT)
382 fconf |= F_T7_PORT;
383 if (mode & T4_FILTER_FCoE)
384 fconf |= F_T7_FCOE;
385 if (mode & T4_FILTER_IPSECIDX)
386 fconf |= F_IPSECIDX;
387
388 return (fconf);
389 }
390
391 /*
392 * Input: driver's 32b filter mode.
393 * Returns: hardware filter mode (bits to set in vlan_pri_map) for the input.
394 */
395 static uint16_t
396 mode_to_fconf(struct adapter *sc, uint32_t mode)
397 {
398 if (chip_id(sc) >= CHELSIO_T7)
399 return (mode_to_fconf_t7(mode));
400 else
401 return (mode_to_fconf_t4(mode));
402 }
403
404 /*
405 * Input: driver's 32b filter mode.
406 * Returns: hardware vnic mode (ingress config) matching the input.
407 */
408 static int
409 mode_to_iconf(uint32_t mode)
410 {
411 if ((mode & T4_FILTER_VNIC) == 0)
412 return (-1); /* ingress config doesn't matter. */
413
414 if (mode & T4_FILTER_IC_VNIC)
415 return (FW_VNIC_MODE_PF_VF);
416 else if (mode & T4_FILTER_IC_ENCAP)
417 return (FW_VNIC_MODE_ENCAP_EN);
418 else
419 return (FW_VNIC_MODE_OUTER_VLAN);
420 }
421
422 static int
423 check_fspec_against_fconf_iconf(struct adapter *sc,
424 struct t4_filter_specification *fs)
425 {
426 struct tp_params *tpp = &sc->params.tp;
427 uint32_t fconf = 0;
428
429 if (chip_id(sc) >= CHELSIO_T7) {
430 if (fs->val.tcpflags || fs->mask.tcpflags)
431 fconf |= F_TCPFLAGS;
432 if (fs->val.synonly || fs->mask.synonly)
433 fconf |= F_SYNONLY;
434 if (fs->val.roce || fs->mask.roce)
435 fconf |= F_ROCE;
436 if (fs->val.frag || fs->mask.frag)
437 fconf |= F_T7_FRAGMENTATION;
438 if (fs->val.matchtype || fs->mask.matchtype)
439 fconf |= F_T7_MPSHITTYPE;
440 if (fs->val.macidx || fs->mask.macidx)
441 fconf |= F_T7_MACMATCH;
442 if (fs->val.ethtype || fs->mask.ethtype)
443 fconf |= F_T7_ETHERTYPE;
444 if (fs->val.proto || fs->mask.proto)
445 fconf |= F_T7_PROTOCOL;
446 if (fs->val.tos || fs->mask.tos)
447 fconf |= F_T7_TOS;
448 if (fs->val.vlan_vld || fs->mask.vlan_vld)
449 fconf |= F_T7_VLAN;
450 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
451 if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
452 return (EINVAL);
453 fconf |= F_T7_VNIC_ID;
454 }
455 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
456 if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
457 return (EINVAL);
458 fconf |= F_T7_VNIC_ID;
459 }
460 #ifdef notyet
461 if (fs->val.encap_vld || fs->mask.encap_vld) {
462 if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN)
463 return (EINVAL);
464 fconf |= F_T7_VNIC_ID;
465 }
466 #endif
467 if (fs->val.iport || fs->mask.iport)
468 fconf |= F_T7_PORT;
469 if (fs->val.fcoe || fs->mask.fcoe)
470 fconf |= F_T7_FCOE;
471 if (fs->val.ipsecidx || fs->mask.ipsecidx)
472 fconf |= F_IPSECIDX;
473 } else {
474 if (fs->val.tcpflags || fs->mask.tcpflags ||
475 fs->val.synonly || fs->mask.synonly ||
476 fs->val.roce || fs->mask.roce ||
477 fs->val.ipsecidx || fs->mask.ipsecidx)
478 return (EINVAL);
479 if (fs->val.frag || fs->mask.frag)
480 fconf |= F_FRAGMENTATION;
481 if (fs->val.matchtype || fs->mask.matchtype)
482 fconf |= F_MPSHITTYPE;
483 if (fs->val.macidx || fs->mask.macidx)
484 fconf |= F_MACMATCH;
485 if (fs->val.ethtype || fs->mask.ethtype)
486 fconf |= F_ETHERTYPE;
487 if (fs->val.proto || fs->mask.proto)
488 fconf |= F_PROTOCOL;
489 if (fs->val.tos || fs->mask.tos)
490 fconf |= F_TOS;
491 if (fs->val.vlan_vld || fs->mask.vlan_vld)
492 fconf |= F_VLAN;
493 if (fs->val.ovlan_vld || fs->mask.ovlan_vld) {
494 if (tpp->vnic_mode != FW_VNIC_MODE_OUTER_VLAN)
495 return (EINVAL);
496 fconf |= F_VNIC_ID;
497 }
498 if (fs->val.pfvf_vld || fs->mask.pfvf_vld) {
499 if (tpp->vnic_mode != FW_VNIC_MODE_PF_VF)
500 return (EINVAL);
501 fconf |= F_VNIC_ID;
502 }
503 #ifdef notyet
504 if (fs->val.encap_vld || fs->mask.encap_vld) {
505 if (tpp->vnic_mode != FW_VNIC_MODE_ENCAP_EN)
506 return (EINVAL);
507 fconf |= F_VNIC_ID;
508 }
509 #endif
510 if (fs->val.iport || fs->mask.iport)
511 fconf |= F_PORT;
512 if (fs->val.fcoe || fs->mask.fcoe)
513 fconf |= F_FCOE;
514 }
515 if ((tpp->filter_mode | fconf) != tpp->filter_mode)
516 return (E2BIG);
517
518 return (0);
519 }
520
521 static uint32_t
522 fconf_to_mode_t4(uint16_t hwmode, int vnic_mode)
523 {
524 uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
525 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
526
527 if (hwmode & F_FRAGMENTATION)
528 mode |= T4_FILTER_IP_FRAGMENT;
529 if (hwmode & F_MPSHITTYPE)
530 mode |= T4_FILTER_MPS_HIT_TYPE;
531 if (hwmode & F_MACMATCH)
532 mode |= T4_FILTER_MAC_IDX;
533 if (hwmode & F_ETHERTYPE)
534 mode |= T4_FILTER_ETH_TYPE;
535 if (hwmode & F_PROTOCOL)
536 mode |= T4_FILTER_IP_PROTO;
537 if (hwmode & F_TOS)
538 mode |= T4_FILTER_IP_TOS;
539 if (hwmode & F_VLAN)
540 mode |= T4_FILTER_VLAN;
541 if (hwmode & F_VNIC_ID)
542 mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
543 if (hwmode & F_PORT)
544 mode |= T4_FILTER_PORT;
545 if (hwmode & F_FCOE)
546 mode |= T4_FILTER_FCoE;
547
548 switch (vnic_mode) {
549 case FW_VNIC_MODE_PF_VF:
550 mode |= T4_FILTER_IC_VNIC;
551 break;
552 case FW_VNIC_MODE_ENCAP_EN:
553 mode |= T4_FILTER_IC_ENCAP;
554 break;
555 case FW_VNIC_MODE_OUTER_VLAN:
556 default:
557 break;
558 }
559
560 return (mode);
561 }
562
563 static uint32_t
564 fconf_to_mode_t7(uint16_t hwmode, int vnic_mode)
565 {
566 uint32_t mode = T4_FILTER_IPv4 | T4_FILTER_IPv6 | T4_FILTER_IP_SADDR |
567 T4_FILTER_IP_DADDR | T4_FILTER_IP_SPORT | T4_FILTER_IP_DPORT;
568
569 if (hwmode & F_TCPFLAGS)
570 mode |= T4_FILTER_TCPFLAGS;
571 if (hwmode & F_SYNONLY)
572 mode |= T4_FILTER_SYNONLY;
573 if (hwmode & F_ROCE)
574 mode |= T4_FILTER_ROCE;
575 if (hwmode & F_T7_FRAGMENTATION)
576 mode |= T4_FILTER_IP_FRAGMENT;
577 if (hwmode & F_T7_MPSHITTYPE)
578 mode |= T4_FILTER_MPS_HIT_TYPE;
579 if (hwmode & F_T7_MACMATCH)
580 mode |= T4_FILTER_MAC_IDX;
581 if (hwmode & F_T7_ETHERTYPE)
582 mode |= T4_FILTER_ETH_TYPE;
583 if (hwmode & F_T7_PROTOCOL)
584 mode |= T4_FILTER_IP_PROTO;
585 if (hwmode & F_T7_TOS)
586 mode |= T4_FILTER_IP_TOS;
587 if (hwmode & F_T7_VLAN)
588 mode |= T4_FILTER_VLAN;
589 if (hwmode & F_T7_VNIC_ID)
590 mode |= T4_FILTER_VNIC; /* real meaning depends on vnic_mode. */
591 if (hwmode & F_T7_PORT)
592 mode |= T4_FILTER_PORT;
593 if (hwmode & F_T7_FCOE)
594 mode |= T4_FILTER_FCoE;
595 if (hwmode & F_IPSECIDX)
596 mode |= T4_FILTER_IPSECIDX;
597
598 switch (vnic_mode) {
599 case FW_VNIC_MODE_PF_VF:
600 mode |= T4_FILTER_IC_VNIC;
601 break;
602 case FW_VNIC_MODE_ENCAP_EN:
603 mode |= T4_FILTER_IC_ENCAP;
604 break;
605 case FW_VNIC_MODE_OUTER_VLAN:
606 default:
607 break;
608 }
609
610 return (mode);
611 }
612
613 /*
614 * Input: hardware filter configuration (filter mode/mask, ingress config).
615 * Output: driver's 32b filter mode matching the input.
616 */
617 static inline uint32_t
618 fconf_to_mode(struct adapter *sc, uint16_t hwmode, int vnic_mode)
619 {
620 if (chip_id(sc) >= CHELSIO_T7)
621 return (fconf_to_mode_t7(hwmode, vnic_mode));
622 else
623 return (fconf_to_mode_t4(hwmode, vnic_mode));
624 }
625
626 int
627 get_filter_mode(struct adapter *sc, uint32_t *mode)
628 {
629 struct tp_params *tp = &sc->params.tp;
630 uint16_t filter_mode;
631
632 /* Filter mask must comply with the global filter mode. */
633 MPASS((tp->filter_mode | tp->filter_mask) == tp->filter_mode);
634
635 /* Non-zero incoming value in mode means "hashfilter mode". */
636 filter_mode = *mode ? tp->filter_mask : tp->filter_mode;
637 *mode = fconf_to_mode(sc, filter_mode, tp->vnic_mode);
638
639 return (0);
640 }
641
642 int
643 set_filter_mode(struct adapter *sc, uint32_t mode)
644 {
645 struct tp_params *tp = &sc->params.tp;
646 int rc, iconf;
647 uint16_t fconf;
648
649 iconf = mode_to_iconf(mode);
650 fconf = mode_to_fconf(sc, mode);
651 if ((iconf == -1 || iconf == tp->vnic_mode) && fconf == tp->filter_mode)
652 return (0); /* Nothing to do */
653
654 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setfm");
655 if (rc)
656 return (rc);
657
658 if (!hw_all_ok(sc)) {
659 rc = ENXIO;
660 goto done;
661 }
662
663 if (sc->tids.ftids_in_use > 0 || /* TCAM filters active */
664 sc->tids.hpftids_in_use > 0 || /* hi-pri TCAM filters active */
665 sc->tids.tids_in_use > 0) { /* TOE or hashfilters active */
666 rc = EBUSY;
667 goto done;
668 }
669
670 #ifdef TCP_OFFLOAD
671 if (uld_active(sc, ULD_TOM)) {
672 rc = EBUSY;
673 goto done;
674 }
675 #endif
676
677 /* Note that filter mask will get clipped to the new filter mode. */
678 rc = -t4_set_filter_cfg(sc, fconf, -1, iconf);
679 done:
680 end_synchronized_op(sc, 0);
681 return (rc);
682 }
683
684 int
685 set_filter_mask(struct adapter *sc, uint32_t mode)
686 {
687 struct tp_params *tp = &sc->params.tp;
688 int rc, iconf;
689 uint16_t fmask;
690
691 iconf = mode_to_iconf(mode);
692 fmask = mode_to_fconf(sc, mode);
693 if ((iconf == -1 || iconf == tp->vnic_mode) && fmask == tp->filter_mask)
694 return (0); /* Nothing to do */
695
696 /*
697 * We aren't going to change the global filter mode or VNIC mode here.
698 * The given filter mask must conform to them.
699 */
700 if ((fmask | tp->filter_mode) != tp->filter_mode)
701 return (EINVAL);
702 if (iconf != -1 && iconf != tp->vnic_mode)
703 return (EINVAL);
704
705 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4sethfm");
706 if (rc)
707 return (rc);
708
709 if (!hw_all_ok(sc)) {
710 rc = ENXIO;
711 goto done;
712 }
713
714 if (sc->tids.tids_in_use > 0) { /* TOE or hashfilters active */
715 rc = EBUSY;
716 goto done;
717 }
718
719 #ifdef TCP_OFFLOAD
720 if (uld_active(sc, ULD_TOM)) {
721 rc = EBUSY;
722 goto done;
723 }
724 #endif
725 rc = -t4_set_filter_cfg(sc, -1, fmask, -1);
726 done:
727 end_synchronized_op(sc, 0);
728 return (rc);
729 }
730
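/*
 * Read a filter's hit count straight out of its TCB in adapter memory.  T4
 * keeps a 64-bit count; T5 and later keep a 32-bit count at a different
 * offset within the TCB.
 */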
731 static inline uint64_t
732 get_filter_hits(struct adapter *sc, uint32_t tid)
733 {
734 uint32_t tcb_addr;
735 uint64_t hits;
736
737 tcb_addr = t4_read_reg(sc, A_TP_CMM_TCB_BASE) + tid * TCB_SIZE;
738
739 mtx_lock(&sc->reg_lock);
740 if (!hw_all_ok(sc))
741 hits = 0;
742 else if (is_t4(sc)) {
743 uint64_t t;
744
745 read_via_memwin(sc, 0, tcb_addr + 16, (uint32_t *)&t, 8);
746 hits = be64toh(t);
747 } else {
748 uint32_t t;
749
750 read_via_memwin(sc, 0, tcb_addr + 24, &t, 4);
751 hits = be32toh(t);
752 }
753 mtx_unlock(&sc->reg_lock);
754
755 return (hits);
756 }
757
758 int
759 get_filter(struct adapter *sc, struct t4_filter *t)
760 {
761 if (t->fs.hash)
762 return (get_hashfilter(sc, t));
763 else
764 return (get_tcamfilter(sc, t));
765 }
766
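/*
 * Program a TCAM filter: reserve the entry (entries, for IPv6), build a
 * FW_FILTER_WR or FW_FILTER2_WR on the control queue, and sleep until the
 * firmware replies (see t4_filter_rpl).
 */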
767 static int
768 set_tcamfilter(struct adapter *sc, struct t4_filter *t, struct l2t_entry *l2te,
769 struct smt_entry *smt)
770 {
771 struct filter_entry *f;
772 struct fw_filter2_wr *fwr;
773 u_int vnic_vld, vnic_vld_mask;
774 struct wrq_cookie cookie;
775 int i, rc, busy, locked;
776 u_int tid;
777 const int ntids = t->fs.type ? 4 : 1;
778
779 MPASS(!t->fs.hash);
780 /* Already validated against fconf, iconf */
781 MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
782 MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);
783
784 if (separate_hpfilter_region(sc) && t->fs.prio) {
785 MPASS(t->idx < sc->tids.nhpftids);
786 f = &sc->tids.hpftid_tab[t->idx];
787 tid = sc->tids.hpftid_base + t->idx;
788 } else {
789 MPASS(t->idx < sc->tids.nftids);
790 f = &sc->tids.ftid_tab[t->idx];
791 tid = sc->tids.ftid_base + t->idx;
792 }
793 rc = busy = locked = 0;
794 mtx_lock(&sc->tids.ftid_lock);
795 for (i = 0; i < ntids; i++) {
796 busy += f[i].pending + f[i].valid;
797 locked += f[i].locked;
798 }
799 if (locked > 0)
800 rc = EPERM;
801 else if (busy > 0)
802 rc = EBUSY;
803 else {
804 int len16;
805
806 if (sc->params.filter2_wr_support)
807 len16 = howmany(sizeof(struct fw_filter2_wr), 16);
808 else
809 len16 = howmany(sizeof(struct fw_filter_wr), 16);
810 fwr = start_wrq_wr(&sc->sge.ctrlq[0], len16, &cookie);
811 if (__predict_false(fwr == NULL))
812 rc = ENOMEM;
813 else {
814 f->pending = 1;
815 if (separate_hpfilter_region(sc) && t->fs.prio)
816 sc->tids.hpftids_in_use++;
817 else
818 sc->tids.ftids_in_use++;
819 }
820 }
821 mtx_unlock(&sc->tids.ftid_lock);
822 if (rc != 0)
823 return (rc);
824
825 /*
826 * Can't fail now. A set-filter WR will definitely be sent.
827 */
828
829 f->tid = tid;
830 f->fs = t->fs;
831 f->l2te = l2te;
832 f->smt = smt;
833
834 if (t->fs.val.pfvf_vld || t->fs.val.ovlan_vld)
835 vnic_vld = 1;
836 else
837 vnic_vld = 0;
838 if (t->fs.mask.pfvf_vld || t->fs.mask.ovlan_vld)
839 vnic_vld_mask = 1;
840 else
841 vnic_vld_mask = 0;
842
843 bzero(fwr, sizeof(*fwr));
844 if (sc->params.filter2_wr_support)
845 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER2_WR));
846 else
847 fwr->op_pkd = htobe32(V_FW_WR_OP(FW_FILTER_WR));
848 fwr->len16_pkd = htobe32(FW_LEN16(*fwr));
849 fwr->tid_to_iq =
850 htobe32(V_FW_FILTER_WR_TID(f->tid) |
851 V_FW_FILTER_WR_RQTYPE(f->fs.type) |
852 V_FW_FILTER_WR_NOREPLY(0) |
853 V_FW_FILTER_WR_IQ(f->fs.iq));
854 fwr->del_filter_to_l2tix =
855 htobe32(V_FW_FILTER_WR_RPTTID(f->fs.rpttid) |
856 V_FW_FILTER_WR_DROP(f->fs.action == FILTER_DROP) |
857 V_FW_FILTER_WR_DIRSTEER(f->fs.dirsteer) |
858 V_FW_FILTER_WR_MASKHASH(f->fs.maskhash) |
859 V_FW_FILTER_WR_DIRSTEERHASH(f->fs.dirsteerhash) |
860 V_FW_FILTER_WR_LPBK(f->fs.action == FILTER_SWITCH) |
861 V_FW_FILTER_WR_DMAC(f->fs.newdmac) |
862 V_FW_FILTER_WR_SMAC(f->fs.newsmac) |
863 V_FW_FILTER_WR_INSVLAN(f->fs.newvlan == VLAN_INSERT ||
864 f->fs.newvlan == VLAN_REWRITE) |
865 V_FW_FILTER_WR_RMVLAN(f->fs.newvlan == VLAN_REMOVE ||
866 f->fs.newvlan == VLAN_REWRITE) |
867 V_FW_FILTER_WR_HITCNTS(f->fs.hitcnts) |
868 V_FW_FILTER_WR_TXCHAN(f->fs.eport) |
869 V_FW_FILTER_WR_PRIO(f->fs.prio) |
870 V_FW_FILTER_WR_L2TIX(f->l2te ? f->l2te->idx : 0));
871 fwr->ethtype = htobe16(f->fs.val.ethtype);
872 fwr->ethtypem = htobe16(f->fs.mask.ethtype);
873 fwr->frag_to_ovlan_vldm =
874 (V_FW_FILTER_WR_FRAG(f->fs.val.frag) |
875 V_FW_FILTER_WR_FRAGM(f->fs.mask.frag) |
876 V_FW_FILTER_WR_IVLAN_VLD(f->fs.val.vlan_vld) |
877 V_FW_FILTER_WR_OVLAN_VLD(vnic_vld) |
878 V_FW_FILTER_WR_IVLAN_VLDM(f->fs.mask.vlan_vld) |
879 V_FW_FILTER_WR_OVLAN_VLDM(vnic_vld_mask));
880 fwr->smac_sel = 0;
881 fwr->rx_chan_rx_rpl_iq = htobe16(V_FW_FILTER_WR_RX_CHAN(0) |
882 V_FW_FILTER_WR_RX_RPL_IQ(sc->sge.fwq.abs_id));
883 fwr->maci_to_matchtypem =
884 htobe32(V_FW_FILTER_WR_MACI(f->fs.val.macidx) |
885 V_FW_FILTER_WR_MACIM(f->fs.mask.macidx) |
886 V_FW_FILTER_WR_FCOE(f->fs.val.fcoe) |
887 V_FW_FILTER_WR_FCOEM(f->fs.mask.fcoe) |
888 V_FW_FILTER_WR_PORT(f->fs.val.iport) |
889 V_FW_FILTER_WR_PORTM(f->fs.mask.iport) |
890 V_FW_FILTER_WR_MATCHTYPE(f->fs.val.matchtype) |
891 V_FW_FILTER_WR_MATCHTYPEM(f->fs.mask.matchtype));
892 fwr->ptcl = f->fs.val.proto;
893 fwr->ptclm = f->fs.mask.proto;
894 fwr->ttyp = f->fs.val.tos;
895 fwr->ttypm = f->fs.mask.tos;
896 fwr->ivlan = htobe16(f->fs.val.vlan);
897 fwr->ivlanm = htobe16(f->fs.mask.vlan);
898 fwr->ovlan = htobe16(f->fs.val.vnic);
899 fwr->ovlanm = htobe16(f->fs.mask.vnic);
900 bcopy(f->fs.val.dip, fwr->lip, sizeof (fwr->lip));
901 bcopy(f->fs.mask.dip, fwr->lipm, sizeof (fwr->lipm));
902 bcopy(f->fs.val.sip, fwr->fip, sizeof (fwr->fip));
903 bcopy(f->fs.mask.sip, fwr->fipm, sizeof (fwr->fipm));
904 fwr->lp = htobe16(f->fs.val.dport);
905 fwr->lpm = htobe16(f->fs.mask.dport);
906 fwr->fp = htobe16(f->fs.val.sport);
907 fwr->fpm = htobe16(f->fs.mask.sport);
908 /* sma = 0 tells the fw to use SMAC_SEL for source MAC address */
909 bzero(fwr->sma, sizeof (fwr->sma));
910 if (sc->params.filter2_wr_support) {
911 fwr->filter_type_swapmac =
912 V_FW_FILTER2_WR_SWAPMAC(f->fs.swapmac);
913 fwr->natmode_to_ulp_type =
914 V_FW_FILTER2_WR_ULP_TYPE(f->fs.nat_mode ?
915 ULP_MODE_TCPDDP : ULP_MODE_NONE) |
916 V_FW_FILTER2_WR_NATFLAGCHECK(f->fs.nat_flag_chk) |
917 V_FW_FILTER2_WR_NATMODE(f->fs.nat_mode);
918 memcpy(fwr->newlip, f->fs.nat_dip, sizeof(fwr->newlip));
919 memcpy(fwr->newfip, f->fs.nat_sip, sizeof(fwr->newfip));
920 fwr->newlport = htobe16(f->fs.nat_dport);
921 fwr->newfport = htobe16(f->fs.nat_sport);
922 fwr->natseqcheck = htobe32(f->fs.nat_seq_chk);
923 }
924 commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
925
926 /* Wait for response. */
927 mtx_lock(&sc->tids.ftid_lock);
928 for (;;) {
929 if (f->pending == 0) {
930 rc = f->valid ? 0 : EIO;
931 break;
932 }
933 if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
934 rc = EINPROGRESS;
935 break;
936 }
937 }
938 mtx_unlock(&sc->tids.ftid_lock);
939 return (rc);
940 }
941
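/*
 * Build the compressed filter tuple for a hashfilter from its specification,
 * using the per-field shifts in tp_params.  Fails with EINVAL if the
 * resulting set of fields does not match the configured filter mask exactly.
 */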
942 static int
943 hashfilter_ntuple(struct adapter *sc, const struct t4_filter_specification *fs,
944 uint64_t *ftuple)
945 {
946 struct tp_params *tp = &sc->params.tp;
947 uint16_t fmask;
948
949 /*
950 * Initialize each of the fields which we care about which are present
951 * in the Compressed Filter Tuple.
952 */
953 #define SFF(V, S) ((uint64_t)(V) << S) /* Shifted Filter Field. */
954 *ftuple = fmask = 0;
955 if (chip_id(sc) >= CHELSIO_T7) {
956 if (tp->ipsecidx_shift >= 0 && fs->mask.ipsecidx) {
957 *ftuple |= SFF(fs->val.ipsecidx, tp->ipsecidx_shift);
958 fmask |= F_IPSECIDX;
959 }
960 if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
961 *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
962 fmask |= F_T7_FCOE;
963 }
964 if (tp->port_shift >= 0 && fs->mask.iport) {
965 *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
966 fmask |= F_T7_PORT;
967 }
968 if (tp->vnic_shift >= 0 && fs->mask.vnic) {
969 /* vnic_mode was already validated. */
970 if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
971 MPASS(fs->mask.pfvf_vld);
972 else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
973 MPASS(fs->mask.ovlan_vld);
974 #ifdef notyet
975 else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
976 MPASS(fs->mask.encap_vld);
977 #endif
978 *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
979 fmask |= F_T7_VNIC_ID;
980 }
981 if (tp->vlan_shift >= 0 && fs->mask.vlan) {
982 *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
983 fmask |= F_T7_VLAN;
984 }
985 if (tp->tos_shift >= 0 && fs->mask.tos) {
986 *ftuple |= SFF(fs->val.tos, tp->tos_shift);
987 fmask |= F_T7_TOS;
988 }
989 if (tp->protocol_shift >= 0 && fs->mask.proto) {
990 *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
991 fmask |= F_T7_PROTOCOL;
992 }
993 if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
994 *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
995 fmask |= F_T7_ETHERTYPE;
996 }
997 if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
998 *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
999 fmask |= F_T7_MACMATCH;
1000 }
1001 if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
1002 *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
1003 fmask |= F_T7_MPSHITTYPE;
1004 }
1005 if (tp->frag_shift >= 0 && fs->mask.frag) {
1006 *ftuple |= SFF(fs->val.frag, tp->frag_shift);
1007 fmask |= F_T7_FRAGMENTATION;
1008 }
1009 if (tp->roce_shift >= 0 && fs->mask.roce) {
1010 *ftuple |= SFF(fs->val.roce, tp->roce_shift);
1011 fmask |= F_ROCE;
1012 }
1013 if (tp->synonly_shift >= 0 && fs->mask.synonly) {
1014 *ftuple |= SFF(fs->val.synonly, tp->synonly_shift);
1015 fmask |= F_SYNONLY;
1016 }
1017 if (tp->tcpflags_shift >= 0 && fs->mask.tcpflags) {
1018 *ftuple |= SFF(fs->val.tcpflags, tp->tcpflags_shift);
1019 fmask |= F_TCPFLAGS;
1020 }
1021 } else {
1022 if (fs->mask.ipsecidx || fs->mask.roce || fs->mask.synonly ||
1023 fs->mask.tcpflags) {
1024 MPASS(tp->ipsecidx_shift == -1);
1025 MPASS(tp->roce_shift == -1);
1026 MPASS(tp->synonly_shift == -1);
1027 MPASS(tp->tcpflags_shift == -1);
1028 return (EINVAL);
1029 }
1030 if (tp->fcoe_shift >= 0 && fs->mask.fcoe) {
1031 *ftuple |= SFF(fs->val.fcoe, tp->fcoe_shift);
1032 fmask |= F_FCOE;
1033 }
1034 if (tp->port_shift >= 0 && fs->mask.iport) {
1035 *ftuple |= (uint64_t)fs->val.iport << tp->port_shift;
1036 fmask |= F_PORT;
1037 }
1038 if (tp->vnic_shift >= 0 && fs->mask.vnic) {
1039 /* vnic_mode was already validated. */
1040 if (tp->vnic_mode == FW_VNIC_MODE_PF_VF)
1041 MPASS(fs->mask.pfvf_vld);
1042 else if (tp->vnic_mode == FW_VNIC_MODE_OUTER_VLAN)
1043 MPASS(fs->mask.ovlan_vld);
1044 #ifdef notyet
1045 else if (tp->vnic_mode == FW_VNIC_MODE_ENCAP_EN)
1046 MPASS(fs->mask.encap_vld);
1047 #endif
1048 *ftuple |= SFF(F_FT_VNID_ID_VLD | fs->val.vnic, tp->vnic_shift);
1049 fmask |= F_VNIC_ID;
1050 }
1051 if (tp->vlan_shift >= 0 && fs->mask.vlan) {
1052 *ftuple |= SFF(F_FT_VLAN_VLD | fs->val.vlan, tp->vlan_shift);
1053 fmask |= F_VLAN;
1054 }
1055 if (tp->tos_shift >= 0 && fs->mask.tos) {
1056 *ftuple |= SFF(fs->val.tos, tp->tos_shift);
1057 fmask |= F_TOS;
1058 }
1059 if (tp->protocol_shift >= 0 && fs->mask.proto) {
1060 *ftuple |= SFF(fs->val.proto, tp->protocol_shift);
1061 fmask |= F_PROTOCOL;
1062 }
1063 if (tp->ethertype_shift >= 0 && fs->mask.ethtype) {
1064 *ftuple |= SFF(fs->val.ethtype, tp->ethertype_shift);
1065 fmask |= F_ETHERTYPE;
1066 }
1067 if (tp->macmatch_shift >= 0 && fs->mask.macidx) {
1068 *ftuple |= SFF(fs->val.macidx, tp->macmatch_shift);
1069 fmask |= F_MACMATCH;
1070 }
1071 if (tp->matchtype_shift >= 0 && fs->mask.matchtype) {
1072 *ftuple |= SFF(fs->val.matchtype, tp->matchtype_shift);
1073 fmask |= F_MPSHITTYPE;
1074 }
1075 if (tp->frag_shift >= 0 && fs->mask.frag) {
1076 *ftuple |= SFF(fs->val.frag, tp->frag_shift);
1077 fmask |= F_FRAGMENTATION;
1078 }
1079 }
1080 #undef SFF
1081
1082 /* A hashfilter must conform to the hardware filter mask. */
1083 if (fmask != tp->filter_mask)
1084 return (EINVAL);
1085
1086 return (0);
1087 }
1088
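/*
 * Hashfilters require an exact match on the full 4-tuple, i.e. all-ones
 * masks for both addresses and both ports.
 */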
1089 static bool
1090 is_4tuple_specified(struct t4_filter_specification *fs)
1091 {
1092 int i;
1093 const int n = fs->type ? 16 : 4;
1094
1095 if (fs->mask.sport != 0xffff || fs->mask.dport != 0xffff)
1096 return (false);
1097
1098 for (i = 0; i < n; i++) {
1099 if (fs->mask.sip[i] != 0xff)
1100 return (false);
1101 if (fs->mask.dip[i] != 0xff)
1102 return (false);
1103 }
1104
1105 return (true);
1106 }
1107
1108 int
1109 set_filter(struct adapter *sc, struct t4_filter *t)
1110 {
1111 struct tid_info *ti = &sc->tids;
1112 struct l2t_entry *l2te = NULL;
1113 struct smt_entry *smt = NULL;
1114 uint64_t ftuple;
1115 int rc;
1116
1117 /*
1118 * Basic filter checks first.
1119 */
1120
1121 if (t->fs.hash) {
1122 if (!is_hashfilter(sc) || ti->ntids == 0)
1123 return (ENOTSUP);
1124 /* Hardware, not user, selects a tid for hashfilters. */
1125 if (t->idx != (uint32_t)-1)
1126 return (EINVAL);
1127 /* T5 can't count hashfilter hits. */
1128 if (is_t5(sc) && t->fs.hitcnts)
1129 return (EINVAL);
1130 if (!is_4tuple_specified(&t->fs))
1131 return (EINVAL);
1132 rc = hashfilter_ntuple(sc, &t->fs, &ftuple);
1133 if (rc != 0)
1134 return (rc);
1135 } else {
1136 if (separate_hpfilter_region(sc) && t->fs.prio) {
1137 if (ti->nhpftids == 0)
1138 return (ENOTSUP);
1139 if (t->idx >= ti->nhpftids)
1140 return (EINVAL);
1141 } else {
1142 if (ti->nftids == 0)
1143 return (ENOTSUP);
1144 if (t->idx >= ti->nftids)
1145 return (EINVAL);
1146 }
1147 /* IPv6 filter idx must be 4 aligned */
1148 if (t->fs.type == 1 &&
1149 ((t->idx & 0x3) || t->idx + 4 >= ti->nftids))
1150 return (EINVAL);
1151 }
1152
1153 /* T4 doesn't support VLAN tag removal or rewrite, swapmac, and NAT. */
1154 if (is_t4(sc) && t->fs.action == FILTER_SWITCH &&
1155 (t->fs.newvlan == VLAN_REMOVE || t->fs.newvlan == VLAN_REWRITE ||
1156 t->fs.swapmac || t->fs.nat_mode))
1157 return (ENOTSUP);
1158
1159 if (t->fs.action == FILTER_SWITCH && t->fs.eport >= sc->params.nports)
1160 return (EINVAL);
1161 if (t->fs.val.iport >= sc->params.nports)
1162 return (EINVAL);
1163
1164 /* Can't specify an iqid/rss_info if not steering. */
1165 if (!t->fs.dirsteer && !t->fs.dirsteerhash && !t->fs.maskhash && t->fs.iq)
1166 return (EINVAL);
1167
1168 /* Validate against the global filter mode and ingress config */
1169 rc = check_fspec_against_fconf_iconf(sc, &t->fs);
1170 if (rc != 0)
1171 return (rc);
1172
1173 /*
1174 * Basic checks passed. Make sure the queues and tid tables are setup.
1175 */
1176
1177 rc = begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4setf");
1178 if (rc)
1179 return (rc);
1180
1181 if (!hw_all_ok(sc)) {
1182 rc = ENXIO;
1183 goto done;
1184 }
1185
1186 if (!(sc->flags & FULL_INIT_DONE) && ((rc = adapter_init(sc)) != 0))
1187 goto done;
1188
1189 if (t->fs.hash) {
1190 if (__predict_false(ti->hftid_hash_4t == NULL)) {
1191 rc = alloc_hftid_hash(&sc->tids, HASH_NOWAIT);
1192 if (rc != 0)
1193 goto done;
1194 }
1195 } else if (separate_hpfilter_region(sc) && t->fs.prio &&
1196 __predict_false(ti->hpftid_tab == NULL)) {
1197 MPASS(ti->nhpftids != 0);
1198 KASSERT(ti->hpftids_in_use == 0,
1199 ("%s: no memory allocated but hpftids_in_use is %u",
1200 __func__, ti->hpftids_in_use));
1201 ti->hpftid_tab = malloc(sizeof(struct filter_entry) *
1202 ti->nhpftids, M_CXGBE, M_NOWAIT | M_ZERO);
1203 if (ti->hpftid_tab == NULL) {
1204 rc = ENOMEM;
1205 goto done;
1206 }
1207 if (!mtx_initialized(&sc->tids.ftid_lock)) {
1208 mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
1209 cv_init(&ti->ftid_cv, "t4fcv");
1210 }
1211 } else if (__predict_false(ti->ftid_tab == NULL)) {
1212 MPASS(ti->nftids != 0);
1213 KASSERT(ti->ftids_in_use == 0,
1214 ("%s: no memory allocated but ftids_in_use is %u",
1215 __func__, ti->ftids_in_use));
1216 ti->ftid_tab = malloc(sizeof(struct filter_entry) * ti->nftids,
1217 M_CXGBE, M_NOWAIT | M_ZERO);
1218 if (ti->ftid_tab == NULL) {
1219 rc = ENOMEM;
1220 goto done;
1221 }
1222 if (!mtx_initialized(&sc->tids.ftid_lock)) {
1223 mtx_init(&ti->ftid_lock, "T4 filters", 0, MTX_DEF);
1224 cv_init(&ti->ftid_cv, "t4fcv");
1225 }
1226 }
1227 done:
1228 end_synchronized_op(sc, 0);
1229 if (rc != 0)
1230 return (rc);
1231
1232 /*
1233 * Allocate L2T entry, SMT entry, etc.
1234 */
1235
1236 if (t->fs.newdmac || t->fs.newvlan) {
1237 /* This filter needs an L2T entry; allocate one. */
1238 l2te = t4_l2t_alloc_switching(sc, t->fs.vlan, t->fs.eport,
1239 t->fs.dmac);
1240 if (__predict_false(l2te == NULL)) {
1241 rc = EAGAIN;
1242 goto error;
1243 }
1244 }
1245
1246 if (t->fs.newsmac) {
1247 /* This filter needs an SMT entry; allocate one. */
1248 smt = t4_smt_alloc_switching(sc->smt, t->fs.smac);
1249 if (__predict_false(smt == NULL)) {
1250 rc = EAGAIN;
1251 goto error;
1252 }
1253 rc = t4_smt_set_switching(sc, smt, 0x0, t->fs.smac);
1254 if (rc)
1255 goto error;
1256 }
1257
1258 if (t->fs.hash)
1259 rc = set_hashfilter(sc, t, ftuple, l2te, smt);
1260 else
1261 rc = set_tcamfilter(sc, t, l2te, smt);
1262
1263 if (rc != 0 && rc != EINPROGRESS) {
1264 error:
1265 if (l2te)
1266 t4_l2t_release(l2te);
1267 if (smt)
1268 t4_smt_release(smt);
1269 }
1270 return (rc);
1271 }
1272
1273 static int
1274 del_tcamfilter(struct adapter *sc, struct t4_filter *t)
1275 {
1276 struct filter_entry *f;
1277 struct fw_filter_wr *fwr;
1278 struct wrq_cookie cookie;
1279 int rc, nfilters;
1280 #ifdef INVARIANTS
1281 u_int tid_base;
1282 #endif
1283
1284 mtx_lock(&sc->tids.ftid_lock);
1285 if (separate_hpfilter_region(sc) && t->fs.prio) {
1286 nfilters = sc->tids.nhpftids;
1287 f = sc->tids.hpftid_tab;
1288 #ifdef INVARIANTS
1289 tid_base = sc->tids.hpftid_base;
1290 #endif
1291 } else {
1292 nfilters = sc->tids.nftids;
1293 f = sc->tids.ftid_tab;
1294 #ifdef INVARIANTS
1295 tid_base = sc->tids.ftid_base;
1296 #endif
1297 }
1298 MPASS(f != NULL); /* Caller checked this. */
1299 if (t->idx >= nfilters) {
1300 rc = EINVAL;
1301 goto done;
1302 }
1303 f += t->idx;
1304
1305 if (f->locked) {
1306 rc = EPERM;
1307 goto done;
1308 }
1309 if (f->pending) {
1310 rc = EBUSY;
1311 goto done;
1312 }
1313 if (f->valid == 0) {
1314 rc = EINVAL;
1315 goto done;
1316 }
1317 MPASS(f->tid == tid_base + t->idx);
1318 fwr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*fwr), 16), &cookie);
1319 if (fwr == NULL) {
1320 rc = ENOMEM;
1321 goto done;
1322 }
1323
1324 bzero(fwr, sizeof (*fwr));
1325 t4_mk_filtdelwr(f->tid, fwr, sc->sge.fwq.abs_id);
1326 f->pending = 1;
1327 commit_wrq_wr(&sc->sge.ctrlq[0], fwr, &cookie);
1328 t->fs = f->fs; /* extra info for the caller */
1329
1330 for (;;) {
1331 if (f->pending == 0) {
1332 rc = f->valid ? EIO : 0;
1333 break;
1334 }
1335 if (cv_wait_sig(&sc->tids.ftid_cv, &sc->tids.ftid_lock) != 0) {
1336 rc = EINPROGRESS;
1337 break;
1338 }
1339 }
1340 done:
1341 mtx_unlock(&sc->tids.ftid_lock);
1342 return (rc);
1343 }
1344
1345 int
1346 del_filter(struct adapter *sc, struct t4_filter *t)
1347 {
1348
1349 /* No filters possible if not initialized yet. */
1350 if (!(sc->flags & FULL_INIT_DONE))
1351 return (EINVAL);
1352
1353 /*
1354 * The checks for tid tables ensure that the locks that del_* will reach
1355 * for are initialized.
1356 */
1357 if (t->fs.hash) {
1358 if (sc->tids.hftid_hash_4t != NULL)
1359 return (del_hashfilter(sc, t));
1360 } else if (separate_hpfilter_region(sc) && t->fs.prio) {
1361 if (sc->tids.hpftid_tab != NULL)
1362 return (del_tcamfilter(sc, t));
1363 } else {
1364 if (sc->tids.ftid_tab != NULL)
1365 return (del_tcamfilter(sc, t));
1366 }
1367
1368 return (EINVAL);
1369 }
1370
1371 /*
1372 * Release secondary resources associated with the filter.
1373 */
1374 static void
1375 free_filter_resources(struct filter_entry *f)
1376 {
1377
1378 if (f->l2te) {
1379 t4_l2t_release(f->l2te);
1380 f->l2te = NULL;
1381 }
1382 if (f->smt) {
1383 t4_smt_release(f->smt);
1384 f->smt = NULL;
1385 }
1386 }
1387
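/*
 * Send a CPL_SET_TCB_FIELD on the control queue to update one word of the
 * given tid's TCB.  Replies, when requested, are steered to the firmware
 * event queue and carry a hashfilter cookie.
 */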
1388 static int
1389 set_tcb_field(struct adapter *sc, u_int tid, uint16_t word, uint64_t mask,
1390 uint64_t val, int no_reply)
1391 {
1392 struct wrq_cookie cookie;
1393 struct cpl_set_tcb_field *req;
1394
1395 req = start_wrq_wr(&sc->sge.ctrlq[0], howmany(sizeof(*req), 16), &cookie);
1396 if (req == NULL)
1397 return (ENOMEM);
1398 bzero(req, sizeof(*req));
1399 INIT_TP_WR_MIT_CPL(req, CPL_SET_TCB_FIELD, tid);
1400 if (no_reply) {
1401 req->reply_ctrl = htobe16(F_NO_REPLY);
1402 } else {
1403 const int qid = sc->sge.fwq.abs_id;
1404
1405 if (chip_id(sc) >= CHELSIO_T7) {
1406 req->reply_ctrl = htobe16(V_T7_QUEUENO(qid) |
1407 V_T7_REPLY_CHAN(0) | V_NO_REPLY(0));
1408 } else {
1409 req->reply_ctrl = htobe16(V_QUEUENO(qid) |
1410 V_REPLY_CHAN(0) | V_NO_REPLY(0));
1411 }
1412 }
1413 req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(CPL_COOKIE_HASHFILTER));
1414 req->mask = htobe64(mask);
1415 req->val = htobe64(val);
1416 commit_wrq_wr(&sc->sge.ctrlq[0], req, &cookie);
1417
1418 return (0);
1419 }
1420
1421 /* Set one of the t_flags bits in the TCB. */
1422 static inline int
1423 set_tcb_tflag(struct adapter *sc, int tid, u_int bit_pos, u_int val,
1424 u_int no_reply)
1425 {
1426
1427 return (set_tcb_field(sc, tid, W_TCB_T_FLAGS, 1ULL << bit_pos,
1428 (uint64_t)val << bit_pos, no_reply));
1429 }
1430
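/*
 * Reply to a FW_FILTER_WR for a TCAM filter.  Updates the entry's state and
 * wakes up the thread waiting in set_tcamfilter or del_tcamfilter.
 */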
1431 int
1432 t4_filter_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1433 {
1434 struct adapter *sc = iq->adapter;
1435 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
1436 u_int tid = GET_TID(rpl);
1437 u_int rc, idx;
1438 struct filter_entry *f;
1439
1440 KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
1441 rss->opcode));
1442
1443
1444 if (is_hpftid(sc, tid)) {
1445 idx = tid - sc->tids.hpftid_base;
1446 f = &sc->tids.hpftid_tab[idx];
1447 } else if (is_ftid(sc, tid)) {
1448 idx = tid - sc->tids.ftid_base;
1449 f = &sc->tids.ftid_tab[idx];
1450 } else
1451 panic("%s: FW reply for invalid TID %d.", __func__, tid);
1452
1453 MPASS(f->tid == tid);
1454 rc = G_COOKIE(rpl->cookie);
1455
1456 mtx_lock(&sc->tids.ftid_lock);
1457 KASSERT(f->pending, ("%s: reply %d for filter[%u] that isn't pending.",
1458 __func__, rc, tid));
1459 switch(rc) {
1460 case FW_FILTER_WR_FLT_ADDED:
1461 /* set-filter succeeded */
1462 f->valid = 1;
1463 if (f->fs.newsmac) {
1464 MPASS(f->smt != NULL);
1465 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
1466 set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
1467 V_TCB_SMAC_SEL(M_TCB_SMAC_SEL),
1468 V_TCB_SMAC_SEL(f->smt->idx), 1);
1469 /* XXX: wait for reply to TCB update before !pending */
1470 }
1471 break;
1472 case FW_FILTER_WR_FLT_DELETED:
1473 /* del-filter succeeded */
1474 MPASS(f->valid == 1);
1475 f->valid = 0;
1476 /* Fall through */
1477 case FW_FILTER_WR_SMT_TBL_FULL:
1478 /* set-filter failed due to lack of SMT space. */
1479 MPASS(f->valid == 0);
1480 free_filter_resources(f);
1481 if (separate_hpfilter_region(sc) && f->fs.prio)
1482 sc->tids.hpftids_in_use--;
1483 else
1484 sc->tids.ftids_in_use--;
1485 break;
1486 case FW_FILTER_WR_SUCCESS:
1487 case FW_FILTER_WR_EINVAL:
1488 default:
1489 panic("%s: unexpected reply %d for filter[%d].", __func__, rc,
1490 idx);
1491 }
1492 f->pending = 0;
1493 cv_broadcast(&sc->tids.ftid_cv);
1494 mtx_unlock(&sc->tids.ftid_lock);
1495
1496 return (0);
1497 }
1498
1499 /*
1500 * This is the reply to the Active Open that created the filter. Additional TCB
1501 * updates may be required to complete the filter configuration.
1502 */
1503 int
1504 t4_hashfilter_ao_rpl(struct sge_iq *iq, const struct rss_header *rss,
1505 struct mbuf *m)
1506 {
1507 struct adapter *sc = iq->adapter;
1508 const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
1509 u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
1510 u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
1511 struct filter_entry *f = lookup_atid(sc, atid);
1512
1513 KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1514
1515 mtx_lock(&sc->tids.hftid_lock);
1516 KASSERT(f->pending, ("%s: hashfilter[%p] isn't pending.", __func__, f));
1517 KASSERT(f->tid == -1, ("%s: hashfilter[%p] has tid %d already.",
1518 __func__, f, f->tid));
1519 if (status == CPL_ERR_NONE) {
1520 f->tid = GET_TID(cpl);
1521 MPASS(lookup_hftid(sc, f->tid) == NULL);
1522 insert_hftid(sc, f);
1523 /*
1524 * Leave the filter pending until it is fully set up, which will
1525 * be indicated by the reply to the last TCB update. No need to
1526 * unblock the ioctl thread either.
1527 */
1528 if (configure_hashfilter_tcb(sc, f) == EINPROGRESS)
1529 goto done;
1530 f->valid = 1;
1531 f->pending = 0;
1532 } else {
1533 /* provide errno instead of tid to ioctl */
1534 f->tid = act_open_rpl_status_to_errno(status);
1535 f->valid = 0;
1536 f->pending = 0;
1537 if (act_open_has_tid(status))
1538 release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
1539 free_filter_resources(f);
1540 remove_hf(sc, f);
1541 if (f->locked == 0)
1542 free(f, M_CXGBE);
1543 }
1544 cv_broadcast(&sc->tids.hftid_cv);
1545 done:
1546 mtx_unlock(&sc->tids.hftid_lock);
1547
1548 free_atid(sc, atid);
1549 return (0);
1550 }
1551
1552 int
1553 t4_hashfilter_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
1554 struct mbuf *m)
1555 {
1556 struct adapter *sc = iq->adapter;
1557 const struct cpl_set_tcb_rpl *rpl = (const void *)(rss + 1);
1558 u_int tid = GET_TID(rpl);
1559 struct filter_entry *f;
1560
1561 mtx_lock(&sc->tids.hftid_lock);
1562 f = lookup_hftid(sc, tid);
1563 KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
1564 KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
1565 f, tid));
1566 KASSERT(f->valid == 0, ("%s: hashfilter %p [%u] is valid already.",
1567 __func__, f, tid));
1568 f->pending = 0;
1569 if (rpl->status == 0) {
1570 f->valid = 1;
1571 } else {
1572 f->tid = EIO;
1573 f->valid = 0;
1574 free_filter_resources(f);
1575 remove_hftid(sc, f);
1576 remove_hf(sc, f);
1577 release_tid(sc, tid, &sc->sge.ctrlq[0]);
1578 if (f->locked == 0)
1579 free(f, M_CXGBE);
1580 }
1581 cv_broadcast(&sc->tids.hftid_cv);
1582 mtx_unlock(&sc->tids.hftid_lock);
1583
1584 return (0);
1585 }
1586
1587 int
1588 t4_del_hashfilter_rpl(struct sge_iq *iq, const struct rss_header *rss,
1589 struct mbuf *m)
1590 {
1591 struct adapter *sc = iq->adapter;
1592 const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
1593 unsigned int tid = GET_TID(cpl);
1594 struct filter_entry *f;
1595
1596 mtx_lock(&sc->tids.hftid_lock);
1597 f = lookup_hftid(sc, tid);
1598 KASSERT(f->tid == tid, ("%s: filter tid mismatch", __func__));
1599 KASSERT(f->pending, ("%s: hashfilter %p [%u] isn't pending.", __func__,
1600 f, tid));
1601 KASSERT(f->valid, ("%s: hashfilter %p [%u] isn't valid.", __func__, f,
1602 tid));
1603 f->pending = 0;
1604 if (cpl->status == 0) {
1605 f->valid = 0;
1606 free_filter_resources(f);
1607 remove_hftid(sc, f);
1608 remove_hf(sc, f);
1609 release_tid(sc, tid, &sc->sge.ctrlq[0]);
1610 if (f->locked == 0)
1611 free(f, M_CXGBE);
1612 }
1613 cv_broadcast(&sc->tids.hftid_cv);
1614 mtx_unlock(&sc->tids.hftid_lock);
1615
1616 return (0);
1617 }
1618
1619 static int
1620 get_tcamfilter(struct adapter *sc, struct t4_filter *t)
1621 {
1622 int i, nfilters;
1623 struct filter_entry *f;
1624 u_int in_use;
1625 #ifdef INVARIANTS
1626 u_int tid_base;
1627 #endif
1628
1629 MPASS(!t->fs.hash);
1630
1631 if (separate_hpfilter_region(sc) && t->fs.prio) {
1632 nfilters = sc->tids.nhpftids;
1633 f = sc->tids.hpftid_tab;
1634 in_use = sc->tids.hpftids_in_use;
1635 #ifdef INVARIANTS
1636 tid_base = sc->tids.hpftid_base;
1637 #endif
1638 } else {
1639 nfilters = sc->tids.nftids;
1640 f = sc->tids.ftid_tab;
1641 in_use = sc->tids.ftids_in_use;
1642 #ifdef INVARIANTS
1643 tid_base = sc->tids.ftid_base;
1644 #endif
1645 }
1646
1647 if (in_use == 0 || f == NULL || t->idx >= nfilters) {
1648 t->idx = 0xffffffff;
1649 return (0);
1650 }
1651
1652 f += t->idx;
1653 mtx_lock(&sc->tids.ftid_lock);
1654 for (i = t->idx; i < nfilters; i++, f++) {
1655 if (f->valid) {
1656 MPASS(f->tid == tid_base + i);
1657 t->idx = i;
1658 t->l2tidx = f->l2te ? f->l2te->idx : 0;
1659 t->smtidx = f->smt ? f->smt->idx : 0;
1660 if (f->fs.hitcnts)
1661 t->hits = get_filter_hits(sc, f->tid);
1662 else
1663 t->hits = UINT64_MAX;
1664 t->fs = f->fs;
1665
1666 goto done;
1667 }
1668 }
1669 t->idx = 0xffffffff;
1670 done:
1671 mtx_unlock(&sc->tids.ftid_lock);
1672 return (0);
1673 }
1674
1675 static int
1676 get_hashfilter(struct adapter *sc, struct t4_filter *t)
1677 {
1678 struct tid_info *ti = &sc->tids;
1679 int tid;
1680 struct filter_entry *f;
1681 const int inv_tid = ti->ntids + ti->tid_base;
1682
1683 MPASS(t->fs.hash);
1684
1685 if (ti->tids_in_use == 0 || ti->hftid_hash_tid == NULL ||
1686 t->idx >= inv_tid) {
1687 t->idx = 0xffffffff;
1688 return (0);
1689 }
1690 if (t->idx < ti->tid_base)
1691 t->idx = ti->tid_base;
1692
1693 mtx_lock(&ti->hftid_lock);
1694 for (tid = t->idx; tid < inv_tid; tid++) {
1695 f = lookup_hftid(sc, tid);
1696 if (f != NULL && f->valid) {
1697 t->idx = tid;
1698 t->l2tidx = f->l2te ? f->l2te->idx : 0;
1699 t->smtidx = f->smt ? f->smt->idx : 0;
1700 if (f->fs.hitcnts)
1701 t->hits = get_filter_hits(sc, tid);
1702 else
1703 t->hits = UINT64_MAX;
1704 t->fs = f->fs;
1705
1706 goto done;
1707 }
1708 }
1709 t->idx = 0xffffffff;
1710 done:
1711 mtx_unlock(&ti->hftid_lock);
1712 return (0);
1713 }
1714
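/*
 * Fill in a CPL_ACT_OPEN_REQ6 that installs an IPv6 hashfilter.  The filter's
 * destination address/port map to the CPL's local fields and the source to
 * its peer fields; the compressed filter tuple goes in 'params' and the
 * action/steering bits are packed into opt0/opt2.
 */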
1715 static void
1716 mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid,
1717 uint64_t ftuple, struct cpl_act_open_req6 *cpl)
1718 {
1719 struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
1720 struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;
1721
1722 /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
1723 MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
1724 MPASS(atid >= 0);
1725
1726 if (chip_id(sc) == CHELSIO_T5) {
1727 INIT_TP_WR(cpl5, 0);
1728 } else {
1729 INIT_TP_WR(cpl6, 0);
1730 cpl6->rsvd2 = 0;
1731 cpl6->opt3 = 0;
1732 }
1733
1734 OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
1735 V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
1736 V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
1737 cpl->local_port = htobe16(f->fs.val.dport);
1738 cpl->peer_port = htobe16(f->fs.val.sport);
1739 cpl->local_ip_hi = *(uint64_t *)(&f->fs.val.dip);
1740 cpl->local_ip_lo = *(((uint64_t *)&f->fs.val.dip) + 1);
1741 cpl->peer_ip_hi = *(uint64_t *)(&f->fs.val.sip);
1742 cpl->peer_ip_lo = *(((uint64_t *)&f->fs.val.sip) + 1);
1743 cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
1744 f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
1745 V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
1746 V_NO_CONG(f->fs.rpttid) |
1747 V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1748 F_TCAM_BYPASS | F_NON_OFFLOAD);
1749
1750 cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
1751 cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
1752 V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
1753 V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
1754 F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
1755 V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
1756 V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
1757 }
1758
1759 static void
1760 mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid,
1761 uint64_t ftuple, struct cpl_act_open_req *cpl)
1762 {
1763 struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
1764 struct cpl_t6_act_open_req *cpl6 = (void *)cpl;
1765
1766 /* Review changes to CPL after cpl_t6_act_open_req if this goes off. */
1767 MPASS(chip_id(sc) >= CHELSIO_T5 && chip_id(sc) <= CHELSIO_T6);
1768 MPASS(atid >= 0);
1769
1770 if (chip_id(sc) == CHELSIO_T5) {
1771 INIT_TP_WR(cpl5, 0);
1772 } else {
1773 INIT_TP_WR(cpl6, 0);
1774 cpl6->rsvd2 = 0;
1775 cpl6->opt3 = 0;
1776 }
1777
1778 OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
1779 V_TID_QID(sc->sge.fwq.abs_id) | V_TID_TID(atid) |
1780 V_TID_COOKIE(CPL_COOKIE_HASHFILTER)));
1781 cpl->local_port = htobe16(f->fs.val.dport);
1782 cpl->peer_port = htobe16(f->fs.val.sport);
1783 cpl->local_ip = f->fs.val.dip[0] | f->fs.val.dip[1] << 8 |
1784 f->fs.val.dip[2] << 16 | f->fs.val.dip[3] << 24;
1785 cpl->peer_ip = f->fs.val.sip[0] | f->fs.val.sip[1] << 8 |
1786 f->fs.val.sip[2] << 16 | f->fs.val.sip[3] << 24;
1787 cpl->opt0 = htobe64(V_NAGLE(f->fs.newvlan == VLAN_REMOVE ||
1788 f->fs.newvlan == VLAN_REWRITE) | V_DELACK(f->fs.hitcnts) |
1789 V_L2T_IDX(f->l2te ? f->l2te->idx : 0) | V_TX_CHAN(f->fs.eport) |
1790 V_NO_CONG(f->fs.rpttid) |
1791 V_ULP_MODE(f->fs.nat_mode ? ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1792 F_TCAM_BYPASS | F_NON_OFFLOAD);
1793
1794 cpl6->params = htobe64(V_FILTER_TUPLE(ftuple));
1795 cpl6->opt2 = htobe32(F_RSS_QUEUE_VALID | V_RSS_QUEUE(f->fs.iq) |
1796 V_TX_QUEUE(f->fs.nat_mode) | V_WND_SCALE_EN(f->fs.nat_flag_chk) |
1797 V_RX_FC_DISABLE(f->fs.nat_seq_chk ? 1 : 0) | F_T5_OPT_2_VALID |
1798 F_RX_CHANNEL | V_SACK_EN(f->fs.swapmac) |
1799 V_CONG_CNTRL((f->fs.action == FILTER_DROP) | (f->fs.dirsteer << 1)) |
1800 V_PACE(f->fs.maskhash | (f->fs.dirsteerhash << 1)));
1801 }
1802
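/*
 * Size, in 16-byte units, of the act_open request used to install a
 * hashfilter, indexed by chip generation and address family.
 */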
1803 static int
1804 act_open_cpl_len16(struct adapter *sc, int isipv6)
1805 {
1806 int idx;
1807 static const int sz_table[4][2] = {
1808 {
1809 howmany(sizeof (struct cpl_act_open_req), 16),
1810 howmany(sizeof (struct cpl_act_open_req6), 16)
1811 },
1812 {
1813 howmany(sizeof (struct cpl_t5_act_open_req), 16),
1814 howmany(sizeof (struct cpl_t5_act_open_req6), 16)
1815 },
1816 {
1817 howmany(sizeof (struct cpl_t6_act_open_req), 16),
1818 howmany(sizeof (struct cpl_t6_act_open_req6), 16)
1819 },
1820 {
1821 howmany(sizeof (struct cpl_t7_act_open_req), 16),
1822 howmany(sizeof (struct cpl_t7_act_open_req6), 16)
1823 },
1824 };
1825
1826 MPASS(chip_id(sc) >= CHELSIO_T4);
1827 idx = min(chip_id(sc) - CHELSIO_T4, 3);
1828
1829 return (sz_table[idx][!!isipv6]);
1830 }
1831
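/*
 * Install a hashfilter: allocate a filter entry and an atid, send a
 * CPL_ACT_OPEN_REQ(6) with TCAM_BYPASS set, and wait for the firmware to
 * assign a tid (see t4_hashfilter_ao_rpl) and finish any TCB updates.
 */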
1832 static int
1833 set_hashfilter(struct adapter *sc, struct t4_filter *t, uint64_t ftuple,
1834 struct l2t_entry *l2te, struct smt_entry *smt)
1835 {
1836 void *wr;
1837 struct wrq_cookie cookie;
1838 struct filter_entry *f;
1839 int rc, atid = -1;
1840 uint32_t hash;
1841
1842 MPASS(t->fs.hash);
1843 /* Already validated against fconf, iconf */
1844 MPASS((t->fs.val.pfvf_vld & t->fs.val.ovlan_vld) == 0);
1845 MPASS((t->fs.mask.pfvf_vld & t->fs.mask.ovlan_vld) == 0);
1846
1847 hash = hf_hashfn_4t(&t->fs);
1848
1849 mtx_lock(&sc->tids.hftid_lock);
1850 if (lookup_hf(sc, &t->fs, hash) != NULL) {
1851 rc = EEXIST;
1852 goto done;
1853 }
1854
1855 f = malloc(sizeof(*f), M_CXGBE, M_ZERO | M_NOWAIT);
1856 if (__predict_false(f == NULL)) {
1857 rc = ENOMEM;
1858 goto done;
1859 }
1860 f->fs = t->fs;
1861 f->l2te = l2te;
1862 f->smt = smt;
1863
1864 atid = alloc_atid(sc, f);
1865 if (__predict_false(atid == -1)) {
1866 free(f, M_CXGBE);
1867 rc = EAGAIN;
1868 goto done;
1869 }
1870 MPASS(atid >= 0);
1871
1872 wr = start_wrq_wr(&sc->sge.ctrlq[0], act_open_cpl_len16(sc, f->fs.type),
1873 &cookie);
1874 if (wr == NULL) {
1875 free_atid(sc, atid);
1876 free(f, M_CXGBE);
1877 rc = ENOMEM;
1878 goto done;
1879 }
1880 if (f->fs.type)
1881 mk_act_open_req6(sc, f, atid, ftuple, wr);
1882 else
1883 mk_act_open_req(sc, f, atid, ftuple, wr);
1884
1885 f->locked = 1; /* ithread mustn't free f if ioctl is still around. */
1886 f->pending = 1;
1887 f->tid = -1;
1888 insert_hf(sc, f, hash);
1889 commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
1890
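/*
 * Wait for the reply.  On success f->valid is set and f->tid holds the
 * filter's tid; on failure f->tid is expected to carry an errno (note
 * rc = f->tid below).  An interrupted sleep leaves the filter pending
 * and returns EINPROGRESS.
 */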
1891 for (;;) {
1892 MPASS(f->locked);
1893 if (f->pending == 0) {
1894 if (f->valid) {
1895 rc = 0;
1896 f->locked = 0;
1897 t->idx = f->tid;
1898 } else {
1899 rc = f->tid;
1900 free(f, M_CXGBE);
1901 }
1902 break;
1903 }
1904 if (cv_wait_sig(&sc->tids.hftid_cv, &sc->tids.hftid_lock) != 0) {
1905 f->locked = 0;
1906 rc = EINPROGRESS;
1907 break;
1908 }
1909 }
1910 done:
1911 mtx_unlock(&sc->tids.hftid_lock);
1912 return (rc);
1913 }
1914
1915 /* ABORT_REQ sent as a ULP command looks like this */
1916 #define LEN__ABORT_REQ_ULP (sizeof(struct ulp_txpkt) + \
1917 sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_req_core))
1918
1919 static void *
1920 mk_abort_req_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1921 {
1922 struct ulptx_idata *ulpsc;
1923 struct cpl_abort_req_core *req;
1924
1925 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1926 ulpmc->len = htobe32(howmany(LEN__ABORT_REQ_ULP, 16));
1927
1928 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1929 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1930 ulpsc->len = htobe32(sizeof(*req));
1931
1932 req = (struct cpl_abort_req_core *)(ulpsc + 1);
1933 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
1934 req->rsvd0 = htonl(0);
1935 req->rsvd1 = 0;
1936 req->cmd = CPL_ABORT_NO_RST;
1937
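/* Pad the sub-command out to a 16-byte boundary with a NOOP if needed. */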
1938 ulpsc = (struct ulptx_idata *)(req + 1);
1939 if (LEN__ABORT_REQ_ULP % 16) {
1940 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1941 ulpsc->len = htobe32(0);
1942 return (ulpsc + 1);
1943 }
1944 return (ulpsc);
1945 }
1946
1947 /* ABORT_RPL sent as a ULP command looks like this */
1948 #define LEN__ABORT_RPL_ULP (sizeof(struct ulp_txpkt) + \
1949 sizeof(struct ulptx_idata) + sizeof(struct cpl_abort_rpl_core))
1950
1951 static void *
1952 mk_abort_rpl_ulp(struct ulp_txpkt *ulpmc, uint32_t tid)
1953 {
1954 struct ulptx_idata *ulpsc;
1955 struct cpl_abort_rpl_core *rpl;
1956
1957 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
1958 ulpmc->len = htobe32(howmany(LEN__ABORT_RPL_ULP, 16));
1959
1960 ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1961 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1962 ulpsc->len = htobe32(sizeof(*rpl));
1963
1964 rpl = (struct cpl_abort_rpl_core *)(ulpsc + 1);
1965 OPCODE_TID(rpl) = htobe32(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
1966 rpl->rsvd0 = htonl(0);
1967 rpl->rsvd1 = 0;
1968 rpl->cmd = CPL_ABORT_NO_RST;
1969
1970 ulpsc = (struct ulptx_idata *)(rpl + 1);
1971 if (LEN__ABORT_RPL_ULP % 16) {
1972 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1973 ulpsc->len = htobe32(0);
1974 return (ulpsc + 1);
1975 }
1976 return (ulpsc);
1977 }
1978
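/*
 * Length of the work request used to delete a hash filter: a ULPTX header
 * followed by SET_TCB_FIELD, ABORT_REQ, and ABORT_RPL sub-commands, each
 * padded to a 16-byte boundary.
 */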
1979 static inline int
1980 del_hashfilter_wrlen(void)
1981 {
1982
1983 return (sizeof(struct work_request_hdr) +
1984 roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
1985 roundup2(LEN__ABORT_REQ_ULP, 16) +
1986 roundup2(LEN__ABORT_RPL_ULP, 16));
1987 }
1988
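/*
 * Build the deletion work request: point the TCB's RSS_INFO at qid (the
 * caller passes the firmware queue's abs_id, presumably so that the CPLs
 * generated by the abort land there) and then abort the tid backing the
 * hash filter.
 */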
1989 static void
1990 mk_del_hashfilter_wr(struct adapter *sc, int tid, struct work_request_hdr *wrh,
1991 int wrlen, int qid)
1992 {
1993 struct ulp_txpkt *ulpmc;
1994
1995 INIT_ULPTX_WRH(wrh, wrlen, 0, 0);
1996 ulpmc = (struct ulp_txpkt *)(wrh + 1);
1997 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, tid, W_TCB_RSS_INFO,
1998 V_TCB_RSS_INFO(M_TCB_RSS_INFO), V_TCB_RSS_INFO(qid));
1999 ulpmc = mk_abort_req_ulp(ulpmc, tid);
2000 ulpmc = mk_abort_rpl_ulp(ulpmc, tid);
2001 }
2002
2003 static int
2004 del_hashfilter(struct adapter *sc, struct t4_filter *t)
2005 {
2006 struct tid_info *ti = &sc->tids;
2007 void *wr;
2008 struct filter_entry *f;
2009 struct wrq_cookie cookie;
2010 int rc;
2011 const int wrlen = del_hashfilter_wrlen();
2012 const int inv_tid = ti->ntids + ti->tid_base;
2013
2014 MPASS(sc->tids.hftid_hash_4t != NULL);
2015 MPASS(sc->tids.ntids > 0);
2016
2017 if (t->idx < sc->tids.tid_base || t->idx >= inv_tid)
2018 return (EINVAL);
2019
2020 mtx_lock(&ti->hftid_lock);
2021 f = lookup_hftid(sc, t->idx);
2022 if (f == NULL || f->valid == 0) {
2023 rc = EINVAL;
2024 goto done;
2025 }
2026 MPASS(f->tid == t->idx);
2027 if (f->locked) {
2028 rc = EPERM;
2029 goto done;
2030 }
2031 if (f->pending) {
2032 rc = EBUSY;
2033 goto done;
2034 }
2035 wr = start_wrq_wr(&sc->sge.ctrlq[0], howmany(wrlen, 16), &cookie);
2036 if (wr == NULL) {
2037 rc = ENOMEM;
2038 goto done;
2039 }
2040
2041 mk_del_hashfilter_wr(sc, t->idx, wr, wrlen, sc->sge.fwq.abs_id);
2042 f->locked = 1;
2043 f->pending = 1;
2044 commit_wrq_wr(&sc->sge.ctrlq[0], wr, &cookie);
2045 t->fs = f->fs; /* extra info for the caller */
2046
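/*
 * Wait for the reply.  If f->valid is still set afterwards the filter was
 * not removed and EIO is returned; if it is clear the deletion succeeded
 * and the entry is freed.  An interrupted sleep returns EINPROGRESS.
 */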
2047 for (;;) {
2048 MPASS(f->locked);
2049 if (f->pending == 0) {
2050 if (f->valid) {
2051 f->locked = 0;
2052 rc = EIO;
2053 } else {
2054 rc = 0;
2055 free(f, M_CXGBE);
2056 }
2057 break;
2058 }
2059 if (cv_wait_sig(&ti->hftid_cv, &ti->hftid_lock) != 0) {
2060 f->locked = 0;
2061 rc = EINPROGRESS;
2062 break;
2063 }
2064 }
2065 done:
2066 mtx_unlock(&ti->hftid_lock);
2067 return (rc);
2068 }
2069
2070 #define WORD_MASK 0xffffffff
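/*
 * Write the NAT rewrite parameters directly into raw TCB words.  An IPv6
 * address occupies four consecutive 32-bit words starting at the given
 * offset while an IPv4 address uses a single word; the rewritten ports are
 * packed into W_TCB_PDU_HDR_LEN (dport in the low 16 bits, sport in the
 * high 16 bits).
 */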
2071 static void
2072 set_nat_params(struct adapter *sc, struct filter_entry *f, const bool dip,
2073 const bool sip, const bool dp, const bool sp)
2074 {
2075
2076 if (dip) {
2077 if (f->fs.type) {
2078 set_tcb_field(sc, f->tid, W_TCB_SND_UNA_RAW, WORD_MASK,
2079 f->fs.nat_dip[15] | f->fs.nat_dip[14] << 8 |
2080 f->fs.nat_dip[13] << 16 | f->fs.nat_dip[12] << 24, 1);
2081
2082 set_tcb_field(sc, f->tid,
2083 W_TCB_SND_UNA_RAW + 1, WORD_MASK,
2084 f->fs.nat_dip[11] | f->fs.nat_dip[10] << 8 |
2085 f->fs.nat_dip[9] << 16 | f->fs.nat_dip[8] << 24, 1);
2086
2087 set_tcb_field(sc, f->tid,
2088 W_TCB_SND_UNA_RAW + 2, WORD_MASK,
2089 f->fs.nat_dip[7] | f->fs.nat_dip[6] << 8 |
2090 f->fs.nat_dip[5] << 16 | f->fs.nat_dip[4] << 24, 1);
2091
2092 set_tcb_field(sc, f->tid,
2093 W_TCB_SND_UNA_RAW + 3, WORD_MASK,
2094 f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
2095 f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
2096 } else {
2097 set_tcb_field(sc, f->tid,
2098 W_TCB_RX_FRAG3_LEN_RAW, WORD_MASK,
2099 f->fs.nat_dip[3] | f->fs.nat_dip[2] << 8 |
2100 f->fs.nat_dip[1] << 16 | f->fs.nat_dip[0] << 24, 1);
2101 }
2102 }
2103
2104 if (sip) {
2105 if (f->fs.type) {
2106 set_tcb_field(sc, f->tid,
2107 W_TCB_RX_FRAG2_PTR_RAW, WORD_MASK,
2108 f->fs.nat_sip[15] | f->fs.nat_sip[14] << 8 |
2109 f->fs.nat_sip[13] << 16 | f->fs.nat_sip[12] << 24, 1);
2110
2111 set_tcb_field(sc, f->tid,
2112 W_TCB_RX_FRAG2_PTR_RAW + 1, WORD_MASK,
2113 f->fs.nat_sip[11] | f->fs.nat_sip[10] << 8 |
2114 f->fs.nat_sip[9] << 16 | f->fs.nat_sip[8] << 24, 1);
2115
2116 set_tcb_field(sc, f->tid,
2117 W_TCB_RX_FRAG2_PTR_RAW + 2, WORD_MASK,
2118 f->fs.nat_sip[7] | f->fs.nat_sip[6] << 8 |
2119 f->fs.nat_sip[5] << 16 | f->fs.nat_sip[4] << 24, 1);
2120
2121 set_tcb_field(sc, f->tid,
2122 W_TCB_RX_FRAG2_PTR_RAW + 3, WORD_MASK,
2123 f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
2124 f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
2125
2126 } else {
2127 set_tcb_field(sc, f->tid,
2128 W_TCB_RX_FRAG3_START_IDX_OFFSET_RAW, WORD_MASK,
2129 f->fs.nat_sip[3] | f->fs.nat_sip[2] << 8 |
2130 f->fs.nat_sip[1] << 16 | f->fs.nat_sip[0] << 24, 1);
2131 }
2132 }
2133
2134 set_tcb_field(sc, f->tid, W_TCB_PDU_HDR_LEN, WORD_MASK,
2135 (dp ? f->fs.nat_dport : 0) | (sp ? f->fs.nat_sport << 16 : 0), 1);
2136 }
2137
2138 /*
2139 * Returns EINPROGRESS to indicate that at least one TCB update was sent and the
2140 * last of the series of updates requested a reply. The reply informs the
2141 * driver that the filter is fully set up.
2142 */
2143 static int
2144 configure_hashfilter_tcb(struct adapter *sc, struct filter_entry *f)
2145 {
2146 int updated = 0;
2147
2148 MPASS(f->tid < sc->tids.ntids);
2149 MPASS(f->fs.hash);
2150 MPASS(f->pending);
2151 MPASS(f->valid == 0);
2152
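/*
 * The CCTRL tflags are used here as filter controls: ECE requests the
 * DMAC rewrite, RFR the VLAN rewrite, CWR the SMAC rewrite (together
 * with SMAC_SEL), and ECN enables switching (set last, below).
 */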
2153 if (f->fs.newdmac) {
2154 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECE, 1, 1);
2155 updated++;
2156 }
2157
2158 if (f->fs.newvlan == VLAN_INSERT || f->fs.newvlan == VLAN_REWRITE) {
2159 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_RFR, 1, 1);
2160 updated++;
2161 }
2162
2163 if (f->fs.newsmac) {
2164 MPASS(f->smt != NULL);
2165 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_CWR, 1, 1);
2166 set_tcb_field(sc, f->tid, W_TCB_SMAC_SEL,
2167 V_TCB_SMAC_SEL(M_TCB_SMAC_SEL), V_TCB_SMAC_SEL(f->smt->idx),
2168 1);
2169 updated++;
2170 }
2171
2172 switch (f->fs.nat_mode) {
2173 case NAT_MODE_NONE:
2174 break;
2175 case NAT_MODE_DIP:
2176 set_nat_params(sc, f, true, false, false, false);
2177 updated++;
2178 break;
2179 case NAT_MODE_DIP_DP:
2180 set_nat_params(sc, f, true, false, true, false);
2181 updated++;
2182 break;
2183 case NAT_MODE_DIP_DP_SIP:
2184 set_nat_params(sc, f, true, true, true, false);
2185 updated++;
2186 break;
2187 case NAT_MODE_DIP_DP_SP:
2188 set_nat_params(sc, f, true, false, true, true);
2189 updated++;
2190 break;
2191 case NAT_MODE_SIP_SP:
2192 set_nat_params(sc, f, false, true, false, true);
2193 updated++;
2194 break;
2195 case NAT_MODE_DIP_SIP_SP:
2196 set_nat_params(sc, f, true, true, false, true);
2197 updated++;
2198 break;
2199 case NAT_MODE_ALL:
2200 set_nat_params(sc, f, true, true, true, true);
2201 updated++;
2202 break;
2203 default:
2204 MPASS(0); /* should have been validated earlier */
2205 break;
2206
2207 }
2208
2209 if (f->fs.nat_seq_chk) {
2210 set_tcb_field(sc, f->tid, W_TCB_RCV_NXT,
2211 V_TCB_RCV_NXT(M_TCB_RCV_NXT),
2212 V_TCB_RCV_NXT(f->fs.nat_seq_chk), 1);
2213 updated++;
2214 }
2215
2216 if (is_t5(sc) && f->fs.action == FILTER_DROP) {
2217 /*
2218 * Migrating = 1, Non-offload = 0 to get a T5 hashfilter to drop.
2219 */
2220 set_tcb_field(sc, f->tid, W_TCB_T_FLAGS, V_TF_NON_OFFLOAD(1) |
2221 V_TF_MIGRATING(1), V_TF_MIGRATING(1), 1);
2222 updated++;
2223 }
2224
2225 /*
2226 * Enable switching after all secondary resources (L2T entry, SMT entry,
2227 * etc.) are set up so that any switched packet will use the correct
2228 * values.
2229 */
2230 if (f->fs.action == FILTER_SWITCH) {
2231 set_tcb_tflag(sc, f->tid, S_TF_CCTRL_ECN, 1, 1);
2232 updated++;
2233 }
2234
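/*
 * Final update: clear the timestamp/age words used for hit counting.
 * Unlike the updates above, this one requests a reply (last argument 0
 * rather than 1), and that reply is what marks the filter fully set up.
 */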
2235 if (f->fs.hitcnts || updated > 0) {
2236 set_tcb_field(sc, f->tid, W_TCB_TIMESTAMP,
2237 V_TCB_TIMESTAMP(M_TCB_TIMESTAMP) |
2238 V_TCB_T_RTT_TS_RECENT_AGE(M_TCB_T_RTT_TS_RECENT_AGE),
2239 V_TCB_TIMESTAMP(0ULL) | V_TCB_T_RTT_TS_RECENT_AGE(0ULL), 0);
2240 return (EINPROGRESS);
2241 }
2242
2243 return (0);
2244 }
2245