/*	$OpenBSD: if_trunk.c,v 1.30 2007/01/31 06:20:19 reyk Exp $	*/

/*
 * Copyright (c) 2005, 2006 Reyk Floeter <reyk@openbsd.org>
 * Copyright (c) 2007 Andrew Thompson <thompsa@FreeBSD.org>
 * Copyright (c) 2014, 2016 Marcelo Araujo <araujo@FreeBSD.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "opt_inet.h"
#include "opt_inet6.h"
#include "opt_kern_tls.h"
#include "opt_ratelimit.h"

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/sysctl.h>
#include <sys/module.h>
#include <sys/priv.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/lock.h>
#include <sys/rmlock.h>
#include <sys/sx.h>
#include <sys/taskqueue.h>
#include <sys/eventhandler.h>

#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_clone.h>
#include <net/if_arp.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_types.h>
#include <net/if_var.h>
#include <net/if_private.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/vnet.h>
#include <net/infiniband.h>

#if defined(INET) || defined(INET6)
#include <netinet/in.h>
#include <netinet/ip.h>
#endif
#ifdef INET
#include <netinet/in_systm.h>
#include <netinet/if_ether.h>
#endif

#ifdef INET6
#include <netinet/ip6.h>
#include <netinet6/in6_var.h>
#include <netinet6/in6_ifattach.h>
#endif

#include <net/if_vlan_var.h>
#include <net/if_lagg.h>
#include <net/ieee8023ad_lacp.h>

#ifdef DEV_NETMAP
MODULE_DEPEND(if_lagg, netmap, 1, 1, 1);
#endif

#define	LAGG_SX_INIT(_sc)	sx_init(&(_sc)->sc_sx, "if_lagg sx")
#define	LAGG_SX_DESTROY(_sc)	sx_destroy(&(_sc)->sc_sx)
#define	LAGG_XLOCK(_sc)		sx_xlock(&(_sc)->sc_sx)
#define	LAGG_XUNLOCK(_sc)	sx_xunlock(&(_sc)->sc_sx)
#define	LAGG_XLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_XLOCKED)
#define	LAGG_SLOCK(_sc)		sx_slock(&(_sc)->sc_sx)
#define	LAGG_SUNLOCK(_sc)	sx_sunlock(&(_sc)->sc_sx)
#define	LAGG_SXLOCK_ASSERT(_sc)	sx_assert(&(_sc)->sc_sx, SA_LOCKED)

/* Special flags we should propagate to the lagg ports. */
static struct {
	int flag;
	int (*func)(struct ifnet *, int);
} lagg_pflags[] = {
	{IFF_PROMISC, ifpromisc},
	{IFF_ALLMULTI, if_allmulti},
	{0, NULL}
};

struct lagg_snd_tag {
	struct m_snd_tag com;
	struct m_snd_tag *tag;
};

VNET_DEFINE_STATIC(SLIST_HEAD(__trhead, lagg_softc), lagg_list) =
    SLIST_HEAD_INITIALIZER(); /* list of laggs */
#define	V_lagg_list	VNET(lagg_list)
VNET_DEFINE_STATIC(struct mtx, lagg_list_mtx);
#define	V_lagg_list_mtx	VNET(lagg_list_mtx)
#define	LAGG_LIST_LOCK_INIT(x)		mtx_init(&V_lagg_list_mtx, \
					    "if_lagg list", NULL, MTX_DEF)
#define	LAGG_LIST_LOCK_DESTROY(x)	mtx_destroy(&V_lagg_list_mtx)
#define	LAGG_LIST_LOCK(x)		mtx_lock(&V_lagg_list_mtx)
#define	LAGG_LIST_UNLOCK(x)		mtx_unlock(&V_lagg_list_mtx)
static eventhandler_tag	lagg_detach_cookie = NULL;

static int	lagg_clone_create(struct if_clone *, char *, size_t,
		    struct ifc_data *, struct ifnet **);
static int	lagg_clone_destroy(struct if_clone *, struct ifnet *, uint32_t);
VNET_DEFINE_STATIC(struct if_clone *, lagg_cloner);
#define	V_lagg_cloner	VNET(lagg_cloner)
static const char laggname[] = "lagg";
static MALLOC_DEFINE(M_LAGG, laggname, "802.3AD Link Aggregation Interface");

static void	lagg_capabilities(struct lagg_softc *);
static int	lagg_port_create(struct lagg_softc *, struct ifnet *);
static int	lagg_port_destroy(struct lagg_port *, int);
static struct mbuf *lagg_input_ethernet(struct ifnet *, struct mbuf *);
static struct mbuf *lagg_input_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_linkstate(struct lagg_softc *);
static void	lagg_port_state(struct ifnet *, int);
static int	lagg_port_ioctl(struct ifnet *, u_long, caddr_t);
static int	lagg_port_output(struct ifnet *, struct mbuf *,
		    const struct sockaddr *, struct route *);
static void	lagg_port_ifdetach(void *arg __unused, struct ifnet *);
#ifdef LAGG_PORT_STACKING
static int	lagg_port_checkstacking(struct lagg_softc *);
#endif
static void	lagg_port2req(struct lagg_port *, struct lagg_reqport *);
static void	lagg_if_updown(struct lagg_softc *, bool);
static void	lagg_init(void *);
static void	lagg_init_locked(struct lagg_softc *);
static void	lagg_stop(struct lagg_softc *);
static int	lagg_ioctl(struct ifnet *, u_long, caddr_t);
#if defined(KERN_TLS) || defined(RATELIMIT)
static int	lagg_snd_tag_alloc(struct ifnet *,
		    union if_snd_tag_alloc_params *,
		    struct m_snd_tag **);
static int	lagg_snd_tag_modify(struct m_snd_tag *,
		    union if_snd_tag_modify_params *);
static int	lagg_snd_tag_query(struct m_snd_tag *,
		    union if_snd_tag_query_params *);
static void	lagg_snd_tag_free(struct m_snd_tag *);
static struct m_snd_tag *lagg_next_snd_tag(struct m_snd_tag *);
static void	lagg_ratelimit_query(struct ifnet *,
		    struct if_ratelimit_query_results *);
#endif
static int	lagg_setmulti(struct lagg_port *);
static int	lagg_clrmulti(struct lagg_port *);
static void	lagg_setcaps(struct lagg_port *, int cap, int cap2);
static int	lagg_setflag(struct lagg_port *, int, int,
		    int (*func)(struct ifnet *, int));
static int	lagg_setflags(struct lagg_port *, int status);
static uint64_t	lagg_get_counter(struct ifnet *ifp, ift_counter cnt);
static int	lagg_transmit_ethernet(struct ifnet *, struct mbuf *);
static int	lagg_transmit_infiniband(struct ifnet *, struct mbuf *);
static void	lagg_qflush(struct ifnet *);
static int	lagg_media_change(struct ifnet *);
static void	lagg_media_status(struct ifnet *, struct ifmediareq *);
static struct lagg_port *lagg_link_active(struct lagg_softc *,
		    struct lagg_port *);

/* No proto */
static int	lagg_none_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_none_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Simple round robin */
static void	lagg_rr_attach(struct lagg_softc *);
static int	lagg_rr_start(struct lagg_softc *, struct mbuf *);

/* Active failover */
static int	lagg_fail_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_fail_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* Loadbalancing */
static void	lagg_lb_attach(struct lagg_softc *);
static void	lagg_lb_detach(struct lagg_softc *);
static int	lagg_lb_port_create(struct lagg_port *);
static void	lagg_lb_port_destroy(struct lagg_port *);
static int	lagg_lb_start(struct lagg_softc *, struct mbuf *);
static int	lagg_lb_porttable(struct lagg_softc *, struct lagg_port *);

/* Broadcast */
static int	lagg_bcast_start(struct lagg_softc *, struct mbuf *);

/* 802.3ad LACP */
static void	lagg_lacp_attach(struct lagg_softc *);
static void	lagg_lacp_detach(struct lagg_softc *);
static int	lagg_lacp_start(struct lagg_softc *, struct mbuf *);
static struct mbuf *lagg_lacp_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
static void	lagg_lacp_lladdr(struct lagg_softc *);

/* Default input */
static struct mbuf *lagg_default_input(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);

/* lagg protocol table */
static const struct lagg_proto {
	void	(*pr_attach)(struct lagg_softc *);
	void	(*pr_detach)(struct lagg_softc *);
	int	(*pr_start)(struct lagg_softc *, struct mbuf *);
	struct mbuf *(*pr_input)(struct lagg_softc *, struct lagg_port *,
		    struct mbuf *);
	int	(*pr_addport)(struct lagg_port *);
	void	(*pr_delport)(struct lagg_port *);
	void	(*pr_linkstate)(struct lagg_port *);
	void	(*pr_init)(struct lagg_softc *);
	void	(*pr_stop)(struct lagg_softc *);
	void	(*pr_lladdr)(struct lagg_softc *);
	void	(*pr_request)(struct lagg_softc *, void *);
	void	(*pr_portreq)(struct lagg_port *, void *);
} lagg_protos[] = {
	[LAGG_PROTO_NONE] = {
		.pr_start = lagg_none_start,
		.pr_input = lagg_none_input,
	},
	[LAGG_PROTO_ROUNDROBIN] = {
		.pr_attach = lagg_rr_attach,
		.pr_start = lagg_rr_start,
		.pr_input = lagg_default_input,
	},
	[LAGG_PROTO_FAILOVER] = {
		.pr_start = lagg_fail_start,
		.pr_input = lagg_fail_input,
	},
	[LAGG_PROTO_LOADBALANCE] = {
		.pr_attach = lagg_lb_attach,
		.pr_detach = lagg_lb_detach,
		.pr_start = lagg_lb_start,
		.pr_input = lagg_default_input,
		.pr_addport = lagg_lb_port_create,
		.pr_delport = lagg_lb_port_destroy,
	},
	[LAGG_PROTO_LACP] = {
		.pr_attach = lagg_lacp_attach,
		.pr_detach = lagg_lacp_detach,
		.pr_start = lagg_lacp_start,
		.pr_input = lagg_lacp_input,
		.pr_addport = lacp_port_create,
		.pr_delport = lacp_port_destroy,
		.pr_linkstate = lacp_linkstate,
		.pr_init = lacp_init,
		.pr_stop = lacp_stop,
		.pr_lladdr = lagg_lacp_lladdr,
		.pr_request = lacp_req,
		.pr_portreq = lacp_portreq,
	},
	[LAGG_PROTO_BROADCAST] = {
		.pr_start = lagg_bcast_start,
		.pr_input = lagg_default_input,
	},
};
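
/*
 * Illustrative note (not part of the driver logic): the table above makes
 * protocol dispatch a simple indexed call.  Switching a lagg to LACP via
 * lagg_proto_detach()/lagg_proto_attach() reduces to roughly:
 *
 *	lagg_protos[LAGG_PROTO_LACP].pr_attach(sc);	// lagg_lacp_attach()
 *	sc->sc_proto = LAGG_PROTO_LACP;
 *
 * Handlers left NULL are simply skipped by the wrapper functions below.
 */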

SYSCTL_DECL(_net_link);
SYSCTL_NODE(_net_link, OID_AUTO, lagg, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
    "Link Aggregation");

/* Allow input on any failover links */
VNET_DEFINE_STATIC(int, lagg_failover_rx_all);
#define	V_lagg_failover_rx_all	VNET(lagg_failover_rx_all)
SYSCTL_INT(_net_link_lagg, OID_AUTO, failover_rx_all, CTLFLAG_RW | CTLFLAG_VNET,
    &VNET_NAME(lagg_failover_rx_all), 0,
    "Accept input from any interface in a failover lagg");

/* Default value for using flowid */
VNET_DEFINE_STATIC(int, def_use_flowid) = 0;
#define	V_def_use_flowid	VNET(def_use_flowid)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_flowid,
    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_use_flowid), 0,
    "Default setting for using flow id for load sharing");

/* Default value for using numa */
VNET_DEFINE_STATIC(int, def_use_numa) = 1;
#define	V_def_use_numa	VNET(def_use_numa)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_use_numa,
    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_use_numa), 0,
    "Use numa to steer flows");

/* Default value for flowid shift */
VNET_DEFINE_STATIC(int, def_flowid_shift) = 16;
#define	V_def_flowid_shift	VNET(def_flowid_shift)
SYSCTL_INT(_net_link_lagg, OID_AUTO, default_flowid_shift,
    CTLFLAG_RWTUN | CTLFLAG_VNET, &VNET_NAME(def_flowid_shift), 0,
    "Default setting for flowid shift for load sharing");

static void
vnet_lagg_init(const void *unused __unused)
{

	LAGG_LIST_LOCK_INIT();
	struct if_clone_addreq req = {
		.create_f = lagg_clone_create,
		.destroy_f = lagg_clone_destroy,
		.flags = IFC_F_AUTOUNIT,
	};
	V_lagg_cloner = ifc_attach_cloner(laggname, &req);
}
VNET_SYSINIT(vnet_lagg_init, SI_SUB_PROTO_IFATTACHDOMAIN, SI_ORDER_ANY,
    vnet_lagg_init, NULL);

static void
vnet_lagg_uninit(const void *unused __unused)
{

	ifc_detach_cloner(V_lagg_cloner);
	LAGG_LIST_LOCK_DESTROY();
}
VNET_SYSUNINIT(vnet_lagg_uninit, SI_SUB_INIT_IF, SI_ORDER_ANY,
    vnet_lagg_uninit, NULL);

static int
lagg_modevent(module_t mod, int type, void *data)
{

	switch (type) {
	case MOD_LOAD:
		lagg_input_ethernet_p = lagg_input_ethernet;
		lagg_input_infiniband_p = lagg_input_infiniband;
		lagg_linkstate_p = lagg_port_state;
		lagg_detach_cookie = EVENTHANDLER_REGISTER(
		    ifnet_departure_event, lagg_port_ifdetach, NULL,
		    EVENTHANDLER_PRI_ANY);
		break;
	case MOD_UNLOAD:
		EVENTHANDLER_DEREGISTER(ifnet_departure_event,
		    lagg_detach_cookie);
		lagg_input_ethernet_p = NULL;
		lagg_input_infiniband_p = NULL;
		lagg_linkstate_p = NULL;
		break;
	default:
		return (EOPNOTSUPP);
	}
	return (0);
}

static moduledata_t lagg_mod = {
	"if_lagg",
	lagg_modevent,
	0
};

DECLARE_MODULE(if_lagg, lagg_mod, SI_SUB_PSEUDO, SI_ORDER_ANY);
MODULE_VERSION(if_lagg, 1);
MODULE_DEPEND(if_lagg, if_infiniband, 1, 1, 1);

static void
lagg_proto_attach(struct lagg_softc *sc, lagg_proto pr)
{

	LAGG_XLOCK_ASSERT(sc);
	KASSERT(sc->sc_proto == LAGG_PROTO_NONE, ("%s: sc %p has proto",
	    __func__, sc));

	if (sc->sc_ifflags & IFF_DEBUG)
		if_printf(sc->sc_ifp, "using proto %u\n", pr);

	if (lagg_protos[pr].pr_attach != NULL)
		lagg_protos[pr].pr_attach(sc);
	sc->sc_proto = pr;
}

static void
lagg_proto_detach(struct lagg_softc *sc)
{
	lagg_proto pr;

	LAGG_XLOCK_ASSERT(sc);
	pr = sc->sc_proto;
	sc->sc_proto = LAGG_PROTO_NONE;

	if (lagg_protos[pr].pr_detach != NULL)
		lagg_protos[pr].pr_detach(sc);
}

static inline int
lagg_proto_start(struct lagg_softc *sc, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_start(sc, m));
}

static inline struct mbuf *
lagg_proto_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{

	return (lagg_protos[sc->sc_proto].pr_input(sc, lp, m));
}

static int
lagg_proto_addport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_addport == NULL)
		return (0);
	else
		return (lagg_protos[sc->sc_proto].pr_addport(lp));
}

static void
lagg_proto_delport(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_delport != NULL)
		lagg_protos[sc->sc_proto].pr_delport(lp);
}

static void
lagg_proto_linkstate(struct lagg_softc *sc, struct lagg_port *lp)
{

	if (lagg_protos[sc->sc_proto].pr_linkstate != NULL)
		lagg_protos[sc->sc_proto].pr_linkstate(lp);
}

static void
lagg_proto_init(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_init != NULL)
		lagg_protos[sc->sc_proto].pr_init(sc);
}

static void
lagg_proto_stop(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_stop != NULL)
		lagg_protos[sc->sc_proto].pr_stop(sc);
}

static void
lagg_proto_lladdr(struct lagg_softc *sc)
{

	if (lagg_protos[sc->sc_proto].pr_lladdr != NULL)
		lagg_protos[sc->sc_proto].pr_lladdr(sc);
}

static void
lagg_proto_request(struct lagg_softc *sc, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_request != NULL)
		lagg_protos[sc->sc_proto].pr_request(sc, v);
}

static void
lagg_proto_portreq(struct lagg_softc *sc, struct lagg_port *lp, void *v)
{

	if (lagg_protos[sc->sc_proto].pr_portreq != NULL)
		lagg_protos[sc->sc_proto].pr_portreq(lp, v);
}

/*
 * This routine is run via a vlan
 * config EVENT
 */
static void
lagg_register_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_XLOCK(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_config, lp->lp_ifp, vtag);
	LAGG_XUNLOCK(sc);
}

/*
 * This routine is run via a vlan
 * unconfig EVENT
 */
static void
lagg_unregister_vlan(void *arg, struct ifnet *ifp, u_int16_t vtag)
{
	struct lagg_softc *sc = ifp->if_softc;
	struct lagg_port *lp;

	if (ifp->if_softc != arg) /* Not our event */
		return;

	LAGG_XLOCK(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
		EVENTHANDLER_INVOKE(vlan_unconfig, lp->lp_ifp, vtag);
	LAGG_XUNLOCK(sc);
}

static int
lagg_clone_create(struct if_clone *ifc, char *name, size_t len,
    struct ifc_data *ifd, struct ifnet **ifpp)
{
	struct iflaggparam iflp;
	struct lagg_softc *sc;
	struct ifnet *ifp;
	int if_type;
	int error;
	static const uint8_t eaddr[LAGG_ADDR_LEN];

	if (ifd->params != NULL) {
		error = ifc_copyin(ifd, &iflp, sizeof(iflp));
		if (error)
			return (error);

		switch (iflp.lagg_type) {
		case LAGG_TYPE_ETHERNET:
			if_type = IFT_ETHER;
			break;
		case LAGG_TYPE_INFINIBAND:
			if_type = IFT_INFINIBAND;
			break;
		default:
			return (EINVAL);
		}
	} else {
		if_type = IFT_ETHER;
	}

	sc = malloc(sizeof(*sc), M_LAGG, M_WAITOK | M_ZERO);
	ifp = sc->sc_ifp = if_alloc(if_type);
	LAGG_SX_INIT(sc);

	mtx_init(&sc->sc_mtx, "lagg-mtx", NULL, MTX_DEF);
	callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

	LAGG_XLOCK(sc);
	if (V_def_use_flowid)
		sc->sc_opts |= LAGG_OPT_USE_FLOWID;
	if (V_def_use_numa)
		sc->sc_opts |= LAGG_OPT_USE_NUMA;
	sc->flowid_shift = V_def_flowid_shift;

	/* Hash all layers by default */
	sc->sc_flags = MBUF_HASHFLAG_L2 | MBUF_HASHFLAG_L3 | MBUF_HASHFLAG_L4;

	lagg_proto_attach(sc, LAGG_PROTO_DEFAULT);

	CK_SLIST_INIT(&sc->sc_ports);

	switch (if_type) {
	case IFT_ETHER:
		/* Initialise pseudo media types */
		ifmedia_init(&sc->sc_media, 0, lagg_media_change,
		    lagg_media_status);
		ifmedia_add(&sc->sc_media, IFM_ETHER | IFM_AUTO, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER | IFM_AUTO);

		if_initname(ifp, laggname, ifd->unit);
		ifp->if_transmit = lagg_transmit_ethernet;
		break;
	case IFT_INFINIBAND:
		if_initname(ifp, laggname, ifd->unit);
		ifp->if_transmit = lagg_transmit_infiniband;
		break;
	default:
		break;
	}
	ifp->if_softc = sc;
	ifp->if_qflush = lagg_qflush;
	ifp->if_init = lagg_init;
	ifp->if_ioctl = lagg_ioctl;
	ifp->if_get_counter = lagg_get_counter;
	ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST | IFF_MULTICAST;
#if defined(KERN_TLS) || defined(RATELIMIT)
	ifp->if_snd_tag_alloc = lagg_snd_tag_alloc;
	ifp->if_ratelimit_query = lagg_ratelimit_query;
#endif
	ifp->if_capenable = ifp->if_capabilities = IFCAP_HWSTATS;

	/*
	 * Attach as an ordinary ethernet device; children will be attached
	 * as special devices IFT_IEEE8023ADLAG or IFT_INFINIBANDLAG.
	 */
	switch (if_type) {
	case IFT_ETHER:
		ether_ifattach(ifp, eaddr);
		break;
	case IFT_INFINIBAND:
		infiniband_ifattach(ifp, eaddr, sc->sc_bcast_addr);
		break;
	default:
		break;
	}

	sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    lagg_register_vlan, sc, EVENTHANDLER_PRI_FIRST);
	sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    lagg_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST);

	/* Insert into the global list of laggs */
	LAGG_LIST_LOCK();
	SLIST_INSERT_HEAD(&V_lagg_list, sc, sc_entries);
	LAGG_LIST_UNLOCK();
	LAGG_XUNLOCK(sc);
	*ifpp = ifp;

	return (0);
}
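
/*
 * Example (assumed usage, mirroring ifconfig(8)): the cloner above backs
 * commands such as:
 *
 *	# ifconfig lagg0 create
 *	# ifconfig lagg1 create laggtype infiniband
 *
 * where the optional iflaggparam selects IFT_ETHER (the default) or
 * IFT_INFINIBAND before the interface is attached.
 */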

static int
lagg_clone_destroy(struct if_clone *ifc, struct ifnet *ifp, uint32_t flags)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_port *lp;

	LAGG_XLOCK(sc);
	sc->sc_destroying = 1;
	lagg_stop(sc);
	ifp->if_flags &= ~IFF_UP;

	EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach);
	EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach);

	/* Shutdown and remove lagg ports */
	while ((lp = CK_SLIST_FIRST(&sc->sc_ports)) != NULL)
		lagg_port_destroy(lp, 1);

	/* Unhook the aggregation protocol */
	lagg_proto_detach(sc);
	LAGG_XUNLOCK(sc);

	switch (ifp->if_type) {
	case IFT_ETHER:
		ether_ifdetach(ifp);
		ifmedia_removeall(&sc->sc_media);
		break;
	case IFT_INFINIBAND:
		infiniband_ifdetach(ifp);
		break;
	default:
		break;
	}
	if_free(ifp);

	LAGG_LIST_LOCK();
	SLIST_REMOVE(&V_lagg_list, sc, lagg_softc, sc_entries);
	LAGG_LIST_UNLOCK();

	mtx_destroy(&sc->sc_mtx);
	LAGG_SX_DESTROY(sc);
	free(sc, M_LAGG);

	return (0);
}

static void
lagg_capabilities(struct lagg_softc *sc)
{
	struct lagg_port *lp;
	int cap, cap2, ena, ena2, pena, pena2;
	uint64_t hwa;
	struct ifnet_hw_tsomax hw_tsomax;

	LAGG_XLOCK_ASSERT(sc);

	/* Get common enabled capabilities for the lagg ports */
	ena = ena2 = ~0;
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		ena &= lp->lp_ifp->if_capenable;
		ena2 &= lp->lp_ifp->if_capenable2;
	}
	if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
		ena = ena2 = 0;

	/*
	 * Apply common enabled capabilities back to the lagg ports.
	 * May require several iterations if they are dependent.
	 */
	do {
		pena = ena;
		pena2 = ena2;
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setcaps(lp, ena, ena2);
			ena &= lp->lp_ifp->if_capenable;
			ena2 &= lp->lp_ifp->if_capenable2;
		}
	} while (pena != ena || pena2 != ena2);
	ena2 &= ~IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD);

	/* Get other capabilities from the lagg ports */
	cap = cap2 = ~0;
	hwa = ~(uint64_t)0;
	memset(&hw_tsomax, 0, sizeof(hw_tsomax));
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		cap &= lp->lp_ifp->if_capabilities;
		cap2 &= lp->lp_ifp->if_capabilities2;
		hwa &= lp->lp_ifp->if_hwassist;
		if_hw_tsomax_common(lp->lp_ifp, &hw_tsomax);
	}
	cap2 &= ~IFCAP2_BIT(IFCAP2_IPSEC_OFFLOAD);
	if (CK_SLIST_FIRST(&sc->sc_ports) == NULL)
		cap = cap2 = hwa = 0;

	if (sc->sc_ifp->if_capabilities != cap ||
	    sc->sc_ifp->if_capenable != ena ||
	    sc->sc_ifp->if_capenable2 != ena2 ||
	    sc->sc_ifp->if_hwassist != hwa ||
	    if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax) != 0) {
		sc->sc_ifp->if_capabilities = cap;
		sc->sc_ifp->if_capabilities2 = cap2;
		sc->sc_ifp->if_capenable = ena;
		sc->sc_ifp->if_capenable2 = ena2;
		sc->sc_ifp->if_hwassist = hwa;
		(void)if_hw_tsomax_update(sc->sc_ifp, &hw_tsomax);
		getmicrotime(&sc->sc_ifp->if_lastchange);

		if (sc->sc_ifflags & IFF_DEBUG)
			if_printf(sc->sc_ifp,
			    "capabilities 0x%08x enabled 0x%08x\n", cap, ena);
	}
}
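
/*
 * Worked example (illustrative): if port A has TXCSUM|RXCSUM|TSO4 enabled
 * and port B only TXCSUM, the loop above converges on the intersection
 * TXCSUM, re-applies it to both ports, and advertises it on the lagg
 * interface itself.
 */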

static int
lagg_port_create(struct lagg_softc *sc, struct ifnet *ifp)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp, *tlp;
	struct ifreq ifr;
	int error, i, oldmtu;
	int if_type;
	uint64_t *pval;

	LAGG_XLOCK_ASSERT(sc);

	if (sc->sc_ifp == ifp) {
		if_printf(sc->sc_ifp,
		    "cannot add a lagg to itself as a port\n");
		return (EINVAL);
	}

	if (sc->sc_destroying == 1)
		return (ENXIO);

	/* Limit the maximal number of lagg ports */
	if (sc->sc_count >= LAGG_MAX_PORTS)
		return (ENOSPC);

	/* Check if port has already been associated to a lagg */
	if (ifp->if_lagg != NULL) {
		/* Port is already in the current lagg? */
		lp = (struct lagg_port *)ifp->if_lagg;
		if (lp->lp_softc == sc)
			return (EEXIST);
		return (EBUSY);
	}

	switch (sc->sc_ifp->if_type) {
	case IFT_ETHER:
		/* XXX Disallow non-ethernet interfaces (this should be any of 802) */
		if (ifp->if_type != IFT_ETHER && ifp->if_type != IFT_L2VLAN)
			return (EPROTONOSUPPORT);
		if_type = IFT_IEEE8023ADLAG;
		break;
	case IFT_INFINIBAND:
		/* XXX Disallow non-infiniband interfaces */
		if (ifp->if_type != IFT_INFINIBAND)
			return (EPROTONOSUPPORT);
		if_type = IFT_INFINIBANDLAG;
		break;
	default:
		break;
	}

	/* Allow the first Ethernet member to define the MTU */
	oldmtu = -1;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		sc->sc_ifp->if_mtu = ifp->if_mtu;
	} else if (sc->sc_ifp->if_mtu != ifp->if_mtu) {
		if (ifp->if_ioctl == NULL) {
			if_printf(sc->sc_ifp, "cannot change MTU for %s\n",
			    ifp->if_xname);
			return (EINVAL);
		}
		oldmtu = ifp->if_mtu;
		strlcpy(ifr.ifr_name, ifp->if_xname, sizeof(ifr.ifr_name));
		ifr.ifr_mtu = sc->sc_ifp->if_mtu;
		error = (*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		if (error != 0) {
			if_printf(sc->sc_ifp, "invalid MTU for %s\n",
			    ifp->if_xname);
			return (error);
		}
		ifr.ifr_mtu = oldmtu;
	}

	lp = malloc(sizeof(struct lagg_port), M_LAGG, M_WAITOK | M_ZERO);
	lp->lp_softc = sc;

	/* Check if port is a stacked lagg */
	LAGG_LIST_LOCK();
	SLIST_FOREACH(sc_ptr, &V_lagg_list, sc_entries) {
		if (ifp == sc_ptr->sc_ifp) {
			LAGG_LIST_UNLOCK();
			free(lp, M_LAGG);
			if (oldmtu != -1)
				(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
				    (caddr_t)&ifr);
			return (EINVAL);
			/* XXX disable stacking for the moment, it's untested */
#ifdef LAGG_PORT_STACKING
			lp->lp_flags |= LAGG_PORT_STACK;
			if (lagg_port_checkstacking(sc_ptr) >=
			    LAGG_MAX_STACKING) {
				LAGG_LIST_UNLOCK();
				free(lp, M_LAGG);
				if (oldmtu != -1)
					(*ifp->if_ioctl)(ifp, SIOCSIFMTU,
					    (caddr_t)&ifr);
				return (E2BIG);
			}
#endif
		}
	}
	LAGG_LIST_UNLOCK();

	if_ref(ifp);
	lp->lp_ifp = ifp;

	bcopy(IF_LLADDR(ifp), lp->lp_lladdr, ifp->if_addrlen);
	lp->lp_ifcapenable = ifp->if_capenable;
	if (CK_SLIST_EMPTY(&sc->sc_ports)) {
		bcopy(IF_LLADDR(ifp), IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
		lagg_proto_lladdr(sc);
		EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);
	} else {
		if_setlladdr(ifp, IF_LLADDR(sc->sc_ifp), ifp->if_addrlen);
	}
	lagg_setflags(lp, 1);

	if (CK_SLIST_EMPTY(&sc->sc_ports))
		sc->sc_primary = lp;

	/* Change the interface type */
	lp->lp_iftype = ifp->if_type;
	ifp->if_type = if_type;
	ifp->if_lagg = lp;
	lp->lp_ioctl = ifp->if_ioctl;
	ifp->if_ioctl = lagg_port_ioctl;
	lp->lp_output = ifp->if_output;
	ifp->if_output = lagg_port_output;

	/* Read port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++)
		*pval = ifp->if_get_counter(ifp, i);

	/*
	 * Insert into the list of ports.
	 * Keep the ports sorted by if_index: it keeps the configuration
	 * predictable, so that repeating the same `ifconfig laggN create ...`
	 * command leads to the same result each time.
	 */
	CK_SLIST_FOREACH(tlp, &sc->sc_ports, lp_entries) {
		if (tlp->lp_ifp->if_index < ifp->if_index && (
		    CK_SLIST_NEXT(tlp, lp_entries) == NULL ||
		    ((struct lagg_port *)CK_SLIST_NEXT(tlp, lp_entries))->lp_ifp->if_index >
		    ifp->if_index))
			break;
	}
	if (tlp != NULL)
		CK_SLIST_INSERT_AFTER(tlp, lp, lp_entries);
	else
		CK_SLIST_INSERT_HEAD(&sc->sc_ports, lp, lp_entries);
	sc->sc_count++;

	lagg_setmulti(lp);

	if ((error = lagg_proto_addport(sc, lp)) != 0) {
		/* Remove the port, without calling pr_delport. */
		lagg_port_destroy(lp, 0);
		if (oldmtu != -1)
			(*ifp->if_ioctl)(ifp, SIOCSIFMTU, (caddr_t)&ifr);
		return (error);
	}

	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}
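
/*
 * Example (assumed usage): this function implements the port-addition half
 * of commands such as:
 *
 *	# ifconfig lagg0 laggport em0 laggport em1
 *
 * The first port added defines the lagg MTU and link-level address; ports
 * added later must be able to switch to that MTU.
 */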

#ifdef LAGG_PORT_STACKING
static int
lagg_port_checkstacking(struct lagg_softc *sc)
{
	struct lagg_softc *sc_ptr;
	struct lagg_port *lp;
	int m = 0;

	LAGG_SXLOCK_ASSERT(sc);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (lp->lp_flags & LAGG_PORT_STACK) {
			sc_ptr = (struct lagg_softc *)lp->lp_ifp->if_softc;
			m = MAX(m, lagg_port_checkstacking(sc_ptr));
		}
	}

	return (m + 1);
}
#endif

static void
lagg_port_destroy_cb(epoch_context_t ec)
{
	struct lagg_port *lp;
	struct ifnet *ifp;

	lp = __containerof(ec, struct lagg_port, lp_epoch_ctx);
	ifp = lp->lp_ifp;

	if_rele(ifp);
	free(lp, M_LAGG);
}

static int
lagg_port_destroy(struct lagg_port *lp, int rundelport)
{
	struct lagg_softc *sc = lp->lp_softc;
	struct lagg_port *lp_ptr, *lp0;
	struct ifnet *ifp = lp->lp_ifp;
	uint64_t *pval, vdiff;
	int i;

	LAGG_XLOCK_ASSERT(sc);

	if (rundelport)
		lagg_proto_delport(sc, lp);

	if (lp->lp_detaching == 0)
		lagg_clrmulti(lp);

	/* Restore interface */
	ifp->if_type = lp->lp_iftype;
	ifp->if_ioctl = lp->lp_ioctl;
	ifp->if_output = lp->lp_output;
	ifp->if_lagg = NULL;

	/* Update detached port counters */
	pval = lp->port_counters.val;
	for (i = 0; i < IFCOUNTERS; i++, pval++) {
		vdiff = ifp->if_get_counter(ifp, i) - *pval;
		sc->detached_counters.val[i] += vdiff;
	}

	/* Finally, remove the port from the lagg */
	CK_SLIST_REMOVE(&sc->sc_ports, lp, lagg_port, lp_entries);
	sc->sc_count--;

	/* Update the primary interface */
	if (lp == sc->sc_primary) {
		uint8_t lladdr[LAGG_ADDR_LEN];

		if ((lp0 = CK_SLIST_FIRST(&sc->sc_ports)) == NULL)
			bzero(&lladdr, LAGG_ADDR_LEN);
		else
			bcopy(lp0->lp_lladdr, lladdr, LAGG_ADDR_LEN);
		sc->sc_primary = lp0;
		if (sc->sc_destroying == 0) {
			bcopy(lladdr, IF_LLADDR(sc->sc_ifp), sc->sc_ifp->if_addrlen);
			lagg_proto_lladdr(sc);
			EVENTHANDLER_INVOKE(iflladdr_event, sc->sc_ifp);

			/*
			 * Update the lladdr for each port (the new primary
			 * needs an update as well, to switch from the old
			 * lladdr to its 'real' one).  We can skip this if the
			 * lagg is being destroyed.
			 */
			CK_SLIST_FOREACH(lp_ptr, &sc->sc_ports, lp_entries)
				if_setlladdr(lp_ptr->lp_ifp, lladdr,
				    lp_ptr->lp_ifp->if_addrlen);
		}
	}

	if (lp->lp_ifflags)
		if_printf(ifp, "%s: lp_ifflags unclean\n", __func__);

	if (lp->lp_detaching == 0) {
		lagg_setflags(lp, 0);
		lagg_setcaps(lp, lp->lp_ifcapenable, lp->lp_ifcapenable2);
		if_setlladdr(ifp, lp->lp_lladdr, ifp->if_addrlen);
	}

	/*
	 * Free the port and release its ifnet reference after a grace
	 * period has elapsed.
	 */
	NET_EPOCH_CALL(lagg_port_destroy_cb, &lp->lp_epoch_ctx);
	/* Update lagg capabilities */
	lagg_capabilities(sc);
	lagg_linkstate(sc);

	return (0);
}

static int
lagg_port_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_reqport *rp = (struct lagg_reqport *)data;
	struct lagg_softc *sc;
	struct lagg_port *lp = NULL;
	int error = 0;

	/* Should be checked by the caller */
	switch (ifp->if_type) {
	case IFT_IEEE8023ADLAG:
	case IFT_INFINIBANDLAG:
		if ((lp = ifp->if_lagg) == NULL || (sc = lp->lp_softc) == NULL)
			goto fallback;
		break;
	default:
		goto fallback;
	}

	switch (cmd) {
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    ifunit(rp->rp_portname) != ifp) {
			error = EINVAL;
			break;
		}

		LAGG_SLOCK(sc);
		if (__predict_true((lp = ifp->if_lagg) != NULL &&
		    lp->lp_softc == sc))
			lagg_port2req(lp, rp);
		else
			error = ENOENT;	/* XXXGL: can happen? */
		LAGG_SUNLOCK(sc);
		break;

	case SIOCSIFCAP:
	case SIOCSIFCAPNV:
		if (lp->lp_ioctl == NULL) {
			error = EINVAL;
			break;
		}
		error = (*lp->lp_ioctl)(ifp, cmd, data);
		if (error)
			break;

		/* Update lagg interface capabilities */
		LAGG_XLOCK(sc);
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(sc->sc_ifp);
		break;

	case SIOCSIFMTU:
		/* Do not allow the MTU to be changed once joined */
		error = EINVAL;
		break;

	default:
		goto fallback;
	}

	return (error);

fallback:
	if (lp != NULL && lp->lp_ioctl != NULL)
		return ((*lp->lp_ioctl)(ifp, cmd, data));

	return (EINVAL);
}

/*
 * Requests counter @cnt data.
 *
 * The counter value is calculated in the following way:
 * 1) for each port, sum the difference between the current and "initial"
 *    measurements.
 * 2) add the lagg logical interface counters.
 * 3) add the data from the detached_counters array.
 *
 * We also do the following things on port attach/detach:
 * 1) On port attach we store all the counters the port has into the
 *    port_counters array.
 * 2) On port detach we add the difference between the "initial" and
 *    current counter data to the detached_counters array.
 */
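/*
 * Worked example (illustrative numbers only): if a port attached when its
 * input-packet counter read 1000 and it reads 1500 now, the lagg layer
 * itself contributed 10, and detached_counters holds 200 from a previously
 * removed port, then lagg_get_counter() reports
 * (1500 - 1000) + 10 + 200 = 710.
 */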
static uint64_t
lagg_get_counter(struct ifnet *ifp, ift_counter cnt)
{
	struct epoch_tracker et;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *lpifp;
	uint64_t newval, oldval, vsum;

	/* Revise this when we've got non-generic counters. */
	KASSERT(cnt < IFCOUNTERS, ("%s: invalid cnt %d", __func__, cnt));

	sc = (struct lagg_softc *)ifp->if_softc;

	vsum = 0;
	NET_EPOCH_ENTER(et);
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		/* Saved attached value */
		oldval = lp->port_counters.val[cnt];
		/* Current value */
		lpifp = lp->lp_ifp;
		newval = lpifp->if_get_counter(lpifp, cnt);
		/* Calculate the diff and add it to the sum */
		vsum += newval - oldval;
	}
	NET_EPOCH_EXIT(et);

	/*
	 * Add counter data which might be added by upper
	 * layer protocols operating on the logical interface.
	 */
	vsum += if_get_counter_default(ifp, cnt);

	/*
	 * Add counter data from the detached port counters.
	 */
	vsum += sc->detached_counters.val[cnt];

	return (vsum);
}

/*
 * For direct output to child ports.
 */
static int
lagg_port_output(struct ifnet *ifp, struct mbuf *m,
    const struct sockaddr *dst, struct route *ro)
{
	struct lagg_port *lp = ifp->if_lagg;

	switch (dst->sa_family) {
	case pseudo_AF_HDRCMPLT:
	case AF_UNSPEC:
		if (lp != NULL)
			return ((*lp->lp_output)(ifp, m, dst, ro));
	}

	/* drop any other frames */
	m_freem(m);
	return (ENETDOWN);
}

static void
lagg_port_ifdetach(void *arg __unused, struct ifnet *ifp)
{
	struct lagg_port *lp;
	struct lagg_softc *sc;

	if ((lp = ifp->if_lagg) == NULL)
		return;

	sc = lp->lp_softc;

	LAGG_XLOCK(sc);
	lp->lp_detaching = 1;
	lagg_port_destroy(lp, 1);
	LAGG_XUNLOCK(sc);
	VLAN_CAPABILITIES(sc->sc_ifp);
}

static void
lagg_port2req(struct lagg_port *lp, struct lagg_reqport *rp)
{
	struct lagg_softc *sc = lp->lp_softc;

	strlcpy(rp->rp_ifname, sc->sc_ifname, sizeof(rp->rp_ifname));
	strlcpy(rp->rp_portname, lp->lp_ifp->if_xname, sizeof(rp->rp_portname));
	rp->rp_prio = lp->lp_prio;
	rp->rp_flags = lp->lp_flags;
	lagg_proto_portreq(sc, lp, &rp->rp_psc);

	/* Add protocol specific flags */
	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		if (lp == sc->sc_primary)
			rp->rp_flags |= LAGG_PORT_MASTER;
		if (lp == lagg_link_active(sc, sc->sc_primary))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_ROUNDROBIN:
	case LAGG_PROTO_LOADBALANCE:
	case LAGG_PROTO_BROADCAST:
		if (LAGG_PORTACTIVE(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		break;

	case LAGG_PROTO_LACP:
		/* LACP has a different definition of active */
		if (lacp_isactive(lp))
			rp->rp_flags |= LAGG_PORT_ACTIVE;
		if (lacp_iscollecting(lp))
			rp->rp_flags |= LAGG_PORT_COLLECTING;
		if (lacp_isdistributing(lp))
			rp->rp_flags |= LAGG_PORT_DISTRIBUTING;
		break;
	}
}

static void
lagg_watchdog_infiniband(void *arg)
{
	struct epoch_tracker et;
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct ifnet *ifp;
	struct ifnet *lp_ifp;

	sc = arg;

	/*
	 * Because infiniband nodes have a fixed MAC address, which is
	 * generated by the so-called GID, we need to regularly update
	 * the link level address of the parent lagg<N> device when
	 * the active port changes. Possibly we could piggy-back on
	 * link up/down events as well, but using a timer also provides
	 * a guarantee against too frequent events. This operation
	 * does not have to be atomic.
	 */
	NET_EPOCH_ENTER(et);
	lp = lagg_link_active(sc, sc->sc_primary);
	if (lp != NULL) {
		ifp = sc->sc_ifp;
		lp_ifp = lp->lp_ifp;

		if (ifp != NULL && lp_ifp != NULL &&
		    (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen) != 0 ||
		     memcmp(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen) != 0)) {
			memcpy(IF_LLADDR(ifp), IF_LLADDR(lp_ifp), ifp->if_addrlen);
			memcpy(sc->sc_bcast_addr, lp_ifp->if_broadcastaddr, ifp->if_addrlen);

			CURVNET_SET(ifp->if_vnet);
			EVENTHANDLER_INVOKE(iflladdr_event, ifp);
			CURVNET_RESTORE();
		}
	}
	NET_EPOCH_EXIT(et);

	callout_reset(&sc->sc_watchdog, hz, &lagg_watchdog_infiniband, arg);
}

static void
lagg_if_updown(struct lagg_softc *sc, bool up)
{
	struct ifreq ifr = {};
	struct lagg_port *lp;

	LAGG_XLOCK_ASSERT(sc);

	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (up)
			if_up(lp->lp_ifp);
		else
			if_down(lp->lp_ifp);

		if (lp->lp_ioctl != NULL)
			lp->lp_ioctl(lp->lp_ifp, SIOCSIFFLAGS, (caddr_t)&ifr);
	}
}

static void
lagg_init(void *xsc)
{
	struct lagg_softc *sc = (struct lagg_softc *)xsc;

	LAGG_XLOCK(sc);
	lagg_init_locked(sc);
	LAGG_XUNLOCK(sc);
}

static void
lagg_init_locked(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;
	struct lagg_port *lp;

	LAGG_XLOCK_ASSERT(sc);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	/*
	 * Update the port lladdrs if needed.
	 * This can happen as a result of an if_setlladdr() notification
	 * that the lladdr has been changed.
	 */
	CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
		if (memcmp(IF_LLADDR(ifp), IF_LLADDR(lp->lp_ifp),
		    ifp->if_addrlen) != 0)
			if_setlladdr(lp->lp_ifp, IF_LLADDR(ifp), ifp->if_addrlen);
	}

	lagg_if_updown(sc, true);

	lagg_proto_init(sc);

	if (ifp->if_type == IFT_INFINIBAND) {
		mtx_lock(&sc->sc_mtx);
		lagg_watchdog_infiniband(sc);
		mtx_unlock(&sc->sc_mtx);
	}
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
}

static void
lagg_stop(struct lagg_softc *sc)
{
	struct ifnet *ifp = sc->sc_ifp;

	LAGG_XLOCK_ASSERT(sc);

	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
		return;

	ifp->if_drv_flags &= ~IFF_DRV_RUNNING;

	lagg_proto_stop(sc);

	mtx_lock(&sc->sc_mtx);
	callout_stop(&sc->sc_watchdog);
	mtx_unlock(&sc->sc_mtx);

	lagg_if_updown(sc, false);

	callout_drain(&sc->sc_watchdog);
}

static int
lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
	struct lagg_reqall *ra = (struct lagg_reqall *)data;
	struct lagg_reqopts *ro = (struct lagg_reqopts *)data;
	struct lagg_reqport *rp = (struct lagg_reqport *)data, rpbuf;
	struct lagg_reqflags *rf = (struct lagg_reqflags *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	struct lagg_port *lp;
	struct ifnet *tpif;
	struct thread *td = curthread;
	char *buf, *outbuf;
	int count, buflen, len, error = 0, oldmtu;

	bzero(&rpbuf, sizeof(rpbuf));

	/* XXX: This can race with lagg_clone_destroy. */

	switch (cmd) {
	case SIOCGLAGG:
		LAGG_XLOCK(sc);
		buflen = sc->sc_count * sizeof(struct lagg_reqport);
		outbuf = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
		ra->ra_proto = sc->sc_proto;
		lagg_proto_request(sc, &ra->ra_psc);
		count = 0;
		buf = outbuf;
		len = min(ra->ra_size, buflen);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (len < sizeof(rpbuf))
				break;

			lagg_port2req(lp, &rpbuf);
			memcpy(buf, &rpbuf, sizeof(rpbuf));
			count++;
			buf += sizeof(rpbuf);
			len -= sizeof(rpbuf);
		}
		LAGG_XUNLOCK(sc);
		ra->ra_ports = count;
		ra->ra_size = count * sizeof(rpbuf);
		error = copyout(outbuf, ra->ra_port, ra->ra_size);
		free(outbuf, M_TEMP);
		break;
	case SIOCSLAGG:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (ra->ra_proto >= LAGG_PROTO_MAX) {
			error = EPROTONOSUPPORT;
			break;
		}
		/* Infiniband only supports the failover protocol. */
		if (ra->ra_proto != LAGG_PROTO_FAILOVER &&
		    ifp->if_type == IFT_INFINIBAND) {
			error = EPROTONOSUPPORT;
			break;
		}
		LAGG_XLOCK(sc);
		lagg_proto_detach(sc);
		lagg_proto_attach(sc, ra->ra_proto);
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGOPTS:
		LAGG_XLOCK(sc);
		ro->ro_opts = sc->sc_opts;
		if (sc->sc_proto == LAGG_PROTO_LACP) {
			struct lacp_softc *lsc;

			lsc = (struct lacp_softc *)sc->sc_psc;
			if (lsc->lsc_debug.lsc_tx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_TXTEST;
			if (lsc->lsc_debug.lsc_rx_test != 0)
				ro->ro_opts |= LAGG_OPT_LACP_RXTEST;
			if (lsc->lsc_strict_mode != 0)
				ro->ro_opts |= LAGG_OPT_LACP_STRICT;
			if (lsc->lsc_fast_timeout != 0)
				ro->ro_opts |= LAGG_OPT_LACP_FAST_TIMO;

			ro->ro_active = sc->sc_active;
		} else {
			ro->ro_active = 0;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
				ro->ro_active += LAGG_PORTACTIVE(lp);
		}
		ro->ro_bkt = sc->sc_stride;
		ro->ro_flapping = sc->sc_flapping;
		ro->ro_flowid_shift = sc->flowid_shift;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGOPTS:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;

		/*
		 * The stride option was added without defining a corresponding
		 * LAGG_OPT flag, so handle a non-zero value before checking
		 * anything else to preserve compatibility.
		 */
		LAGG_XLOCK(sc);
		if (ro->ro_opts == 0 && ro->ro_bkt != 0) {
			if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN) {
				LAGG_XUNLOCK(sc);
				error = EINVAL;
				break;
			}
			sc->sc_stride = ro->ro_bkt;
		}
		if (ro->ro_opts == 0) {
			LAGG_XUNLOCK(sc);
			break;
		}

		/*
		 * Set options. LACP options are stored in sc->sc_psc,
		 * not in sc_opts.
		 */
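		/*
		 * Note: a negated option constant (e.g. -LAGG_OPT_USE_FLOWID)
		 * requests clearing that option, which is why each flag
		 * appears with both signs in the switch below.
		 */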
		int valid, lacp;

		switch (ro->ro_opts) {
		case LAGG_OPT_USE_FLOWID:
		case -LAGG_OPT_USE_FLOWID:
		case LAGG_OPT_USE_NUMA:
		case -LAGG_OPT_USE_NUMA:
		case LAGG_OPT_FLOWIDSHIFT:
		case LAGG_OPT_RR_LIMIT:
			valid = 1;
			lacp = 0;
			break;
		case LAGG_OPT_LACP_TXTEST:
		case -LAGG_OPT_LACP_TXTEST:
		case LAGG_OPT_LACP_RXTEST:
		case -LAGG_OPT_LACP_RXTEST:
		case LAGG_OPT_LACP_STRICT:
		case -LAGG_OPT_LACP_STRICT:
		case LAGG_OPT_LACP_FAST_TIMO:
		case -LAGG_OPT_LACP_FAST_TIMO:
			valid = lacp = 1;
			break;
		default:
			valid = lacp = 0;
			break;
		}

		if (valid == 0 ||
		    (lacp == 1 && sc->sc_proto != LAGG_PROTO_LACP)) {
			/* Invalid combination of options specified. */
			error = EINVAL;
			LAGG_XUNLOCK(sc);
			break;	/* Return from SIOCSLAGGOPTS. */
		}

		/*
		 * Store new options into sc->sc_opts except for
		 * FLOWIDSHIFT, RR and LACP options.
		 */
		if (lacp == 0) {
			if (ro->ro_opts == LAGG_OPT_FLOWIDSHIFT)
				sc->flowid_shift = ro->ro_flowid_shift;
			else if (ro->ro_opts == LAGG_OPT_RR_LIMIT) {
				if (sc->sc_proto != LAGG_PROTO_ROUNDROBIN ||
				    ro->ro_bkt == 0) {
					error = EINVAL;
					LAGG_XUNLOCK(sc);
					break;
				}
				sc->sc_stride = ro->ro_bkt;
			} else if (ro->ro_opts > 0)
				sc->sc_opts |= ro->ro_opts;
			else
				sc->sc_opts &= ~ro->ro_opts;
		} else {
			struct lacp_softc *lsc;
			struct lacp_port *lp;

			lsc = (struct lacp_softc *)sc->sc_psc;

			switch (ro->ro_opts) {
			case LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 1;
				break;
			case -LAGG_OPT_LACP_TXTEST:
				lsc->lsc_debug.lsc_tx_test = 0;
				break;
			case LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 1;
				break;
			case -LAGG_OPT_LACP_RXTEST:
				lsc->lsc_debug.lsc_rx_test = 0;
				break;
			case LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 1;
				break;
			case -LAGG_OPT_LACP_STRICT:
				lsc->lsc_strict_mode = 0;
				break;
			case LAGG_OPT_LACP_FAST_TIMO:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state |= LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 1;
				break;
			case -LAGG_OPT_LACP_FAST_TIMO:
				LACP_LOCK(lsc);
				LIST_FOREACH(lp, &lsc->lsc_ports, lp_next)
					lp->lp_state &= ~LACP_STATE_TIMEOUT;
				LACP_UNLOCK(lsc);
				lsc->lsc_fast_timeout = 0;
				break;
			}
		}
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGFLAGS:
		rf->rf_flags = 0;
		LAGG_XLOCK(sc);
		if (sc->sc_flags & MBUF_HASHFLAG_L2)
			rf->rf_flags |= LAGG_F_HASHL2;
		if (sc->sc_flags & MBUF_HASHFLAG_L3)
			rf->rf_flags |= LAGG_F_HASHL3;
		if (sc->sc_flags & MBUF_HASHFLAG_L4)
			rf->rf_flags |= LAGG_F_HASHL4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCSLAGGHASH:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if ((rf->rf_flags & LAGG_F_HASHMASK) == 0) {
			error = EINVAL;
			break;
		}
		LAGG_XLOCK(sc);
		sc->sc_flags = 0;
		if (rf->rf_flags & LAGG_F_HASHL2)
			sc->sc_flags |= MBUF_HASHFLAG_L2;
		if (rf->rf_flags & LAGG_F_HASHL3)
			sc->sc_flags |= MBUF_HASHFLAG_L3;
		if (rf->rf_flags & LAGG_F_HASHL4)
			sc->sc_flags |= MBUF_HASHFLAG_L4;
		LAGG_XUNLOCK(sc);
		break;
	case SIOCGLAGGPORT:
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_SLOCK(sc);
		if (__predict_true((lp = tpif->if_lagg) != NULL &&
		    lp->lp_softc == sc))
			lagg_port2req(lp, rp);
		else
			error = ENOENT;	/* XXXGL: can happen? */
		LAGG_SUNLOCK(sc);
		if_rele(tpif);
		break;

	case SIOCSLAGGPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}
#ifdef INET6
		/*
		 * A laggport interface should not have inet6 addresses
		 * because two interfaces with a valid link-local
		 * scope zone must not be merged in any form. This
		 * restriction is needed to prevent violation of the
		 * link-local scope zone. An attempt to add a laggport
		 * interface which has inet6 addresses triggers the
		 * removal of all inet6 addresses on the member
		 * interface.
		 */
		if (in6ifa_llaonifp(tpif)) {
			in6_ifdetach(tpif);
			if_printf(sc->sc_ifp,
			    "IPv6 addresses on %s have been removed "
			    "before adding it as a member to prevent "
			    "IPv6 address scope violation.\n",
			    tpif->if_xname);
		}
#endif
		oldmtu = ifp->if_mtu;
		LAGG_XLOCK(sc);
		error = lagg_port_create(sc, tpif);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);

		/*
		 * The lagg MTU may change during addition of the first port.
		 * If it did, run the network layer specific procedure.
		 */
		if (ifp->if_mtu != oldmtu)
			if_notifymtu(ifp);

		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSLAGGDELPORT:
		error = priv_check(td, PRIV_NET_LAGG);
		if (error)
			break;
		if (rp->rp_portname[0] == '\0' ||
		    (tpif = ifunit_ref(rp->rp_portname)) == NULL) {
			error = EINVAL;
			break;
		}

		LAGG_XLOCK(sc);
		if ((lp = (struct lagg_port *)tpif->if_lagg) == NULL ||
		    lp->lp_softc != sc) {
			error = ENOENT;
			LAGG_XUNLOCK(sc);
			if_rele(tpif);
			break;
		}

		error = lagg_port_destroy(lp, 1);
		LAGG_XUNLOCK(sc);
		if_rele(tpif);
		VLAN_CAPABILITIES(ifp);
		break;
	case SIOCSIFFLAGS:
		/* Set flags on ports too */
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_setflags(lp, 1);
		}

		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
			/*
			 * If interface is marked down and it is running,
			 * then stop and disable it.
			 */
			lagg_stop(sc);
		else if ((ifp->if_flags & IFF_UP) != 0 &&
		    (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			lagg_init_locked(sc);
		LAGG_XUNLOCK(sc);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			lagg_clrmulti(lp);
			lagg_setmulti(lp);
		}
		LAGG_XUNLOCK(sc);
		error = 0;
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		if (ifp->if_type == IFT_INFINIBAND)
			error = EINVAL;
		else
			error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFCAP:
	case SIOCSIFCAPNV:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		error = 0;
		break;

	case SIOCGIFCAPNV:
		error = 0;
		break;

	case SIOCSIFMTU:
		LAGG_XLOCK(sc);
		CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
			if (lp->lp_ioctl != NULL)
				error = (*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			else
				error = EINVAL;
			if (error != 0) {
				if_printf(ifp,
				    "failed to change MTU to %d on port %s, "
				    "reverting all ports to original MTU (%d)\n",
				    ifr->ifr_mtu, lp->lp_ifp->if_xname, ifp->if_mtu);
				break;
			}
		}
		if (error == 0) {
			ifp->if_mtu = ifr->ifr_mtu;
		} else {
			/* set every port back to the original MTU */
			ifr->ifr_mtu = ifp->if_mtu;
			CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
				if (lp->lp_ioctl != NULL)
					(*lp->lp_ioctl)(lp->lp_ifp, cmd, data);
			}
		}
		lagg_capabilities(sc);
		LAGG_XUNLOCK(sc);
		VLAN_CAPABILITIES(ifp);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}
	return (error);
}

#if defined(KERN_TLS) || defined(RATELIMIT)
#ifdef RATELIMIT
static const struct if_snd_tag_sw lagg_snd_tag_ul_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_UNLIMITED
};

static const struct if_snd_tag_sw lagg_snd_tag_rl_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_RATE_LIMIT
};
#endif

#ifdef KERN_TLS
static const struct if_snd_tag_sw lagg_snd_tag_tls_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS
};

#ifdef RATELIMIT
static const struct if_snd_tag_sw lagg_snd_tag_tls_rl_sw = {
	.snd_tag_modify = lagg_snd_tag_modify,
	.snd_tag_query = lagg_snd_tag_query,
	.snd_tag_free = lagg_snd_tag_free,
	.next_snd_tag = lagg_next_snd_tag,
	.type = IF_SND_TAG_TYPE_TLS_RATE_LIMIT
};
#endif
#endif

static inline struct lagg_snd_tag *
mst_to_lst(struct m_snd_tag *mst)
{

	return (__containerof(mst, struct lagg_snd_tag, com));
}

/*
 * Look up the port used by a specific flow. This only works for lagg
 * protocols with deterministic port mappings (e.g. not roundrobin).
 * In addition, protocols which use a hash to map flows to ports must
 * be configured to use the mbuf flowid rather than hashing packet
 * contents.
 */
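/*
 * Configuration sketch (assumed usage): for the loadbalance and LACP cases
 * below this means the flowid option must be enabled on the lagg, e.g.:
 *
 *	# ifconfig lagg0 use_flowid
 *
 * so that the mapping stays deterministic, as in
 * (flowid >> flowid_shift) % sc_count for loadbalance.
 */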
static struct lagg_port *
lookup_snd_tag_port(struct ifnet *ifp, uint32_t flowid, uint32_t flowtype,
    uint8_t numa_domain)
{
	struct lagg_softc *sc;
	struct lagg_port *lp;
	struct lagg_lb *lb;
	uint32_t hash, p;
	int err;

	sc = ifp->if_softc;

	switch (sc->sc_proto) {
	case LAGG_PROTO_FAILOVER:
		return (lagg_link_active(sc, sc->sc_primary));
	case LAGG_PROTO_LOADBALANCE:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		p = flowid >> sc->flowid_shift;
		p %= sc->sc_count;
		lb = (struct lagg_lb *)sc->sc_psc;
		lp = lb->lb_ports[p];
		return (lagg_link_active(sc, lp));
	case LAGG_PROTO_LACP:
		if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) == 0 ||
		    flowtype == M_HASHTYPE_NONE)
			return (NULL);
		hash = flowid >> sc->flowid_shift;
		return (lacp_select_tx_port_by_hash(sc, hash, numa_domain, &err));
	default:
		return (NULL);
	}
}

static int
lagg_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
        struct epoch_tracker et;
        const struct if_snd_tag_sw *sw;
        struct lagg_snd_tag *lst;
        struct lagg_port *lp;
        struct ifnet *lp_ifp;
        struct m_snd_tag *mst;
        int error;

        switch (params->hdr.type) {
#ifdef RATELIMIT
        case IF_SND_TAG_TYPE_UNLIMITED:
                sw = &lagg_snd_tag_ul_sw;
                break;
        case IF_SND_TAG_TYPE_RATE_LIMIT:
                sw = &lagg_snd_tag_rl_sw;
                break;
#endif
#ifdef KERN_TLS
        case IF_SND_TAG_TYPE_TLS:
                sw = &lagg_snd_tag_tls_sw;
                break;
        case IF_SND_TAG_TYPE_TLS_RX:
                /* Return tag from port interface directly. */
                sw = NULL;
                break;
#ifdef RATELIMIT
        case IF_SND_TAG_TYPE_TLS_RATE_LIMIT:
                sw = &lagg_snd_tag_tls_rl_sw;
                break;
#endif
#endif
        default:
                return (EOPNOTSUPP);
        }

        NET_EPOCH_ENTER(et);
        lp = lookup_snd_tag_port(ifp, params->hdr.flowid,
            params->hdr.flowtype, params->hdr.numa_domain);
        if (lp == NULL) {
                NET_EPOCH_EXIT(et);
                return (EOPNOTSUPP);
        }
        if (lp->lp_ifp == NULL) {
                NET_EPOCH_EXIT(et);
                return (EOPNOTSUPP);
        }
        lp_ifp = lp->lp_ifp;
        if_ref(lp_ifp);
        NET_EPOCH_EXIT(et);

        if (sw != NULL) {
                lst = malloc(sizeof(*lst), M_LAGG, M_NOWAIT);
                if (lst == NULL) {
                        if_rele(lp_ifp);
                        return (ENOMEM);
                }
        } else
                lst = NULL;

        error = m_snd_tag_alloc(lp_ifp, params, &mst);
        if_rele(lp_ifp);
        if (error) {
                free(lst, M_LAGG);
                return (error);
        }

        if (sw != NULL) {
                m_snd_tag_init(&lst->com, ifp, sw);
                lst->tag = mst;

                *ppmt = &lst->com;
        } else
                *ppmt = mst;

        return (0);
}
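
/*
 * Illustrative summary: the tag returned above is two-level,
 *
 *      lst->com.ifp  == lagg interface (what the socket holds)
 *      lst->tag->ifp == port interface (where the offload lives)
 *
 * and lagg_next_snd_tag() below peels off the outer layer so that
 * consumers can walk down to the driver-owned tag.
 */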

static struct m_snd_tag *
lagg_next_snd_tag(struct m_snd_tag *mst)
{
        struct lagg_snd_tag *lst;

        lst = mst_to_lst(mst);
        return (lst->tag);
}

static int
lagg_snd_tag_modify(struct m_snd_tag *mst,
    union if_snd_tag_modify_params *params)
{
        struct lagg_snd_tag *lst;

        lst = mst_to_lst(mst);
        return (lst->tag->sw->snd_tag_modify(lst->tag, params));
}

static int
lagg_snd_tag_query(struct m_snd_tag *mst,
    union if_snd_tag_query_params *params)
{
        struct lagg_snd_tag *lst;

        lst = mst_to_lst(mst);
        return (lst->tag->sw->snd_tag_query(lst->tag, params));
}

static void
lagg_snd_tag_free(struct m_snd_tag *mst)
{
        struct lagg_snd_tag *lst;

        lst = mst_to_lst(mst);
        m_snd_tag_rele(lst->tag);
        free(lst, M_LAGG);
}

static void
lagg_ratelimit_query(struct ifnet *ifp __unused, struct if_ratelimit_query_results *q)
{
        /*
         * For lagg, we have an indirect interface.  The caller needs to
         * get a ratelimit tag on the actual interface the flow will go
         * on.
         */
        q->rate_table = NULL;
        q->flags = RT_IS_INDIRECT;
        q->max_flows = 0;
        q->number_of_rates = 0;
}
#endif

static int
lagg_setmulti(struct lagg_port *lp)
{
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *ifp = lp->lp_ifp;
        struct ifnet *scifp = sc->sc_ifp;
        struct lagg_mc *mc;
        struct ifmultiaddr *ifma;
        int error;

        IF_ADDR_WLOCK(scifp);
        CK_STAILQ_FOREACH(ifma, &scifp->if_multiaddrs, ifma_link) {
                if (ifma->ifma_addr->sa_family != AF_LINK)
                        continue;
                mc = malloc(sizeof(struct lagg_mc), M_LAGG, M_NOWAIT);
                if (mc == NULL) {
                        IF_ADDR_WUNLOCK(scifp);
                        return (ENOMEM);
                }
                bcopy(ifma->ifma_addr, &mc->mc_addr, ifma->ifma_addr->sa_len);
                mc->mc_addr.sdl_index = ifp->if_index;
                mc->mc_ifma = NULL;
                SLIST_INSERT_HEAD(&lp->lp_mc_head, mc, mc_entries);
        }
        IF_ADDR_WUNLOCK(scifp);
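        /*
         * Note (illustrative): the multicast list is snapshotted under
         * the lagg interface's address lock above; the if_addmulti()
         * calls below are deferred until that lock is dropped, since
         * if_addmulti() takes the port's own address lock internally.
         */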
        SLIST_FOREACH(mc, &lp->lp_mc_head, mc_entries) {
                error = if_addmulti(ifp,
                    (struct sockaddr *)&mc->mc_addr, &mc->mc_ifma);
                if (error)
                        return (error);
        }
        return (0);
}

static int
lagg_clrmulti(struct lagg_port *lp)
{
        struct lagg_mc *mc;

        LAGG_XLOCK_ASSERT(lp->lp_softc);
        while ((mc = SLIST_FIRST(&lp->lp_mc_head)) != NULL) {
                SLIST_REMOVE(&lp->lp_mc_head, mc, lagg_mc, mc_entries);
                if (mc->mc_ifma && lp->lp_detaching == 0)
                        if_delmulti_ifma(mc->mc_ifma);
                free(mc, M_LAGG);
        }
        return (0);
}

static void
lagg_setcaps(struct lagg_port *lp, int cap, int cap2)
{
        struct ifreq ifr;
        struct siocsifcapnv_driver_data drv_ioctl_data;

        if (lp->lp_ifp->if_capenable == cap &&
            lp->lp_ifp->if_capenable2 == cap2)
                return;
        if (lp->lp_ioctl == NULL)
                return;
        /* XXX */
        if ((lp->lp_ifp->if_capabilities & IFCAP_NV) != 0) {
                drv_ioctl_data.reqcap = cap;
                drv_ioctl_data.reqcap2 = cap2;
                drv_ioctl_data.nvcap = NULL;
                (*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAPNV,
                    (caddr_t)&drv_ioctl_data);
        } else {
                ifr.ifr_reqcap = cap;
                (*lp->lp_ioctl)(lp->lp_ifp, SIOCSIFCAP, (caddr_t)&ifr);
        }
}

/* Handle a ref-counted flag that should be set on the lagg port as well */
static int
lagg_setflag(struct lagg_port *lp, int flag, int status,
    int (*func)(struct ifnet *, int))
{
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *scifp = sc->sc_ifp;
        struct ifnet *ifp = lp->lp_ifp;
        int error;

        LAGG_XLOCK_ASSERT(sc);

        status = status ? (scifp->if_flags & flag) : 0;
        /* Now "status" contains the flag value or 0 */

        /*
         * See if the recorded port status is different from what
         * we want it to be.  If it is, flip it.  We record port
         * status in lp_ifflags so that we won't clear a flag on the
         * port that we haven't set.  In fact, we don't clear or set
         * port flags directly, but get or release references to them.
         * That's why we can be sure that the recorded flags still
         * agree with the actual port flags.
         */
        if (status != (lp->lp_ifflags & flag)) {
                error = (*func)(ifp, status);
                if (error)
                        return (error);
                lp->lp_ifflags &= ~flag;
                lp->lp_ifflags |= status;
        }
        return (0);
}
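
/*
 * Example (illustrative): if IFF_PROMISC is set on the lagg interface,
 * lagg_setflag(lp, IFF_PROMISC, 1, ifpromisc) obtains one promiscuity
 * reference on the port; clearing the flag later releases that same
 * reference, so a port that is promiscuous for some other reason stays
 * promiscuous.
 */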

/*
 * Handle IFF_* flags that require certain changes on the lagg port.
 * If "status" is true, update the port flags to match the lagg;
 * if "status" is false, forcibly clear the flags set on the port.
 */
static int
lagg_setflags(struct lagg_port *lp, int status)
{
        int error, i;

        for (i = 0; lagg_pflags[i].flag; i++) {
                error = lagg_setflag(lp, lagg_pflags[i].flag,
                    status, lagg_pflags[i].func);
                if (error)
                        return (error);
        }
        return (0);
}

static int
lagg_transmit_ethernet(struct ifnet *ifp, struct mbuf *m)
{
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

        NET_EPOCH_ASSERT();
#if defined(KERN_TLS) || defined(RATELIMIT)
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
                MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
        /* We need at least one port */
        if (sc->sc_count == 0) {
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                return (ENXIO);
        }

        ETHER_BPF_MTAP(ifp, m);

        return (lagg_proto_start(sc, m));
}

static int
lagg_transmit_infiniband(struct ifnet *ifp, struct mbuf *m)
{
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

        NET_EPOCH_ASSERT();
#if defined(KERN_TLS) || defined(RATELIMIT)
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG)
                MPASS(m->m_pkthdr.snd_tag->ifp == ifp);
#endif
        /* We need at least one port */
        if (sc->sc_count == 0) {
                m_freem(m);
                if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                return (ENXIO);
        }

        infiniband_bpf_mtap(ifp, m);

        return (lagg_proto_start(sc, m));
}

/*
 * The ifp->if_qflush entry point for lagg(4) is a no-op.
 */
static void
lagg_qflush(struct ifnet *ifp __unused)
{
}

static struct mbuf *
lagg_input_ethernet(struct ifnet *ifp, struct mbuf *m)
{
        struct lagg_port *lp = ifp->if_lagg;
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *scifp = sc->sc_ifp;

        NET_EPOCH_ASSERT();
        if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            lp->lp_detaching != 0) {
                m_freem(m);
                return (NULL);
        }

        m = lagg_proto_input(sc, lp, m);
        if (m != NULL) {
                ETHER_BPF_MTAP(scifp, m);

                if ((scifp->if_flags & IFF_MONITOR) != 0) {
                        m_freem(m);
                        m = NULL;
                }
        }

#ifdef DEV_NETMAP
        if (m != NULL && scifp->if_capenable & IFCAP_NETMAP) {
                scifp->if_input(scifp, m);
                m = NULL;
        }
#endif /* DEV_NETMAP */

        return (m);
}

static struct mbuf *
lagg_input_infiniband(struct ifnet *ifp, struct mbuf *m)
{
        struct lagg_port *lp = ifp->if_lagg;
        struct lagg_softc *sc = lp->lp_softc;
        struct ifnet *scifp = sc->sc_ifp;

        NET_EPOCH_ASSERT();
        if ((scifp->if_drv_flags & IFF_DRV_RUNNING) == 0 ||
            lp->lp_detaching != 0) {
                m_freem(m);
                return (NULL);
        }

        m = lagg_proto_input(sc, lp, m);
        if (m != NULL) {
                infiniband_bpf_mtap(scifp, m);

                if ((scifp->if_flags & IFF_MONITOR) != 0) {
                        m_freem(m);
                        m = NULL;
                }
        }

        return (m);
}

static int
lagg_media_change(struct ifnet *ifp)
{
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;

        if (sc->sc_ifflags & IFF_DEBUG)
                printf("%s\n", __func__);

        /* Ignore */
        return (0);
}

static void
lagg_media_status(struct ifnet *ifp, struct ifmediareq *imr)
{
        struct epoch_tracker et;
        struct lagg_softc *sc = (struct lagg_softc *)ifp->if_softc;
        struct lagg_port *lp;

        imr->ifm_status = IFM_AVALID;
        imr->ifm_active = IFM_ETHER | IFM_AUTO;

        NET_EPOCH_ENTER(et);
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
                if (LAGG_PORTACTIVE(lp))
                        imr->ifm_status |= IFM_ACTIVE;
        }
        NET_EPOCH_EXIT(et);
}

static void
lagg_linkstate(struct lagg_softc *sc)
{
        struct epoch_tracker et;
        struct lagg_port *lp;
        int new_link = LINK_STATE_DOWN;
        uint64_t speed;

        LAGG_XLOCK_ASSERT(sc);

        /* LACP handles link state itself */
        if (sc->sc_proto == LAGG_PROTO_LACP)
                return;

        /* Our link is considered up if at least one of our ports is active */
        NET_EPOCH_ENTER(et);
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
                if (lp->lp_ifp->if_link_state == LINK_STATE_UP) {
                        new_link = LINK_STATE_UP;
                        break;
                }
        }
        NET_EPOCH_EXIT(et);
        if_link_state_change(sc->sc_ifp, new_link);

        /* Update if_baudrate to reflect the max possible speed */
        switch (sc->sc_proto) {
        case LAGG_PROTO_FAILOVER:
                sc->sc_ifp->if_baudrate = sc->sc_primary != NULL ?
                    sc->sc_primary->lp_ifp->if_baudrate : 0;
                break;
        case LAGG_PROTO_ROUNDROBIN:
        case LAGG_PROTO_LOADBALANCE:
        case LAGG_PROTO_BROADCAST:
                speed = 0;
                NET_EPOCH_ENTER(et);
                CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                        speed += lp->lp_ifp->if_baudrate;
                NET_EPOCH_EXIT(et);
                sc->sc_ifp->if_baudrate = speed;
                break;
        case LAGG_PROTO_LACP:
                /* LACP updates if_baudrate itself */
                break;
        }
}
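
/*
 * Example (illustrative): a loadbalance lagg over two 10 Gb/s ports and
 * one 1 Gb/s port advertises if_baudrate = 21 Gb/s, while a failover
 * lagg over the same ports advertises only the primary port's 10 Gb/s.
 */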

static void
lagg_port_state(struct ifnet *ifp, int state)
{
        struct lagg_port *lp = (struct lagg_port *)ifp->if_lagg;
        struct lagg_softc *sc = NULL;

        if (lp != NULL)
                sc = lp->lp_softc;
        if (sc == NULL)
                return;

        LAGG_XLOCK(sc);
        lagg_linkstate(sc);
        lagg_proto_linkstate(sc, lp);
        LAGG_XUNLOCK(sc);
}

static struct lagg_port *
lagg_link_active(struct lagg_softc *sc, struct lagg_port *lp)
{
        struct lagg_port *lp_next, *rval = NULL;

        /*
         * Search for a port which reports an active link state.
         */

#ifdef INVARIANTS
        /*
         * This is called either in the network epoch
         * or with LAGG_XLOCK(sc) held.
         */
        if (!in_epoch(net_epoch_preempt))
                LAGG_XLOCK_ASSERT(sc);
#endif

        if (lp == NULL)
                goto search;
        if (LAGG_PORTACTIVE(lp)) {
                rval = lp;
                goto found;
        }
        if ((lp_next = CK_SLIST_NEXT(lp, lp_entries)) != NULL &&
            LAGG_PORTACTIVE(lp_next)) {
                rval = lp_next;
                goto found;
        }

search:
        CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
                if (LAGG_PORTACTIVE(lp_next)) {
                        return (lp_next);
                }
        }
found:
        return (rval);
}
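
/*
 * Example (illustrative): with ports p0 -> p1 -> p2 on the list and p1
 * passed in, the preference order is p1 itself, then its list successor
 * p2, and finally the first active port found scanning from the head.
 */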

int
lagg_enqueue(struct ifnet *ifp, struct mbuf *m)
{

#if defined(KERN_TLS) || defined(RATELIMIT)
        if (m->m_pkthdr.csum_flags & CSUM_SND_TAG) {
                struct lagg_snd_tag *lst;
                struct m_snd_tag *mst;

                mst = m->m_pkthdr.snd_tag;
                lst = mst_to_lst(mst);
                if (lst->tag->ifp != ifp) {
                        m_freem(m);
                        return (EAGAIN);
                }
                m->m_pkthdr.snd_tag = m_snd_tag_ref(lst->tag);
                m_snd_tag_rele(mst);
        }
#endif
        return (ifp->if_transmit)(ifp, m);
}

/*
 * No proto
 */
static int
lagg_none_start(struct lagg_softc *sc, struct mbuf *m)
{
        m_freem(m);
        if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
        /* No active ports available */
        return (ENETDOWN);
}

static struct mbuf *
lagg_none_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
        m_freem(m);
        return (NULL);
}

/*
 * Simple round robin aggregation
 */
static void
lagg_rr_attach(struct lagg_softc *sc)
{
        sc->sc_seq = 0;
        sc->sc_stride = 1;
}

static int
lagg_rr_start(struct lagg_softc *sc, struct mbuf *m)
{
        struct lagg_port *lp;
        uint32_t p;

        p = atomic_fetchadd_32(&sc->sc_seq, 1);
        p /= sc->sc_stride;
        p %= sc->sc_count;
        lp = CK_SLIST_FIRST(&sc->sc_ports);

        while (p--)
                lp = CK_SLIST_NEXT(lp, lp_entries);

        /*
         * Check the port's link state. This will return the next active
         * port if the link is down or the port is NULL.
         */
        if ((lp = lagg_link_active(sc, lp)) == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (ENETDOWN);
        }

        /* Send mbuf */
        return (lagg_enqueue(lp->lp_ifp, m));
}
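
/*
 * Worked example (illustrative): with sc_stride = 2 and three ports,
 * the transmit sequence 0, 1, 2, 3, 4, 5, ... maps to port indices
 * 0, 0, 1, 1, 2, 2, since the index is (seq / stride) % count.
 */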

/*
 * Broadcast mode
 */
static int
lagg_bcast_start(struct lagg_softc *sc, struct mbuf *m)
{
        int errors = 0;
        int ret;
        struct lagg_port *lp, *last = NULL;
        struct mbuf *m0;

        NET_EPOCH_ASSERT();
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries) {
                if (!LAGG_PORTACTIVE(lp))
                        continue;

                if (last != NULL) {
                        m0 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
                        if (m0 == NULL) {
                                ret = ENOBUFS;
                                errors++;
                                break;
                        }
                        lagg_enqueue(last->lp_ifp, m0);
                }
                last = lp;
        }

        if (last == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (ENOENT);
        }
        if ((last = lagg_link_active(sc, last)) == NULL) {
                errors++;
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);
                m_freem(m);
                return (ENETDOWN);
        }

        ret = lagg_enqueue(last->lp_ifp, m);
        if (errors != 0)
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, errors);

        return (ret);
}
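
/*
 * Note (illustrative): for n active ports only n - 1 copies are made;
 * the loop transmits a copy on every active port except the most
 * recently visited one, and the original mbuf is sent on that last
 * port.
 */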

/*
 * Active failover
 */
static int
lagg_fail_start(struct lagg_softc *sc, struct mbuf *m)
{
        struct lagg_port *lp;

        /* Use the master port if active or the next available port */
        if ((lp = lagg_link_active(sc, sc->sc_primary)) == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (ENETDOWN);
        }

        /* Send mbuf */
        return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_fail_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct lagg_port *tmp_tp;

        if (lp == sc->sc_primary || V_lagg_failover_rx_all) {
                m->m_pkthdr.rcvif = ifp;
                return (m);
        }

        if (!LAGG_PORTACTIVE(sc->sc_primary)) {
                tmp_tp = lagg_link_active(sc, sc->sc_primary);
                /*
                 * If tmp_tp is null, we've received a packet when all
                 * our links are down. Weird, but process it anyway.
                 */
                if (tmp_tp == NULL || tmp_tp == lp) {
                        m->m_pkthdr.rcvif = ifp;
                        return (m);
                }
        }

        m_freem(m);
        return (NULL);
}
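
/*
 * Example (illustrative): with ports A (primary) and B, frames received
 * on B are normally dropped; they are accepted only if
 * net.link.lagg.failover_rx_all is set, or if A's link is down and B is
 * currently the active failover port.
 */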

/*
 * Loadbalancing
 */
static void
lagg_lb_attach(struct lagg_softc *sc)
{
        struct lagg_port *lp;
        struct lagg_lb *lb;

        LAGG_XLOCK_ASSERT(sc);
        lb = malloc(sizeof(struct lagg_lb), M_LAGG, M_WAITOK | M_ZERO);
        lb->lb_key = m_ether_tcpip_hash_init();
        sc->sc_psc = lb;

        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                lagg_lb_port_create(lp);
}

static void
lagg_lb_detach(struct lagg_softc *sc)
{
        struct lagg_lb *lb;

        lb = (struct lagg_lb *)sc->sc_psc;
        if (lb != NULL)
                free(lb, M_LAGG);
}

static int
lagg_lb_porttable(struct lagg_softc *sc, struct lagg_port *lp)
{
        struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
        struct lagg_port *lp_next;
        int i = 0, rv;

        rv = 0;
        bzero(&lb->lb_ports, sizeof(lb->lb_ports));
        LAGG_XLOCK_ASSERT(sc);
        CK_SLIST_FOREACH(lp_next, &sc->sc_ports, lp_entries) {
                if (lp_next == lp)
                        continue;
                if (i >= LAGG_MAX_PORTS) {
                        rv = EINVAL;
                        break;
                }
                if (sc->sc_ifflags & IFF_DEBUG)
                        printf("%s: port %s at index %d\n",
                            sc->sc_ifname, lp_next->lp_ifp->if_xname, i);
                lb->lb_ports[i++] = lp_next;
        }

        return (rv);
}

static int
lagg_lb_port_create(struct lagg_port *lp)
{
        struct lagg_softc *sc = lp->lp_softc;

        return (lagg_lb_porttable(sc, NULL));
}

static void
lagg_lb_port_destroy(struct lagg_port *lp)
{
        struct lagg_softc *sc = lp->lp_softc;

        lagg_lb_porttable(sc, lp);
}

static int
lagg_lb_start(struct lagg_softc *sc, struct mbuf *m)
{
        struct lagg_lb *lb = (struct lagg_lb *)sc->sc_psc;
        struct lagg_port *lp = NULL;
        uint32_t p = 0;

        if ((sc->sc_opts & LAGG_OPT_USE_FLOWID) &&
            M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
                p = m->m_pkthdr.flowid >> sc->flowid_shift;
        else
                p = m_ether_tcpip_hash(sc->sc_flags, m, lb->lb_key);
        p %= sc->sc_count;
        lp = lb->lb_ports[p];

        /*
         * Check the port's link state. This will return the next active
         * port if the link is down or the port is NULL.
         */
        if ((lp = lagg_link_active(sc, lp)) == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (ENETDOWN);
        }

        /* Send mbuf */
        return (lagg_enqueue(lp->lp_ifp, m));
}
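
/*
 * Worked example (illustrative): without a usable flowid the port is
 * chosen from a software hash of the headers; with three ports and
 * m_ether_tcpip_hash() returning 0x9e3779b1, the frame is queued to
 * port 0x9e3779b1 % 3 = 1, subject to lagg_link_active().
 */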

/*
 * 802.3ad LACP
 */
static void
lagg_lacp_attach(struct lagg_softc *sc)
{
        struct lagg_port *lp;

        lacp_attach(sc);
        LAGG_XLOCK_ASSERT(sc);
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                lacp_port_create(lp);
}

static void
lagg_lacp_detach(struct lagg_softc *sc)
{
        struct lagg_port *lp;
        void *psc;

        LAGG_XLOCK_ASSERT(sc);
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                lacp_port_destroy(lp);

        psc = sc->sc_psc;
        sc->sc_psc = NULL;
        lacp_detach(psc);
}

static void
lagg_lacp_lladdr(struct lagg_softc *sc)
{
        struct lagg_port *lp;

        LAGG_SXLOCK_ASSERT(sc);

        /* purge all the lacp ports */
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                lacp_port_destroy(lp);

        /* add them back in */
        CK_SLIST_FOREACH(lp, &sc->sc_ports, lp_entries)
                lacp_port_create(lp);
}

static int
lagg_lacp_start(struct lagg_softc *sc, struct mbuf *m)
{
        struct lagg_port *lp;
        int err;

        lp = lacp_select_tx_port(sc, m, &err);
        if (lp == NULL) {
                if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1);
                m_freem(m);
                return (err);
        }

        /* Send mbuf */
        return (lagg_enqueue(lp->lp_ifp, m));
}

static struct mbuf *
lagg_lacp_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
        struct ifnet *ifp = sc->sc_ifp;
        struct ether_header *eh;
        u_short etype;

        eh = mtod(m, struct ether_header *);
        etype = ntohs(eh->ether_type);

        /* Tap off LACP control messages */
        if ((m->m_flags & M_VLANTAG) == 0 && etype == ETHERTYPE_SLOW) {
                m = lacp_input(lp, m);
                if (m == NULL)
                        return (NULL);
        }

        /*
         * If the port is not collecting or not in the active aggregator then
         * free and return.
         */
        if (!lacp_iscollecting(lp) || !lacp_isactive(lp)) {
                m_freem(m);
                return (NULL);
        }

        m->m_pkthdr.rcvif = ifp;
        return (m);
}
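
/*
 * Note (illustrative): untagged frames with ethertype 0x8809
 * (ETHERTYPE_SLOW) carry LACPDUs and marker PDUs; lacp_input() consumes
 * them, so they are never delivered through the lagg interface itself.
 */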

/* Default input */
static struct mbuf *
lagg_default_input(struct lagg_softc *sc, struct lagg_port *lp, struct mbuf *m)
{
        struct ifnet *ifp = sc->sc_ifp;

        /* Just pass in the packet to our lagg device */
        m->m_pkthdr.rcvif = ifp;

        return (m);
}