1 /*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13 /*
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18 #include "bna.h"
19
20 static inline int
ethport_can_be_up(struct bna_ethport * ethport)21 ethport_can_be_up(struct bna_ethport *ethport)
22 {
23 int ready = 0;
24 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
28 else
29 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
32 return ready;
33 }
34
35 #define ethport_is_up ethport_can_be_up
36
/* Events posted to the ethport state machine */
enum bna_ethport_event {
	ETHPORT_E_START = 1,		/* enet started the port */
	ETHPORT_E_STOP = 2,		/* enet is stopping the port */
	ETHPORT_E_FAIL = 3,		/* IOC/enet failure */
	ETHPORT_E_UP = 4,		/* all link-up conditions now met */
	ETHPORT_E_DOWN = 5,		/* a link-up condition was lost */
	ETHPORT_E_FWRESP_UP_OK = 6,	/* f/w acknowledged the up request */
	ETHPORT_E_FWRESP_DOWN = 7,	/* f/w acknowledged the down request */
	ETHPORT_E_FWRESP_UP_FAIL = 8,	/* f/w rejected the up request */
};
47
/* Events posted to the enet state machine */
enum bna_enet_event {
	ENET_E_START = 1,		/* bring the enet up */
	ENET_E_STOP = 2,		/* tear the enet down */
	ENET_E_FAIL = 3,		/* IOC failure */
	ENET_E_PAUSE_CFG = 4,		/* pause configuration requested */
	ENET_E_MTU_CFG = 5,		/* MTU change requested */
	ENET_E_FWRESP_PAUSE = 6,	/* f/w replied to a pause-set cmd */
	ENET_E_CHLD_STOPPED = 7,	/* ethport/tx/rx children all stopped */
};
57
/* Events posted to the ioceth state machine */
enum bna_ioceth_event {
	IOCETH_E_ENABLE = 1,		/* enable request from bnad */
	IOCETH_E_DISABLE = 2,		/* disable request from bnad */
	IOCETH_E_IOC_RESET = 3,		/* IOC is resetting */
	IOCETH_E_IOC_FAILED = 4,	/* IOC reported failure */
	IOCETH_E_IOC_READY = 5,		/* IOC came up */
	IOCETH_E_ENET_ATTR_RESP = 6,	/* attribute query response arrived */
	IOCETH_E_ENET_STOPPED = 7,	/* enet finished stopping */
	IOCETH_E_IOC_DISABLED = 8,	/* IOC finished disabling */
};
68
/*
 * Copy one hardware stats block (_name) of layout _type from the firmware
 * DMA area (hw_stats_kva) into the software mirror (hw_stats), byte-swapping
 * each 64-bit counter from big-endian.  Relies on locals 'count', 'i',
 * 'stats_src' and 'stats_dst' declared by the caller.
 *
 * Fix: dropped the stray trailing '\' after "} while (0)" -- it spliced the
 * following source line into the macro definition.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
77
78 /*
79 * FW response handlers
80 */
81
/* Firmware AEN: physical port was enabled.  Record it and kick the state
 * machine if the port now satisfies all conditions to come up.
 */
static void
bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_can_be_up(ethport))
		bfa_fsm_send_event(ethport, ETHPORT_E_UP);
}
91
/* Firmware AEN: physical port was disabled. */
static void
bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	/* Sample the up state *before* clearing the flag: clearing
	 * BNA_ETHPORT_F_PORT_ENABLED changes what ethport_is_up() returns.
	 */
	int ethport_up = ethport_is_up(ethport);

	ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport_up)
		bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
}
103
/* Firmware response to a port admin up/down request.  The request we sent
 * (kept in bfi_enet_cmd.admin_req) tells whether this acks an up or a down.
 */
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Up rejected: mark the physical port disabled so
			 * the port cannot be retried until re-enabled. */
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		/* Down acked: drive the FSM, then report link down to bnad */
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
129
/* Firmware response to a diag loopback enable/disable request (loopback
 * counterpart of bna_bfi_ethport_admin_rsp()).
 */
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Loopback-up rejected: drop admin-up so the FSM
			 * does not keep trying. */
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
153
/* Firmware ack for a pause-set command: forward to the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
159
/* Firmware response to the enet attribute query: cache the reported
 * resource limits and advance the ioceth FSM.
 */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.fw_query_complete) {
		/* max_cfg bounds both TxQ and RxP counts */
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
		ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
		ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
		ioceth->attr.max_rit_size = ntohl(rsp->rit_size);
		ioceth->attr.fw_query_complete = true;
	}

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
181
182 static void
bna_bfi_stats_get_rsp(struct bna * bna,struct bfi_msgq_mhdr * msghdr)183 bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
184 {
185 struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
186 u64 *stats_src;
187 u64 *stats_dst;
188 u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
189 u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
190 int count;
191 int i;
192
193 bna_stats_copy(mac, mac);
194 bna_stats_copy(bpc, bpc);
195 bna_stats_copy(rad, rad);
196 bna_stats_copy(rlb, rad);
197 bna_stats_copy(fc_rx, fc_rx);
198 bna_stats_copy(fc_tx, fc_tx);
199
200 stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);
201
202 /* Copy Rxf stats to SW area, scatter them while copying */
203 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
204 stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
205 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
206 if (rx_enet_mask & ((u32)(1 << i))) {
207 int k;
208 count = sizeof(struct bfi_enet_stats_rxf) /
209 sizeof(u64);
210 for (k = 0; k < count; k++) {
211 stats_dst[k] = be64_to_cpu(*stats_src);
212 stats_src++;
213 }
214 }
215 }
216
217 /* Copy Txf stats to SW area, scatter them while copying */
218 for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
219 stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
220 memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
221 if (tx_enet_mask & ((u32)(1 << i))) {
222 int k;
223 count = sizeof(struct bfi_enet_stats_txf) /
224 sizeof(u64);
225 for (k = 0; k < count; k++) {
226 stats_dst[k] = be64_to_cpu(*stats_src);
227 stats_src++;
228 }
229 }
230 }
231
232 bna->stats_mod.stats_get_busy = false;
233 bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
234 }
235
/* Firmware AEN: physical link came up.  Record and notify bnad. */
static void
bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_UP;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
}

/* Firmware AEN: physical link went down.  Record and notify bnad. */
static void
bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
			struct bfi_msgq_mhdr *msghdr)
{
	ethport->link_status = BNA_LINK_DOWN;

	/* Dispatch events */
	ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
}
255
/* Handle an error interrupt: clear a halt condition if one is latched,
 * then hand the error to the IOC layer.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
264
265 void
bna_mbox_handler(struct bna * bna,u32 intr_status)266 bna_mbox_handler(struct bna *bna, u32 intr_status)
267 {
268 if (BNA_IS_ERR_INTR(bna, intr_status)) {
269 bna_err_handler(bna, intr_status);
270 return;
271 }
272 if (BNA_IS_MBOX_INTR(bna, intr_status))
273 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
274 }
275
/* Demultiplex a firmware message-queue response/AEN to its target object.
 * Rx/Tx-scoped messages are resolved from msghdr->enet_id via the
 * bna_rx_from_rid()/bna_tx_from_rid() lookup macros (which can leave the
 * pointer NULL, hence the checks); port/enet/ioceth messages go to the
 * singleton objects inside @bna.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All Rx-function configuration acks share one handler */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
380
381 /**
382 * ETHPORT
383 */
/* Invoke and disarm the one-shot stop-complete callback, if armed.
 * The callback is cleared before being called so a re-entrant stop
 * cannot fire it twice.
 */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

/* Invoke and disarm the one-shot admin-up completion callback with the
 * given status, if armed.
 */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
403
/* Build and post a port admin-up command to firmware. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
420
/* Build and post a port admin-down command to firmware.  Uses the same
 * BFI_ENET_H2I_PORT_ADMIN_UP_REQ message id as admin-up; the direction is
 * carried in the 'enable' field (BNA_STATUS_T_DISABLED).
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
437
/* Build and post a diag loopback enable command to firmware. */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	/* NOTE(review): INTERNAL loopback selects the _EXT opmode and every
	 * other loopback type selects _CBL -- the mapping looks inverted
	 * relative to the names; confirm against the BFI opmode definitions
	 * before changing.
	 */
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
458
/* Build and post a diag loopback disable command to firmware. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
475
476 static void
bna_bfi_ethport_up(struct bna_ethport * ethport)477 bna_bfi_ethport_up(struct bna_ethport *ethport)
478 {
479 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
480 bna_bfi_ethport_admin_up(ethport);
481 else
482 bna_bfi_ethport_lpbk_up(ethport);
483 }
484
485 static void
bna_bfi_ethport_down(struct bna_ethport * ethport)486 bna_bfi_ethport_down(struct bna_ethport *ethport)
487 {
488 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
489 bna_bfi_ethport_admin_down(ethport);
490 else
491 bna_bfi_ethport_lpbk_down(ethport);
492 }
493
/* Ethport state machine: declare per-state entry functions and event
 * handlers (expanded by bfa_fsm_state_decl).
 */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
506
/* stopped: ethport not started (or failed).  Entry completes any pending
 * stop request.
 */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		/* Already stopped: just complete the callback */
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
539
/* down: started but link conditions not yet met.  No entry action. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		/* Conditions met: ask firmware to bring the port up */
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
567
/* up_resp_wait: "up" command sent to firmware, awaiting its response. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		/* The earlier down command just completed; re-issue the up */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
611
/* down_resp_wait: a "down" is owed to firmware once the in-flight command
 * completes.
 */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		/* The up completed; now send the deferred down */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
653
/* up: firmware acknowledged the port is up.  No entry action. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		/* Bring the port down first; stop completes from
		 * last_resp_wait once firmware responds. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
682
/* last_resp_wait: stop requested; waiting for the final firmware response
 * before entering stopped.
 */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
		/* The up completed; still need to bring the port down */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
719
/* One-time ethport initialization: defaults to admin-up + port-enabled,
 * link down, and the stopped state.
 */
static void
bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
{
	ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
	ethport->bna = bna;

	ethport->link_status = BNA_LINK_DOWN;
	ethport->link_cbfn = bnad_cb_ethport_link_status;

	ethport->rx_started_count = 0;

	ethport->stop_cbfn = NULL;
	ethport->adminup_cbfn = NULL;

	bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
}
736
737 static void
bna_ethport_uninit(struct bna_ethport * ethport)738 bna_ethport_uninit(struct bna_ethport *ethport)
739 {
740 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
741 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
742
743 ethport->bna = NULL;
744 }
745
/* Kick the ethport FSM to start. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

/* Stop-complete callback installed by bna_ethport_stop(): release the
 * enet's child-stop wait counter.
 */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

/* Arm the stop-complete callback and kick the ethport FSM to stop. */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
764
/* Handle an IOC failure: reset state, report link down if needed, and
 * fail the FSM.
 */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
777
/* Should be called only when ethport is disabled */
/* An Rx object started: on the first one, mark Rx started and bring the
 * port up if all other conditions are met.
 */
void
bna_ethport_cb_rx_started(struct bna_ethport *ethport)
{
	ethport->rx_started_count++;

	if (ethport->rx_started_count == 1) {
		ethport->flags |= BNA_ETHPORT_F_RX_STARTED;

		if (ethport_can_be_up(ethport))
			bfa_fsm_send_event(ethport, ETHPORT_E_UP);
	}
}

/* An Rx object stopped: on the last one, clear Rx started and take the
 * port down if it was up.
 */
void
bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
{
	/* Sample the up state before clearing BNA_ETHPORT_F_RX_STARTED,
	 * which changes what ethport_is_up() returns. */
	int ethport_up = ethport_is_up(ethport);

	ethport->rx_started_count--;

	if (ethport->rx_started_count == 0) {
		ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;

		if (ethport_up)
			bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
	}
}
806
807 /**
808 * ENET
809 */
/* Start all enet children (ethport, Tx mod, Rx mod), propagating the
 * regular/loopback type to the Tx/Rx modules.
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop all enet children, tracking their completions with the
 * chld_stop_wc wait counter (one bfa_wc_up() per child before each stop;
 * bna_enet_cb_chld_stopped fires when all have counted down).
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Fail all enet children immediately (no wait counter involved). */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
847
/* Start only the Rx module (used when resuming after an MTU change). */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop only the Rx module, tracked by the chld_stop_wc wait counter. */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
866
/* Invoke and disarm the one-shot stop-complete callback with its saved
 * argument, if armed.
 */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Invoke and disarm the one-shot pause-config-complete callback. */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

/* Invoke and disarm the one-shot MTU-config-complete callback. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
899
900 static void bna_enet_cb_chld_stopped(void *arg);
901 static void bna_bfi_pause_set(struct bna_enet *enet);
902
/* Enet state machine: declare per-state entry functions and event
 * handlers (expanded by bfa_fsm_state_decl).
 */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
917
/* stopped: enet not started.  Entry completes any pending pause/MTU/stop
 * callbacks.
 */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		/* Nothing to program; just complete the callback */
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
962
/* pause_init_wait: initial pause configuration sent to firmware at start;
 * children are started once firmware acknowledges.
 */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* Defer: re-sent when the in-flight command completes */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			/* Config changed while in flight: send again */
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1006
/* last_resp_wait: stop requested while a pause command is in flight;
 * wait for the firmware response (or a failure) before stopping.
 */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1027
/* started: enet fully up; pause/MTU reconfiguration moves to cfg_wait. */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change requires Rx to be stopped and restarted */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1067
/* No entry action: cfg_wait is entered with the triggering command
 * already posted by the previous state.
 */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}
1072
/* A pause-set command or an MTU-driven Rx stop is in flight. Further
 * PAUSE_CFG/MTU_CFG requests are latched in flags and replayed, one at
 * a time, when the current operation completes; only when no change is
 * pending do we return to started.
 */
static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		/* Latch; replayed below once the in-flight op finishes */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		/* Rx was stopped for an MTU change; restart it, then share
		 * the pending-change replay logic with FWRESP_PAUSE.
		 */
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1118
/* Entry action: stopping cancels any latched pause/MTU reconfiguration. */
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}
1125
/* Stop was requested while a configuration operation was in flight.
 * Wait for that operation (FW pause response or child-stop completion)
 * before proceeding to stop the children.
 */
static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1145
/* Entry action: initiate stop of the child objects (Tx/Rx/ethport);
 * completion arrives as ENET_E_CHLD_STOPPED.
 */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}
1151
/* Waiting for all children to stop; both orderly completion and IOC
 * failure terminate in the stopped state (failure also propagates to
 * the children via bna_enet_chld_fail()).
 */
static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1170
/* Build a SET_PAUSE_REQ from the current pause configuration and post
 * it to the FW message queue. The response is delivered asynchronously
 * and surfaces as ENET_E_FWRESP_PAUSE.
 */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	/* num_entries is in FW byte order (big-endian) */
	pause_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1187
1188 static void
bna_enet_cb_chld_stopped(void * arg)1189 bna_enet_cb_chld_stopped(void *arg)
1190 {
1191 struct bna_enet *enet = (struct bna_enet *)arg;
1192
1193 bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
1194 }
1195
/* One-time initialization of the enet object: reset flags/MTU/type,
 * clear all deferred callbacks, and park the FSM in stopped.
 */
static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}
1213
/* Teardown counterpart of bna_enet_init(); drops the back-pointer. */
static void
bna_enet_uninit(struct bna_enet *enet)
{
	enet->flags = 0;

	enet->bna = NULL;
}
1221
1222 static void
bna_enet_start(struct bna_enet * enet)1223 bna_enet_start(struct bna_enet *enet)
1224 {
1225 enet->flags |= BNA_ENET_F_IOCETH_READY;
1226 if (enet->flags & BNA_ENET_F_ENABLED)
1227 bfa_fsm_send_event(enet, ENET_E_START);
1228 }
1229
1230 static void
bna_ioceth_cb_enet_stopped(void * arg)1231 bna_ioceth_cb_enet_stopped(void *arg)
1232 {
1233 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1234
1235 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1236 }
1237
/* Initiate enet stop on behalf of the ioceth; completion is reported
 * back via bna_ioceth_cb_enet_stopped().
 */
static void
bna_enet_stop(struct bna_enet *enet)
{
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1247
/* Propagate an IOC failure into the enet state machine. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1254
/* Tx module reports stop completion; decrement the child-stop wait
 * counter (when it hits zero, ENET_E_CHLD_STOPPED is raised).
 */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1260
/* Rx module reports stop completion; see bna_enet_cb_tx_stopped(). */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
1266
/* Return the currently configured MTU (0 until bna_enet_mtu_set()). */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1272
/* Administratively enable the enet. Only honored from the stopped
 * state; the FSM starts immediately if the IOC is already ready,
 * otherwise bna_enet_start() will kick it later.
 */
void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1284
/* Administratively disable the enet. BNA_SOFT_CLEANUP completes
 * synchronously without touching the FSM; a hard cleanup registers
 * cbfn to be invoked once the FSM reaches stopped.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		 void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1301
/* Record a new pause configuration and kick the FSM; cbfn is invoked
 * once the configuration has been applied (or on transition back to
 * started).
 */
void
bna_enet_pause_config(struct bna_enet *enet,
		      struct bna_pause_config *pause_config,
		      void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}
1313
/* Record a new MTU and kick the FSM; the MTU change requires an Rx
 * stop/restart cycle (see bna_enet_sm_started). cbfn fires when done.
 */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		 void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1324
/* Fetch the factory (permanent) MAC address from the IOC. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
1330
/**
 * IOCETH
 */

/* Enable mailbox interrupt delivery. The interrupt status read
 * presumably acks/flushes any stale pending interrupt before
 * re-enabling — confirm in bna_intr_status_get().
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupt delivery (reverse order of enable). */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)

/* Invoke and clear the one-shot ioceth stop callback, if registered.
 * The callback fields are cleared before the call so re-registration
 * from within the callback is safe.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Stats module lifecycle helpers: track IOC readiness and clear the
 * busy flags on failure so stalled get/clear requests are abandoned.
 */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)

static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* ioceth state machine: state entry functions and event handlers are
 * declared pairwise by bfa_fsm_state_decl().
 */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
1400
/* Entry action: deliver the deferred stop-completion callback, if any. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}
1406
/* Idle state. IOC_RESET/IOC_FAILED can still arrive here (IOC events
 * are not gated by ioceth state); they only toggle mailbox interrupts
 * or move to failed.
 */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Re-enter stopped so the stop callback is delivered */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1434
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/**
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}
1443
/* Waiting for the IOC to finish its enable sequence (reported through
 * bna_cb_ioceth_enable as IOC_READY or IOC_FAILED).
 */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1471
/* Entry action: query FW for enet attributes (queue counts, CAM sizes). */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}
1477
/* Waiting for the FW attribute response. A disable request here must
 * first drain the outstanding request (last_resp_wait).
 */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1500
/* Entry action: bring up dependents (enet, stats) and notify the
 * driver that the ioceth is operational.
 */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}
1508
/* Fully operational state. IOC failure propagates to enet and stats
 * before moving to failed; disable goes through enet_stop_wait.
 */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1528
/* No entry action: simply waiting for the outstanding attr response. */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}
1533
/* Disable was requested with an attribute request still outstanding.
 * Once the request terminates (response or IOC failure) the IOC itself
 * is disabled.
 */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1554
/* Entry action: begin orderly teardown — stop stats, then the enet.
 * Enet stop completion arrives as IOCETH_E_ENET_STOPPED.
 */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}
1561
/* Waiting for the enet to stop before disabling the IOC. An IOC
 * failure short-circuits by failing the children immediately.
 */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1584
/* No entry action: bfa_nw_ioc_disable() was issued by the prior state. */
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
}
1589
/* Waiting for the IOC disable to complete; mailbox interrupts are
 * turned off only once the IOC confirms it is down.
 */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1609
/* Entry action: report the failure to the driver layer. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}
1615
/* IOC has failed. A subsequent IOC_RESET re-arms the mailbox and
 * retries the enable path; repeated IOC_FAILED events are absorbed.
 */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		break;

	default:
		bfa_sm_fault(event);
	}
}
1638
/* Post a GET_ATTR_REQ to FW; the response surfaces through the msgq
 * handler as IOCETH_E_ENET_ATTR_RESP.
 */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	/* num_entries is in FW byte order (big-endian) */
	attr_req->mh.num_entries = htons(
	bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1652
1653 /* IOC callback functions */
1654
1655 static void
bna_cb_ioceth_enable(void * arg,enum bfa_status error)1656 bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1657 {
1658 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1659
1660 if (error)
1661 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1662 else
1663 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1664 }
1665
1666 static void
bna_cb_ioceth_disable(void * arg)1667 bna_cb_ioceth_disable(void *arg)
1668 {
1669 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1670
1671 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1672 }
1673
1674 static void
bna_cb_ioceth_hbfail(void * arg)1675 bna_cb_ioceth_hbfail(void *arg)
1676 {
1677 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1678
1679 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1680 }
1681
1682 static void
bna_cb_ioceth_reset(void * arg)1683 bna_cb_ioceth_reset(void *arg)
1684 {
1685 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1686
1687 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1688 }
1689
1690 static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
1691 bna_cb_ioceth_enable,
1692 bna_cb_ioceth_disable,
1693 bna_cb_ioceth_hbfail,
1694 bna_cb_ioceth_reset
1695 };
1696
bna_attr_init(struct bna_ioceth * ioceth)1697 static void bna_attr_init(struct bna_ioceth *ioceth)
1698 {
1699 ioceth->attr.num_txq = BFI_ENET_DEF_TXQ;
1700 ioceth->attr.num_rxp = BFI_ENET_DEF_RXP;
1701 ioceth->attr.num_ucmac = BFI_ENET_DEF_UCAM;
1702 ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
1703 ioceth->attr.max_rit_size = BFI_ENET_DEF_RITSZ;
1704 ioceth->attr.fw_query_complete = false;
1705 }
1706
/* One-time ioceth initialization: attach the IOC, hand out slices of
 * the pre-allocated DMA/KVA regions to each sub-module, and park the
 * FSM in stopped.
 *
 * NOTE: kva/dma are advanced by each module's meminfo size, so the
 * claim order below must match the size computation in bna_res_req().
 */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/**
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_debug_memclaim(&ioceth->ioc, kva);

	/**
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_nw_flash_attach(&bna->flash, &ioceth->ioc, bna);
	bfa_nw_flash_memclaim(&bna->flash, kva, dma);
	kva += bfa_nw_flash_meminfo();
	dma += bfa_nw_flash_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bna_attr_init(ioceth);

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1762
/* Teardown counterpart of bna_ioceth_init(): detach the IOC and drop
 * the back-pointer.
 */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1770
/* Enable the ioceth. If already ready, just re-announce readiness to
 * the driver; the enable event is only meaningful from stopped.
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
1782
/* Disable the ioceth. BNA_SOFT_CLEANUP completes synchronously without
 * touching hardware; a hard cleanup registers the disabled callback and
 * drives the FSM through the full teardown sequence.
 */
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
1796
/* Initialize the unicast CAM module: thread the pre-allocated MAC
 * entry array (sized by the FW-reported num_ucmac) onto the free list.
 */
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}
1814
1815 static void
bna_ucam_mod_uninit(struct bna_ucam_mod * ucam_mod)1816 bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1817 {
1818 struct list_head *qe;
1819 int i = 0;
1820
1821 list_for_each(qe, &ucam_mod->free_q)
1822 i++;
1823
1824 ucam_mod->bna = NULL;
1825 }
1826
/* Initialize the multicast CAM module: thread the pre-allocated MAC
 * and MC-handle arrays (both sized by num_mcmac) onto their free lists.
 */
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		  struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			&mcam_mod->free_handle_q);
	}

	mcam_mod->bna = bna;
}
1854
1855 static void
bna_mcam_mod_uninit(struct bna_mcam_mod * mcam_mod)1856 bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1857 {
1858 struct list_head *qe;
1859 int i;
1860
1861 i = 0;
1862 list_for_each(qe, &mcam_mod->free_q) i++;
1863
1864 i = 0;
1865 list_for_each(qe, &mcam_mod->free_handle_q) i++;
1866
1867 mcam_mod->bna = NULL;
1868 }
1869
/* Post a STATS_GET_REQ covering all stats classes for the currently
 * active Tx/Rx resource IDs. FW DMAs results into the host buffer set
 * up in bna_init(). Marks stats_get_busy; cleared when the response
 * arrives (or on bna_stats_mod_fail).
 */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1891
/* Describe the fixed (attribute-independent) memory resources the BNA
 * needs; the driver allocates them and passes the filled res_info back
 * to bna_init()/bna_ioceth_init(). The COM region size must track the
 * claim sequence in bna_ioceth_init().
 */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
				(bfa_nw_cee_meminfo() +
				 bfa_nw_flash_meminfo() +
				 bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
				ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = BNA_DBG_FWTRC_LEN;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(sizeof(struct bfi_enet_stats),
			PAGE_SIZE);
}
1925
/* Describe the per-module memory resources, sized from the FW-reported
 * attributes — must therefore run after the attribute query completes.
 * Each RxPath owns up to two RxQs, hence the factor of 2 below.
 */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		attr->num_ucmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
1995
/* First-stage BNA initialization: wire up driver/PCI context, the
 * stats DMA buffer, register addresses, and the ioceth/enet/ethport
 * objects. Module arrays are set up later by bna_mod_init() once FW
 * attributes are known.
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
2018
/* Second-stage initialization: set up Tx/Rx/ucam/mcam modules from the
 * attribute-sized resources described by bna_mod_res_req(). The
 * INIT_DONE flag gates the matching teardown in bna_uninit().
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2035
/* Full BNA teardown, in reverse order of bna_init()/bna_mod_init().
 * The module uninit block runs only if bna_mod_init() completed.
 */
void
bna_uninit(struct bna *bna)
{
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2055
2056 int
bna_num_txq_set(struct bna * bna,int num_txq)2057 bna_num_txq_set(struct bna *bna, int num_txq)
2058 {
2059 if (bna->ioceth.attr.fw_query_complete &&
2060 (num_txq <= bna->ioceth.attr.num_txq)) {
2061 bna->ioceth.attr.num_txq = num_txq;
2062 return BNA_CB_SUCCESS;
2063 }
2064
2065 return BNA_CB_FAIL;
2066 }
2067
2068 int
bna_num_rxp_set(struct bna * bna,int num_rxp)2069 bna_num_rxp_set(struct bna *bna, int num_rxp)
2070 {
2071 if (bna->ioceth.attr.fw_query_complete &&
2072 (num_rxp <= bna->ioceth.attr.num_rxp)) {
2073 bna->ioceth.attr.num_rxp = num_rxp;
2074 return BNA_CB_SUCCESS;
2075 }
2076
2077 return BNA_CB_FAIL;
2078 }
2079
2080 struct bna_mac *
bna_ucam_mod_mac_get(struct bna_ucam_mod * ucam_mod)2081 bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2082 {
2083 struct list_head *qe;
2084
2085 if (list_empty(&ucam_mod->free_q))
2086 return NULL;
2087
2088 bfa_q_deq(&ucam_mod->free_q, &qe);
2089
2090 return (struct bna_mac *)qe;
2091 }
2092
/* Return a unicast MAC entry to the ucam module's free pool. */
void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
2098
2099 struct bna_mac *
bna_mcam_mod_mac_get(struct bna_mcam_mod * mcam_mod)2100 bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2101 {
2102 struct list_head *qe;
2103
2104 if (list_empty(&mcam_mod->free_q))
2105 return NULL;
2106
2107 bfa_q_deq(&mcam_mod->free_q, &qe);
2108
2109 return (struct bna_mac *)qe;
2110 }
2111
/* Return a multicast MAC entry to the mcam module's free pool. */
void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
2117
2118 struct bna_mcam_handle *
bna_mcam_mod_handle_get(struct bna_mcam_mod * mcam_mod)2119 bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2120 {
2121 struct list_head *qe;
2122
2123 if (list_empty(&mcam_mod->free_handle_q))
2124 return NULL;
2125
2126 bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2127
2128 return (struct bna_mcam_handle *)qe;
2129 }
2130
/* Return a multicast CAM handle to the mcam module's free pool. */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2137
2138 void
bna_hw_stats_get(struct bna * bna)2139 bna_hw_stats_get(struct bna *bna)
2140 {
2141 if (!bna->stats_mod.ioc_ready) {
2142 bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
2143 return;
2144 }
2145 if (bna->stats_mod.stats_get_busy) {
2146 bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
2147 return;
2148 }
2149
2150 bna_bfi_stats_get(bna);
2151 }
2152