1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
4 #include <linux/bitfield.h>
5 #include <linux/etherdevice.h>
6 #include <linux/delay.h>
7 #include <linux/dev_printk.h>
8 #include <linux/dma-mapping.h>
9 #include <linux/gfp.h>
10 #include <linux/types.h>
11
12 #include "fbnic.h"
13 #include "fbnic_tlv.h"
14
/* Publish a 64b IPC mailbox descriptor to the FW-visible BAR registers.
 * The 64b value is split across two consecutive 32b registers.
 */
static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx,
				int desc_idx, u64 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* Write the upper 32b and then the lower 32b. Doing this the
	 * FW can then read lower, upper, lower to verify that the state
	 * of the descriptor wasn't changed mid-transaction.
	 */
	fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc));
	/* Flush so the upper half is posted before the lower half lands */
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset, lower_32_bits(desc));
}
28
/* Overwrite a mailbox descriptor with an "invalid" value.
 * Unlike __fbnic_mbx_wr_desc() this writes the lower half first so the
 * state bits are cleared before the address half is zeroed.
 */
static void __fbnic_mbx_invalidate_desc(struct fbnic_dev *fbd, int mbx_idx,
					int desc_idx, u32 desc)
{
	u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);

	/* For initialization we write the lower 32b of the descriptor first.
	 * This way we can set the state to mark it invalid before we clear the
	 * upper 32b.
	 */
	fw_wr32(fbd, desc_offset, desc);
	/* Flush so FW observes the invalid state before the upper half clears */
	fw_wrfl(fbd);
	fw_wr32(fbd, desc_offset + 1, 0);
}
42
__fbnic_mbx_rd_desc(struct fbnic_dev * fbd,int mbx_idx,int desc_idx)43 static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx)
44 {
45 u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx);
46 u64 desc;
47
48 desc = fw_rd32(fbd, desc_offset);
49 desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32;
50
51 return desc;
52 }
53
/* Return one IPC mailbox descriptor ring to its quiesced reset state.
 * Gates off device DMA for the direction the ring uses, then rewrites
 * every descriptor so the firmware has nothing live to chase.
 */
static void fbnic_mbx_reset_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	int desc_idx;

	/* Disable DMA transactions from the device,
	 * and flush any transactions triggered during cleaning
	 */
	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Rx ring buffers are written by the device (AW channel) */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_FLUSH);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Tx ring buffers are read by the device (AR channel) */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_FLUSH);
		break;
	}

	wrfl(fbd);

	/* Initialize first descriptor to all 0s. Doing this gives us a
	 * solid stop for the firmware to hit when it is done looping
	 * through the ring.
	 */
	__fbnic_mbx_invalidate_desc(fbd, mbx_idx, 0, 0);

	/* We then fill the rest of the ring starting at the end and moving
	 * back toward descriptor 0 with skip descriptors that have no
	 * length nor address, and tell the firmware that they can skip
	 * them and just move past them to the one we initialized to 0.
	 */
	for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;)
		__fbnic_mbx_invalidate_desc(fbd, mbx_idx, desc_idx,
					    FBNIC_IPC_MBX_DESC_FW_CMPL |
					    FBNIC_IPC_MBX_DESC_HOST_CMPL);
}
90
/* One-time (per probe/reset) initialization of the FW mailbox state:
 * host-side bookkeeping, doorbell interrupt configuration, and a reset
 * of both descriptor rings.
 */
void fbnic_mbx_init(struct fbnic_dev *fbd)
{
	int i;

	/* Initialize lock to protect Tx ring */
	spin_lock_init(&fbd->fw_tx_lock);

	/* Reinitialize mailbox memory */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx));

	/* Do not auto-clear the FW mailbox interrupt, let SW clear it */
	wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY));

	/* Clear any stale causes in vector 0 as that is used for doorbell */
	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);

	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_reset_desc_ring(fbd, i);
}
111
/* Post a page-backed TLV message buffer on a mailbox descriptor ring.
 *
 * DMA-maps @msg (FROM_DEVICE for the Rx ring, TO_DEVICE for Tx) and
 * publishes a descriptor exposing @length bytes to the firmware. @eom
 * marks the descriptor as end-of-message.
 *
 * Ownership: on ANY failure the caller retains ownership of @msg and is
 * responsible for freeing it. The previous code freed the page itself on
 * the dma_mapping_error() path while every caller also frees the page on
 * a non-zero return, producing a double free; the internal free has been
 * removed so the error contract is uniform.
 *
 * Return: 0 on success, -ENODEV if the mailbox is not ready or the FW is
 * not reachable, -EBUSY if the ring is full, -ENOSPC if DMA mapping fails.
 */
static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx,
			     struct fbnic_tlv_msg *msg, u16 length, u8 eom)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
	u8 tail = mbx->tail;
	dma_addr_t addr;
	int direction;

	if (!mbx->ready || !fbnic_fw_present(fbd))
		return -ENODEV;

	direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
							DMA_TO_DEVICE;

	/* One slot is always left unused to distinguish full from empty */
	if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN))
		return -EBUSY;

	addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction);
	if (dma_mapping_error(fbd->dev, addr))
		return -ENOSPC;	/* caller frees @msg */

	mbx->buf_info[tail].msg = msg;
	mbx->buf_info[tail].addr = addr;

	mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN;

	/* Zero the descriptor at the new tail so the FW stops there */
	fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0);

	__fbnic_mbx_wr_desc(fbd, mbx_idx, tail,
			    FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) |
			    (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) |
			    (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) |
			    FBNIC_IPC_MBX_DESC_HOST_CMPL);

	return 0;
}
151
fbnic_mbx_unmap_and_free_msg(struct fbnic_dev * fbd,int mbx_idx,int desc_idx)152 static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx,
153 int desc_idx)
154 {
155 struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
156 int direction;
157
158 if (!mbx->buf_info[desc_idx].msg)
159 return;
160
161 direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE :
162 DMA_TO_DEVICE;
163 dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
164 PAGE_SIZE, direction);
165
166 free_page((unsigned long)mbx->buf_info[desc_idx].msg);
167 mbx->buf_info[desc_idx].msg = NULL;
168 }
169
fbnic_mbx_clean_desc_ring(struct fbnic_dev * fbd,int mbx_idx)170 static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
171 {
172 int i;
173
174 fbnic_mbx_reset_desc_ring(fbd, mbx_idx);
175
176 for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
177 fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
178 }
179
fbnic_mbx_clean(struct fbnic_dev * fbd)180 void fbnic_mbx_clean(struct fbnic_dev *fbd)
181 {
182 int i;
183
184 for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
185 fbnic_mbx_clean_desc_ring(fbd, i);
186 }
187
188 #define FBNIC_MBX_MAX_PAGE_SIZE FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
189 #define FBNIC_RX_PAGE_SIZE min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
190
fbnic_mbx_alloc_rx_msgs(struct fbnic_dev * fbd)191 static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
192 {
193 struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
194 u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
195 int err = 0;
196
197 /* Do nothing if mailbox is not ready, or we already have pages on
198 * the ring that can be used by the firmware
199 */
200 if (!rx_mbx->ready)
201 return -ENODEV;
202
203 /* Fill all but 1 unused descriptors in the Rx queue. */
204 count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
205 while (!err && count--) {
206 struct fbnic_tlv_msg *msg;
207
208 msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
209 __GFP_NOWARN);
210 if (!msg) {
211 err = -ENOMEM;
212 break;
213 }
214
215 err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
216 FBNIC_RX_PAGE_SIZE, 0);
217 if (err)
218 free_page((unsigned long)msg);
219 }
220
221 return err;
222 }
223
fbnic_mbx_map_tlv_msg(struct fbnic_dev * fbd,struct fbnic_tlv_msg * msg)224 static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
225 struct fbnic_tlv_msg *msg)
226 {
227 unsigned long flags;
228 int err;
229
230 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
231
232 err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
233 le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
234
235 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
236
237 return err;
238 }
239
fbnic_mbx_process_tx_msgs(struct fbnic_dev * fbd)240 static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
241 {
242 struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
243 u8 head = tx_mbx->head;
244 u64 desc;
245
246 while (head != tx_mbx->tail) {
247 desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
248 if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
249 break;
250
251 fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
252
253 head++;
254 head %= FBNIC_IPC_MBX_DESC_LEN;
255 }
256
257 /* Record head for next interrupt */
258 tx_mbx->head = head;
259 }
260
/* Transmit a request that expects a tracked response.
 *
 * Atomically (under the Tx lock) registers @cmpl_data as the pending
 * completion and posts @msg on the Tx mailbox. Only one completion may
 * be outstanding at a time; a second caller gets -EBUSY. If the map
 * fails, the completion registration is rolled back so the next caller
 * is not blocked. @cmpl_data may be NULL for fire-and-forget requests.
 *
 * Return: 0 on success, -EBUSY if a completion is already pending, or
 * the error from fbnic_mbx_map_msg().
 */
static int fbnic_mbx_map_req_w_cmpl(struct fbnic_dev *fbd,
				    struct fbnic_tlv_msg *msg,
				    struct fbnic_fw_completion *cmpl_data)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&fbd->fw_tx_lock, flags);

	/* If we are already waiting on a completion then abort */
	if (cmpl_data && fbd->cmpl_data) {
		err = -EBUSY;
		goto unlock_mbx;
	}

	/* Record completion location and submit request */
	if (cmpl_data)
		fbd->cmpl_data = cmpl_data;

	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);

	/* If msg failed then clear completion data for next caller */
	if (err && cmpl_data)
		fbd->cmpl_data = NULL;

unlock_mbx:
	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);

	return err;
}
292
fbnic_fw_release_cmpl_data(struct kref * kref)293 static void fbnic_fw_release_cmpl_data(struct kref *kref)
294 {
295 struct fbnic_fw_completion *cmpl_data;
296
297 cmpl_data = container_of(kref, struct fbnic_fw_completion,
298 ref_count);
299 kfree(cmpl_data);
300 }
301
302 static struct fbnic_fw_completion *
fbnic_fw_get_cmpl_by_type(struct fbnic_dev * fbd,u32 msg_type)303 fbnic_fw_get_cmpl_by_type(struct fbnic_dev *fbd, u32 msg_type)
304 {
305 struct fbnic_fw_completion *cmpl_data = NULL;
306 unsigned long flags;
307
308 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
309 if (fbd->cmpl_data && fbd->cmpl_data->msg_type == msg_type) {
310 cmpl_data = fbd->cmpl_data;
311 kref_get(&fbd->cmpl_data->ref_count);
312 }
313 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
314
315 return cmpl_data;
316 }
317
/**
 * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
 * @fbd: FBNIC device structure
 * @msg_type: ENUM value indicating message type to send
 *
 * Return:
 * One of the following values:
 *   -EOPNOTSUPP: Is not ASIC so mailbox is not supported
 *   -ENODEV: Device I/O error
 *   -ENOMEM: Failed to allocate message
 *   -EBUSY: No space in mailbox
 *   -ENOSPC: DMA mapping failed
 *
 * This function sends a single TLV header indicating the host wants to take
 * some action. However there are no other side effects which means that any
 * response will need to be caught via a completion if this action is
 * expected to kick off a resultant action.
 */
static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
{
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(msg_type);
	if (!msg)
		return -ENOMEM;

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	/* On failure we still own the page and must release it */
	if (err)
		free_page((unsigned long)msg);

	return err;
}
354
/* Bring one mailbox ring online once the FW has shown signs of life:
 * mark it ready, re-enable bus mastering for its DMA direction, and for
 * the Rx ring pre-post buffers for the firmware to write into.
 */
static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
{
	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];

	mbx->ready = true;

	switch (mbx_idx) {
	case FBNIC_IPC_MBX_RX_IDX:
		/* Enable DMA writes from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);

		/* Make sure we have a page for the FW to write to */
		fbnic_mbx_alloc_rx_msgs(fbd);
		break;
	case FBNIC_IPC_MBX_TX_IDX:
		/* Enable DMA reads from the device */
		wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
		     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
		break;
	}
}
377
fbnic_mbx_event(struct fbnic_dev * fbd)378 static bool fbnic_mbx_event(struct fbnic_dev *fbd)
379 {
380 /* We only need to do this on the first interrupt following reset.
381 * this primes the mailbox so that we will have cleared all the
382 * skip descriptors.
383 */
384 if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
385 return false;
386
387 wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
388
389 return true;
390 }
391
/**
 * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
 * to FW mailbox
 *
 * @fbd: FBNIC device structure
 * @take_ownership: take/release the ownership
 *
 * Return: zero on success, negative value on failure
 *
 * Notifies the firmware that the driver either takes ownership of the NIC
 * (when @take_ownership is true) or releases it. As a side effect it arms
 * (or disarms) heartbeat monitoring: the request timestamp is recorded and
 * the response timestamp is back-dated so a silent firmware trips the
 * timeout on the first check.
 */
int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership)
{
	unsigned long req_time = jiffies;
	struct fbnic_tlv_msg *msg;
	int err = 0;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ);
	if (!msg)
		return -ENOMEM;

	/* The flag attribute is only present when taking ownership */
	if (take_ownership) {
		err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG);
		if (err)
			goto free_message;
	}

	err = fbnic_mbx_map_tlv_msg(fbd, msg);
	if (err)
		goto free_message;

	/* Initialize heartbeat, set last response to 1 second in the past
	 * so that we will trigger a timeout if the firmware doesn't respond
	 */
	fbd->last_heartbeat_response = req_time - HZ;

	fbd->last_heartbeat_request = req_time;

	/* Set heartbeat detection based on if we are taking ownership */
	fbd->fw_heartbeat_enabled = take_ownership;

	return err;

free_message:
	free_page((unsigned long)msg);
	return err;
}
443
/* TLV attribute schema for the firmware capabilities response message.
 * Used both to validate/parse the top-level response and (via the
 * BMC_MAC_ADDR entry) the nested BMC MAC address array.
 */
static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = {
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION),
	FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT),
	FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR),
	FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION),
	FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
			      FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE),
	FBNIC_TLV_ATTR_LAST
};
469
/* Parse the nested BMC MAC address array attribute into @bmc_mac_addr.
 * Up to @len addresses are copied; any remaining entries are zeroed.
 * Returns 0 on success or a negative error from the array parser.
 */
static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN],
				    struct fbnic_tlv_msg *attr, int len)
{
	/* Number of 32b words of nested attributes (header excluded) */
	int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1;
	struct fbnic_tlv_msg *mac_results[8];
	int err, i = 0;

	/* Make sure we have enough room to process all the MAC addresses
	 * (8 matches the size of mac_results above)
	 */
	if (len > 8)
		return -ENOSPC;

	/* Parse the array */
	err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results,
					 fbnic_fw_cap_resp_index,
					 FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len);
	if (err)
		return err;

	/* Copy results into MAC addr array.
	 * NOTE(review): this assumes fbnic_tlv_attr_parse_array() NULLs
	 * (or fills) the first @len entries of mac_results — confirm
	 * against its implementation.
	 */
	for (i = 0; i < len && mac_results[i]; i++)
		fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]);

	/* Zero remaining unused addresses */
	while (i < len)
		eth_zero_addr(bmc_mac_addr[i++]);

	return 0;
}
498
/* Parse the firmware capabilities response and populate fbd->fw_cap.
 *
 * Rejects a missing/zero version, and a running version older than the
 * supported minimum (in which case the Tx mailbox is disabled to keep
 * the card quiesced until the firmware is updated). All other fields are
 * best-effort: absent attributes simply leave zeros behind.
 */
static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results)
{
	u32 all_multi = 0, version = 0;
	struct fbnic_dev *fbd = opaque;
	bool bmc_present;
	int err;

	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_VERSION);
	fbd->fw_cap.running.mgmt.version = version;
	if (!fbd->fw_cap.running.mgmt.version)
		return -EINVAL;

	if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) {
		char running_ver[FBNIC_FW_VER_MAX_SIZE];

		fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version,
				    running_ver);
		dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n",
			running_ver,
			MIN_FW_MAJOR_VERSION,
			MIN_FW_MINOR_VERSION,
			MIN_FW_BUILD_VERSION);
		/* Disable TX mailbox to prevent card use until firmware is
		 * updated.
		 */
		fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false;
		return -EINVAL;
	}

	if (fta_get_str(results, FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR,
			fbd->fw_cap.running.mgmt.commit,
			FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE) <= 0)
		dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n");

	/* Stored (flash) management firmware version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_VERSION);
	fbd->fw_cap.stored.mgmt.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_COMMIT_STR,
		    fbd->fw_cap.stored.mgmt.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Running CMRT (bootloader) version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_CMRT_VERSION);
	fbd->fw_cap.running.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR,
		    fbd->fw_cap.running.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* Stored CMRT (bootloader) version/commit */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION);
	fbd->fw_cap.stored.bootloader.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR,
		    fbd->fw_cap.stored.bootloader.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	/* UEFI/UNDI driver info only has a stored copy */
	version = fta_get_uint(results, FBNIC_FW_CAP_RESP_UEFI_VERSION);
	fbd->fw_cap.stored.undi.version = version;
	fta_get_str(results, FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR,
		    fbd->fw_cap.stored.undi.commit,
		    FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE);

	fbd->fw_cap.active_slot =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT);
	fbd->fw_cap.link_speed =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_SPEED);
	fbd->fw_cap.link_fec =
		fta_get_uint(results, FBNIC_FW_CAP_RESP_FW_LINK_FEC);

	bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT];
	if (bmc_present) {
		struct fbnic_tlv_msg *attr;

		/* A BMC requires its MAC address array; missing is an error */
		attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY];
		if (!attr)
			return -EINVAL;

		err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr,
					       attr, 4);
		if (err)
			return err;

		all_multi =
			fta_get_uint(results, FBNIC_FW_CAP_RESP_BMC_ALL_MULTI);
	} else {
		memset(fbd->fw_cap.bmc_mac_addr, 0,
		       sizeof(fbd->fw_cap.bmc_mac_addr));
	}

	fbd->fw_cap.bmc_present = bmc_present;

	/* Only overwrite all_multi when the attribute was actually sent,
	 * or when no BMC is present (forcing it back to 0)
	 */
	if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present)
		fbd->fw_cap.all_multi = all_multi;

	return 0;
}
591
/* Ownership response carries no attributes; schema is just a terminator */
static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

/* Handle an ownership response: its arrival alone proves the FW is alive */
static int fbnic_fw_parse_ownership_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	/* Count the ownership response as a heartbeat reply */
	fbd->last_heartbeat_response = jiffies;

	return 0;
}
606
/* Heartbeat response carries no attributes; schema is just a terminator */
static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = {
	FBNIC_TLV_ATTR_LAST
};

/* Record when the firmware last answered a heartbeat request */
static int fbnic_fw_parse_heartbeat_resp(void *opaque,
					 struct fbnic_tlv_msg **results)
{
	struct fbnic_dev *fbd = (struct fbnic_dev *)opaque;

	fbd->last_heartbeat_response = jiffies;

	return 0;
}
620
fbnic_fw_xmit_heartbeat_message(struct fbnic_dev * fbd)621 static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd)
622 {
623 unsigned long req_time = jiffies;
624 struct fbnic_tlv_msg *msg;
625 int err = 0;
626
627 if (!fbnic_fw_present(fbd))
628 return -ENODEV;
629
630 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ);
631 if (!msg)
632 return -ENOMEM;
633
634 err = fbnic_mbx_map_tlv_msg(fbd, msg);
635 if (err)
636 goto free_message;
637
638 fbd->last_heartbeat_request = req_time;
639
640 return err;
641
642 free_message:
643 free_page((unsigned long)msg);
644 return err;
645 }
646
fbnic_fw_heartbeat_current(struct fbnic_dev * fbd)647 static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd)
648 {
649 unsigned long last_response = fbd->last_heartbeat_response;
650 unsigned long last_request = fbd->last_heartbeat_request;
651
652 return !time_before(last_response, last_request);
653 }
654
/* Wait (up to ~10s in 200ms steps) for the firmware to answer the initial
 * heartbeat/ownership exchange, optionally polling the mailbox when
 * interrupts are not yet available, then queue the next heartbeat.
 * Returns 0 on success, -ENODEV, -ETIMEDOUT, or a transmit error.
 */
int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll)
{
	int err = -ETIMEDOUT;
	int attempts = 50;

	if (!fbnic_fw_present(fbd))
		return -ENODEV;

	while (attempts--) {
		msleep(200);
		if (poll)
			fbnic_mbx_poll(fbd);

		/* Keep waiting until the FW has responded */
		if (!fbnic_fw_heartbeat_current(fbd))
			continue;

		/* Place new message on mailbox to elicit a response */
		err = fbnic_fw_xmit_heartbeat_message(fbd);
		if (err)
			dev_warn(fbd->dev,
				 "Failed to send heartbeat message: %d\n",
				 err);
		break;
	}

	return err;
}
682
/* Periodic heartbeat check. If the last request went unanswered, report
 * the firmware as unresponsive (once) and disable further checks; in
 * either case queue the next heartbeat request.
 */
void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd)
{
	unsigned long last_request = fbd->last_heartbeat_request;
	int err;

	/* Do not check heartbeat or send another request until current
	 * period has expired. Otherwise we might start spamming requests.
	 */
	if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD))
		return;

	/* We already reported no mailbox. Wait for it to come back */
	if (!fbd->fw_heartbeat_enabled)
		return;

	/* Was the last heartbeat response long time ago? */
	if (!fbnic_fw_heartbeat_current(fbd)) {
		dev_warn(fbd->dev,
			 "Firmware did not respond to heartbeat message\n");
		fbd->fw_heartbeat_enabled = false;
	}

	/* Place new message on mailbox to elicit a response */
	err = fbnic_fw_xmit_heartbeat_message(fbd);
	if (err)
		dev_warn(fbd->dev, "Failed to send heartbeat message\n");
}
710
711 /**
712 * fbnic_fw_xmit_tsene_read_msg - Create and transmit a sensor read request
713 * @fbd: FBNIC device structure
714 * @cmpl_data: Completion data structure to store sensor response
715 *
716 * Asks the firmware to provide an update with the latest sensor data.
717 * The response will contain temperature and voltage readings.
718 *
719 * Return: 0 on success, negative error value on failure
720 */
fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev * fbd,struct fbnic_fw_completion * cmpl_data)721 int fbnic_fw_xmit_tsene_read_msg(struct fbnic_dev *fbd,
722 struct fbnic_fw_completion *cmpl_data)
723 {
724 struct fbnic_tlv_msg *msg;
725 int err;
726
727 if (!fbnic_fw_present(fbd))
728 return -ENODEV;
729
730 msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_TSENE_READ_REQ);
731 if (!msg)
732 return -ENOMEM;
733
734 err = fbnic_mbx_map_req_w_cmpl(fbd, msg, cmpl_data);
735 if (err)
736 goto free_message;
737
738 return 0;
739
740 free_message:
741 free_page((unsigned long)msg);
742 return err;
743 }
744
/* TLV attribute schema for the sensor (tsene) read response */
static const struct fbnic_tlv_index fbnic_tsene_read_resp_index[] = {
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_THERM),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_VOLT),
	FBNIC_TLV_ATTR_S32(FBNIC_FW_TSENE_ERROR),
	FBNIC_TLV_ATTR_LAST
};

/* Deliver a sensor read response to the waiting completion.
 * Copies temperature/voltage into the completion (or records the FW
 * error / -EINVAL for malformed responses), signals the waiter, and
 * drops the reference taken by fbnic_fw_get_cmpl_by_type().
 */
static int fbnic_fw_parse_tsene_read_resp(void *opaque,
					  struct fbnic_tlv_msg **results)
{
	struct fbnic_fw_completion *cmpl_data;
	struct fbnic_dev *fbd = opaque;
	s32 err_resp;
	int err = 0;

	/* Verify we have a completion pointer to provide with data */
	cmpl_data = fbnic_fw_get_cmpl_by_type(fbd,
					      FBNIC_TLV_MSG_ID_TSENE_READ_RESP);
	if (!cmpl_data)
		return -ENOSPC;

	/* Firmware-reported error takes precedence over payload checks */
	err_resp = fta_get_sint(results, FBNIC_FW_TSENE_ERROR);
	if (err_resp)
		goto msg_err;

	if (!results[FBNIC_FW_TSENE_THERM] || !results[FBNIC_FW_TSENE_VOLT]) {
		err = -EINVAL;
		goto msg_err;
	}

	cmpl_data->u.tsene.millidegrees =
		fta_get_sint(results, FBNIC_FW_TSENE_THERM);
	cmpl_data->u.tsene.millivolts =
		fta_get_sint(results, FBNIC_FW_TSENE_VOLT);

msg_err:
	cmpl_data->result = err_resp ? : err;
	complete(&cmpl_data->done);
	fbnic_fw_put_cmpl(cmpl_data);

	return err;
}
787
/* Dispatch table mapping FW message types to their schema and handler;
 * terminated by the error entry for unrecognized message types.
 */
static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = {
	FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index,
			 fbnic_fw_parse_cap_resp),
	FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index,
			 fbnic_fw_parse_ownership_resp),
	FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index,
			 fbnic_fw_parse_heartbeat_resp),
	FBNIC_TLV_PARSER(TSENE_READ_RESP,
			 fbnic_tsene_read_resp_index,
			 fbnic_fw_parse_tsene_read_resp),
	FBNIC_TLV_MSG_ERROR
};
800
/* Drain FW-completed Rx descriptors: unmap each page, validate the
 * reported length, hand the TLV message to the parser, then free the
 * page and replenish the ring.
 */
static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd)
{
	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
	u8 head = rx_mbx->head;
	u64 desc, length;

	while (head != rx_mbx->tail) {
		struct fbnic_tlv_msg *msg;
		int err;

		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head);
		/* Stop at the first descriptor the FW hasn't written yet */
		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
			break;

		dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr,
				 PAGE_SIZE, DMA_FROM_DEVICE);

		msg = rx_mbx->buf_info[head].msg;

		length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc);

		/* Ignore NULL mailbox descriptors */
		if (!length)
			goto next_page;

		/* Report descriptors with length greater than page size */
		if (length > PAGE_SIZE) {
			dev_warn(fbd->dev,
				 "Invalid mailbox descriptor length: %lld\n",
				 length);
			goto next_page;
		}

		/* TLV header length (32b words) must fit in the descriptor */
		if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length)
			dev_warn(fbd->dev, "Mailbox message length mismatch\n");

		/* If parsing fails dump contents of message to dmesg */
		err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser);
		if (err) {
			dev_warn(fbd->dev, "Unable to process message: %d\n",
				 err);
			print_hex_dump(KERN_WARNING, "fbnic:",
				       DUMP_PREFIX_OFFSET, 16, 2,
				       msg, length, true);
		}

		dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type);
next_page:

		free_page((unsigned long)rx_mbx->buf_info[head].msg);
		rx_mbx->buf_info[head].msg = NULL;

		head++;
		head %= FBNIC_IPC_MBX_DESC_LEN;
	}

	/* Record head for next interrupt */
	rx_mbx->head = head;

	/* Make sure we have at least one page for the FW to write to */
	fbnic_mbx_alloc_rx_msgs(fbd);
}
863
/* Poll-mode mailbox service: acknowledge any pending FW doorbell, then
 * reap completed Tx descriptors and process any received messages.
 */
void fbnic_mbx_poll(struct fbnic_dev *fbd)
{
	fbnic_mbx_event(fbd);
	fbnic_mbx_process_tx_msgs(fbd);
	fbnic_mbx_process_rx_msgs(fbd);
}
871
/* Wait (up to ~10s) for the firmware to signal via the doorbell, then
 * bring both mailbox rings online and request the capabilities message.
 * Returns 0 on success, -ETIMEDOUT, -ENODEV, or a transmit error (in
 * which case the mailbox is cleaned back up).
 */
int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	int err, i;

	do {
		if (!time_is_after_jiffies(timeout))
			return -ETIMEDOUT;

		/* Force the firmware to trigger an interrupt response to
		 * avoid the mailbox getting stuck closed if the interrupt
		 * is reset.
		 */
		fbnic_mbx_reset_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);

		/* Immediate fail if BAR4 went away */
		if (!fbnic_fw_present(fbd))
			return -ENODEV;

		msleep(20);
	} while (!fbnic_mbx_event(fbd));

	/* FW has shown signs of life. Enable DMA and start Tx/Rx */
	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
		fbnic_mbx_init_desc_ring(fbd, i);

	/* Request an update from the firmware. This should overwrite
	 * mgmt.version once we get the actual version from the firmware
	 * in the capabilities request message.
	 */
	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
	if (err)
		goto clean_mbx;

	/* Use "1" to indicate we entered the state waiting for a response */
	fbd->fw_cap.running.mgmt.version = 1;

	return 0;
clean_mbx:
	/* Cleanup Rx buffers and disable mailbox */
	fbnic_mbx_clean(fbd);
	return err;
}
915
/* Fail a waiter whose mailbox is being torn down: mark the completion
 * with -EPIPE and wake it. Caller holds fw_tx_lock.
 */
static void __fbnic_fw_evict_cmpl(struct fbnic_fw_completion *cmpl_data)
{
	cmpl_data->result = -EPIPE;
	complete(&cmpl_data->done);
}

/* Evict the (at most one) pending completion. Caller holds fw_tx_lock. */
static void fbnic_mbx_evict_all_cmpl(struct fbnic_dev *fbd)
{
	if (fbd->cmpl_data) {
		__fbnic_fw_evict_cmpl(fbd->cmpl_data);
		fbd->cmpl_data = NULL;
	}
}
929
/* Shut down the Tx mailbox: stop new transmissions, fail any pending
 * completion, then wait (up to ~10s) for the firmware to consume what is
 * already on the ring.
 */
void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
{
	unsigned long timeout = jiffies + 10 * HZ + 1;
	struct fbnic_fw_mbx *tx_mbx;
	u8 tail;

	/* Snapshot the Tx mailbox state */
	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];

	spin_lock_irq(&fbd->fw_tx_lock);

	/* Clear ready to prevent any further attempts to transmit */
	tx_mbx->ready = false;

	/* Read tail to determine the last tail state for the ring */
	tail = tx_mbx->tail;

	/* Flush any completions as we are no longer processing Rx */
	fbnic_mbx_evict_all_cmpl(fbd);

	spin_unlock_irq(&fbd->fw_tx_lock);

	/* Give firmware time to process packet,
	 * we will wait up to 10 seconds which is 500 waits of 20ms.
	 */
	do {
		u8 head = tx_mbx->head;

		/* Tx ring is empty once head == tail */
		if (head == tail)
			break;

		msleep(20);
		fbnic_mbx_process_tx_msgs(fbd);
	} while (time_is_after_jiffies(timeout));
}
966
fbnic_get_fw_ver_commit_str(struct fbnic_dev * fbd,char * fw_version,const size_t str_sz)967 void fbnic_get_fw_ver_commit_str(struct fbnic_dev *fbd, char *fw_version,
968 const size_t str_sz)
969 {
970 struct fbnic_fw_ver *mgmt = &fbd->fw_cap.running.mgmt;
971 const char *delim = "";
972
973 if (mgmt->commit[0])
974 delim = "_";
975
976 fbnic_mk_full_fw_ver_str(mgmt->version, delim, mgmt->commit,
977 fw_version, str_sz);
978 }
979
/* Prepare a completion tracker for the response type @msg_type:
 * initializes the waiter completion and takes the initial reference.
 */
void fbnic_fw_init_cmpl(struct fbnic_fw_completion *fw_cmpl,
			u32 msg_type)
{
	fw_cmpl->msg_type = msg_type;
	init_completion(&fw_cmpl->done);
	kref_init(&fw_cmpl->ref_count);
}
987
fbnic_fw_clear_compl(struct fbnic_dev * fbd)988 void fbnic_fw_clear_compl(struct fbnic_dev *fbd)
989 {
990 unsigned long flags;
991
992 spin_lock_irqsave(&fbd->fw_tx_lock, flags);
993 fbd->cmpl_data = NULL;
994 spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
995 }
996
/* Drop a reference on a completion; frees it when the count hits zero */
void fbnic_fw_put_cmpl(struct fbnic_fw_completion *fw_cmpl)
{
	kref_put(&fw_cmpl->ref_count, fbnic_fw_release_cmpl_data);
}
1001