1 /******************************************************************************
2 SPDX-License-Identifier: BSD-3-Clause
3
4 Copyright (c) 2025, Intel Corporation
5 All rights reserved.
6
7 Redistribution and use in source and binary forms, with or without
8 modification, are permitted provided that the following conditions are met:
9
10 1. Redistributions of source code must retain the above copyright notice,
11 this list of conditions and the following disclaimer.
12
13 2. Redistributions in binary form must reproduce the above copyright
14 notice, this list of conditions and the following disclaimer in the
15 documentation and/or other materials provided with the distribution.
16
17 3. Neither the name of the Intel Corporation nor the names of its
18 contributors may be used to endorse or promote products derived from
19 this software without specific prior written permission.
20
21 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 POSSIBILITY OF SUCH DAMAGE.
32
33 ******************************************************************************/
34
35 #include "ixgbe_type.h"
36 #include "ixgbe_e610.h"
37 #include "ixgbe_x550.h"
38 #include "ixgbe_common.h"
39 #include "ixgbe_phy.h"
40 #include "ixgbe_api.h"
41
42 /**
43 * ixgbe_init_aci - initialization routine for Admin Command Interface
44 * @hw: pointer to the hardware structure
45 *
46 * Initialize the ACI lock.
47 */
ixgbe_init_aci(struct ixgbe_hw * hw)48 void ixgbe_init_aci(struct ixgbe_hw *hw)
49 {
50 ixgbe_init_lock(&hw->aci.lock);
51 }
52
53 /**
54 * ixgbe_shutdown_aci - shutdown routine for Admin Command Interface
55 * @hw: pointer to the hardware structure
56 *
57 * Destroy the ACI lock.
58 */
ixgbe_shutdown_aci(struct ixgbe_hw * hw)59 void ixgbe_shutdown_aci(struct ixgbe_hw *hw)
60 {
61 ixgbe_destroy_lock(&hw->aci.lock);
62 }
63
64 /**
65 * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
66 * be resent
67 * @opcode: ACI opcode
68 *
69 * Check if ACI command should be sent again depending on the provided opcode.
70 *
71 * Return: true if the sending command routine should be repeated,
72 * otherwise false.
73 */
ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)74 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
75 {
76 switch (opcode) {
77 case ixgbe_aci_opc_disable_rxen:
78 case ixgbe_aci_opc_get_phy_caps:
79 case ixgbe_aci_opc_get_link_status:
80 case ixgbe_aci_opc_get_link_topo:
81 return true;
82 }
83
84 return false;
85 }
86
87 /**
88 * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
89 * Command Interface
90 * @hw: pointer to the HW struct
91 * @desc: descriptor describing the command
92 * @buf: buffer to use for indirect commands (NULL for direct commands)
93 * @buf_size: size of buffer for indirect commands (0 for direct commands)
94 *
95 * Admin Command is sent using CSR by setting descriptor and buffer in specific
96 * registers.
97 *
98 * Return: the exit code of the operation.
99 * * - IXGBE_SUCCESS - success.
100 * * - IXGBE_ERR_ACI_DISABLED - CSR mechanism is not enabled.
101 * * - IXGBE_ERR_ACI_BUSY - CSR mechanism is busy.
102 * * - IXGBE_ERR_PARAM - buf_size is too big or
103 * invalid argument buf or buf_size.
104 * * - IXGBE_ERR_ACI_TIMEOUT - Admin Command X command timeout.
105 * * - IXGBE_ERR_ACI_ERROR - Admin Command X invalid state of HICR register or
106 * Admin Command failed because of bad opcode was returned or
107 * Admin Command failed with error Y.
108 */
109 static s32
ixgbe_aci_send_cmd_execute(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)110 ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
111 void *buf, u16 buf_size)
112 {
113 u32 hicr = 0, tmp_buf_size = 0, i = 0;
114 u32 *raw_desc = (u32 *)desc;
115 s32 status = IXGBE_SUCCESS;
116 bool valid_buf = false;
117 u32 *tmp_buf = NULL;
118 u16 opcode = 0;
119
120 do {
121 hw->aci.last_status = IXGBE_ACI_RC_OK;
122
123 /* It's necessary to check if mechanism is enabled */
124 hicr = IXGBE_READ_REG(hw, PF_HICR);
125 if (!(hicr & PF_HICR_EN)) {
126 status = IXGBE_ERR_ACI_DISABLED;
127 break;
128 }
129 if (hicr & PF_HICR_C) {
130 hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
131 status = IXGBE_ERR_ACI_BUSY;
132 break;
133 }
134 opcode = desc->opcode;
135
136 if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE) {
137 status = IXGBE_ERR_PARAM;
138 break;
139 }
140
141 if (buf)
142 desc->flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF);
143
144 /* Check if buf and buf_size are proper params */
145 if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF)) {
146 if ((buf && buf_size == 0) ||
147 (buf == NULL && buf_size)) {
148 status = IXGBE_ERR_PARAM;
149 break;
150 }
151 if (buf && buf_size)
152 valid_buf = true;
153 }
154
155 if (valid_buf == true) {
156 if (buf_size % 4 == 0)
157 tmp_buf_size = buf_size;
158 else
159 tmp_buf_size = (buf_size & (u16)(~0x03)) + 4;
160
161 tmp_buf = (u32*)ixgbe_malloc(hw, tmp_buf_size);
162 if (!tmp_buf)
163 return IXGBE_ERR_OUT_OF_MEM;
164
165 /* tmp_buf will be firstly filled with 0xFF and after
166 * that the content of buf will be written into it.
167 * This approach lets us use valid buf_size and
168 * prevents us from reading past buf area
169 * when buf_size mod 4 not equal to 0.
170 */
171 memset(tmp_buf, 0xFF, tmp_buf_size);
172 memcpy(tmp_buf, buf, buf_size);
173
174 if (tmp_buf_size > IXGBE_ACI_LG_BUF)
175 desc->flags |=
176 IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_LB);
177
178 desc->datalen = IXGBE_CPU_TO_LE16(buf_size);
179
180 if (desc->flags & IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD)) {
181 for (i = 0; i < tmp_buf_size / 4; i++) {
182 IXGBE_WRITE_REG(hw, PF_HIBA(i),
183 IXGBE_LE32_TO_CPU(tmp_buf[i]));
184 }
185 }
186 }
187
188 /* Descriptor is written to specific registers */
189 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
190 IXGBE_WRITE_REG(hw, PF_HIDA(i),
191 IXGBE_LE32_TO_CPU(raw_desc[i]));
192
193 /* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
194 * PF_HICR_EV
195 */
196 hicr = IXGBE_READ_REG(hw, PF_HICR);
197 hicr = (hicr | PF_HICR_C) & ~(PF_HICR_SV | PF_HICR_EV);
198 IXGBE_WRITE_REG(hw, PF_HICR, hicr);
199
200 /* Wait for sync Admin Command response */
201 for (i = 0; i < IXGBE_ACI_SYNC_RESPONSE_TIMEOUT; i += 1) {
202 hicr = IXGBE_READ_REG(hw, PF_HICR);
203 if ((hicr & PF_HICR_SV) || !(hicr & PF_HICR_C))
204 break;
205
206 msec_delay(1);
207 }
208
209 /* Wait for async Admin Command response */
210 if ((hicr & PF_HICR_SV) && (hicr & PF_HICR_C)) {
211 for (i = 0; i < IXGBE_ACI_ASYNC_RESPONSE_TIMEOUT;
212 i += 1) {
213 hicr = IXGBE_READ_REG(hw, PF_HICR);
214 if ((hicr & PF_HICR_EV) || !(hicr & PF_HICR_C))
215 break;
216
217 msec_delay(1);
218 }
219 }
220
221 /* Read sync Admin Command response */
222 if ((hicr & PF_HICR_SV)) {
223 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
224 raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA(i));
225 raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
226 }
227 }
228
229 /* Read async Admin Command response */
230 if ((hicr & PF_HICR_EV) && !(hicr & PF_HICR_C)) {
231 for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
232 raw_desc[i] = IXGBE_READ_REG(hw, PF_HIDA_2(i));
233 raw_desc[i] = IXGBE_CPU_TO_LE32(raw_desc[i]);
234 }
235 }
236
237 /* Handle timeout and invalid state of HICR register */
238 if (hicr & PF_HICR_C) {
239 status = IXGBE_ERR_ACI_TIMEOUT;
240 break;
241 } else if (!(hicr & PF_HICR_SV) && !(hicr & PF_HICR_EV)) {
242 status = IXGBE_ERR_ACI_ERROR;
243 break;
244 }
245
246 /* For every command other than 0x0014 treat opcode mismatch
247 * as an error. Response to 0x0014 command read from HIDA_2
248 * is a descriptor of an event which is expected to contain
249 * different opcode than the command.
250 */
251 if (desc->opcode != opcode &&
252 opcode != IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
253 status = IXGBE_ERR_ACI_ERROR;
254 break;
255 }
256
257 if (desc->retval != IXGBE_ACI_RC_OK) {
258 hw->aci.last_status = (enum ixgbe_aci_err)desc->retval;
259 status = IXGBE_ERR_ACI_ERROR;
260 break;
261 }
262
263 /* Write a response values to a buf */
264 if (valid_buf && (desc->flags &
265 IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_BUF))) {
266 for (i = 0; i < tmp_buf_size / 4; i++) {
267 tmp_buf[i] = IXGBE_READ_REG(hw, PF_HIBA(i));
268 tmp_buf[i] = IXGBE_CPU_TO_LE32(tmp_buf[i]);
269 }
270 memcpy(buf, tmp_buf, buf_size);
271 }
272 } while (0);
273
274 if (tmp_buf)
275 ixgbe_free(hw, tmp_buf);
276
277 return status;
278 }
279
280 /**
281 * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
282 * @hw: pointer to the HW struct
283 * @desc: descriptor describing the command
284 * @buf: buffer to use for indirect commands (NULL for direct commands)
285 * @buf_size: size of buffer for indirect commands (0 for direct commands)
286 *
287 * Helper function to send FW Admin Commands to the FW Admin Command Interface.
288 *
289 * Retry sending the FW Admin Command multiple times to the FW ACI
290 * if the EBUSY Admin Command error is returned.
291 *
292 * Return: the exit code of the operation.
293 */
ixgbe_aci_send_cmd(struct ixgbe_hw * hw,struct ixgbe_aci_desc * desc,void * buf,u16 buf_size)294 s32 ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
295 void *buf, u16 buf_size)
296 {
297 struct ixgbe_aci_desc desc_cpy;
298 enum ixgbe_aci_err last_status;
299 bool is_cmd_for_retry;
300 u8 *buf_cpy = NULL;
301 s32 status;
302 u16 opcode;
303 u8 idx = 0;
304
305 opcode = IXGBE_LE16_TO_CPU(desc->opcode);
306 is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
307 memset(&desc_cpy, 0, sizeof(desc_cpy));
308
309 if (is_cmd_for_retry) {
310 if (buf) {
311 buf_cpy = (u8 *)ixgbe_malloc(hw, buf_size);
312 if (!buf_cpy)
313 return IXGBE_ERR_OUT_OF_MEM;
314 }
315 memcpy(&desc_cpy, desc, sizeof(desc_cpy));
316 }
317
318 do {
319 ixgbe_acquire_lock(&hw->aci.lock);
320 status = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
321 last_status = hw->aci.last_status;
322 ixgbe_release_lock(&hw->aci.lock);
323
324 if (!is_cmd_for_retry || status == IXGBE_SUCCESS ||
325 (last_status != IXGBE_ACI_RC_EBUSY && status != IXGBE_ERR_ACI_ERROR))
326 break;
327
328 if (buf)
329 memcpy(buf, buf_cpy, buf_size);
330 memcpy(desc, &desc_cpy, sizeof(desc_cpy));
331
332 msec_delay(IXGBE_ACI_SEND_DELAY_TIME_MS);
333 } while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE);
334
335 if (buf_cpy)
336 ixgbe_free(hw, buf_cpy);
337
338 return status;
339 }
340
341 /**
342 * ixgbe_aci_check_event_pending - check if there are any pending events
343 * @hw: pointer to the HW struct
344 *
345 * Determine if there are any pending events.
346 *
347 * Return: true if there are any currently pending events
348 * otherwise false.
349 */
ixgbe_aci_check_event_pending(struct ixgbe_hw * hw)350 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
351 {
352 u32 ep_bit_mask;
353 u32 fwsts;
354
355 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
356
357 /* Check state of Event Pending (EP) bit */
358 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
359 return (fwsts & ep_bit_mask) ? true : false;
360 }
361
362 /**
363 * ixgbe_aci_get_event - get an event from ACI
364 * @hw: pointer to the HW struct
365 * @e: event information structure
366 * @pending: optional flag signaling that there are more pending events
367 *
368 * Obtain an event from ACI and return its content
369 * through 'e' using ACI command (0x0014).
370 * Provide information if there are more events
371 * to retrieve through 'pending'.
372 *
373 * Return: the exit code of the operation.
374 */
ixgbe_aci_get_event(struct ixgbe_hw * hw,struct ixgbe_aci_event * e,bool * pending)375 s32 ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
376 bool *pending)
377 {
378 struct ixgbe_aci_desc desc;
379 s32 status;
380
381 if (!e || (!e->msg_buf && e->buf_len) || (e->msg_buf && !e->buf_len))
382 return IXGBE_ERR_PARAM;
383
384 ixgbe_acquire_lock(&hw->aci.lock);
385
386 /* Check if there are any events pending */
387 if (!ixgbe_aci_check_event_pending(hw)) {
388 status = IXGBE_ERR_ACI_NO_EVENTS;
389 goto aci_get_event_exit;
390 }
391
392 /* Obtain pending event */
393 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
394 status = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
395 if (status)
396 goto aci_get_event_exit;
397
398 /* Returned 0x0014 opcode indicates that no event was obtained */
399 if (desc.opcode == IXGBE_CPU_TO_LE16(ixgbe_aci_opc_get_fw_event)) {
400 status = IXGBE_ERR_ACI_NO_EVENTS;
401 goto aci_get_event_exit;
402 }
403
404 /* Determine size of event data */
405 e->msg_len = MIN_T(u16, IXGBE_LE16_TO_CPU(desc.datalen), e->buf_len);
406 /* Write event descriptor to event info structure */
407 memcpy(&e->desc, &desc, sizeof(e->desc));
408
409 /* Check if there are any further events pending */
410 if (pending) {
411 *pending = ixgbe_aci_check_event_pending(hw);
412 }
413
414 aci_get_event_exit:
415 ixgbe_release_lock(&hw->aci.lock);
416
417 return status;
418 }
419
420 /**
421 * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
422 * @desc: pointer to the temp descriptor (non DMA mem)
423 * @opcode: the opcode can be used to decide which flags to turn off or on
424 *
425 * Helper function to fill the descriptor desc with default values
426 * and the provided opcode.
427 */
ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc * desc,u16 opcode)428 void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
429 {
430 /* zero out the desc */
431 memset(desc, 0, sizeof(*desc));
432 desc->opcode = IXGBE_CPU_TO_LE16(opcode);
433 desc->flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_SI);
434 }
435
436 /**
437 * ixgbe_aci_get_fw_ver - get the firmware version
438 * @hw: pointer to the HW struct
439 *
440 * Get the firmware version using ACI command (0x0001).
441 *
442 * Return: the exit code of the operation.
443 */
ixgbe_aci_get_fw_ver(struct ixgbe_hw * hw)444 s32 ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
445 {
446 struct ixgbe_aci_cmd_get_ver *resp;
447 struct ixgbe_aci_desc desc;
448 s32 status;
449
450 resp = &desc.params.get_ver;
451
452 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
453
454 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
455
456 if (!status) {
457 hw->fw_branch = resp->fw_branch;
458 hw->fw_maj_ver = resp->fw_major;
459 hw->fw_min_ver = resp->fw_minor;
460 hw->fw_patch = resp->fw_patch;
461 hw->fw_build = IXGBE_LE32_TO_CPU(resp->fw_build);
462 hw->api_branch = resp->api_branch;
463 hw->api_maj_ver = resp->api_major;
464 hw->api_min_ver = resp->api_minor;
465 hw->api_patch = resp->api_patch;
466 }
467
468 return status;
469 }
470
471 /**
472 * ixgbe_aci_send_driver_ver - send the driver version to firmware
473 * @hw: pointer to the HW struct
474 * @dv: driver's major, minor version
475 *
476 * Send the driver version to the firmware
477 * using the ACI command (0x0002).
478 *
479 * Return: the exit code of the operation.
480 * Returns IXGBE_ERR_PARAM, if dv is NULL.
481 */
ixgbe_aci_send_driver_ver(struct ixgbe_hw * hw,struct ixgbe_driver_ver * dv)482 s32 ixgbe_aci_send_driver_ver(struct ixgbe_hw *hw, struct ixgbe_driver_ver *dv)
483 {
484 struct ixgbe_aci_cmd_driver_ver *cmd;
485 struct ixgbe_aci_desc desc;
486 u16 len;
487
488 cmd = &desc.params.driver_ver;
489
490 if (!dv)
491 return IXGBE_ERR_PARAM;
492
493 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_driver_ver);
494
495 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
496 cmd->major_ver = dv->major_ver;
497 cmd->minor_ver = dv->minor_ver;
498 cmd->build_ver = dv->build_ver;
499 cmd->subbuild_ver = dv->subbuild_ver;
500
501 len = 0;
502 while (len < sizeof(dv->driver_string) &&
503 IS_ASCII(dv->driver_string[len]) && dv->driver_string[len])
504 len++;
505
506 return ixgbe_aci_send_cmd(hw, &desc, dv->driver_string, len);
507 }
508
509 /**
510 * ixgbe_aci_req_res - request a common resource
511 * @hw: pointer to the HW struct
512 * @res: resource ID
513 * @access: access type
514 * @sdp_number: resource number
515 * @timeout: the maximum time in ms that the driver may hold the resource
516 *
517 * Requests a common resource using the ACI command (0x0008).
518 * Specifies the maximum time the driver may hold the resource.
519 * If the requested resource is currently occupied by some other driver,
520 * a busy return value is returned and the timeout field value indicates the
521 * maximum time the current owner has to free it.
522 *
523 * Return: the exit code of the operation.
524 */
525 static s32
ixgbe_aci_req_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u8 sdp_number,u32 * timeout)526 ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
527 enum ixgbe_aci_res_access_type access, u8 sdp_number,
528 u32 *timeout)
529 {
530 struct ixgbe_aci_cmd_req_res *cmd_resp;
531 struct ixgbe_aci_desc desc;
532 s32 status;
533
534 cmd_resp = &desc.params.res_owner;
535
536 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
537
538 cmd_resp->res_id = IXGBE_CPU_TO_LE16(res);
539 cmd_resp->access_type = IXGBE_CPU_TO_LE16(access);
540 cmd_resp->res_number = IXGBE_CPU_TO_LE32(sdp_number);
541 cmd_resp->timeout = IXGBE_CPU_TO_LE32(*timeout);
542 *timeout = 0;
543
544 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
545
546 /* The completion specifies the maximum time in ms that the driver
547 * may hold the resource in the Timeout field.
548 * If the resource is held by some other driver, the command completes
549 * with a busy return value and the timeout field indicates the maximum
550 * time the current owner of the resource has to free it.
551 */
552 if (!status || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
553 *timeout = IXGBE_LE32_TO_CPU(cmd_resp->timeout);
554
555 return status;
556 }
557
558 /**
559 * ixgbe_aci_release_res - release a common resource using ACI
560 * @hw: pointer to the HW struct
561 * @res: resource ID
562 * @sdp_number: resource number
563 *
564 * Release a common resource using ACI command (0x0009).
565 *
566 * Return: the exit code of the operation.
567 */
568 static s32
ixgbe_aci_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,u8 sdp_number)569 ixgbe_aci_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
570 u8 sdp_number)
571 {
572 struct ixgbe_aci_cmd_req_res *cmd;
573 struct ixgbe_aci_desc desc;
574
575 cmd = &desc.params.res_owner;
576
577 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
578
579 cmd->res_id = IXGBE_CPU_TO_LE16(res);
580 cmd->res_number = IXGBE_CPU_TO_LE32(sdp_number);
581
582 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
583 }
584
585 /**
586 * ixgbe_acquire_res - acquire the ownership of a resource
587 * @hw: pointer to the HW structure
588 * @res: resource ID
589 * @access: access type (read or write)
590 * @timeout: timeout in milliseconds
591 *
592 * Make an attempt to acquire the ownership of a resource using
593 * the ixgbe_aci_req_res to utilize ACI.
594 * In case if some other driver has previously acquired the resource and
595 * performed any necessary updates, the IXGBE_ERR_ACI_NO_WORK is returned,
596 * and the caller does not obtain the resource and has no further work to do.
597 * If needed, the function will poll until the current lock owner timeouts.
598 *
599 * Return: the exit code of the operation.
600 */
ixgbe_acquire_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res,enum ixgbe_aci_res_access_type access,u32 timeout)601 s32 ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
602 enum ixgbe_aci_res_access_type access, u32 timeout)
603 {
604 #define IXGBE_RES_POLLING_DELAY_MS 10
605 u32 delay = IXGBE_RES_POLLING_DELAY_MS;
606 u32 res_timeout = timeout;
607 u32 retry_timeout = 0;
608 s32 status;
609
610 status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
611
612 /* A return code of IXGBE_ERR_ACI_NO_WORK means that another driver has
613 * previously acquired the resource and performed any necessary updates;
614 * in this case the caller does not obtain the resource and has no
615 * further work to do.
616 */
617 if (status == IXGBE_ERR_ACI_NO_WORK)
618 goto ixgbe_acquire_res_exit;
619
620 /* If necessary, poll until the current lock owner timeouts.
621 * Set retry_timeout to the timeout value reported by the FW in the
622 * response to the "Request Resource Ownership" (0x0008) Admin Command
623 * as it indicates the maximum time the current owner of the resource
624 * is allowed to hold it.
625 */
626 retry_timeout = res_timeout;
627 while (status && retry_timeout && res_timeout) {
628 msec_delay(delay);
629 retry_timeout = (retry_timeout > delay) ?
630 retry_timeout - delay : 0;
631 status = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);
632
633 if (status == IXGBE_ERR_ACI_NO_WORK)
634 /* lock free, but no work to do */
635 break;
636
637 if (!status)
638 /* lock acquired */
639 break;
640 }
641
642 ixgbe_acquire_res_exit:
643 return status;
644 }
645
646 /**
647 * ixgbe_release_res - release a common resource
648 * @hw: pointer to the HW structure
649 * @res: resource ID
650 *
651 * Release a common resource using ixgbe_aci_release_res.
652 */
ixgbe_release_res(struct ixgbe_hw * hw,enum ixgbe_aci_res_ids res)653 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
654 {
655 u32 total_delay = 0;
656 s32 status;
657
658 status = ixgbe_aci_release_res(hw, res, 0);
659
660 /* There are some rare cases when trying to release the resource
661 * results in an admin command timeout, so handle them correctly.
662 */
663 while ((status == IXGBE_ERR_ACI_TIMEOUT) &&
664 (total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT)) {
665 msec_delay(1);
666 status = ixgbe_aci_release_res(hw, res, 0);
667 total_delay++;
668 }
669 }
670
671 /**
672 * ixgbe_parse_common_caps - Parse common device/function capabilities
673 * @hw: pointer to the HW struct
674 * @caps: pointer to common capabilities structure
675 * @elem: the capability element to parse
676 * @prefix: message prefix for tracing capabilities
677 *
678 * Given a capability element, extract relevant details into the common
679 * capability structure.
680 *
681 * Return: true if the capability matches one of the common capability ids,
682 * false otherwise.
683 */
684 static bool
ixgbe_parse_common_caps(struct ixgbe_hw * hw,struct ixgbe_hw_common_caps * caps,struct ixgbe_aci_cmd_list_caps_elem * elem,const char * prefix)685 ixgbe_parse_common_caps(struct ixgbe_hw *hw, struct ixgbe_hw_common_caps *caps,
686 struct ixgbe_aci_cmd_list_caps_elem *elem,
687 const char *prefix)
688 {
689 u32 logical_id = IXGBE_LE32_TO_CPU(elem->logical_id);
690 u32 phys_id = IXGBE_LE32_TO_CPU(elem->phys_id);
691 u32 number = IXGBE_LE32_TO_CPU(elem->number);
692 u16 cap = IXGBE_LE16_TO_CPU(elem->cap);
693 bool found = true;
694
695 UNREFERENCED_1PARAMETER(hw);
696
697 switch (cap) {
698 case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
699 caps->valid_functions = number;
700 break;
701 case IXGBE_ACI_CAPS_SRIOV:
702 caps->sr_iov_1_1 = (number == 1);
703 break;
704 case IXGBE_ACI_CAPS_VMDQ:
705 caps->vmdq = (number == 1);
706 break;
707 case IXGBE_ACI_CAPS_DCB:
708 caps->dcb = (number == 1);
709 caps->active_tc_bitmap = logical_id;
710 caps->maxtc = phys_id;
711 break;
712 case IXGBE_ACI_CAPS_RSS:
713 caps->rss_table_size = number;
714 caps->rss_table_entry_width = logical_id;
715 break;
716 case IXGBE_ACI_CAPS_RXQS:
717 caps->num_rxq = number;
718 caps->rxq_first_id = phys_id;
719 break;
720 case IXGBE_ACI_CAPS_TXQS:
721 caps->num_txq = number;
722 caps->txq_first_id = phys_id;
723 break;
724 case IXGBE_ACI_CAPS_MSIX:
725 caps->num_msix_vectors = number;
726 caps->msix_vector_first_id = phys_id;
727 break;
728 case IXGBE_ACI_CAPS_NVM_VER:
729 break;
730 case IXGBE_ACI_CAPS_NVM_MGMT:
731 caps->sec_rev_disabled =
732 (number & IXGBE_NVM_MGMT_SEC_REV_DISABLED) ?
733 true : false;
734 caps->update_disabled =
735 (number & IXGBE_NVM_MGMT_UPDATE_DISABLED) ?
736 true : false;
737 caps->nvm_unified_update =
738 (number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
739 true : false;
740 caps->netlist_auth =
741 (number & IXGBE_NVM_MGMT_NETLIST_AUTH_SUPPORT) ?
742 true : false;
743 break;
744 case IXGBE_ACI_CAPS_MAX_MTU:
745 caps->max_mtu = number;
746 break;
747 case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
748 caps->pcie_reset_avoidance = (number > 0);
749 break;
750 case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
751 caps->reset_restrict_support = (number == 1);
752 break;
753 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
754 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
755 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
756 case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
757 {
758 u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;
759
760 caps->ext_topo_dev_img_ver_high[index] = number;
761 caps->ext_topo_dev_img_ver_low[index] = logical_id;
762 caps->ext_topo_dev_img_part_num[index] =
763 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M) >>
764 IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_S;
765 caps->ext_topo_dev_img_load_en[index] =
766 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
767 caps->ext_topo_dev_img_prog_en[index] =
768 (phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
769 break;
770 }
771 case IXGBE_ACI_CAPS_OROM_RECOVERY_UPDATE:
772 caps->orom_recovery_update = (number == 1);
773 break;
774 case IXGBE_ACI_CAPS_NEXT_CLUSTER_ID:
775 caps->next_cluster_id_support = (number == 1);
776 DEBUGOUT2("%s: next_cluster_id_support = %d\n",
777 prefix, caps->next_cluster_id_support);
778 break;
779 case IXGBE_ACI_CAPS_EEE:
780 caps->eee_support = (u8)number;
781 DEBUGOUT2("%s: eee_support = %x\n", prefix, caps->eee_support);
782 break;
783 default:
784 /* Not one of the recognized common capabilities */
785 found = false;
786 }
787
788 return found;
789 }
790
791 /**
792 * ixgbe_hweight8 - count set bits among the 8 lowest bits
793 * @w: variable storing set bits to count
794 *
795 * Return: the number of set bits among the 8 lowest bits in the provided value.
796 */
ixgbe_hweight8(u32 w)797 static u8 ixgbe_hweight8(u32 w)
798 {
799 u8 hweight = 0, i;
800
801 for (i = 0; i < 8; i++)
802 if (w & (1 << i))
803 hweight++;
804
805 return hweight;
806 }
807
808 /**
809 * ixgbe_hweight32 - count set bits among the 32 lowest bits
810 * @w: variable storing set bits to count
811 *
812 * Return: the number of set bits among the 32 lowest bits in the
813 * provided value.
814 */
ixgbe_hweight32(u32 w)815 static u8 ixgbe_hweight32(u32 w)
816 {
817 u32 bitMask = 0x1, i;
818 u8 bitCnt = 0;
819
820 for (i = 0; i < 32; i++)
821 {
822 if (w & bitMask)
823 bitCnt++;
824
825 bitMask = bitMask << 0x1;
826 }
827
828 return bitCnt;
829 }
830
831 /**
832 * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
833 * @hw: pointer to the HW struct
834 * @dev_p: pointer to device capabilities structure
835 * @cap: capability element to parse
836 *
837 * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
838 */
839 static void
ixgbe_parse_valid_functions_cap(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)840 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
841 struct ixgbe_hw_dev_caps *dev_p,
842 struct ixgbe_aci_cmd_list_caps_elem *cap)
843 {
844 u32 number = IXGBE_LE32_TO_CPU(cap->number);
845
846 UNREFERENCED_1PARAMETER(hw);
847
848 dev_p->num_funcs = ixgbe_hweight32(number);
849 }
850
851 /**
852 * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
853 * @hw: pointer to the HW struct
854 * @dev_p: pointer to device capabilities structure
855 * @cap: capability element to parse
856 *
857 * Parse IXGBE_ACI_CAPS_VF for device capabilities.
858 */
ixgbe_parse_vf_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)859 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
860 struct ixgbe_hw_dev_caps *dev_p,
861 struct ixgbe_aci_cmd_list_caps_elem *cap)
862 {
863 u32 number = IXGBE_LE32_TO_CPU(cap->number);
864
865 UNREFERENCED_1PARAMETER(hw);
866
867 dev_p->num_vfs_exposed = number;
868 }
869
870 /**
871 * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
872 * @hw: pointer to the HW struct
873 * @dev_p: pointer to device capabilities structure
874 * @cap: capability element to parse
875 *
876 * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
877 */
ixgbe_parse_vsi_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)878 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
879 struct ixgbe_hw_dev_caps *dev_p,
880 struct ixgbe_aci_cmd_list_caps_elem *cap)
881 {
882 u32 number = IXGBE_LE32_TO_CPU(cap->number);
883
884 UNREFERENCED_1PARAMETER(hw);
885
886 dev_p->num_vsi_allocd_to_host = number;
887 }
888
889 /**
890 * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
891 * @hw: pointer to the HW struct
892 * @dev_p: pointer to device capabilities structure
893 * @cap: capability element to parse
894 *
895 * Parse IXGBE_ACI_CAPS_FD for device capabilities.
896 */
ixgbe_parse_fdir_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,struct ixgbe_aci_cmd_list_caps_elem * cap)897 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
898 struct ixgbe_hw_dev_caps *dev_p,
899 struct ixgbe_aci_cmd_list_caps_elem *cap)
900 {
901 u32 number = IXGBE_LE32_TO_CPU(cap->number);
902
903 UNREFERENCED_1PARAMETER(hw);
904
905 dev_p->num_flow_director_fltr = number;
906 }
907
908 /**
909 * ixgbe_parse_dev_caps - Parse device capabilities
910 * @hw: pointer to the HW struct
911 * @dev_p: pointer to device capabilities structure
912 * @buf: buffer containing the device capability records
913 * @cap_count: the number of capabilities
914 *
915 * Helper device to parse device (0x000B) capabilities list. For
916 * capabilities shared between device and function, this relies on
917 * ixgbe_parse_common_caps.
918 *
919 * Loop through the list of provided capabilities and extract the relevant
920 * data into the device capabilities structured.
921 */
ixgbe_parse_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_p,void * buf,u32 cap_count)922 static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
923 struct ixgbe_hw_dev_caps *dev_p,
924 void *buf, u32 cap_count)
925 {
926 struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
927 u32 i;
928
929 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
930
931 memset(dev_p, 0, sizeof(*dev_p));
932
933 for (i = 0; i < cap_count; i++) {
934 u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
935 bool found;
936
937 found = ixgbe_parse_common_caps(hw, &dev_p->common_cap,
938 &cap_resp[i], "dev caps");
939
940 switch (cap) {
941 case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
942 ixgbe_parse_valid_functions_cap(hw, dev_p,
943 &cap_resp[i]);
944 break;
945 case IXGBE_ACI_CAPS_VF:
946 ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
947 break;
948 case IXGBE_ACI_CAPS_VSI:
949 ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
950 break;
951 case IXGBE_ACI_CAPS_FD:
952 ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
953 break;
954 default:
955 /* Don't list common capabilities as unknown */
956 if (!found)
957 break;
958 }
959 }
960
961 }
962
963 /**
964 * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
965 * @hw: pointer to the HW struct
966 * @func_p: pointer to function capabilities structure
967 * @cap: pointer to the capability element to parse
968 *
969 * Extract function capabilities for IXGBE_ACI_CAPS_VF.
970 */
ixgbe_parse_vf_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)971 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
972 struct ixgbe_hw_func_caps *func_p,
973 struct ixgbe_aci_cmd_list_caps_elem *cap)
974 {
975 u32 logical_id = IXGBE_LE32_TO_CPU(cap->logical_id);
976 u32 number = IXGBE_LE32_TO_CPU(cap->number);
977
978 UNREFERENCED_1PARAMETER(hw);
979
980 func_p->num_allocd_vfs = number;
981 func_p->vf_base_id = logical_id;
982 }
983
984 /**
985 * ixgbe_get_num_per_func - determine number of resources per PF
986 * @hw: pointer to the HW structure
987 * @max: value to be evenly split between each PF
988 *
989 * Determine the number of valid functions by going through the bitmap returned
990 * from parsing capabilities and use this to calculate the number of resources
991 * per PF based on the max value passed in.
992 *
993 * Return: the number of resources per PF or 0, if no PH are available.
994 */
ixgbe_get_num_per_func(struct ixgbe_hw * hw,u32 max)995 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
996 {
997 u8 funcs;
998
999 #define IXGBE_CAPS_VALID_FUNCS_M 0xFF
1000 funcs = ixgbe_hweight8(hw->dev_caps.common_cap.valid_functions &
1001 IXGBE_CAPS_VALID_FUNCS_M);
1002
1003 if (!funcs)
1004 return 0;
1005
1006 return max / funcs;
1007 }
1008
1009 /**
1010 * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
1011 * @hw: pointer to the HW struct
1012 * @func_p: pointer to function capabilities structure
1013 * @cap: pointer to the capability element to parse
1014 *
1015 * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
1016 */
ixgbe_parse_vsi_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,struct ixgbe_aci_cmd_list_caps_elem * cap)1017 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
1018 struct ixgbe_hw_func_caps *func_p,
1019 struct ixgbe_aci_cmd_list_caps_elem *cap)
1020 {
1021 func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
1022 }
1023
1024 /**
1025 * ixgbe_parse_func_caps - Parse function capabilities
1026 * @hw: pointer to the HW struct
1027 * @func_p: pointer to function capabilities structure
1028 * @buf: buffer containing the function capability records
1029 * @cap_count: the number of capabilities
1030 *
1031 * Helper function to parse function (0x000A) capabilities list. For
1032 * capabilities shared between device and function, this relies on
1033 * ixgbe_parse_common_caps.
1034 *
1035 * Loop through the list of provided capabilities and extract the relevant
1036 * data into the function capabilities structured.
1037 */
ixgbe_parse_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_p,void * buf,u32 cap_count)1038 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
1039 struct ixgbe_hw_func_caps *func_p,
1040 void *buf, u32 cap_count)
1041 {
1042 struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
1043 u32 i;
1044
1045 cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
1046
1047 memset(func_p, 0, sizeof(*func_p));
1048
1049 for (i = 0; i < cap_count; i++) {
1050 u16 cap = IXGBE_LE16_TO_CPU(cap_resp[i].cap);
1051 ixgbe_parse_common_caps(hw, &func_p->common_cap,
1052 &cap_resp[i], "func caps");
1053
1054 switch (cap) {
1055 case IXGBE_ACI_CAPS_VF:
1056 ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
1057 break;
1058 case IXGBE_ACI_CAPS_VSI:
1059 ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
1060 break;
1061 default:
1062 /* Don't list common capabilities as unknown */
1063 break;
1064 }
1065 }
1066
1067 }
1068
1069 /**
1070 * ixgbe_aci_list_caps - query function/device capabilities
1071 * @hw: pointer to the HW struct
1072 * @buf: a buffer to hold the capabilities
1073 * @buf_size: size of the buffer
1074 * @cap_count: if not NULL, set to the number of capabilities reported
1075 * @opc: capabilities type to discover, device or function
1076 *
1077 * Get the function (0x000A) or device (0x000B) capabilities description from
1078 * firmware and store it in the buffer.
1079 *
1080 * If the cap_count pointer is not NULL, then it is set to the number of
1081 * capabilities firmware will report. Note that if the buffer size is too
1082 * small, it is possible the command will return IXGBE_ERR_OUT_OF_MEM. The
1083 * cap_count will still be updated in this case. It is recommended that the
1084 * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
1085 * buffer that firmware could return) to avoid this.
1086 *
1087 * Return: the exit code of the operation.
1088 * Exit code of IXGBE_ERR_OUT_OF_MEM means the buffer size is too small.
1089 */
ixgbe_aci_list_caps(struct ixgbe_hw * hw,void * buf,u16 buf_size,u32 * cap_count,enum ixgbe_aci_opc opc)1090 s32 ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
1091 u32 *cap_count, enum ixgbe_aci_opc opc)
1092 {
1093 struct ixgbe_aci_cmd_list_caps *cmd;
1094 struct ixgbe_aci_desc desc;
1095 s32 status;
1096
1097 cmd = &desc.params.get_cap;
1098
1099 if (opc != ixgbe_aci_opc_list_func_caps &&
1100 opc != ixgbe_aci_opc_list_dev_caps)
1101 return IXGBE_ERR_PARAM;
1102
1103 ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
1104 status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
1105
1106 if (cap_count)
1107 *cap_count = IXGBE_LE32_TO_CPU(cmd->count);
1108
1109 return status;
1110 }
1111
1112 /**
1113 * ixgbe_discover_dev_caps - Read and extract device capabilities
1114 * @hw: pointer to the hardware structure
1115 * @dev_caps: pointer to device capabilities structure
1116 *
1117 * Read the device capabilities and extract them into the dev_caps structure
1118 * for later use.
1119 *
1120 * Return: the exit code of the operation.
1121 */
ixgbe_discover_dev_caps(struct ixgbe_hw * hw,struct ixgbe_hw_dev_caps * dev_caps)1122 s32 ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
1123 struct ixgbe_hw_dev_caps *dev_caps)
1124 {
1125 u32 status, cap_count = 0;
1126 u8 *cbuf = NULL;
1127
1128 cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1129 if (!cbuf)
1130 return IXGBE_ERR_OUT_OF_MEM;
1131 /* Although the driver doesn't know the number of capabilities the
1132 * device will return, we can simply send a 4KB buffer, the maximum
1133 * possible size that firmware can return.
1134 */
1135 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1136 sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1137
1138 status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1139 &cap_count,
1140 ixgbe_aci_opc_list_dev_caps);
1141 if (!status)
1142 ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
1143
1144 if (cbuf)
1145 ixgbe_free(hw, cbuf);
1146
1147 return status;
1148 }
1149
1150 /**
1151 * ixgbe_discover_func_caps - Read and extract function capabilities
1152 * @hw: pointer to the hardware structure
1153 * @func_caps: pointer to function capabilities structure
1154 *
1155 * Read the function capabilities and extract them into the func_caps structure
1156 * for later use.
1157 *
1158 * Return: the exit code of the operation.
1159 */
ixgbe_discover_func_caps(struct ixgbe_hw * hw,struct ixgbe_hw_func_caps * func_caps)1160 s32 ixgbe_discover_func_caps(struct ixgbe_hw *hw,
1161 struct ixgbe_hw_func_caps *func_caps)
1162 {
1163 u32 cap_count = 0;
1164 u8 *cbuf = NULL;
1165 s32 status;
1166
1167 cbuf = (u8*)ixgbe_malloc(hw, IXGBE_ACI_MAX_BUFFER_SIZE);
1168 if(!cbuf)
1169 return IXGBE_ERR_OUT_OF_MEM;
1170 /* Although the driver doesn't know the number of capabilities the
1171 * device will return, we can simply send a 4KB buffer, the maximum
1172 * possible size that firmware can return.
1173 */
1174 cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
1175 sizeof(struct ixgbe_aci_cmd_list_caps_elem);
1176
1177 status = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
1178 &cap_count,
1179 ixgbe_aci_opc_list_func_caps);
1180 if (!status)
1181 ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
1182
1183 if (cbuf)
1184 ixgbe_free(hw, cbuf);
1185
1186 return status;
1187 }
1188
1189 /**
1190 * ixgbe_get_caps - get info about the HW
1191 * @hw: pointer to the hardware structure
1192 *
1193 * Retrieve both device and function capabilities.
1194 *
1195 * Return: the exit code of the operation.
1196 */
ixgbe_get_caps(struct ixgbe_hw * hw)1197 s32 ixgbe_get_caps(struct ixgbe_hw *hw)
1198 {
1199 s32 status;
1200
1201 status = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
1202 if (status)
1203 return status;
1204
1205 return ixgbe_discover_func_caps(hw, &hw->func_caps);
1206 }
1207
1208 /**
1209 * ixgbe_aci_disable_rxen - disable RX
1210 * @hw: pointer to the HW struct
1211 *
1212 * Request a safe disable of Receive Enable using ACI command (0x000C).
1213 *
1214 * Return: the exit code of the operation.
1215 */
ixgbe_aci_disable_rxen(struct ixgbe_hw * hw)1216 s32 ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
1217 {
1218 struct ixgbe_aci_cmd_disable_rxen *cmd;
1219 struct ixgbe_aci_desc desc;
1220
1221 UNREFERENCED_1PARAMETER(hw);
1222
1223 cmd = &desc.params.disable_rxen;
1224
1225 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
1226
1227 cmd->lport_num = (u8)hw->bus.func;
1228
1229 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1230 }
1231
1232 /**
1233 * ixgbe_aci_get_phy_caps - returns PHY capabilities
1234 * @hw: pointer to the HW struct
1235 * @qual_mods: report qualified modules
1236 * @report_mode: report mode capabilities
1237 * @pcaps: structure for PHY capabilities to be filled
1238 *
1239 * Returns the various PHY capabilities supported on the Port
1240 * using ACI command (0x0600).
1241 *
1242 * Return: the exit code of the operation.
1243 */
ixgbe_aci_get_phy_caps(struct ixgbe_hw * hw,bool qual_mods,u8 report_mode,struct ixgbe_aci_cmd_get_phy_caps_data * pcaps)1244 s32 ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
1245 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
1246 {
1247 struct ixgbe_aci_cmd_get_phy_caps *cmd;
1248 u16 pcaps_size = sizeof(*pcaps);
1249 struct ixgbe_aci_desc desc;
1250 s32 status;
1251
1252 cmd = &desc.params.get_phy;
1253
1254 if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
1255 return IXGBE_ERR_PARAM;
1256
1257 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
1258
1259 if (qual_mods)
1260 cmd->param0 |= IXGBE_CPU_TO_LE16(IXGBE_ACI_GET_PHY_RQM);
1261
1262 cmd->param0 |= IXGBE_CPU_TO_LE16(report_mode);
1263 status = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
1264
1265 if (status == IXGBE_SUCCESS &&
1266 report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
1267 hw->phy.phy_type_low = IXGBE_LE64_TO_CPU(pcaps->phy_type_low);
1268 hw->phy.phy_type_high = IXGBE_LE64_TO_CPU(pcaps->phy_type_high);
1269 memcpy(hw->link.link_info.module_type, &pcaps->module_type,
1270 sizeof(hw->link.link_info.module_type));
1271 }
1272
1273 return status;
1274 }
1275
1276 /**
1277 * ixgbe_phy_caps_equals_cfg - check if capabilities match the PHY config
1278 * @phy_caps: PHY capabilities
1279 * @phy_cfg: PHY configuration
1280 *
1281 * Helper function to determine if PHY capabilities match PHY
1282 * configuration
1283 *
1284 * Return: true if PHY capabilities match PHY configuration.
1285 */
1286 bool
ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * phy_caps,struct ixgbe_aci_cmd_set_phy_cfg_data * phy_cfg)1287 ixgbe_phy_caps_equals_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *phy_caps,
1288 struct ixgbe_aci_cmd_set_phy_cfg_data *phy_cfg)
1289 {
1290 u8 caps_mask, cfg_mask;
1291
1292 if (!phy_caps || !phy_cfg)
1293 return false;
1294
1295 /* These bits are not common between capabilities and configuration.
1296 * Do not use them to determine equality.
1297 */
1298 caps_mask = IXGBE_ACI_PHY_CAPS_MASK & ~(IXGBE_ACI_PHY_AN_MODE |
1299 IXGBE_ACI_PHY_EN_MOD_QUAL);
1300 cfg_mask = IXGBE_ACI_PHY_ENA_VALID_MASK &
1301 ~IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
1302
1303 if (phy_caps->phy_type_low != phy_cfg->phy_type_low ||
1304 phy_caps->phy_type_high != phy_cfg->phy_type_high ||
1305 ((phy_caps->caps & caps_mask) != (phy_cfg->caps & cfg_mask)) ||
1306 phy_caps->low_power_ctrl_an != phy_cfg->low_power_ctrl_an ||
1307 phy_caps->eee_cap != phy_cfg->eee_cap ||
1308 phy_caps->eeer_value != phy_cfg->eeer_value ||
1309 phy_caps->link_fec_options != phy_cfg->link_fec_opt)
1310 return false;
1311
1312 return true;
1313 }
1314
1315 /**
1316 * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
1317 * @caps: PHY ability structure to copy data from
1318 * @cfg: PHY configuration structure to copy data to
1319 *
1320 * Helper function to copy data from PHY capabilities data structure
1321 * to PHY configuration data structure
1322 */
ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data * caps,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1323 void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
1324 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1325 {
1326 if (!caps || !cfg)
1327 return;
1328
1329 memset(cfg, 0, sizeof(*cfg));
1330 cfg->phy_type_low = caps->phy_type_low;
1331 cfg->phy_type_high = caps->phy_type_high;
1332 cfg->caps = caps->caps;
1333 cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
1334 cfg->eee_cap = caps->eee_cap;
1335 cfg->eeer_value = caps->eeer_value;
1336 cfg->link_fec_opt = caps->link_fec_options;
1337 cfg->module_compliance_enforcement =
1338 caps->module_compliance_enforcement;
1339 cfg->eee_entry_delay = caps->eee_entry_delay;
1340 }
1341
1342 /**
1343 * ixgbe_aci_set_phy_cfg - set PHY configuration
1344 * @hw: pointer to the HW struct
1345 * @cfg: structure with PHY configuration data to be set
1346 *
1347 * Set the various PHY configuration parameters supported on the Port
1348 * using ACI command (0x0601).
1349 * One or more of the Set PHY config parameters may be ignored in an MFP
1350 * mode as the PF may not have the privilege to set some of the PHY Config
1351 * parameters.
1352 *
1353 * Return: the exit code of the operation.
1354 */
ixgbe_aci_set_phy_cfg(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg)1355 s32 ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
1356 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1357 {
1358 struct ixgbe_aci_desc desc;
1359 bool use_1p40_buff;
1360 s32 status;
1361
1362 if (!cfg)
1363 return IXGBE_ERR_PARAM;
1364 use_1p40_buff = hw->func_caps.common_cap.eee_support != 0;
1365
1366 /* Ensure that only valid bits of cfg->caps can be turned on. */
1367 if (cfg->caps & ~IXGBE_ACI_PHY_ENA_VALID_MASK) {
1368 cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
1369 }
1370
1371 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
1372 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
1373
1374 if (use_1p40_buff) {
1375 status = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
1376 } else {
1377 struct ixgbe_aci_cmd_set_phy_cfg_data_pre_1_40 cfg_obsolete;
1378
1379 memcpy(&cfg_obsolete, cfg, sizeof(cfg_obsolete));
1380
1381 status = ixgbe_aci_send_cmd(hw, &desc, &cfg_obsolete,
1382 sizeof(cfg_obsolete));
1383 }
1384
1385 /* even if the old buffer is used no need to worry about conversion */
1386 if (!status)
1387 hw->phy.curr_user_phy_cfg = *cfg;
1388
1389 return status;
1390 }
1391
1392 /**
1393 * ixgbe_aci_set_link_restart_an - set up link and restart AN
1394 * @hw: pointer to the HW struct
1395 * @ena_link: if true: enable link, if false: disable link
1396 *
1397 * Function sets up the link and restarts the Auto-Negotiation over the link.
1398 *
1399 * Return: the exit code of the operation.
1400 */
ixgbe_aci_set_link_restart_an(struct ixgbe_hw * hw,bool ena_link)1401 s32 ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
1402 {
1403 struct ixgbe_aci_cmd_restart_an *cmd;
1404 struct ixgbe_aci_desc desc;
1405
1406 cmd = &desc.params.restart_an;
1407
1408 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
1409
1410 cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
1411 if (ena_link)
1412 cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1413 else
1414 cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1415
1416 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1417 }
1418
1419 /**
1420 * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
1421 * @hw: pointer to the HW struct
1422 *
1423 * Try to identify the media type based on the phy type.
1424 * If more than one media type, the ixgbe_media_type_unknown is returned.
1425 * First, phy_type_low is checked, then phy_type_high.
1426 * If none are identified, the ixgbe_media_type_unknown is returned
1427 *
1428 * Return: type of a media based on phy type in form of enum.
1429 */
1430 static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw * hw)1431 ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
1432 {
1433 struct ixgbe_link_status *hw_link_info;
1434
1435 if (!hw)
1436 return ixgbe_media_type_unknown;
1437
1438 hw_link_info = &hw->link.link_info;
1439 if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
1440 /* If more than one media type is selected, report unknown */
1441 return ixgbe_media_type_unknown;
1442
1443 if (hw_link_info->phy_type_low) {
1444 /* 1G SGMII is a special case where some DA cable PHYs
1445 * may show this as an option when it really shouldn't
1446 * be since SGMII is meant to be between a MAC and a PHY
1447 * in a backplane. Try to detect this case and handle it
1448 */
1449 if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
1450 (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
1451 IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
1452 hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
1453 IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
1454 return ixgbe_media_type_da;
1455
1456 switch (hw_link_info->phy_type_low) {
1457 case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
1458 case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
1459 case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
1460 case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
1461 return ixgbe_media_type_fiber;
1462 case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
1463 return ixgbe_media_type_fiber;
1464 case IXGBE_PHY_TYPE_LOW_100BASE_TX:
1465 case IXGBE_PHY_TYPE_LOW_1000BASE_T:
1466 case IXGBE_PHY_TYPE_LOW_2500BASE_T:
1467 case IXGBE_PHY_TYPE_LOW_5GBASE_T:
1468 case IXGBE_PHY_TYPE_LOW_10GBASE_T:
1469 return ixgbe_media_type_copper;
1470 case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
1471 return ixgbe_media_type_da;
1472 case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
1473 case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
1474 case IXGBE_PHY_TYPE_LOW_2500BASE_X:
1475 case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
1476 case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
1477 case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
1478 return ixgbe_media_type_backplane;
1479 }
1480 } else {
1481 switch (hw_link_info->phy_type_high) {
1482 case IXGBE_PHY_TYPE_HIGH_10BASE_T:
1483 return ixgbe_media_type_copper;
1484 }
1485 }
1486 return ixgbe_media_type_unknown;
1487 }
1488
1489 /**
1490 * ixgbe_update_link_info - update status of the HW network link
1491 * @hw: pointer to the HW struct
1492 *
1493 * Update the status of the HW network link.
1494 *
1495 * Return: the exit code of the operation.
1496 */
ixgbe_update_link_info(struct ixgbe_hw * hw)1497 s32 ixgbe_update_link_info(struct ixgbe_hw *hw)
1498 {
1499 struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
1500 struct ixgbe_link_status *li;
1501 s32 status;
1502
1503 if (!hw)
1504 return IXGBE_ERR_PARAM;
1505
1506 li = &hw->link.link_info;
1507
1508 status = ixgbe_aci_get_link_info(hw, true, NULL);
1509 if (status)
1510 return status;
1511
1512 if (li->link_info & IXGBE_ACI_MEDIA_AVAILABLE) {
1513 pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
1514 ixgbe_malloc(hw, sizeof(*pcaps));
1515 if (!pcaps)
1516 return IXGBE_ERR_OUT_OF_MEM;
1517
1518 status = ixgbe_aci_get_phy_caps(hw, false,
1519 IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1520 pcaps);
1521
1522 if (status == IXGBE_SUCCESS)
1523 memcpy(li->module_type, &pcaps->module_type,
1524 sizeof(li->module_type));
1525
1526 ixgbe_free(hw, pcaps);
1527 }
1528
1529 return status;
1530 }
1531
1532 /**
1533 * ixgbe_get_link_status - get status of the HW network link
1534 * @hw: pointer to the HW struct
1535 * @link_up: pointer to bool (true/false = linkup/linkdown)
1536 *
1537 * Variable link_up is true if link is up, false if link is down.
1538 * The variable link_up is invalid if status is non zero. As a
1539 * result of this call, link status reporting becomes enabled
1540 *
1541 * Return: the exit code of the operation.
1542 */
ixgbe_get_link_status(struct ixgbe_hw * hw,bool * link_up)1543 s32 ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
1544 {
1545 s32 status = IXGBE_SUCCESS;
1546
1547 if (!hw || !link_up)
1548 return IXGBE_ERR_PARAM;
1549
1550 if (hw->link.get_link_info) {
1551 status = ixgbe_update_link_info(hw);
1552 if (status) {
1553 return status;
1554 }
1555 }
1556
1557 *link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
1558
1559 return status;
1560 }
1561
1562 /**
1563 * ixgbe_aci_get_link_info - get the link status
1564 * @hw: pointer to the HW struct
1565 * @ena_lse: enable/disable LinkStatusEvent reporting
1566 * @link: pointer to link status structure - optional
1567 *
1568 * Get the current Link Status using ACI command (0x607).
1569 * The current link can be optionally provided to update
1570 * the status.
1571 *
1572 * Return: the link status of the adapter.
1573 */
ixgbe_aci_get_link_info(struct ixgbe_hw * hw,bool ena_lse,struct ixgbe_link_status * link)1574 s32 ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
1575 struct ixgbe_link_status *link)
1576 {
1577 struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
1578 struct ixgbe_aci_cmd_get_link_status *resp;
1579 struct ixgbe_link_status *li_old, *li;
1580 struct ixgbe_fc_info *hw_fc_info;
1581 struct ixgbe_aci_desc desc;
1582 bool tx_pause, rx_pause;
1583 u8 cmd_flags;
1584 s32 status;
1585
1586 if (!hw)
1587 return IXGBE_ERR_PARAM;
1588
1589 li_old = &hw->link.link_info_old;
1590 li = &hw->link.link_info;
1591 hw_fc_info = &hw->fc;
1592
1593 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
1594 cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
1595 resp = &desc.params.get_link_status;
1596 resp->cmd_flags = cmd_flags;
1597
1598 status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
1599
1600 if (status != IXGBE_SUCCESS)
1601 return status;
1602
1603 /* save off old link status information */
1604 *li_old = *li;
1605
1606 /* update current link status information */
1607 li->link_speed = IXGBE_LE16_TO_CPU(link_data.link_speed);
1608 li->phy_type_low = IXGBE_LE64_TO_CPU(link_data.phy_type_low);
1609 li->phy_type_high = IXGBE_LE64_TO_CPU(link_data.phy_type_high);
1610 li->link_info = link_data.link_info;
1611 li->link_cfg_err = link_data.link_cfg_err;
1612 li->an_info = link_data.an_info;
1613 li->ext_info = link_data.ext_info;
1614 li->max_frame_size = IXGBE_LE16_TO_CPU(link_data.max_frame_size);
1615 li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
1616 li->topo_media_conflict = link_data.topo_media_conflict;
1617 li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
1618 IXGBE_ACI_CFG_PACING_TYPE_M);
1619 li->eee_status = link_data.eee_status;
1620
1621 /* update fc info */
1622 tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
1623 rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
1624 if (tx_pause && rx_pause)
1625 hw_fc_info->current_mode = ixgbe_fc_full;
1626 else if (tx_pause)
1627 hw_fc_info->current_mode = ixgbe_fc_tx_pause;
1628 else if (rx_pause)
1629 hw_fc_info->current_mode = ixgbe_fc_rx_pause;
1630 else
1631 hw_fc_info->current_mode = ixgbe_fc_none;
1632
1633 li->lse_ena = !!(resp->cmd_flags & IXGBE_ACI_LSE_IS_ENABLED);
1634
1635 /* save link status information */
1636 if (link)
1637 *link = *li;
1638
1639 /* flag cleared so calling functions don't call AQ again */
1640 hw->link.get_link_info = false;
1641
1642 return IXGBE_SUCCESS;
1643 }
1644
1645 /**
1646 * ixgbe_aci_set_event_mask - set event mask
1647 * @hw: pointer to the HW struct
1648 * @port_num: port number of the physical function
1649 * @mask: event mask to be set
1650 *
1651 * Set the event mask using ACI command (0x0613).
1652 *
1653 * Return: the exit code of the operation.
1654 */
ixgbe_aci_set_event_mask(struct ixgbe_hw * hw,u8 port_num,u16 mask)1655 s32 ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
1656 {
1657 struct ixgbe_aci_cmd_set_event_mask *cmd;
1658 struct ixgbe_aci_desc desc;
1659
1660 cmd = &desc.params.set_event_mask;
1661
1662 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
1663
1664 cmd->event_mask = IXGBE_CPU_TO_LE16(mask);
1665 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1666 }
1667
1668 /**
1669 * ixgbe_configure_lse - enable/disable link status events
1670 * @hw: pointer to the HW struct
1671 * @activate: bool value deciding if lse should be enabled nor disabled
1672 * @mask: event mask to be set; a set bit means deactivation of the
1673 * corresponding event
1674 *
1675 * Set the event mask and then enable or disable link status events
1676 *
1677 * Return: the exit code of the operation.
1678 */
ixgbe_configure_lse(struct ixgbe_hw * hw,bool activate,u16 mask)1679 s32 ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
1680 {
1681 s32 rc;
1682
1683 rc = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
1684 if (rc) {
1685 return rc;
1686 }
1687
1688 /* Enabling link status events generation by fw */
1689 rc = ixgbe_aci_get_link_info(hw, activate, NULL);
1690 if (rc) {
1691 return rc;
1692 }
1693 return IXGBE_SUCCESS;
1694 }
1695
1696 /**
1697 * ixgbe_aci_get_netlist_node - get a node handle
1698 * @hw: pointer to the hw struct
1699 * @cmd: get_link_topo AQ structure
1700 * @node_part_number: output node part number if node found
1701 * @node_handle: output node handle parameter if node found
1702 *
1703 * Get the netlist node and assigns it to
1704 * the provided handle using ACI command (0x06E0).
1705 *
1706 * Return: the exit code of the operation.
1707 */
ixgbe_aci_get_netlist_node(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_get_link_topo * cmd,u8 * node_part_number,u16 * node_handle)1708 s32 ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
1709 struct ixgbe_aci_cmd_get_link_topo *cmd,
1710 u8 *node_part_number, u16 *node_handle)
1711 {
1712 struct ixgbe_aci_desc desc;
1713
1714 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1715 desc.params.get_link_topo = *cmd;
1716
1717 if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
1718 return IXGBE_ERR_NOT_SUPPORTED;
1719
1720 if (node_handle)
1721 *node_handle =
1722 IXGBE_LE16_TO_CPU(desc.params.get_link_topo.addr.handle);
1723 if (node_part_number)
1724 *node_part_number = desc.params.get_link_topo.node_part_num;
1725
1726 return IXGBE_SUCCESS;
1727 }
1728
1729 /**
1730 * ixgbe_find_netlist_node - find a node handle
1731 * @hw: pointer to the hw struct
1732 * @node_type_ctx: type of netlist node to look for
1733 * @node_part_number: node part number to look for
1734 * @node_handle: output parameter if node found - optional
1735 *
1736 * Find and return the node handle for a given node type and part number in the
1737 * netlist. When found IXGBE_SUCCESS is returned, IXGBE_ERR_NOT_SUPPORTED
1738 * otherwise. If @node_handle provided, it would be set to found node handle.
1739 *
1740 * Return: the exit code of the operation.
1741 */
ixgbe_find_netlist_node(struct ixgbe_hw * hw,u8 node_type_ctx,u8 node_part_number,u16 * node_handle)1742 s32 ixgbe_find_netlist_node(struct ixgbe_hw *hw, u8 node_type_ctx,
1743 u8 node_part_number, u16 *node_handle)
1744 {
1745 struct ixgbe_aci_cmd_get_link_topo cmd;
1746 u8 rec_node_part_number;
1747 u16 rec_node_handle;
1748 s32 status;
1749 u8 idx;
1750
1751 for (idx = 0; idx < IXGBE_MAX_NETLIST_SIZE; idx++) {
1752 memset(&cmd, 0, sizeof(cmd));
1753
1754 cmd.addr.topo_params.node_type_ctx =
1755 (node_type_ctx << IXGBE_ACI_LINK_TOPO_NODE_TYPE_S);
1756 cmd.addr.topo_params.index = idx;
1757
1758 status = ixgbe_aci_get_netlist_node(hw, &cmd,
1759 &rec_node_part_number,
1760 &rec_node_handle);
1761 if (status)
1762 return status;
1763
1764 if (rec_node_part_number == node_part_number) {
1765 if (node_handle)
1766 *node_handle = rec_node_handle;
1767 return IXGBE_SUCCESS;
1768 }
1769 }
1770
1771 return IXGBE_ERR_NOT_SUPPORTED;
1772 }
1773
1774 /**
1775 * ixgbe_aci_read_i2c - read I2C register value
1776 * @hw: pointer to the hw struct
1777 * @topo_addr: topology address for a device to communicate with
1778 * @bus_addr: 7-bit I2C bus address
1779 * @addr: I2C memory address (I2C offset) with up to 16 bits
1780 * @params: I2C parameters: bit [7] - Repeated start,
1781 * bits [6:5] data offset size,
1782 * bit [4] - I2C address type, bits [3:0] - data size
1783 * to read (0-16 bytes)
1784 * @data: pointer to data (0 to 16 bytes) to be read from the I2C device
1785 *
1786 * Read the value of the I2C pin register using ACI command (0x06E2).
1787 *
1788 * Return: the exit code of the operation.
1789 */
ixgbe_aci_read_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1790 s32 ixgbe_aci_read_i2c(struct ixgbe_hw *hw,
1791 struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1792 u16 bus_addr, __le16 addr, u8 params, u8 *data)
1793 {
1794 struct ixgbe_aci_desc desc = { 0 };
1795 struct ixgbe_aci_cmd_i2c *cmd;
1796 u8 data_size;
1797 s32 status;
1798
1799 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_i2c);
1800 cmd = &desc.params.read_write_i2c;
1801
1802 if (!data)
1803 return IXGBE_ERR_PARAM;
1804
1805 data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1806 IXGBE_ACI_I2C_DATA_SIZE_S;
1807
1808 cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1809 cmd->topo_addr = topo_addr;
1810 cmd->i2c_params = params;
1811 cmd->i2c_addr = addr;
1812
1813 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1814 if (!status) {
1815 struct ixgbe_aci_cmd_read_i2c_resp *resp;
1816 u8 i;
1817
1818 resp = &desc.params.read_i2c_resp;
1819 for (i = 0; i < data_size; i++) {
1820 *data = resp->i2c_data[i];
1821 data++;
1822 }
1823 }
1824
1825 return status;
1826 }
1827
1828 /**
1829 * ixgbe_aci_write_i2c - write a value to I2C register
1830 * @hw: pointer to the hw struct
1831 * @topo_addr: topology address for a device to communicate with
1832 * @bus_addr: 7-bit I2C bus address
1833 * @addr: I2C memory address (I2C offset) with up to 16 bits
1834 * @params: I2C parameters: bit [4] - I2C address type, bits [3:0] - data size
1835 * to write (0-7 bytes)
1836 * @data: pointer to data (0 to 4 bytes) to be written to the I2C device
1837 *
1838 * Write a value to the I2C pin register using ACI command (0x06E3).
1839 *
1840 * Return: the exit code of the operation.
1841 */
ixgbe_aci_write_i2c(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_addr topo_addr,u16 bus_addr,__le16 addr,u8 params,u8 * data)1842 s32 ixgbe_aci_write_i2c(struct ixgbe_hw *hw,
1843 struct ixgbe_aci_cmd_link_topo_addr topo_addr,
1844 u16 bus_addr, __le16 addr, u8 params, u8 *data)
1845 {
1846 struct ixgbe_aci_desc desc = { 0 };
1847 struct ixgbe_aci_cmd_i2c *cmd;
1848 u8 i, data_size;
1849
1850 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_i2c);
1851 cmd = &desc.params.read_write_i2c;
1852
1853 data_size = (params & IXGBE_ACI_I2C_DATA_SIZE_M) >>
1854 IXGBE_ACI_I2C_DATA_SIZE_S;
1855
1856 /* data_size limited to 4 */
1857 if (data_size > 4)
1858 return IXGBE_ERR_PARAM;
1859
1860 cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(bus_addr);
1861 cmd->topo_addr = topo_addr;
1862 cmd->i2c_params = params;
1863 cmd->i2c_addr = addr;
1864
1865 for (i = 0; i < data_size; i++) {
1866 cmd->i2c_data[i] = *data;
1867 data++;
1868 }
1869
1870 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1871 }
1872
1873 /**
1874 * ixgbe_aci_set_port_id_led - set LED value for the given port
1875 * @hw: pointer to the HW struct
1876 * @orig_mode: set LED original mode
1877 *
1878 * Set LED value for the given port (0x06E9)
1879 *
1880 * Return: the exit code of the operation.
1881 */
ixgbe_aci_set_port_id_led(struct ixgbe_hw * hw,bool orig_mode)1882 s32 ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
1883 {
1884 struct ixgbe_aci_cmd_set_port_id_led *cmd;
1885 struct ixgbe_aci_desc desc;
1886
1887 cmd = &desc.params.set_port_id_led;
1888
1889 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
1890
1891 cmd->lport_num = (u8)hw->bus.func;
1892 cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
1893
1894 if (orig_mode)
1895 cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
1896 else
1897 cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
1898
1899 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1900 }
1901
1902 /**
1903 * ixgbe_aci_set_gpio - set GPIO pin state
1904 * @hw: pointer to the hw struct
1905 * @gpio_ctrl_handle: GPIO controller node handle
1906 * @pin_idx: IO Number of the GPIO that needs to be set
1907 * @value: SW provide IO value to set in the LSB
1908 *
1909 * Set the GPIO pin state that is a part of the topology
1910 * using ACI command (0x06EC).
1911 *
1912 * Return: the exit code of the operation.
1913 */
ixgbe_aci_set_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool value)1914 s32 ixgbe_aci_set_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1915 bool value)
1916 {
1917 struct ixgbe_aci_cmd_gpio *cmd;
1918 struct ixgbe_aci_desc desc;
1919
1920 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_gpio);
1921 cmd = &desc.params.read_write_gpio;
1922 cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1923 cmd->gpio_num = pin_idx;
1924 cmd->gpio_val = value ? 1 : 0;
1925
1926 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1927 }
1928
1929 /**
1930 * ixgbe_aci_get_gpio - get GPIO pin state
1931 * @hw: pointer to the hw struct
1932 * @gpio_ctrl_handle: GPIO controller node handle
1933 * @pin_idx: IO Number of the GPIO that needs to be set
1934 * @value: IO value read
1935 *
1936 * Get the value of a GPIO signal which is part of the topology
1937 * using ACI command (0x06ED).
1938 *
1939 * Return: the exit code of the operation.
1940 */
ixgbe_aci_get_gpio(struct ixgbe_hw * hw,u16 gpio_ctrl_handle,u8 pin_idx,bool * value)1941 s32 ixgbe_aci_get_gpio(struct ixgbe_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx,
1942 bool *value)
1943 {
1944 struct ixgbe_aci_cmd_gpio *cmd;
1945 struct ixgbe_aci_desc desc;
1946 s32 status;
1947
1948 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_gpio);
1949 cmd = &desc.params.read_write_gpio;
1950 cmd->gpio_ctrl_handle = IXGBE_CPU_TO_LE16(gpio_ctrl_handle);
1951 cmd->gpio_num = pin_idx;
1952
1953 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1954 if (status)
1955 return status;
1956
1957 *value = !!cmd->gpio_val;
1958 return IXGBE_SUCCESS;
1959 }
1960
1961 /**
1962 * ixgbe_aci_sff_eeprom - read/write SFF EEPROM
1963 * @hw: pointer to the HW struct
1964 * @lport: bits [7:0] = logical port, bit [8] = logical port valid
1965 * @bus_addr: I2C bus address of the eeprom (typically 0xA0, 0=topo default)
1966 * @mem_addr: I2C offset. lower 8 bits for address, 8 upper bits zero padding.
1967 * @page: QSFP page
1968 * @page_bank_ctrl: configuration of SFF/CMIS paging and banking control
1969 * @data: pointer to data buffer to be read/written to the I2C device.
1970 * @length: 1-16 for read, 1 for write.
1971 * @write: 0 read, 1 for write.
1972 *
1973 * Read/write SFF EEPROM using ACI command (0x06EE).
1974 *
1975 * Return: the exit code of the operation.
1976 */
ixgbe_aci_sff_eeprom(struct ixgbe_hw * hw,u16 lport,u8 bus_addr,u16 mem_addr,u8 page,u8 page_bank_ctrl,u8 * data,u8 length,bool write)1977 s32 ixgbe_aci_sff_eeprom(struct ixgbe_hw *hw, u16 lport, u8 bus_addr,
1978 u16 mem_addr, u8 page, u8 page_bank_ctrl, u8 *data,
1979 u8 length, bool write)
1980 {
1981 struct ixgbe_aci_cmd_sff_eeprom *cmd;
1982 struct ixgbe_aci_desc desc;
1983 s32 status;
1984
1985 if (!data || (mem_addr & 0xff00))
1986 return IXGBE_ERR_PARAM;
1987
1988 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_sff_eeprom);
1989 cmd = &desc.params.read_write_sff_param;
1990 desc.flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
1991 cmd->lport_num = (u8)(lport & 0xff);
1992 cmd->lport_num_valid = (u8)((lport >> 8) & 0x01);
1993 cmd->i2c_bus_addr = IXGBE_CPU_TO_LE16(((bus_addr >> 1) &
1994 IXGBE_ACI_SFF_I2CBUS_7BIT_M) |
1995 ((page_bank_ctrl <<
1996 IXGBE_ACI_SFF_PAGE_BANK_CTRL_S) &
1997 IXGBE_ACI_SFF_PAGE_BANK_CTRL_M));
1998 cmd->i2c_offset = IXGBE_CPU_TO_LE16(mem_addr & 0xff);
1999 cmd->module_page = page;
2000 if (write)
2001 cmd->i2c_bus_addr |= IXGBE_CPU_TO_LE16(IXGBE_ACI_SFF_IS_WRITE);
2002
2003 status = ixgbe_aci_send_cmd(hw, &desc, data, length);
2004 return status;
2005 }
2006
2007 /**
2008 * ixgbe_aci_prog_topo_dev_nvm - program Topology Device NVM
2009 * @hw: pointer to the hardware structure
2010 * @topo_params: pointer to structure storing topology parameters for a device
2011 *
2012 * Program Topology Device NVM using ACI command (0x06F2).
2013 *
2014 * Return: the exit code of the operation.
2015 */
ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params)2016 s32 ixgbe_aci_prog_topo_dev_nvm(struct ixgbe_hw *hw,
2017 struct ixgbe_aci_cmd_link_topo_params *topo_params)
2018 {
2019 struct ixgbe_aci_cmd_prog_topo_dev_nvm *cmd;
2020 struct ixgbe_aci_desc desc;
2021
2022 cmd = &desc.params.prog_topo_dev_nvm;
2023
2024 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_prog_topo_dev_nvm);
2025
2026 memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2027
2028 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2029 }
2030
2031 /**
2032 * ixgbe_aci_read_topo_dev_nvm - read Topology Device NVM
2033 * @hw: pointer to the hardware structure
2034 * @topo_params: pointer to structure storing topology parameters for a device
2035 * @start_address: byte offset in the topology device NVM
2036 * @data: pointer to data buffer
2037 * @data_size: number of bytes to be read from the topology device NVM
2038 * Read Topology Device NVM (0x06F3)
2039 *
2040 * Read Topology of Device NVM using ACI command (0x06F3).
2041 *
2042 * Return: the exit code of the operation.
2043 */
ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_link_topo_params * topo_params,u32 start_address,u8 * data,u8 data_size)2044 s32 ixgbe_aci_read_topo_dev_nvm(struct ixgbe_hw *hw,
2045 struct ixgbe_aci_cmd_link_topo_params *topo_params,
2046 u32 start_address, u8 *data, u8 data_size)
2047 {
2048 struct ixgbe_aci_cmd_read_topo_dev_nvm *cmd;
2049 struct ixgbe_aci_desc desc;
2050 s32 status;
2051
2052 if (!data || data_size == 0 ||
2053 data_size > IXGBE_ACI_READ_TOPO_DEV_NVM_DATA_READ_SIZE)
2054 return IXGBE_ERR_PARAM;
2055
2056 cmd = &desc.params.read_topo_dev_nvm;
2057
2058 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_topo_dev_nvm);
2059
2060 desc.datalen = IXGBE_CPU_TO_LE16(data_size);
2061 memcpy(&cmd->topo_params, topo_params, sizeof(*topo_params));
2062 cmd->start_address = IXGBE_CPU_TO_LE32(start_address);
2063
2064 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2065 if (status)
2066 return status;
2067
2068 memcpy(data, cmd->data_read, data_size);
2069
2070 return IXGBE_SUCCESS;
2071 }
2072
2073 /**
2074 * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
2075 * @hw: pointer to the HW structure
2076 * @access: NVM access type (read or write)
2077 *
2078 * Request NVM ownership.
2079 *
2080 * Return: the exit code of the operation.
2081 */
ixgbe_acquire_nvm(struct ixgbe_hw * hw,enum ixgbe_aci_res_access_type access)2082 s32 ixgbe_acquire_nvm(struct ixgbe_hw *hw,
2083 enum ixgbe_aci_res_access_type access)
2084 {
2085 u32 fla;
2086
2087 /* Skip if we are in blank NVM programming mode */
2088 fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2089 if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2090 return IXGBE_SUCCESS;
2091
2092 return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
2093 IXGBE_NVM_TIMEOUT);
2094 }
2095
2096 /**
2097 * ixgbe_release_nvm - Generic request for releasing the NVM ownership
2098 * @hw: pointer to the HW structure
2099 *
2100 * Release NVM ownership.
2101 */
ixgbe_release_nvm(struct ixgbe_hw * hw)2102 void ixgbe_release_nvm(struct ixgbe_hw *hw)
2103 {
2104 u32 fla;
2105
2106 /* Skip if we are in blank NVM programming mode */
2107 fla = IXGBE_READ_REG(hw, GLNVM_FLA);
2108 if ((fla & GLNVM_FLA_LOCKED_M) == 0)
2109 return;
2110
2111 ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
2112 }
2113
2114
2115 /**
2116 * ixgbe_aci_read_nvm - read NVM
2117 * @hw: pointer to the HW struct
2118 * @module_typeid: module pointer location in words from the NVM beginning
2119 * @offset: byte offset from the module beginning
2120 * @length: length of the section to be read (in bytes from the offset)
2121 * @data: command buffer (size [bytes] = length)
2122 * @last_command: tells if this is the last command in a series
2123 * @read_shadow_ram: tell if this is a shadow RAM read
2124 *
2125 * Read the NVM using ACI command (0x0701).
2126 *
2127 * Return: the exit code of the operation.
2128 */
ixgbe_aci_read_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,bool read_shadow_ram)2129 s32 ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
2130 u16 length, void *data, bool last_command,
2131 bool read_shadow_ram)
2132 {
2133 struct ixgbe_aci_desc desc;
2134 struct ixgbe_aci_cmd_nvm *cmd;
2135
2136 cmd = &desc.params.nvm;
2137
2138 if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
2139 return IXGBE_ERR_PARAM;
2140
2141 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
2142
2143 if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
2144 cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
2145
2146 /* If this is the last command in a series, set the proper flag. */
2147 if (last_command)
2148 cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2149 cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2150 cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
2151 cmd->offset_high = (offset >> 16) & 0xFF;
2152 cmd->length = IXGBE_CPU_TO_LE16(length);
2153
2154 return ixgbe_aci_send_cmd(hw, &desc, data, length);
2155 }
2156
2157 /**
2158 * ixgbe_aci_erase_nvm - erase NVM sector
2159 * @hw: pointer to the HW struct
2160 * @module_typeid: module pointer location in words from the NVM beginning
2161 *
2162 * Erase the NVM sector using the ACI command (0x0702).
2163 *
2164 * Return: the exit code of the operation.
2165 */
ixgbe_aci_erase_nvm(struct ixgbe_hw * hw,u16 module_typeid)2166 s32 ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
2167 {
2168 struct ixgbe_aci_desc desc;
2169 struct ixgbe_aci_cmd_nvm *cmd;
2170 s32 status;
2171 __le16 len;
2172
2173 /* read a length value from SR, so module_typeid is equal to 0 */
2174 /* calculate offset where module size is placed from bytes to words */
2175 /* set last command and read from SR values to true */
2176 status = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
2177 true);
2178 if (status)
2179 return status;
2180
2181 cmd = &desc.params.nvm;
2182
2183 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
2184
2185 cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2186 cmd->length = len;
2187 cmd->offset_low = 0;
2188 cmd->offset_high = 0;
2189
2190 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2191 }
2192
2193 /**
2194 * ixgbe_aci_update_nvm - update NVM
2195 * @hw: pointer to the HW struct
2196 * @module_typeid: module pointer location in words from the NVM beginning
2197 * @offset: byte offset from the module beginning
2198 * @length: length of the section to be written (in bytes from the offset)
2199 * @data: command buffer (size [bytes] = length)
2200 * @last_command: tells if this is the last command in a series
2201 * @command_flags: command parameters
2202 *
2203 * Update the NVM using the ACI command (0x0703).
2204 *
2205 * Return: the exit code of the operation.
2206 */
ixgbe_aci_update_nvm(struct ixgbe_hw * hw,u16 module_typeid,u32 offset,u16 length,void * data,bool last_command,u8 command_flags)2207 s32 ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
2208 u32 offset, u16 length, void *data,
2209 bool last_command, u8 command_flags)
2210 {
2211 struct ixgbe_aci_desc desc;
2212 struct ixgbe_aci_cmd_nvm *cmd;
2213
2214 cmd = &desc.params.nvm;
2215
2216 /* In offset the highest byte must be zeroed. */
2217 if (offset & 0xFF000000)
2218 return IXGBE_ERR_PARAM;
2219
2220 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
2221
2222 cmd->cmd_flags |= command_flags;
2223
2224 /* If this is the last command in a series, set the proper flag. */
2225 if (last_command)
2226 cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2227 cmd->module_typeid = IXGBE_CPU_TO_LE16(module_typeid);
2228 cmd->offset_low = IXGBE_CPU_TO_LE16(offset & 0xFFFF);
2229 cmd->offset_high = (offset >> 16) & 0xFF;
2230 cmd->length = IXGBE_CPU_TO_LE16(length);
2231
2232 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2233
2234 return ixgbe_aci_send_cmd(hw, &desc, data, length);
2235 }
2236
2237 /**
2238 * ixgbe_aci_read_nvm_cfg - read an NVM config block
2239 * @hw: pointer to the HW struct
2240 * @cmd_flags: NVM access admin command bits
2241 * @field_id: field or feature ID
2242 * @data: buffer for result
2243 * @buf_size: buffer size
2244 * @elem_count: pointer to count of elements read by FW
2245 *
2246 * Reads a single or multiple feature/field ID and data using ACI command
2247 * (0x0704).
2248 *
2249 * Return: the exit code of the operation.
2250 */
ixgbe_aci_read_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,u16 field_id,void * data,u16 buf_size,u16 * elem_count)2251 s32 ixgbe_aci_read_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2252 u16 field_id, void *data, u16 buf_size,
2253 u16 *elem_count)
2254 {
2255 struct ixgbe_aci_cmd_nvm_cfg *cmd;
2256 struct ixgbe_aci_desc desc;
2257 s32 status;
2258
2259 cmd = &desc.params.nvm_cfg;
2260
2261 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_read);
2262
2263 cmd->cmd_flags = cmd_flags;
2264 cmd->id = IXGBE_CPU_TO_LE16(field_id);
2265
2266 status = ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2267 if (!status && elem_count)
2268 *elem_count = IXGBE_LE16_TO_CPU(cmd->count);
2269
2270 return status;
2271 }
2272
2273 /**
2274 * ixgbe_aci_write_nvm_cfg - write an NVM config block
2275 * @hw: pointer to the HW struct
2276 * @cmd_flags: NVM access admin command bits
2277 * @data: buffer for result
2278 * @buf_size: buffer size
2279 * @elem_count: count of elements to be written
2280 *
2281 * Writes a single or multiple feature/field ID and data using ACI command
2282 * (0x0705).
2283 *
2284 * Return: the exit code of the operation.
2285 */
ixgbe_aci_write_nvm_cfg(struct ixgbe_hw * hw,u8 cmd_flags,void * data,u16 buf_size,u16 elem_count)2286 s32 ixgbe_aci_write_nvm_cfg(struct ixgbe_hw *hw, u8 cmd_flags,
2287 void *data, u16 buf_size, u16 elem_count)
2288 {
2289 struct ixgbe_aci_cmd_nvm_cfg *cmd;
2290 struct ixgbe_aci_desc desc;
2291
2292 cmd = &desc.params.nvm_cfg;
2293
2294 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_cfg_write);
2295 desc.flags |= IXGBE_CPU_TO_LE16(IXGBE_ACI_FLAG_RD);
2296
2297 cmd->count = IXGBE_CPU_TO_LE16(elem_count);
2298 cmd->cmd_flags = cmd_flags;
2299
2300 return ixgbe_aci_send_cmd(hw, &desc, data, buf_size);
2301 }
2302
2303 /**
2304 * ixgbe_nvm_validate_checksum - validate checksum
2305 * @hw: pointer to the HW struct
2306 *
2307 * Verify NVM PFA checksum validity using ACI command (0x0706).
2308 * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
2309 * The function acquires and then releases the NVM ownership.
2310 *
2311 * Return: the exit code of the operation.
2312 */
ixgbe_nvm_validate_checksum(struct ixgbe_hw * hw)2313 s32 ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
2314 {
2315 struct ixgbe_aci_cmd_nvm_checksum *cmd;
2316 struct ixgbe_aci_desc desc;
2317 s32 status;
2318
2319 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2320 if (status)
2321 return status;
2322
2323 cmd = &desc.params.nvm_checksum;
2324
2325 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2326 cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
2327
2328 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2329
2330 ixgbe_release_nvm(hw);
2331
2332 if (!status)
2333 if (IXGBE_LE16_TO_CPU(cmd->checksum) !=
2334 IXGBE_ACI_NVM_CHECKSUM_CORRECT) {
2335 ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE,
2336 "Invalid Shadow Ram checksum");
2337 status = IXGBE_ERR_NVM_CHECKSUM;
2338 }
2339
2340 return status;
2341 }
2342
2343 /**
2344 * ixgbe_nvm_recalculate_checksum - recalculate checksum
2345 * @hw: pointer to the HW struct
2346 *
2347 * Recalculate NVM PFA checksum using ACI command (0x0706).
2348 * The function acquires and then releases the NVM ownership.
2349 *
2350 * Return: the exit code of the operation.
2351 */
ixgbe_nvm_recalculate_checksum(struct ixgbe_hw * hw)2352 s32 ixgbe_nvm_recalculate_checksum(struct ixgbe_hw *hw)
2353 {
2354 struct ixgbe_aci_cmd_nvm_checksum *cmd;
2355 struct ixgbe_aci_desc desc;
2356 s32 status;
2357
2358 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2359 if (status)
2360 return status;
2361
2362 cmd = &desc.params.nvm_checksum;
2363
2364 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2365 cmd->flags = IXGBE_ACI_NVM_CHECKSUM_RECALC;
2366
2367 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2368
2369 ixgbe_release_nvm(hw);
2370
2371 return status;
2372 }
2373
2374 /**
2375 * ixgbe_nvm_write_activate - NVM activate write
2376 * @hw: pointer to the HW struct
2377 * @cmd_flags: flags for write activate command
2378 * @response_flags: response indicators from firmware
2379 *
2380 * Update the control word with the required banks' validity bits
2381 * and dumps the Shadow RAM to flash using ACI command (0x0707).
2382 *
2383 * cmd_flags controls which banks to activate, the preservation level to use
2384 * when activating the NVM bank, and whether an EMP reset is required for
2385 * activation.
2386 *
2387 * Note that the 16bit cmd_flags value is split between two separate 1 byte
2388 * flag values in the descriptor.
2389 *
2390 * On successful return of the firmware command, the response_flags variable
2391 * is updated with the flags reported by firmware indicating certain status,
2392 * such as whether EMP reset is enabled.
2393 *
2394 * Return: the exit code of the operation.
2395 */
ixgbe_nvm_write_activate(struct ixgbe_hw * hw,u16 cmd_flags,u8 * response_flags)2396 s32 ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
2397 u8 *response_flags)
2398 {
2399 struct ixgbe_aci_desc desc;
2400 struct ixgbe_aci_cmd_nvm *cmd;
2401 s32 status;
2402
2403 cmd = &desc.params.nvm;
2404 ixgbe_fill_dflt_direct_cmd_desc(&desc,
2405 ixgbe_aci_opc_nvm_write_activate);
2406
2407 cmd->cmd_flags = LO_BYTE(cmd_flags);
2408 cmd->offset_high = HI_BYTE(cmd_flags);
2409
2410 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2411 if (!status && response_flags)
2412 *response_flags = cmd->cmd_flags;
2413
2414 return status;
2415 }
2416
2417 /**
2418 * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
2419 * @hw: pointer to the HW structure
2420 * @bank: whether to read from the active or inactive flash bank
2421 * @module: the module to read from
2422 *
2423 * Based on the module, lookup the module offset from the beginning of the
2424 * flash.
2425 *
2426 * Return: the flash offset. Note that a value of zero is invalid and must be
2427 * treated as an error.
2428 */
ixgbe_get_flash_bank_offset(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module)2429 static u32 ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
2430 enum ixgbe_bank_select bank,
2431 u16 module)
2432 {
2433 struct ixgbe_bank_info *banks = &hw->flash.banks;
2434 enum ixgbe_flash_bank active_bank;
2435 bool second_bank_active;
2436 u32 offset, size;
2437
2438 switch (module) {
2439 case E610_SR_1ST_NVM_BANK_PTR:
2440 offset = banks->nvm_ptr;
2441 size = banks->nvm_size;
2442 active_bank = banks->nvm_bank;
2443 break;
2444 case E610_SR_1ST_OROM_BANK_PTR:
2445 offset = banks->orom_ptr;
2446 size = banks->orom_size;
2447 active_bank = banks->orom_bank;
2448 break;
2449 case E610_SR_NETLIST_BANK_PTR:
2450 offset = banks->netlist_ptr;
2451 size = banks->netlist_size;
2452 active_bank = banks->netlist_bank;
2453 break;
2454 default:
2455 return 0;
2456 }
2457
2458 switch (active_bank) {
2459 case IXGBE_1ST_FLASH_BANK:
2460 second_bank_active = false;
2461 break;
2462 case IXGBE_2ND_FLASH_BANK:
2463 second_bank_active = true;
2464 break;
2465 default:
2466 return 0;
2467 }
2468
2469 /* The second flash bank is stored immediately following the first
2470 * bank. Based on whether the 1st or 2nd bank is active, and whether
2471 * we want the active or inactive bank, calculate the desired offset.
2472 */
2473 switch (bank) {
2474 case IXGBE_ACTIVE_FLASH_BANK:
2475 return offset + (second_bank_active ? size : 0);
2476 case IXGBE_INACTIVE_FLASH_BANK:
2477 return offset + (second_bank_active ? 0 : size);
2478 }
2479
2480 return 0;
2481 }
2482
2483 /**
2484 * ixgbe_read_flash_module - Read a word from one of the main NVM modules
2485 * @hw: pointer to the HW structure
2486 * @bank: which bank of the module to read
2487 * @module: the module to read
2488 * @offset: the offset into the module in bytes
2489 * @data: storage for the word read from the flash
2490 * @length: bytes of data to read
2491 *
2492 * Read data from the specified flash module. The bank parameter indicates
2493 * whether or not to read from the active bank or the inactive bank of that
2494 * module.
2495 *
2496 * The word will be read using flat NVM access, and relies on the
2497 * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
2498 * during initialization.
2499 *
2500 * Return: the exit code of the operation.
2501 */
ixgbe_read_flash_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u16 module,u32 offset,u8 * data,u32 length)2502 static s32 ixgbe_read_flash_module(struct ixgbe_hw *hw,
2503 enum ixgbe_bank_select bank,
2504 u16 module, u32 offset, u8 *data, u32 length)
2505 {
2506 s32 status;
2507 u32 start;
2508
2509 start = ixgbe_get_flash_bank_offset(hw, bank, module);
2510 if (!start) {
2511 return IXGBE_ERR_PARAM;
2512 }
2513
2514 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2515 if (status)
2516 return status;
2517
2518 status = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
2519
2520 ixgbe_release_nvm(hw);
2521
2522 return status;
2523 }
2524
2525 /**
2526 * ixgbe_read_netlist_module - Read data from the netlist module area
2527 * @hw: pointer to the HW structure
2528 * @bank: whether to read from the active or inactive module
2529 * @offset: offset into the netlist to read from
2530 * @data: storage for returned word value
2531 *
2532 * Read a word from the specified netlist bank.
2533 *
2534 * Return: the exit code of the operation.
2535 */
ixgbe_read_netlist_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2536 static s32 ixgbe_read_netlist_module(struct ixgbe_hw *hw,
2537 enum ixgbe_bank_select bank,
2538 u32 offset, u16 *data)
2539 {
2540 __le16 data_local;
2541 s32 status;
2542
2543 status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
2544 offset * sizeof(u16),
2545 (u8 *)&data_local,
2546 sizeof(u16));
2547 if (!status)
2548 *data = IXGBE_LE16_TO_CPU(data_local);
2549
2550 return status;
2551 }
2552
2553 /**
2554 * ixgbe_read_nvm_module - Read from the active main NVM module
2555 * @hw: pointer to the HW structure
2556 * @bank: whether to read from active or inactive NVM module
2557 * @offset: offset into the NVM module to read, in words
2558 * @data: storage for returned word value
2559 *
2560 * Read the specified word from the active NVM module. This includes the CSS
2561 * header at the start of the NVM module.
2562 *
2563 * Return: the exit code of the operation.
2564 */
ixgbe_read_nvm_module(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2565 static s32 ixgbe_read_nvm_module(struct ixgbe_hw *hw,
2566 enum ixgbe_bank_select bank,
2567 u32 offset, u16 *data)
2568 {
2569 __le16 data_local;
2570 s32 status;
2571
2572 status = ixgbe_read_flash_module(hw, bank, E610_SR_1ST_NVM_BANK_PTR,
2573 offset * sizeof(u16),
2574 (u8 *)&data_local,
2575 sizeof(u16));
2576 if (!status)
2577 *data = IXGBE_LE16_TO_CPU(data_local);
2578
2579 return status;
2580 }
2581
2582 /**
2583 * ixgbe_get_nvm_css_hdr_len - Read the CSS header length from the
2584 * NVM CSS header
2585 * @hw: pointer to the HW struct
2586 * @bank: whether to read from the active or inactive flash bank
2587 * @hdr_len: storage for header length in words
2588 *
2589 * Read the CSS header length from the NVM CSS header and add the
2590 * Authentication header size, and then convert to words.
2591 *
2592 * Return: the exit code of the operation.
2593 */
ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * hdr_len)2594 static s32 ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
2595 enum ixgbe_bank_select bank,
2596 u32 *hdr_len)
2597 {
2598 u16 hdr_len_l, hdr_len_h;
2599 u32 hdr_len_dword;
2600 s32 status;
2601
2602 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
2603 &hdr_len_l);
2604 if (status)
2605 return status;
2606
2607 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
2608 &hdr_len_h);
2609 if (status)
2610 return status;
2611
2612 /* CSS header length is in DWORD, so convert to words and add
2613 * authentication header size
2614 */
2615 hdr_len_dword = hdr_len_h << 16 | hdr_len_l;
2616 *hdr_len = (hdr_len_dword * 2) + IXGBE_NVM_AUTH_HEADER_LEN;
2617
2618 return IXGBE_SUCCESS;
2619 }
2620
2621 /**
2622 * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy in the NVM bank
2623 * @hw: pointer to the HW structure
2624 * @bank: whether to read from the active or inactive NVM module
2625 * @offset: offset into the Shadow RAM copy to read, in words
2626 * @data: storage for returned word value
2627 *
2628 * Read the specified word from the copy of the Shadow RAM found in the
2629 * specified NVM module.
2630 *
2631 * Return: the exit code of the operation.
2632 */
ixgbe_read_nvm_sr_copy(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 offset,u16 * data)2633 static s32 ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
2634 enum ixgbe_bank_select bank,
2635 u32 offset, u16 *data)
2636 {
2637 u32 hdr_len;
2638 s32 status;
2639
2640 status = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
2641 if (status)
2642 return status;
2643
2644 hdr_len = ROUND_UP(hdr_len, 32);
2645
2646 return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
2647 }
2648
2649 /**
2650 * ixgbe_get_nvm_minsrevs - Get the minsrevs values from flash
2651 * @hw: pointer to the HW struct
2652 * @minsrevs: structure to store NVM and OROM minsrev values
2653 *
2654 * Read the Minimum Security Revision TLV and extract
2655 * the revision values from the flash image
2656 * into a readable structure for processing.
2657 *
2658 * Return: the exit code of the operation.
2659 */
ixgbe_get_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2660 s32 ixgbe_get_nvm_minsrevs(struct ixgbe_hw *hw,
2661 struct ixgbe_minsrev_info *minsrevs)
2662 {
2663 struct ixgbe_aci_cmd_nvm_minsrev data;
2664 s32 status;
2665 u16 valid;
2666
2667 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2668 if (status)
2669 return status;
2670
2671 status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID,
2672 0, sizeof(data), &data,
2673 true, false);
2674
2675 ixgbe_release_nvm(hw);
2676
2677 if (status)
2678 return status;
2679
2680 valid = IXGBE_LE16_TO_CPU(data.validity);
2681
2682 /* Extract NVM minimum security revision */
2683 if (valid & IXGBE_ACI_NVM_MINSREV_NVM_VALID) {
2684 u16 minsrev_l = IXGBE_LE16_TO_CPU(data.nvm_minsrev_l);
2685 u16 minsrev_h = IXGBE_LE16_TO_CPU(data.nvm_minsrev_h);
2686
2687 minsrevs->nvm = minsrev_h << 16 | minsrev_l;
2688 minsrevs->nvm_valid = true;
2689 }
2690
2691 /* Extract the OROM minimum security revision */
2692 if (valid & IXGBE_ACI_NVM_MINSREV_OROM_VALID) {
2693 u16 minsrev_l = IXGBE_LE16_TO_CPU(data.orom_minsrev_l);
2694 u16 minsrev_h = IXGBE_LE16_TO_CPU(data.orom_minsrev_h);
2695
2696 minsrevs->orom = minsrev_h << 16 | minsrev_l;
2697 minsrevs->orom_valid = true;
2698 }
2699
2700 return IXGBE_SUCCESS;
2701 }
2702
2703 /**
2704 * ixgbe_update_nvm_minsrevs - Update minsrevs TLV data in flash
2705 * @hw: pointer to the HW struct
2706 * @minsrevs: minimum security revision information
2707 *
2708 * Update the NVM or Option ROM minimum security revision fields in the PFA
2709 * area of the flash. Reads the minsrevs->nvm_valid and minsrevs->orom_valid
2710 * fields to determine what update is being requested. If the valid bit is not
2711 * set for that module, then the associated minsrev will be left as is.
2712 *
2713 * Return: the exit code of the operation.
2714 */
ixgbe_update_nvm_minsrevs(struct ixgbe_hw * hw,struct ixgbe_minsrev_info * minsrevs)2715 s32 ixgbe_update_nvm_minsrevs(struct ixgbe_hw *hw,
2716 struct ixgbe_minsrev_info *minsrevs)
2717 {
2718 struct ixgbe_aci_cmd_nvm_minsrev data;
2719 s32 status;
2720
2721 if (!minsrevs->nvm_valid && !minsrevs->orom_valid) {
2722 return IXGBE_ERR_PARAM;
2723 }
2724
2725 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
2726 if (status)
2727 return status;
2728
2729 /* Get current data */
2730 status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2731 sizeof(data), &data, true, false);
2732 if (status)
2733 goto exit_release_res;
2734
2735 if (minsrevs->nvm_valid) {
2736 data.nvm_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->nvm & 0xFFFF);
2737 data.nvm_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->nvm >> 16);
2738 data.validity |=
2739 IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_NVM_VALID);
2740 }
2741
2742 if (minsrevs->orom_valid) {
2743 data.orom_minsrev_l = IXGBE_CPU_TO_LE16(minsrevs->orom & 0xFFFF);
2744 data.orom_minsrev_h = IXGBE_CPU_TO_LE16(minsrevs->orom >> 16);
2745 data.validity |=
2746 IXGBE_CPU_TO_LE16(IXGBE_ACI_NVM_MINSREV_OROM_VALID);
2747 }
2748
2749 /* Update flash data */
2750 status = ixgbe_aci_update_nvm(hw, IXGBE_ACI_NVM_MINSREV_MOD_ID, 0,
2751 sizeof(data), &data, false,
2752 IXGBE_ACI_NVM_SPECIAL_UPDATE);
2753 if (status)
2754 goto exit_release_res;
2755
2756 /* Dump the Shadow RAM to the flash */
2757 status = ixgbe_nvm_write_activate(hw, 0, NULL);
2758
2759 exit_release_res:
2760 ixgbe_release_nvm(hw);
2761
2762 return status;
2763 }
2764
2765 /**
2766 * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
2767 * @hw: pointer to the HW struct
2768 * @bank: whether to read from the active or inactive flash bank
2769 * @srev: storage for security revision
2770 *
2771 * Read the security revision out of the CSS header of the active NVM module
2772 * bank.
2773 *
2774 * Return: the exit code of the operation.
2775 */
ixgbe_get_nvm_srev(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,u32 * srev)2776 static s32 ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
2777 enum ixgbe_bank_select bank, u32 *srev)
2778 {
2779 u16 srev_l, srev_h;
2780 s32 status;
2781
2782 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
2783 if (status)
2784 return status;
2785
2786 status = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
2787 if (status)
2788 return status;
2789
2790 *srev = srev_h << 16 | srev_l;
2791
2792 return IXGBE_SUCCESS;
2793 }
2794
2795 /**
2796 * ixgbe_get_nvm_ver_info - Read NVM version information
2797 * @hw: pointer to the HW struct
2798 * @bank: whether to read from the active or inactive flash bank
2799 * @nvm: pointer to NVM info structure
2800 *
2801 * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
2802 * in the nvm info structure.
2803 *
2804 * Return: the exit code of the operation.
2805 */
ixgbe_get_nvm_ver_info(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,struct ixgbe_nvm_info * nvm)2806 static s32 ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
2807 enum ixgbe_bank_select bank,
2808 struct ixgbe_nvm_info *nvm)
2809 {
2810 u16 eetrack_lo, eetrack_hi, ver;
2811 s32 status;
2812
2813 status = ixgbe_read_nvm_sr_copy(hw, bank,
2814 E610_SR_NVM_DEV_STARTER_VER, &ver);
2815 if (status) {
2816 return status;
2817 }
2818
2819 nvm->major = (ver & E610_NVM_VER_HI_MASK) >> E610_NVM_VER_HI_SHIFT;
2820 nvm->minor = (ver & E610_NVM_VER_LO_MASK) >> E610_NVM_VER_LO_SHIFT;
2821
2822 status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_LO,
2823 &eetrack_lo);
2824 if (status) {
2825 return status;
2826 }
2827 status = ixgbe_read_nvm_sr_copy(hw, bank, E610_SR_NVM_EETRACK_HI,
2828 &eetrack_hi);
2829 if (status) {
2830 return status;
2831 }
2832
2833 nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
2834
2835 status = ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
2836
2837 return IXGBE_SUCCESS;
2838 }
2839
2840 /**
2841 * ixgbe_get_inactive_nvm_ver - Read Option ROM version from the inactive bank
2842 * @hw: pointer to the HW structure
2843 * @nvm: storage for Option ROM version information
2844 *
2845 * Reads the NVM EETRACK ID, Map version, and security revision of the
2846 * inactive NVM bank. Used to access version data for a pending update that
2847 * has not yet been activated.
2848 *
2849 * Return: the exit code of the operation.
2850 */
ixgbe_get_inactive_nvm_ver(struct ixgbe_hw * hw,struct ixgbe_nvm_info * nvm)2851 s32 ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
2852 {
2853 return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
2854 }
2855
2856 /**
2857 * ixgbe_get_active_nvm_ver - Read Option ROM version from the active bank
2858 * @hw: pointer to the HW structure
2859 * @nvm: storage for Option ROM version information
2860 *
2861 * Reads the NVM EETRACK ID, Map version, and security revision of the
2862 * active NVM bank.
2863 *
2864 * Return: the exit code of the operation.
2865 */
ixgbe_get_active_nvm_ver(struct ixgbe_hw * hw,struct ixgbe_nvm_info * nvm)2866 s32 ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
2867 {
2868 return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
2869 }
2870
2871 /**
2872 * ixgbe_get_netlist_info
2873 * @hw: pointer to the HW struct
2874 * @bank: whether to read from the active or inactive flash bank
2875 * @netlist: pointer to netlist version info structure
2876 *
2877 * Get the netlist version information from the requested bank. Reads the Link
2878 * Topology section to find the Netlist ID block and extract the relevant
2879 * information into the netlist version structure.
2880 *
2881 * Return: the exit code of the operation.
2882 */
ixgbe_get_netlist_info(struct ixgbe_hw * hw,enum ixgbe_bank_select bank,struct ixgbe_netlist_info * netlist)2883 static s32 ixgbe_get_netlist_info(struct ixgbe_hw *hw,
2884 enum ixgbe_bank_select bank,
2885 struct ixgbe_netlist_info *netlist)
2886 {
2887 u16 module_id, length, node_count, i;
2888 u16 *id_blk;
2889 s32 status;
2890
2891 status = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
2892 &module_id);
2893 if (status)
2894 return status;
2895
2896 if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID) {
2897 return IXGBE_ERR_NVM;
2898 }
2899
2900 status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
2901 &length);
2902 if (status)
2903 return status;
2904
2905 /* sanity check that we have at least enough words to store the
2906 * netlist ID block
2907 */
2908 if (length < IXGBE_NETLIST_ID_BLK_SIZE) {
2909 return IXGBE_ERR_NVM;
2910 }
2911
2912 status = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
2913 &node_count);
2914 if (status)
2915 return status;
2916 node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
2917
2918 id_blk = (u16 *)ixgbe_calloc(hw, IXGBE_NETLIST_ID_BLK_SIZE,
2919 sizeof(*id_blk));
2920 if (!id_blk)
2921 return IXGBE_ERR_NO_SPACE;
2922
2923 /* Read out the entire Netlist ID Block at once. */
2924 status = ixgbe_read_flash_module(hw, bank, E610_SR_NETLIST_BANK_PTR,
2925 IXGBE_NETLIST_ID_BLK_OFFSET(node_count) * sizeof(u16),
2926 (u8 *)id_blk,
2927 IXGBE_NETLIST_ID_BLK_SIZE * sizeof(u16));
2928 if (status)
2929 goto exit_error;
2930
2931 for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
2932 id_blk[i] = IXGBE_LE16_TO_CPU(((__le16 *)id_blk)[i]);
2933
2934 netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
2935 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
2936 netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
2937 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
2938 netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
2939 id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
2940 netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
2941 id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
2942 netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
2943 /* Read the left most 4 bytes of SHA */
2944 netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
2945 id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
2946
2947 exit_error:
2948 ixgbe_free(hw, id_blk);
2949
2950 return status;
2951 }
2952
2953 /**
2954 * ixgbe_get_inactive_netlist_ver
2955 * @hw: pointer to the HW struct
2956 * @netlist: pointer to netlist version info structure
2957 *
2958 * Read the netlist version data from the inactive netlist bank. Used to
2959 * extract version data of a pending flash update in order to display the
2960 * version data.
2961 *
2962 * Return: the exit code of the operation.
2963 */
ixgbe_get_inactive_netlist_ver(struct ixgbe_hw * hw,struct ixgbe_netlist_info * netlist)2964 s32 ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
2965 struct ixgbe_netlist_info *netlist)
2966 {
2967 return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
2968 }
2969
2970 /**
2971 * ixgbe_read_sr_pointer - Read the value of a Shadow RAM pointer word
2972 * @hw: pointer to the HW structure
2973 * @offset: the word offset of the Shadow RAM word to read
2974 * @pointer: pointer value read from Shadow RAM
2975 *
2976 * Read the given Shadow RAM word, and convert it to a pointer value specified
2977 * in bytes. This function assumes the specified offset is a valid pointer
2978 * word.
2979 *
2980 * Each pointer word specifies whether it is stored in word size or 4KB
2981 * sector size by using the highest bit. The reported pointer value will be in
2982 * bytes, intended for flat NVM reads.
2983 *
2984 * Return: the exit code of the operation.
2985 */
ixgbe_read_sr_pointer(struct ixgbe_hw * hw,u16 offset,u32 * pointer)2986 static s32 ixgbe_read_sr_pointer(struct ixgbe_hw *hw, u16 offset, u32 *pointer)
2987 {
2988 s32 status;
2989 u16 value;
2990
2991 status = ixgbe_read_ee_aci_E610(hw, offset, &value);
2992 if (status)
2993 return status;
2994
2995 /* Determine if the pointer is in 4KB or word units */
2996 if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
2997 *pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * 4 * 1024;
2998 else
2999 *pointer = value * 2;
3000
3001 return IXGBE_SUCCESS;
3002 }
3003
3004 /**
3005 * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
3006 * @hw: pointer to the HW structure
3007 * @offset: the word offset of the Shadow RAM to read
3008 * @size: size value read from the Shadow RAM
3009 *
3010 * Read the given Shadow RAM word, and convert it to an area size value
3011 * specified in bytes. This function assumes the specified offset is a valid
3012 * area size word.
3013 *
3014 * Each area size word is specified in 4KB sector units. This function reports
3015 * the size in bytes, intended for flat NVM reads.
3016 *
3017 * Return: the exit code of the operation.
3018 */
ixgbe_read_sr_area_size(struct ixgbe_hw * hw,u16 offset,u32 * size)3019 static s32 ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
3020 {
3021 s32 status;
3022 u16 value;
3023
3024 status = ixgbe_read_ee_aci_E610(hw, offset, &value);
3025 if (status)
3026 return status;
3027
3028 /* Area sizes are always specified in 4KB units */
3029 *size = value * 4 * 1024;
3030
3031 return IXGBE_SUCCESS;
3032 }
3033
3034 /**
3035 * ixgbe_discover_flash_size - Discover the available flash size.
3036 * @hw: pointer to the HW struct
3037 *
3038 * The device flash could be up to 16MB in size. However, it is possible that
3039 * the actual size is smaller. Use bisection to determine the accessible size
3040 * of flash memory.
3041 *
3042 * Return: the exit code of the operation.
3043 */
ixgbe_discover_flash_size(struct ixgbe_hw * hw)3044 static s32 ixgbe_discover_flash_size(struct ixgbe_hw *hw)
3045 {
3046 u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
3047 s32 status;
3048
3049 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3050 if (status)
3051 return status;
3052
3053 while ((max_size - min_size) > 1) {
3054 u32 offset = (max_size + min_size) / 2;
3055 u32 len = 1;
3056 u8 data;
3057
3058 status = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
3059 if (status == IXGBE_ERR_ACI_ERROR &&
3060 hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
3061 status = IXGBE_SUCCESS;
3062 max_size = offset;
3063 } else if (!status) {
3064 min_size = offset;
3065 } else {
3066 /* an unexpected error occurred */
3067 goto err_read_flat_nvm;
3068 }
3069 }
3070
3071 hw->flash.flash_size = max_size;
3072
3073 err_read_flat_nvm:
3074 ixgbe_release_nvm(hw);
3075
3076 return status;
3077 }
3078
3079 /**
3080 * ixgbe_determine_active_flash_banks - Discover active bank for each module
3081 * @hw: pointer to the HW struct
3082 *
3083 * Read the Shadow RAM control word and determine which banks are active for
3084 * the NVM, OROM, and Netlist modules. Also read and calculate the associated
3085 * pointer and size. These values are then cached into the ixgbe_flash_info
3086 * structure for later use in order to calculate the correct offset to read
3087 * from the active module.
3088 *
3089 * Return: the exit code of the operation.
3090 */
ixgbe_determine_active_flash_banks(struct ixgbe_hw * hw)3091 static s32 ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
3092 {
3093 struct ixgbe_bank_info *banks = &hw->flash.banks;
3094 u16 ctrl_word;
3095 s32 status;
3096
3097 status = ixgbe_read_ee_aci_E610(hw, E610_SR_NVM_CTRL_WORD, &ctrl_word);
3098 if (status) {
3099 return status;
3100 }
3101
3102 /* Check that the control word indicates validity */
3103 if ((ctrl_word & IXGBE_SR_CTRL_WORD_1_M) >> IXGBE_SR_CTRL_WORD_1_S !=
3104 IXGBE_SR_CTRL_WORD_VALID) {
3105 return IXGBE_ERR_CONFIG;
3106 }
3107
3108 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
3109 banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
3110 else
3111 banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
3112
3113 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
3114 banks->orom_bank = IXGBE_1ST_FLASH_BANK;
3115 else
3116 banks->orom_bank = IXGBE_2ND_FLASH_BANK;
3117
3118 if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
3119 banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
3120 else
3121 banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
3122
3123 status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_NVM_BANK_PTR,
3124 &banks->nvm_ptr);
3125 if (status) {
3126 return status;
3127 }
3128
3129 status = ixgbe_read_sr_area_size(hw, E610_SR_NVM_BANK_SIZE,
3130 &banks->nvm_size);
3131 if (status) {
3132 return status;
3133 }
3134
3135 status = ixgbe_read_sr_pointer(hw, E610_SR_1ST_OROM_BANK_PTR,
3136 &banks->orom_ptr);
3137 if (status) {
3138 return status;
3139 }
3140
3141 status = ixgbe_read_sr_area_size(hw, E610_SR_OROM_BANK_SIZE,
3142 &banks->orom_size);
3143 if (status) {
3144 return status;
3145 }
3146
3147 status = ixgbe_read_sr_pointer(hw, E610_SR_NETLIST_BANK_PTR,
3148 &banks->netlist_ptr);
3149 if (status) {
3150 return status;
3151 }
3152
3153 status = ixgbe_read_sr_area_size(hw, E610_SR_NETLIST_BANK_SIZE,
3154 &banks->netlist_size);
3155 if (status) {
3156 return status;
3157 }
3158
3159 return IXGBE_SUCCESS;
3160 }
3161
3162 /**
3163 * ixgbe_init_nvm - initializes NVM setting
3164 * @hw: pointer to the HW struct
3165 *
3166 * Read and populate NVM settings such as Shadow RAM size,
3167 * max_timeout, and blank_nvm_mode
3168 *
3169 * Return: the exit code of the operation.
3170 */
ixgbe_init_nvm(struct ixgbe_hw * hw)3171 s32 ixgbe_init_nvm(struct ixgbe_hw *hw)
3172 {
3173 struct ixgbe_flash_info *flash = &hw->flash;
3174 u32 fla, gens_stat, status;
3175 u8 sr_size;
3176
3177 /* The SR size is stored regardless of the NVM programming mode
3178 * as the blank mode may be used in the factory line.
3179 */
3180 gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
3181 sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >> GLNVM_GENS_SR_SIZE_S;
3182
3183 /* Switching to words (sr_size contains power of 2) */
3184 flash->sr_words = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
3185
3186 /* Check if we are in the normal or blank NVM programming mode */
3187 fla = IXGBE_READ_REG(hw, GLNVM_FLA);
3188 if (fla & GLNVM_FLA_LOCKED_M) { /* Normal programming mode */
3189 flash->blank_nvm_mode = false;
3190 } else {
3191 /* Blank programming mode */
3192 flash->blank_nvm_mode = true;
3193 return IXGBE_ERR_NVM_BLANK_MODE;
3194 }
3195
3196 status = ixgbe_discover_flash_size(hw);
3197 if (status) {
3198 return status;
3199 }
3200
3201 status = ixgbe_determine_active_flash_banks(hw);
3202 if (status) {
3203 return status;
3204 }
3205
3206 status = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3207 &flash->nvm);
3208 if (status) {
3209 return status;
3210 }
3211
3212 /* read the netlist version information */
3213 status = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3214 &flash->netlist);
3215
3216 return IXGBE_SUCCESS;
3217 }
3218
3219 /**
3220 * ixgbe_sanitize_operate - Clear the user data
3221 * @hw: pointer to the HW struct
3222 *
3223 * Clear user data from NVM using ACI command (0x070C).
3224 *
3225 * Return: the exit code of the operation.
3226 */
ixgbe_sanitize_operate(struct ixgbe_hw * hw)3227 s32 ixgbe_sanitize_operate(struct ixgbe_hw *hw)
3228 {
3229 s32 status;
3230 u8 values;
3231
3232 u8 cmd_flags = IXGBE_ACI_SANITIZE_REQ_OPERATE |
3233 IXGBE_ACI_SANITIZE_OPERATE_SUBJECT_CLEAR;
3234
3235 status = ixgbe_sanitize_nvm(hw, cmd_flags, &values);
3236 if (status)
3237 return status;
3238 if ((!(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3239 !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE)) ||
3240 ((values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_DONE) &&
3241 !(values & IXGBE_ACI_SANITIZE_OPERATE_HOST_CLEAN_SUCCESS)) ||
3242 ((values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_DONE) &&
3243 !(values & IXGBE_ACI_SANITIZE_OPERATE_BMC_CLEAN_SUCCESS)))
3244 return IXGBE_ERR_ACI_ERROR;
3245
3246 return IXGBE_SUCCESS;
3247 }
3248
3249 /**
3250 * ixgbe_sanitize_nvm - Sanitize NVM
3251 * @hw: pointer to the HW struct
3252 * @cmd_flags: flag to the ACI command
3253 * @values: values returned from the command
3254 *
3255 * Sanitize NVM using ACI command (0x070C).
3256 *
3257 * Return: the exit code of the operation.
3258 */
ixgbe_sanitize_nvm(struct ixgbe_hw * hw,u8 cmd_flags,u8 * values)3259 s32 ixgbe_sanitize_nvm(struct ixgbe_hw *hw, u8 cmd_flags, u8 *values)
3260 {
3261 struct ixgbe_aci_desc desc;
3262 struct ixgbe_aci_cmd_nvm_sanitization *cmd;
3263 s32 status;
3264
3265 cmd = &desc.params.nvm_sanitization;
3266 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_sanitization);
3267 cmd->cmd_flags = cmd_flags;
3268
3269 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3270 if (values)
3271 *values = cmd->values;
3272
3273 return status;
3274 }
3275
3276 /**
3277 * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
3278 * @hw: pointer to the HW structure
3279 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3280 * @data: word read from the Shadow RAM
3281 *
3282 * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
3283 *
3284 * Return: the exit code of the operation.
3285 */
ixgbe_read_sr_word_aci(struct ixgbe_hw * hw,u16 offset,u16 * data)3286 s32 ixgbe_read_sr_word_aci(struct ixgbe_hw *hw, u16 offset, u16 *data)
3287 {
3288 u32 bytes = sizeof(u16);
3289 __le16 data_local;
3290 s32 status;
3291
3292 status = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
3293 (u8 *)&data_local, true);
3294 if (status)
3295 return status;
3296
3297 *data = IXGBE_LE16_TO_CPU(data_local);
3298 return IXGBE_SUCCESS;
3299 }
3300
3301 /**
3302 * ixgbe_read_sr_buf_aci - Reads Shadow RAM buf via ACI
3303 * @hw: pointer to the HW structure
3304 * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3305 * @words: (in) number of words to read; (out) number of words actually read
3306 * @data: words read from the Shadow RAM
3307 *
3308 * Reads 16 bit words (data buf) from the Shadow RAM. Ownership of the NVM is
3309 * taken before reading the buffer and later released.
3310 *
3311 * Return: the exit code of the operation.
3312 */
ixgbe_read_sr_buf_aci(struct ixgbe_hw * hw,u16 offset,u16 * words,u16 * data)3313 s32 ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
3314 u16 *data)
3315 {
3316 u32 bytes = *words * 2, i;
3317 s32 status;
3318
3319 status = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
3320
3321 *words = bytes / 2;
3322
3323 for (i = 0; i < *words; i++)
3324 data[i] = IXGBE_LE16_TO_CPU(((__le16 *)data)[i]);
3325
3326 return status;
3327 }
3328
3329 /**
3330 * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
3331 * @hw: pointer to the HW struct
3332 * @offset: offset from beginning of NVM
3333 * @length: (in) number of bytes to read; (out) number of bytes actually read
3334 * @data: buffer to return data in (sized to fit the specified length)
3335 * @read_shadow_ram: if true, read from shadow RAM instead of NVM
3336 *
3337 * Reads a portion of the NVM, as a flat memory space. This function correctly
3338 * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
3339 * from being exceeded in case of Shadow RAM read requests and ensures that no
3340 * single read request exceeds the maximum 4KB read for a single admin command.
3341 *
3342 * Returns a status code on failure. Note that the data pointer may be
3343 * partially updated if some reads succeed before a failure.
3344 *
3345 * Return: the exit code of the operation.
3346 */
ixgbe_read_flat_nvm(struct ixgbe_hw * hw,u32 offset,u32 * length,u8 * data,bool read_shadow_ram)3347 s32 ixgbe_read_flat_nvm(struct ixgbe_hw *hw, u32 offset, u32 *length,
3348 u8 *data, bool read_shadow_ram)
3349 {
3350 u32 inlen = *length;
3351 u32 bytes_read = 0;
3352 bool last_cmd;
3353 s32 status;
3354
3355 *length = 0;
3356
3357 /* Verify the length of the read if this is for the Shadow RAM */
3358 if (read_shadow_ram && ((offset + inlen) >
3359 (hw->eeprom.word_size * 2u))) {
3360 return IXGBE_ERR_PARAM;
3361 }
3362
3363 do {
3364 u32 read_size, sector_offset;
3365
3366 /* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
3367 * Additionally, a read from the Shadow RAM may not cross over
3368 * a sector boundary. Conveniently, the sector size is also 4KB.
3369 */
3370 sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
3371 read_size = MIN_T(u32,
3372 IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
3373 inlen - bytes_read);
3374
3375 last_cmd = !(bytes_read + read_size < inlen);
3376
3377 /* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
3378 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
3379 * maximum size guarantees that it will fit within the 2 bytes.
3380 */
3381 status = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
3382 offset, (u16)read_size,
3383 data + bytes_read, last_cmd,
3384 read_shadow_ram);
3385 if (status)
3386 break;
3387
3388 bytes_read += read_size;
3389 offset += read_size;
3390 } while (!last_cmd);
3391
3392 *length = bytes_read;
3393 return status;
3394 }
3395
3396 /**
3397 * ixgbe_check_sr_access_params - verify params for Shadow RAM R/W operations.
3398 * @hw: pointer to the HW structure
3399 * @offset: offset in words from module start
3400 * @words: number of words to access
3401 *
3402 * Check if all the parameters are valid
3403 * before performing any Shadow RAM read/write operations.
3404 *
3405 * Return: the exit code of the operation.
3406 * * - IXGBE_SUCCESS - success.
3407 * * - IXGBE_ERR_PARAM - NVM error: offset beyond SR limit or
3408 * NVM error: tried to access more words then the set limit or
3409 * NVM error: cannot spread over two sectors.
3410 */
ixgbe_check_sr_access_params(struct ixgbe_hw * hw,u32 offset,u16 words)3411 static s32 ixgbe_check_sr_access_params(struct ixgbe_hw *hw, u32 offset,
3412 u16 words)
3413 {
3414 if ((offset + words) > hw->eeprom.word_size) {
3415 return IXGBE_ERR_PARAM;
3416 }
3417
3418 if (words > IXGBE_SR_SECTOR_SIZE_IN_WORDS) {
3419 /* We can access only up to 4KB (one sector),
3420 * in one Admin Command write
3421 */
3422 return IXGBE_ERR_PARAM;
3423 }
3424
3425 if (((offset + (words - 1)) / IXGBE_SR_SECTOR_SIZE_IN_WORDS) !=
3426 (offset / IXGBE_SR_SECTOR_SIZE_IN_WORDS)) {
3427 /* A single access cannot spread over two sectors */
3428 return IXGBE_ERR_PARAM;
3429 }
3430
3431 return IXGBE_SUCCESS;
3432 }
3433
3434 /**
3435 * ixgbe_write_sr_word_aci - Writes Shadow RAM word
3436 * @hw: pointer to the HW structure
3437 * @offset: offset of the Shadow RAM word to write
3438 * @data: word to write to the Shadow RAM
3439 *
3440 * Writes a 16 bit word to the Shadow RAM using the admin command.
3441 * NVM ownership must be acquired before calling this function and released
3442 * by a caller. To commit SR to NVM update checksum function should be called.
3443 *
3444 * Return: the exit code of the operation.
3445 */
ixgbe_write_sr_word_aci(struct ixgbe_hw * hw,u32 offset,const u16 * data)3446 s32 ixgbe_write_sr_word_aci(struct ixgbe_hw *hw, u32 offset, const u16 *data)
3447 {
3448 __le16 data_local = IXGBE_CPU_TO_LE16(*data);
3449 s32 status;
3450
3451 status = ixgbe_check_sr_access_params(hw, offset, 1);
3452 if (!status)
3453 status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3454 BYTES_PER_WORD, &data_local,
3455 false, 0);
3456
3457 return status;
3458 }
3459
3460 /**
3461 * ixgbe_write_sr_buf_aci - Writes Shadow RAM buf
3462 * @hw: pointer to the HW structure
3463 * @offset: offset of the Shadow RAM buffer to write
3464 * @words: number of words to write
3465 * @data: words to write to the Shadow RAM
3466 *
3467 * Writes a 16 bit word to the Shadow RAM using the admin command.
3468 * NVM ownership must be acquired before calling this function and released
3469 * by a caller. To commit SR to NVM update checksum function should be called.
3470 *
3471 * Return: the exit code of the operation.
3472 */
ixgbe_write_sr_buf_aci(struct ixgbe_hw * hw,u32 offset,u16 words,const u16 * data)3473 s32 ixgbe_write_sr_buf_aci(struct ixgbe_hw *hw, u32 offset, u16 words,
3474 const u16 *data)
3475 {
3476 __le16 *data_local;
3477 s32 status;
3478 void *vmem;
3479 u32 i;
3480
3481 vmem = ixgbe_calloc(hw, words, sizeof(u16));
3482 if (!vmem)
3483 return IXGBE_ERR_OUT_OF_MEM;
3484 data_local = (__le16 *)vmem;
3485
3486 for (i = 0; i < words; i++)
3487 data_local[i] = IXGBE_CPU_TO_LE16(data[i]);
3488
3489 /* Here we will only write one buffer as the size of the modules
3490 * mirrored in the Shadow RAM is always less than 4K.
3491 */
3492 status = ixgbe_check_sr_access_params(hw, offset, words);
3493 if (!status)
3494 status = ixgbe_aci_update_nvm(hw, 0, BYTES_PER_WORD * offset,
3495 BYTES_PER_WORD * words,
3496 data_local, false, 0);
3497
3498 ixgbe_free(hw, vmem);
3499
3500 return status;
3501 }
3502
3503 /**
3504 * ixgbe_aci_alternate_write - write to alternate structure
3505 * @hw: pointer to the hardware structure
3506 * @reg_addr0: address of first dword to be written
3507 * @reg_val0: value to be written under 'reg_addr0'
3508 * @reg_addr1: address of second dword to be written
3509 * @reg_val1: value to be written under 'reg_addr1'
3510 *
3511 * Write one or two dwords to alternate structure using ACI command (0x0900).
3512 * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3513 *
3514 * Return: 0 on success and error code on failure.
3515 */
ixgbe_aci_alternate_write(struct ixgbe_hw * hw,u32 reg_addr0,u32 reg_val0,u32 reg_addr1,u32 reg_val1)3516 s32 ixgbe_aci_alternate_write(struct ixgbe_hw *hw, u32 reg_addr0,
3517 u32 reg_val0, u32 reg_addr1, u32 reg_val1)
3518 {
3519 struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3520 struct ixgbe_aci_desc desc;
3521 s32 status;
3522
3523 cmd = &desc.params.read_write_alt_direct;
3524
3525 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_write_alt_direct);
3526 cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3527 cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3528 cmd->dword0_value = IXGBE_CPU_TO_LE32(reg_val0);
3529 cmd->dword1_value = IXGBE_CPU_TO_LE32(reg_val1);
3530
3531 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3532
3533 return status;
3534 }
3535
3536 /**
3537 * ixgbe_aci_alternate_read - read from alternate structure
3538 * @hw: pointer to the hardware structure
3539 * @reg_addr0: address of first dword to be read
3540 * @reg_val0: pointer for data read from 'reg_addr0'
3541 * @reg_addr1: address of second dword to be read
3542 * @reg_val1: pointer for data read from 'reg_addr1'
3543 *
3544 * Read one or two dwords from alternate structure using ACI command (0x0902).
3545 * Fields are indicated by 'reg_addr0' and 'reg_addr1' register numbers.
3546 * If 'reg_val1' pointer is not passed then only register at 'reg_addr0'
3547 * is read.
3548 *
3549 * Return: 0 on success and error code on failure.
3550 */
ixgbe_aci_alternate_read(struct ixgbe_hw * hw,u32 reg_addr0,u32 * reg_val0,u32 reg_addr1,u32 * reg_val1)3551 s32 ixgbe_aci_alternate_read(struct ixgbe_hw *hw, u32 reg_addr0,
3552 u32 *reg_val0, u32 reg_addr1, u32 *reg_val1)
3553 {
3554 struct ixgbe_aci_cmd_read_write_alt_direct *cmd;
3555 struct ixgbe_aci_desc desc;
3556 s32 status;
3557
3558 cmd = &desc.params.read_write_alt_direct;
3559
3560 if (!reg_val0)
3561 return IXGBE_ERR_PARAM;
3562
3563 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_read_alt_direct);
3564 cmd->dword0_addr = IXGBE_CPU_TO_LE32(reg_addr0);
3565 cmd->dword1_addr = IXGBE_CPU_TO_LE32(reg_addr1);
3566
3567 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3568
3569 if (status == IXGBE_SUCCESS) {
3570 *reg_val0 = IXGBE_LE32_TO_CPU(cmd->dword0_value);
3571
3572 if (reg_val1)
3573 *reg_val1 = IXGBE_LE32_TO_CPU(cmd->dword1_value);
3574 }
3575
3576 return status;
3577 }
3578
3579 /**
3580 * ixgbe_aci_alternate_write_done - check if writing to alternate structure
3581 * is done
3582 * @hw: pointer to the HW structure.
3583 * @bios_mode: indicates whether the command is executed by UEFI or legacy BIOS
3584 * @reset_needed: indicates the SW should trigger GLOBAL reset
3585 *
3586 * Indicates to the FW that alternate structures have been changed.
3587 *
3588 * Return: 0 on success and error code on failure.
3589 */
ixgbe_aci_alternate_write_done(struct ixgbe_hw * hw,u8 bios_mode,bool * reset_needed)3590 s32 ixgbe_aci_alternate_write_done(struct ixgbe_hw *hw, u8 bios_mode,
3591 bool *reset_needed)
3592 {
3593 struct ixgbe_aci_cmd_done_alt_write *cmd;
3594 struct ixgbe_aci_desc desc;
3595 s32 status;
3596
3597 cmd = &desc.params.done_alt_write;
3598
3599 if (!reset_needed)
3600 return IXGBE_ERR_PARAM;
3601
3602 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_done_alt_write);
3603 cmd->flags = bios_mode;
3604
3605 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3606 if (!status)
3607 *reset_needed = (IXGBE_LE16_TO_CPU(cmd->flags) &
3608 IXGBE_ACI_RESP_RESET_NEEDED) != 0;
3609
3610 return status;
3611 }
3612
3613 /**
3614 * ixgbe_aci_alternate_clear - clear alternate structure
3615 * @hw: pointer to the HW structure.
3616 *
3617 * Clear the alternate structures of the port from which the function
3618 * is called.
3619 *
3620 * Return: 0 on success and error code on failure.
3621 */
ixgbe_aci_alternate_clear(struct ixgbe_hw * hw)3622 s32 ixgbe_aci_alternate_clear(struct ixgbe_hw *hw)
3623 {
3624 struct ixgbe_aci_desc desc;
3625 s32 status;
3626
3627 ixgbe_fill_dflt_direct_cmd_desc(&desc,
3628 ixgbe_aci_opc_clear_port_alt_write);
3629
3630 status = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3631
3632 return status;
3633 }
3634
3635 /**
3636 * ixgbe_aci_get_internal_data - get internal FW/HW data
3637 * @hw: pointer to the hardware structure
3638 * @cluster_id: specific cluster to dump
3639 * @table_id: table ID within cluster
3640 * @start: index of line in the block to read
3641 * @buf: dump buffer
3642 * @buf_size: dump buffer size
3643 * @ret_buf_size: return buffer size (returned by FW)
3644 * @ret_next_cluster: next cluster to read (returned by FW)
3645 * @ret_next_table: next block to read (returned by FW)
3646 * @ret_next_index: next index to read (returned by FW)
3647 *
3648 * Get internal FW/HW data using ACI command (0xFF08) for debug purposes.
3649 *
3650 * Return: the exit code of the operation.
3651 */
ixgbe_aci_get_internal_data(struct ixgbe_hw * hw,u16 cluster_id,u16 table_id,u32 start,void * buf,u16 buf_size,u16 * ret_buf_size,u16 * ret_next_cluster,u16 * ret_next_table,u32 * ret_next_index)3652 s32 ixgbe_aci_get_internal_data(struct ixgbe_hw *hw, u16 cluster_id,
3653 u16 table_id, u32 start, void *buf,
3654 u16 buf_size, u16 *ret_buf_size,
3655 u16 *ret_next_cluster, u16 *ret_next_table,
3656 u32 *ret_next_index)
3657 {
3658 struct ixgbe_aci_cmd_debug_dump_internals *cmd;
3659 struct ixgbe_aci_desc desc;
3660 s32 status;
3661
3662 cmd = &desc.params.debug_dump;
3663
3664 if (buf_size == 0 || !buf)
3665 return IXGBE_ERR_PARAM;
3666
3667 ixgbe_fill_dflt_direct_cmd_desc(&desc,
3668 ixgbe_aci_opc_debug_dump_internals);
3669
3670 cmd->cluster_id = IXGBE_CPU_TO_LE16(cluster_id);
3671 cmd->table_id = IXGBE_CPU_TO_LE16(table_id);
3672 cmd->idx = IXGBE_CPU_TO_LE32(start);
3673
3674 status = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
3675
3676 if (!status) {
3677 if (ret_buf_size)
3678 *ret_buf_size = IXGBE_LE16_TO_CPU(desc.datalen);
3679 if (ret_next_cluster)
3680 *ret_next_cluster = IXGBE_LE16_TO_CPU(cmd->cluster_id);
3681 if (ret_next_table)
3682 *ret_next_table = IXGBE_LE16_TO_CPU(cmd->table_id);
3683 if (ret_next_index)
3684 *ret_next_index = IXGBE_LE32_TO_CPU(cmd->idx);
3685 }
3686
3687 return status;
3688 }
3689
3690 /**
3691 * ixgbe_validate_nvm_rw_reg - Check that an NVM access request is valid
3692 * @cmd: NVM access command structure
3693 *
3694 * Validates that an NVM access structure is request to read or write a valid
3695 * register offset. First validates that the module and flags are correct, and
3696 * then ensures that the register offset is one of the accepted registers.
3697 *
3698 * Return: 0 if the register access is valid, out of range error code otherwise.
3699 */
3700 static s32
ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd * cmd)3701 ixgbe_validate_nvm_rw_reg(struct ixgbe_nvm_access_cmd *cmd)
3702 {
3703 u16 i;
3704
3705 switch (cmd->offset) {
3706 case GL_HICR:
3707 case GL_HICR_EN: /* Note, this register is read only */
3708 case GL_FWSTS:
3709 case GL_MNG_FWSM:
3710 case GLNVM_GENS:
3711 case GLNVM_FLA:
3712 case GL_FWRESETCNT:
3713 return 0;
3714 default:
3715 break;
3716 }
3717
3718 for (i = 0; i <= GL_HIDA_MAX_INDEX; i++)
3719 if (cmd->offset == (u32)GL_HIDA(i))
3720 return 0;
3721
3722 for (i = 0; i <= GL_HIBA_MAX_INDEX; i++)
3723 if (cmd->offset == (u32)GL_HIBA(i))
3724 return 0;
3725
3726 /* All other register offsets are not valid */
3727 return IXGBE_ERR_OUT_OF_RANGE;
3728 }
3729
3730 /**
3731 * ixgbe_nvm_access_read - Handle an NVM read request
3732 * @hw: pointer to the HW struct
3733 * @cmd: NVM access command to process
3734 * @data: storage for the register value read
3735 *
3736 * Process an NVM access request to read a register.
3737 *
3738 * Return: 0 if the register read is valid and successful,
3739 * out of range error code otherwise.
3740 */
ixgbe_nvm_access_read(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3741 static s32 ixgbe_nvm_access_read(struct ixgbe_hw *hw,
3742 struct ixgbe_nvm_access_cmd *cmd,
3743 struct ixgbe_nvm_access_data *data)
3744 {
3745 s32 status;
3746
3747 /* Always initialize the output data, even on failure */
3748 memset(&data->regval, 0, cmd->data_size);
3749
3750 /* Make sure this is a valid read/write access request */
3751 status = ixgbe_validate_nvm_rw_reg(cmd);
3752 if (status)
3753 return status;
3754
3755 DEBUGOUT1("NVM access: reading register %08x\n", cmd->offset);
3756
3757 /* Read the register and store the contents in the data field */
3758 data->regval = IXGBE_READ_REG(hw, cmd->offset);
3759
3760 return 0;
3761 }
3762
3763 /**
3764 * ixgbe_nvm_access_write - Handle an NVM write request
3765 * @hw: pointer to the HW struct
3766 * @cmd: NVM access command to process
3767 * @data: NVM access data to write
3768 *
3769 * Process an NVM access request to write a register.
3770 *
3771 * Return: 0 if the register write is valid and successful,
3772 * out of range error code otherwise.
3773 */
ixgbe_nvm_access_write(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3774 static s32 ixgbe_nvm_access_write(struct ixgbe_hw *hw,
3775 struct ixgbe_nvm_access_cmd *cmd,
3776 struct ixgbe_nvm_access_data *data)
3777 {
3778 s32 status;
3779
3780 /* Make sure this is a valid read/write access request */
3781 status = ixgbe_validate_nvm_rw_reg(cmd);
3782 if (status)
3783 return status;
3784
3785 /* Reject requests to write to read-only registers */
3786 switch (cmd->offset) {
3787 case GL_HICR_EN:
3788 return IXGBE_ERR_OUT_OF_RANGE;
3789 default:
3790 break;
3791 }
3792
3793 DEBUGOUT2("NVM access: writing register %08x with value %08x\n",
3794 cmd->offset, data->regval);
3795
3796 /* Write the data field to the specified register */
3797 IXGBE_WRITE_REG(hw, cmd->offset, data->regval);
3798
3799 return 0;
3800 }
3801
3802 /**
3803 * ixgbe_handle_nvm_access - Handle an NVM access request
3804 * @hw: pointer to the HW struct
3805 * @cmd: NVM access command info
3806 * @data: pointer to read or return data
3807 *
3808 * Process an NVM access request. Read the command structure information and
3809 * determine if it is valid. If not, report an error indicating the command
3810 * was invalid.
3811 *
3812 * For valid commands, perform the necessary function, copying the data into
3813 * the provided data buffer.
3814 *
3815 * Return: 0 if the nvm access request is valid and successful,
3816 * error code otherwise.
3817 */
ixgbe_handle_nvm_access(struct ixgbe_hw * hw,struct ixgbe_nvm_access_cmd * cmd,struct ixgbe_nvm_access_data * data)3818 s32 ixgbe_handle_nvm_access(struct ixgbe_hw *hw,
3819 struct ixgbe_nvm_access_cmd *cmd,
3820 struct ixgbe_nvm_access_data *data)
3821 {
3822 switch (cmd->command) {
3823 case IXGBE_NVM_CMD_READ:
3824 return ixgbe_nvm_access_read(hw, cmd, data);
3825 case IXGBE_NVM_CMD_WRITE:
3826 return ixgbe_nvm_access_write(hw, cmd, data);
3827 default:
3828 return IXGBE_ERR_PARAM;
3829 }
3830 }
3831
3832 /**
3833 * ixgbe_aci_set_health_status_config - Configure FW health events
3834 * @hw: pointer to the HW struct
3835 * @event_source: type of diagnostic events to enable
3836 *
3837 * Configure the health status event types that the firmware will send to this
3838 * PF using ACI command (0xFF20). The supported event types are: PF-specific,
3839 * all PFs, and global.
3840 *
3841 * Return: the exit code of the operation.
3842 */
ixgbe_aci_set_health_status_config(struct ixgbe_hw * hw,u8 event_source)3843 s32 ixgbe_aci_set_health_status_config(struct ixgbe_hw *hw, u8 event_source)
3844 {
3845 struct ixgbe_aci_cmd_set_health_status_config *cmd;
3846 struct ixgbe_aci_desc desc;
3847
3848 cmd = &desc.params.set_health_status_config;
3849
3850 ixgbe_fill_dflt_direct_cmd_desc(&desc,
3851 ixgbe_aci_opc_set_health_status_config);
3852
3853 cmd->event_source = event_source;
3854
3855 return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3856 }
3857
3858 /**
3859 * ixgbe_init_ops_E610 - Inits func ptrs and MAC type
3860 * @hw: pointer to hardware structure
3861 *
3862 * Initialize the function pointers and assign the MAC type for E610.
3863 * Does not touch the hardware.
3864 *
3865 * Return: the exit code of the operation.
3866 */
s32 ixgbe_init_ops_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	/* Start from the X550 function table; everything below overrides
	 * the entries that differ for E610. Note: the X550 init status is
	 * propagated to the caller, but the overrides are applied
	 * unconditionally. */
	ret_val = ixgbe_init_ops_X550(hw);

	/* MAC */
	mac->ops.reset_hw = ixgbe_reset_hw_E610;
	mac->ops.start_hw = ixgbe_start_hw_E610;
	mac->ops.get_media_type = ixgbe_get_media_type_E610;
	mac->ops.get_supported_physical_layer =
		ixgbe_get_supported_physical_layer_E610;
	/* SAN MAC / WWN ops are not provided for E610 */
	mac->ops.get_san_mac_addr = NULL;
	mac->ops.set_san_mac_addr = NULL;
	mac->ops.get_wwn_prefix = NULL;
	mac->ops.setup_link = ixgbe_setup_link_E610;
	mac->ops.check_link = ixgbe_check_link_E610;
	mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_E610;
	mac->ops.setup_fc = ixgbe_setup_fc_E610;
	mac->ops.fc_autoneg = ixgbe_fc_autoneg_E610;
	mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_E610;
	mac->ops.disable_rx = ixgbe_disable_rx_E610;
	mac->ops.setup_eee = ixgbe_setup_eee_E610;
	/* FW mode queries (recovery/rollback/TSAM) are E610-specific */
	mac->ops.fw_recovery_mode = ixgbe_fw_recovery_mode_E610;
	mac->ops.fw_rollback_mode = ixgbe_fw_rollback_mode_E610;
	mac->ops.get_fw_tsam_mode = ixgbe_get_fw_tsam_mode_E610;
	mac->ops.get_fw_version = ixgbe_aci_get_fw_ver;
	mac->ops.get_nvm_version = ixgbe_get_active_nvm_ver;
	/* Thermal sensor ops from X550 are not used on E610 */
	mac->ops.get_thermal_sensor_data = NULL;
	mac->ops.init_thermal_sensor_thresh = NULL;

	/* PHY */
	phy->ops.init = ixgbe_init_phy_ops_E610;
	phy->ops.identify = ixgbe_identify_phy_E610;

	/* EEE speed set depends on the SKU: the 2.5G SKU advertises only
	 * 2.5G, all other E610 devices also get 5G and 10G. */
	if (hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
		phy->eee_speeds_supported = IXGBE_LINK_SPEED_2_5GB_FULL;
	else
		phy->eee_speeds_supported = IXGBE_LINK_SPEED_2_5GB_FULL |
					    IXGBE_LINK_SPEED_5GB_FULL |
					    IXGBE_LINK_SPEED_10GB_FULL;

	phy->eee_speeds_advertised = phy->eee_speeds_supported;

	/* Additional ops overrides for e610 to go here.
	 * EEPROM access goes through the ACI on E610. */
	eeprom->ops.init_params = ixgbe_init_eeprom_params_E610;
	eeprom->ops.read = ixgbe_read_ee_aci_E610;
	eeprom->ops.read_buffer = ixgbe_read_ee_aci_buffer_E610;
	eeprom->ops.write = ixgbe_write_ee_aci_E610;
	eeprom->ops.write_buffer = ixgbe_write_ee_aci_buffer_E610;
	eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_E610;
	eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_E610;
	eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_E610;
	eeprom->ops.read_pba_string = ixgbe_read_pba_string_E610;

	/* Initialize bus function number */
	hw->mac.ops.set_lan_id(hw);

	return ret_val;
}
3930
3931 /**
3932 * ixgbe_reset_hw_E610 - Perform hardware reset
3933 * @hw: pointer to hardware structure
3934 *
3935 * Resets the hardware by resetting the transmit and receive units, masks
3936 * and clears all interrupts, and perform a reset.
3937 *
3938 * Return: the exit code of the operation.
3939 */
s32 ixgbe_reset_hw_E610(struct ixgbe_hw *hw)
{
	u32 swfw_mask = hw->phy.phy_semaphore_mask;
	u32 ctrl, i;
	s32 status;

	DEBUGFUNC("ixgbe_reset_hw_E610");

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != IXGBE_SUCCESS)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops init failure is logged but deliberately not fatal;
	 * status is overwritten by the semaphore acquisition below. */
	status = hw->phy.ops.init(hw);
	if (status != IXGBE_SUCCESS)
		DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n",
			  status);
mac_reset_top:
	/* Hold the SW/FW semaphore while toggling the reset bit so the
	 * register update does not race other agents. */
	status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
	if (status != IXGBE_SUCCESS) {
		ERROR_REPORT2(IXGBE_ERROR_CAUTION,
			      "semaphore failed with %d", status);
		return IXGBE_ERR_SWFW_SYNC;
	}
	/* Read-modify-write CTRL to set the reset bit, then flush */
	ctrl = IXGBE_CTRL_RST;
	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);
	hw->mac.ops.release_swfw_sync(hw, swfw_mask);

	/* Poll for reset bit to self-clear indicating reset is complete
	 * (up to 10 x 1 us). */
	for (i = 0; i < 10; i++) {
		usec_delay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		/* Failure is recorded but the function keeps going and
		 * returns this status at the end. */
		status = IXGBE_ERR_RESET_FAILED;
		ERROR_REPORT1(IXGBE_ERROR_POLLING,
			      "Reset polling failed to complete.\n");
	}
	msec_delay(100);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions. Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/* Set the Rx packet buffer size. */
	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table. Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

reset_hw_out:
	return status;
}
4015
4016 /**
4017 * ixgbe_start_hw_E610 - Prepare hardware for Tx/Rx
4018 * @hw: pointer to hardware structure
4019 *
 * Gets the firmware version and, if the API version matches,
 * starts the hardware using the generic start_hw function
 * and the generation-specific (gen2) start_hw function.
4023 * Then performs revision-specific operations, if any.
4024 **/
ixgbe_start_hw_E610(struct ixgbe_hw * hw)4025 s32 ixgbe_start_hw_E610(struct ixgbe_hw *hw)
4026 {
4027 s32 ret_val = IXGBE_SUCCESS;
4028
4029 ret_val = hw->mac.ops.get_fw_version(hw);
4030 if (ret_val)
4031 goto out;
4032
4033 ret_val = ixgbe_start_hw_generic(hw);
4034 if (ret_val != IXGBE_SUCCESS)
4035 goto out;
4036
4037 ixgbe_start_hw_gen2(hw);
4038
4039 out:
4040 return ret_val;
4041 }
4042
4043 /**
4044 * ixgbe_get_media_type_E610 - Gets media type
4045 * @hw: pointer to the HW struct
4046 *
4047 * In order to get the media type, the function gets PHY
 * capabilities and later uses them to identify the PHY type
4049 * checking phy_type_high and phy_type_low.
4050 *
4051 * Return: the type of media in form of ixgbe_media_type enum
4052 * or ixgbe_media_type_unknown in case of an error.
4053 */
enum ixgbe_media_type ixgbe_get_media_type_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	u64 phy_mask = 0;
	s32 rc;
	u8 i;

	/* Refresh hw->link.link_info from FW before inspecting it */
	rc = ixgbe_update_link_info(hw);
	if (rc) {
		return ixgbe_media_type_unknown;
	}

	/* If there is no link but PHY (dongle) is available SW should use
	 * Get PHY Caps admin command instead of Get Link Status, find most
	 * significant bit that is set in PHY types reported by the command
	 * and use it to discover media type.
	 */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
		/* Get PHY Capabilities */
		rc = ixgbe_aci_get_phy_caps(hw, false,
					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
					    &pcaps);
		if (rc) {
			return ixgbe_media_type_unknown;
		}

		/* NOTE(review): pcaps.phy_type_high/low are used here
		 * without IXGBE_LE64_TO_CPU, unlike in
		 * ixgbe_get_supported_physical_layer_E610 — confirm this
		 * is intended on big-endian hosts. */

		/* Check if there is some bit set in phy_type_high,
		 * scanning from the most significant bit downwards. */
		for (i = 64; i > 0; i--) {
			phy_mask = (u64)((u64)1 << (i - 1));
			if ((pcaps.phy_type_high & phy_mask) != 0) {
				/* If any bit is set treat it as PHY type */
				hw->link.link_info.phy_type_high = phy_mask;
				hw->link.link_info.phy_type_low = 0;
				break;
			}
			/* Reset so a fully-clear phy_type_high leaves
			 * phy_mask == 0 for the fallback scan below. */
			phy_mask = 0;
		}

		/* If nothing found in phy_type_high search in phy_type_low */
		if (phy_mask == 0) {
			for (i = 64; i > 0; i--) {
				phy_mask = (u64)((u64)1 << (i - 1));
				if ((pcaps.phy_type_low & phy_mask) != 0) {
					/* If any bit is set treat it as PHY type */
					hw->link.link_info.phy_type_high = 0;
					hw->link.link_info.phy_type_low = phy_mask;
					break;
				}
			}
		}

	}

	/* Based on link status or search above try to discover media type */
	hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);

	return hw->phy.media_type;
}
4113
4114 /**
4115 * ixgbe_get_supported_physical_layer_E610 - Returns physical layer type
4116 * @hw: pointer to hardware structure
4117 *
4118 * Determines physical layer capabilities of the current configuration.
4119 *
4120 * Return: the exit code of the operation.
4121 **/
ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw * hw)4122 u64 ixgbe_get_supported_physical_layer_E610(struct ixgbe_hw *hw)
4123 {
4124 u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
4125 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4126 u64 phy_type;
4127 s32 rc;
4128
4129 rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4130 &pcaps);
4131 if (rc)
4132 return IXGBE_PHYSICAL_LAYER_UNKNOWN;
4133
4134 phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_low);
4135 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_T)
4136 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
4137 if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_T)
4138 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
4139 if(phy_type & IXGBE_PHY_TYPE_LOW_100BASE_TX)
4140 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
4141 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_LR)
4142 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_LR;
4143 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_SR)
4144 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_SR;
4145 if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_KX)
4146 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
4147 if(phy_type & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1)
4148 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
4149 if(phy_type & IXGBE_PHY_TYPE_LOW_1000BASE_SX)
4150 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_SX;
4151 if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_KX)
4152 physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_KX;
4153 if(phy_type & IXGBE_PHY_TYPE_LOW_2500BASE_T)
4154 physical_layer |= IXGBE_PHYSICAL_LAYER_2500BASE_T;
4155 if(phy_type & IXGBE_PHY_TYPE_LOW_5GBASE_T)
4156 physical_layer |= IXGBE_PHYSICAL_LAYER_5000BASE_T;
4157
4158 phy_type = IXGBE_LE64_TO_CPU(pcaps.phy_type_high);
4159 if(phy_type & IXGBE_PHY_TYPE_HIGH_10BASE_T)
4160 physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T;
4161
4162 return physical_layer;
4163 }
4164
4165 /**
4166 * ixgbe_setup_link_E610 - Set up link
4167 * @hw: pointer to hardware structure
4168 * @speed: new link speed
4169 * @autoneg_wait: true when waiting for completion is needed
4170 *
4171 * Set up the link with the specified speed.
4172 *
4173 * Return: the exit code of the operation.
4174 */
ixgbe_setup_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed speed,bool autoneg_wait)4175 s32 ixgbe_setup_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
4176 bool autoneg_wait)
4177 {
4178 /* Simply request FW to perform proper PHY setup */
4179 return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
4180 }
4181
4182 /**
4183 * ixgbe_check_link_E610 - Determine link and speed status
4184 * @hw: pointer to hardware structure
4185 * @speed: pointer to link speed
4186 * @link_up: true when link is up
4187 * @link_up_wait_to_complete: bool used to wait for link up or not
4188 *
4189 * Determine if the link is up and the current link speed
4190 * using ACI command (0x0607).
4191 *
4192 * Return: the exit code of the operation.
4193 */
ixgbe_check_link_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool link_up_wait_to_complete)4194 s32 ixgbe_check_link_E610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
4195 bool *link_up, bool link_up_wait_to_complete)
4196 {
4197 s32 rc;
4198 u32 i;
4199
4200 if (!speed || !link_up)
4201 return IXGBE_ERR_PARAM;
4202
4203 /* Set get_link_info flag to ensure that fresh
4204 * link information will be obtained from FW
4205 * by sending Get Link Status admin command. */
4206 hw->link.get_link_info = true;
4207
4208 /* Update link information in adapter context. */
4209 rc = ixgbe_get_link_status(hw, link_up);
4210 if (rc)
4211 return rc;
4212
4213 /* Wait for link up if it was requested. */
4214 if (link_up_wait_to_complete && *link_up == false) {
4215 for (i = 0; i < hw->mac.max_link_up_time; i++) {
4216 msec_delay(100);
4217 hw->link.get_link_info = true;
4218 rc = ixgbe_get_link_status(hw, link_up);
4219 if (rc)
4220 return rc;
4221 if (*link_up)
4222 break;
4223 }
4224 }
4225
4226 /* Use link information in adapter context updated by the call
4227 * to ixgbe_get_link_status() to determine current link speed.
4228 * Link speed information is valid only when link up was
4229 * reported by FW. */
4230 if (*link_up) {
4231 switch (hw->link.link_info.link_speed) {
4232 case IXGBE_ACI_LINK_SPEED_10MB:
4233 *speed = IXGBE_LINK_SPEED_10_FULL;
4234 break;
4235 case IXGBE_ACI_LINK_SPEED_100MB:
4236 *speed = IXGBE_LINK_SPEED_100_FULL;
4237 break;
4238 case IXGBE_ACI_LINK_SPEED_1000MB:
4239 *speed = IXGBE_LINK_SPEED_1GB_FULL;
4240 break;
4241 case IXGBE_ACI_LINK_SPEED_2500MB:
4242 *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
4243 break;
4244 case IXGBE_ACI_LINK_SPEED_5GB:
4245 *speed = IXGBE_LINK_SPEED_5GB_FULL;
4246 break;
4247 case IXGBE_ACI_LINK_SPEED_10GB:
4248 *speed = IXGBE_LINK_SPEED_10GB_FULL;
4249 break;
4250 default:
4251 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4252 break;
4253 }
4254 } else {
4255 *speed = IXGBE_LINK_SPEED_UNKNOWN;
4256 }
4257
4258 return IXGBE_SUCCESS;
4259 }
4260
4261 /**
4262 * ixgbe_get_link_capabilities_E610 - Determine link capabilities
4263 * @hw: pointer to hardware structure
4264 * @speed: pointer to link speed
4265 * @autoneg: true when autoneg or autotry is enabled
4266 *
4267 * Determine speed and AN parameters of a link.
4268 *
4269 * Return: the exit code of the operation.
4270 */
ixgbe_get_link_capabilities_E610(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * autoneg)4271 s32 ixgbe_get_link_capabilities_E610(struct ixgbe_hw *hw,
4272 ixgbe_link_speed *speed,
4273 bool *autoneg)
4274 {
4275 if (!speed || !autoneg)
4276 return IXGBE_ERR_PARAM;
4277
4278 *autoneg = true;
4279 *speed = hw->phy.speeds_supported;
4280
4281 return IXGBE_SUCCESS;
4282 }
4283
4284 /**
4285 * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
4286 * @hw: pointer to hardware structure
4287 * @cfg: PHY configuration data to set FC mode
4288 * @req_mode: FC mode to configure
4289 *
4290 * Configures PHY Flow Control according to the provided configuration.
4291 *
4292 * Return: the exit code of the operation.
4293 */
ixgbe_cfg_phy_fc(struct ixgbe_hw * hw,struct ixgbe_aci_cmd_set_phy_cfg_data * cfg,enum ixgbe_fc_mode req_mode)4294 s32 ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
4295 struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
4296 enum ixgbe_fc_mode req_mode)
4297 {
4298 struct ixgbe_aci_cmd_get_phy_caps_data* pcaps = NULL;
4299 s32 status = IXGBE_SUCCESS;
4300 u8 pause_mask = 0x0;
4301
4302 if (!cfg)
4303 return IXGBE_ERR_PARAM;
4304
4305 switch (req_mode) {
4306 case ixgbe_fc_auto:
4307 {
4308 pcaps = (struct ixgbe_aci_cmd_get_phy_caps_data *)
4309 ixgbe_malloc(hw, sizeof(*pcaps));
4310 if (!pcaps) {
4311 status = IXGBE_ERR_OUT_OF_MEM;
4312 goto out;
4313 }
4314
4315 /* Query the value of FC that both the NIC and the attached
4316 * media can do. */
4317 status = ixgbe_aci_get_phy_caps(hw, false,
4318 IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, pcaps);
4319 if (status)
4320 goto out;
4321
4322 pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4323 pause_mask |= pcaps->caps & IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4324
4325 break;
4326 }
4327 case ixgbe_fc_full:
4328 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4329 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4330 break;
4331 case ixgbe_fc_rx_pause:
4332 pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
4333 break;
4334 case ixgbe_fc_tx_pause:
4335 pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
4336 break;
4337 default:
4338 break;
4339 }
4340
4341 /* clear the old pause settings */
4342 cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
4343 IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
4344
4345 /* set the new capabilities */
4346 cfg->caps |= pause_mask;
4347
4348 out:
4349 if (pcaps)
4350 ixgbe_free(hw, pcaps);
4351 return status;
4352 }
4353
4354 /**
4355 * ixgbe_setup_fc_E610 - Set up flow control
4356 * @hw: pointer to hardware structure
4357 *
4358 * Set up flow control. This has to be done during init time.
4359 *
4360 * Return: the exit code of the operation.
4361 */
ixgbe_setup_fc_E610(struct ixgbe_hw * hw)4362 s32 ixgbe_setup_fc_E610(struct ixgbe_hw *hw)
4363 {
4364 struct ixgbe_aci_cmd_get_phy_caps_data pcaps = { 0 };
4365 struct ixgbe_aci_cmd_set_phy_cfg_data cfg = { 0 };
4366 s32 status;
4367
4368 /* Get the current PHY config */
4369 status = ixgbe_aci_get_phy_caps(hw, false,
4370 IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
4371 if (status)
4372 return status;
4373
4374 ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
4375
4376 /* Configure the set PHY data */
4377 status = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
4378 if (status)
4379 return status;
4380
4381 /* If the capabilities have changed, then set the new config */
4382 if (cfg.caps != pcaps.caps) {
4383 cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4384
4385 status = ixgbe_aci_set_phy_cfg(hw, &cfg);
4386 if (status)
4387 return status;
4388 }
4389
4390 return status;
4391 }
4392
4393 /**
4394 * ixgbe_fc_autoneg_E610 - Configure flow control
4395 * @hw: pointer to hardware structure
4396 *
4397 * Configure Flow Control.
4398 */
ixgbe_fc_autoneg_E610(struct ixgbe_hw * hw)4399 void ixgbe_fc_autoneg_E610(struct ixgbe_hw *hw)
4400 {
4401 s32 status;
4402
4403 /* Get current link status.
4404 * Current FC mode will be stored in the hw context. */
4405 status = ixgbe_aci_get_link_info(hw, false, NULL);
4406 if (status) {
4407 goto out;
4408 }
4409
4410 /* Check if the link is up */
4411 if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP)) {
4412 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4413 goto out;
4414 }
4415
4416 /* Check if auto-negotiation has completed */
4417 if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED)) {
4418 status = IXGBE_ERR_FC_NOT_NEGOTIATED;
4419 goto out;
4420 }
4421
4422 out:
4423 if (status == IXGBE_SUCCESS) {
4424 hw->fc.fc_was_autonegged = true;
4425 } else {
4426 hw->fc.fc_was_autonegged = false;
4427 hw->fc.current_mode = hw->fc.requested_mode;
4428 }
4429 }
4430
4431 /**
4432 * ixgbe_set_fw_drv_ver_E610 - Send driver version to FW
4433 * @hw: pointer to the HW structure
4434 * @maj: driver version major number
4435 * @minor: driver version minor number
4436 * @build: driver version build number
4437 * @sub: driver version sub build number
4438 * @len: length of driver_ver string
4439 * @driver_ver: driver string
4440 *
4441 * Send driver version number to Firmware using ACI command (0x0002).
4442 *
4443 * Return: the exit code of the operation.
4444 * IXGBE_SUCCESS - OK
4445 * IXGBE_ERR_PARAM - incorrect parameters were given
4446 * IXGBE_ERR_ACI_ERROR - encountered an error during sending the command
4447 * IXGBE_ERR_ACI_TIMEOUT - a timeout occurred
4448 * IXGBE_ERR_OUT_OF_MEM - ran out of memory
4449 */
ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw * hw,u8 maj,u8 minor,u8 build,u8 sub,u16 len,const char * driver_ver)4450 s32 ixgbe_set_fw_drv_ver_E610(struct ixgbe_hw *hw, u8 maj, u8 minor, u8 build,
4451 u8 sub, u16 len, const char *driver_ver)
4452 {
4453 size_t limited_len = min(len, (u16)IXGBE_DRV_VER_STR_LEN_E610);
4454 struct ixgbe_driver_ver dv;
4455
4456 DEBUGFUNC("ixgbe_set_fw_drv_ver_E610");
4457
4458 if (!len || !driver_ver)
4459 return IXGBE_ERR_PARAM;
4460
4461 dv.major_ver = maj;
4462 dv.minor_ver = minor;
4463 dv.build_ver = build;
4464 dv.subbuild_ver = sub;
4465
4466 memset(dv.driver_string, 0, IXGBE_DRV_VER_STR_LEN_E610);
4467 memcpy(dv.driver_string, driver_ver, limited_len);
4468
4469 return ixgbe_aci_send_driver_ver(hw, &dv);
4470 }
4471
4472 /**
4473 * ixgbe_disable_rx_E610 - Disable RX unit
4474 * @hw: pointer to hardware structure
4475 *
4476 * Disable RX DMA unit on E610 with use of ACI command (0x000C).
4477 *
4478 * Return: the exit code of the operation.
4479 */
void ixgbe_disable_rx_E610(struct ixgbe_hw *hw)
{
	u32 rxctrl;

	DEBUGFUNC("ixgbe_disable_rx_E610");

	/* Nothing to do unless the Rx unit is currently enabled */
	rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	if (rxctrl & IXGBE_RXCTRL_RXEN) {
		u32 pfdtxgswc;
		s32 status;

		/* Clear the VT loopback enable bit and remember whether it
		 * was set in mac.set_lben — presumably so the enable-Rx
		 * path can restore it (TODO confirm against caller). */
		pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
		if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
			pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
			IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
			hw->mac.set_lben = true;
		} else {
			hw->mac.set_lben = false;
		}

		/* Ask FW to disable the Rx unit via ACI command (0x000C) */
		status = ixgbe_aci_disable_rxen(hw);

		/* If we fail - disable RX using register write */
		if (status) {
			rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
			if (rxctrl & IXGBE_RXCTRL_RXEN) {
				rxctrl &= ~IXGBE_RXCTRL_RXEN;
				IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
			}
		}
	}
}
4512
4513 /**
4514 * ixgbe_setup_eee_E610 - Enable/disable EEE support
4515 * @hw: pointer to the HW structure
4516 * @enable_eee: boolean flag to enable EEE
4517 *
 * Enables/disables EEE based on the enable_eee flag.
4519 *
4520 * Return: the exit code of the operation.
4521 */
ixgbe_setup_eee_E610(struct ixgbe_hw * hw,bool enable_eee)4522 s32 ixgbe_setup_eee_E610(struct ixgbe_hw *hw, bool enable_eee)
4523 {
4524 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
4525 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
4526 u16 eee_cap = 0;
4527 s32 status;
4528
4529 status = ixgbe_aci_get_phy_caps(hw, false,
4530 IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
4531 if (status != IXGBE_SUCCESS)
4532 return status;
4533
4534 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
4535
4536 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
4537 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4538
4539 /* setup only speeds which are defined for [0x0601/0x0600].eee_cap */
4540 if (enable_eee) {
4541 if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_100_FULL)
4542 eee_cap |= IXGBE_ACI_PHY_EEE_EN_100BASE_TX;
4543 if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_1GB_FULL)
4544 eee_cap |= IXGBE_ACI_PHY_EEE_EN_1000BASE_T;
4545 if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
4546 eee_cap |= IXGBE_ACI_PHY_EEE_EN_2_5GBASE_T;
4547 if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_5GB_FULL)
4548 eee_cap |= IXGBE_ACI_PHY_EEE_EN_5GBASE_T;
4549 if (hw->phy.eee_speeds_advertised & IXGBE_LINK_SPEED_10GB_FULL)
4550 eee_cap |= IXGBE_ACI_PHY_EEE_EN_10GBASE_T;
4551 }
4552
4553 /* Set EEE capability for particular PHY types */
4554 phy_cfg.eee_cap = IXGBE_CPU_TO_LE16(eee_cap);
4555
4556 status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
4557
4558 return status;
4559 }
4560
4561 /**
4562 * ixgbe_fw_recovery_mode_E610 - Check FW NVM recovery mode
4563 * @hw: pointer to hardware structure
4564 *
4565 * Checks FW NVM recovery mode by
4566 * reading the value of the dedicated register.
4567 *
4568 * Return: true if FW is in recovery mode, otherwise false.
4569 */
ixgbe_fw_recovery_mode_E610(struct ixgbe_hw * hw)4570 bool ixgbe_fw_recovery_mode_E610(struct ixgbe_hw *hw)
4571 {
4572 u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
4573
4574 return !!(fwsm & GL_MNG_FWSM_FW_MODES_RECOVERY_M);
4575 }
4576
4577 /**
4578 * ixgbe_fw_rollback_mode_E610 - Check FW NVM Rollback
4579 * @hw: pointer to hardware structure
4580 *
4581 * Checks FW NVM Rollback mode by reading the
4582 * value of the dedicated register.
4583 *
4584 * Return: true if FW is in Rollback mode, otherwise false.
4585 */
ixgbe_fw_rollback_mode_E610(struct ixgbe_hw * hw)4586 bool ixgbe_fw_rollback_mode_E610(struct ixgbe_hw *hw)
4587 {
4588 u32 fwsm = IXGBE_READ_REG(hw, GL_MNG_FWSM);
4589
4590 return !!(fwsm & GL_MNG_FWSM_FW_MODES_ROLLBACK_M);
4591 }
4592
4593 /**
4594 * ixgbe_get_fw_tsam_mode_E610 - Check FW NVM Thermal Sensor Autonomous Mode
4595 * @hw: pointer to hardware structure
4596 *
4597 * Checks Thermal Sensor Autonomous Mode by reading the
4598 * value of the dedicated register.
4599 *
4600 * Return: true if FW is in TSAM, otherwise false.
4601 */
ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw * hw)4602 bool ixgbe_get_fw_tsam_mode_E610(struct ixgbe_hw *hw)
4603 {
4604 u32 fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_X550EM_a);
4605
4606 return !!(fwsm & IXGBE_FWSM_TS_ENABLED);
4607 }
4608
4609 /**
4610 * ixgbe_init_phy_ops_E610 - PHY specific init
4611 * @hw: pointer to hardware structure
4612 *
4613 * Initialize any function pointers that were not able to be
4614 * set during init_shared_code because the PHY type was not known.
4615 *
4616 * Return: the exit code of the operation.
4617 */
s32 ixgbe_init_phy_ops_E610(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	phy->ops.identify_sfp = ixgbe_identify_module_E610;
	/* Direct PHY register and raw I2C access go through FW on E610,
	 * so the generic MDI/I2C byte ops are not used. */
	phy->ops.read_reg = NULL; /* PHY reg access is not required */
	phy->ops.write_reg = NULL;
	phy->ops.read_reg_mdi = NULL;
	phy->ops.write_reg_mdi = NULL;
	phy->ops.setup_link = ixgbe_setup_phy_link_E610;
	phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_E610;
	phy->ops.read_i2c_byte = NULL; /* disabled for E610 */
	phy->ops.write_i2c_byte = NULL; /* disabled for E610 */
	phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_E610;
	phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_E610;
	phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_E610;
	phy->ops.i2c_bus_clear = NULL; /* do not use generic implementation */
	phy->ops.check_overtemp = ixgbe_check_overtemp_E610;
	/* PHY power control is only wired up for copper media */
	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
		phy->ops.set_phy_power = ixgbe_set_phy_power_E610;
	else
		phy->ops.set_phy_power = NULL;
	phy->ops.enter_lplu = ixgbe_enter_lplu_E610;
	phy->ops.handle_lasi = NULL; /* no implementation for E610 */
	phy->ops.read_i2c_byte_unlocked = NULL; /* disabled for E610 */
	phy->ops.write_i2c_byte_unlocked = NULL; /* disabled for E610 */

	/* TODO: Set functions pointers based on device ID */

	/* Identify the PHY (sets PHY type and supported speeds) */
	ret_val = phy->ops.identify(hw);
	if (ret_val != IXGBE_SUCCESS)
		return ret_val;

	/* TODO: Set functions pointers based on PHY type */

	return ret_val;
}
4658
4659 /**
4660 * ixgbe_identify_phy_E610 - Identify PHY
4661 * @hw: pointer to hardware structure
4662 *
4663 * Determine PHY type, supported speeds and PHY ID.
4664 *
4665 * Return: the exit code of the operation.
4666 */
ixgbe_identify_phy_E610(struct ixgbe_hw * hw)4667 s32 ixgbe_identify_phy_E610(struct ixgbe_hw *hw)
4668 {
4669 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4670 s32 rc;
4671
4672 /* Set PHY type */
4673 hw->phy.type = ixgbe_phy_fw;
4674
4675 rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
4676 &pcaps);
4677 if (rc)
4678 return rc;
4679
4680 if (!(pcaps.module_compliance_enforcement &
4681 IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
4682 /* Handle lenient mode */
4683 rc = ixgbe_aci_get_phy_caps(hw, false,
4684 IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
4685 &pcaps);
4686 if (rc)
4687 return rc;
4688 }
4689
4690 /* Determine supported speeds */
4691 hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
4692
4693 if (pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
4694 pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
4695 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
4696 if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
4697 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
4698 pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
4699 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
4700 if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_T ||
4701 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
4702 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
4703 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
4704 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_1G_SGMII ||
4705 pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
4706 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
4707 if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_T ||
4708 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_DA ||
4709 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_SR ||
4710 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_LR ||
4711 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1 ||
4712 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
4713 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C ||
4714 pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
4715 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;
4716
4717 /* 2.5 and 5 Gbps link speeds must be excluded from the
4718 * auto-negotiation set used during driver initialization due to
4719 * compatibility issues with certain switches. Those issues do not
4720 * exist in case of E610 2.5G SKU device (0x57b1).
4721 */
4722 if (!hw->phy.autoneg_advertised &&
4723 hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
4724 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
4725
4726 if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_T ||
4727 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_X ||
4728 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_2500BASE_KX ||
4729 pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
4730 pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
4731 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;
4732
4733 if (!hw->phy.autoneg_advertised &&
4734 hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
4735 hw->phy.autoneg_advertised = hw->phy.speeds_supported;
4736
4737 if (pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_T ||
4738 pcaps.phy_type_low & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
4739 pcaps.phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
4740 hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;
4741
4742 /* Set PHY ID */
4743 memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));
4744
4745 return IXGBE_SUCCESS;
4746 }
4747
4748 /**
4749 * ixgbe_identify_module_E610 - Identify SFP module type
4750 * @hw: pointer to hardware structure
4751 *
4752 * Identify the SFP module type.
4753 *
4754 * Return: the exit code of the operation.
4755 */
ixgbe_identify_module_E610(struct ixgbe_hw * hw)4756 s32 ixgbe_identify_module_E610(struct ixgbe_hw *hw)
4757 {
4758 bool media_available;
4759 u8 module_type;
4760 s32 rc;
4761
4762 rc = ixgbe_update_link_info(hw);
4763 if (rc)
4764 goto err;
4765
4766 media_available =
4767 (hw->link.link_info.link_info &
4768 IXGBE_ACI_MEDIA_AVAILABLE) ? true : false;
4769
4770 if (media_available) {
4771 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
4772
4773 /* Get module type from hw context updated by ixgbe_update_link_info() */
4774 module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
4775
4776 if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
4777 (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
4778 hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
4779 } else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
4780 hw->phy.sfp_type = ixgbe_sfp_type_sr;
4781 } else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
4782 (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
4783 hw->phy.sfp_type = ixgbe_sfp_type_lr;
4784 }
4785 rc = IXGBE_SUCCESS;
4786 } else {
4787 hw->phy.sfp_type = ixgbe_sfp_type_not_present;
4788 rc = IXGBE_ERR_SFP_NOT_PRESENT;
4789 }
4790 err:
4791 return rc;
4792 }
4793
4794 /**
4795 * ixgbe_setup_phy_link_E610 - Sets up firmware-controlled PHYs
4796 * @hw: pointer to hardware structure
4797 *
4798 * Set the parameters for the firmware-controlled PHYs.
4799 *
4800 * Return: the exit code of the operation.
4801 */
ixgbe_setup_phy_link_E610(struct ixgbe_hw * hw)4802 s32 ixgbe_setup_phy_link_E610(struct ixgbe_hw *hw)
4803 {
4804 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4805 struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
4806 u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
4807 u64 sup_phy_type_low, sup_phy_type_high;
4808 s32 rc;
4809
4810 rc = ixgbe_aci_get_link_info(hw, false, NULL);
4811 if (rc) {
4812 goto err;
4813 }
4814
4815 /* If media is not available get default config */
4816 if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
4817 rmode = IXGBE_ACI_REPORT_DFLT_CFG;
4818
4819 rc = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
4820 if (rc) {
4821 goto err;
4822 }
4823
4824 sup_phy_type_low = pcaps.phy_type_low;
4825 sup_phy_type_high = pcaps.phy_type_high;
4826
4827 /* Get Active configuration to avoid unintended changes */
4828 rc = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
4829 &pcaps);
4830 if (rc) {
4831 goto err;
4832 }
4833 ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
4834
4835 /* Set default PHY types for a given speed */
4836 pcfg.phy_type_low = 0;
4837 pcfg.phy_type_high = 0;
4838
4839 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
4840 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
4841 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
4842 }
4843 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
4844 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
4845 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
4846 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
4847 }
4848 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
4849 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
4850 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
4851 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
4852 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
4853 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
4854 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
4855 }
4856 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
4857 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
4858 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
4859 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
4860 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
4861 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
4862 }
4863 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
4864 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
4865 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
4866 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
4867 }
4868 if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
4869 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
4870 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
4871 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
4872 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
4873 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
4874 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
4875 pcfg.phy_type_low |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
4876 pcfg.phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
4877 }
4878
4879 /* Mask the set values to avoid requesting unsupported link types */
4880 pcfg.phy_type_low &= sup_phy_type_low;
4881 pcfg.phy_type_high &= sup_phy_type_high;
4882
4883 if (pcfg.phy_type_high != pcaps.phy_type_high ||
4884 pcfg.phy_type_low != pcaps.phy_type_low ||
4885 pcfg.caps != pcaps.caps) {
4886 pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
4887 pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
4888
4889 rc = ixgbe_aci_set_phy_cfg(hw, &pcfg);
4890 }
4891
4892 err:
4893 return rc;
4894 }
4895
4896 /**
4897 * ixgbe_get_phy_firmware_version_E610 - Gets the PHY Firmware Version
4898 * @hw: pointer to hardware structure
4899 * @firmware_version: pointer to the PHY Firmware Version
4900 *
4901 * Determines PHY FW version based on response to Get PHY Capabilities
4902 * admin command (0x0600).
4903 *
4904 * Return: the exit code of the operation.
4905 */
ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw * hw,u16 * firmware_version)4906 s32 ixgbe_get_phy_firmware_version_E610(struct ixgbe_hw *hw,
4907 u16 *firmware_version)
4908 {
4909 struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
4910 s32 status;
4911
4912 if (!firmware_version)
4913 return IXGBE_ERR_PARAM;
4914
4915 status = ixgbe_aci_get_phy_caps(hw, false,
4916 IXGBE_ACI_REPORT_ACTIVE_CFG,
4917 &pcaps);
4918 if (status)
4919 return status;
4920
4921 /* TODO: determine which bytes of the 8-byte phy_fw_ver
4922 * field should be written to the 2-byte firmware_version
4923 * output argument. */
4924 memcpy(firmware_version, pcaps.phy_fw_ver, sizeof(u16));
4925
4926 return IXGBE_SUCCESS;
4927 }
4928
4929 /**
4930 * ixgbe_read_i2c_sff8472_E610 - Reads 8 bit word over I2C interface
4931 * @hw: pointer to hardware structure
4932 * @byte_offset: byte offset at address 0xA2
4933 * @sff8472_data: value read
4934 *
4935 * Performs byte read operation from SFP module's SFF-8472 data over I2C.
4936 *
4937 * Return: the exit code of the operation.
4938 **/
ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 * sff8472_data)4939 s32 ixgbe_read_i2c_sff8472_E610(struct ixgbe_hw *hw, u8 byte_offset,
4940 u8 *sff8472_data)
4941 {
4942 return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR2,
4943 byte_offset, 0,
4944 IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
4945 sff8472_data, 1, false);
4946 }
4947
4948 /**
4949 * ixgbe_read_i2c_eeprom_E610 - Reads 8 bit EEPROM word over I2C interface
4950 * @hw: pointer to hardware structure
4951 * @byte_offset: EEPROM byte offset to read
4952 * @eeprom_data: value read
4953 *
4954 * Performs byte read operation from SFP module's EEPROM over I2C interface.
4955 *
4956 * Return: the exit code of the operation.
4957 **/
ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 * eeprom_data)4958 s32 ixgbe_read_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
4959 u8 *eeprom_data)
4960 {
4961 return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
4962 byte_offset, 0,
4963 IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
4964 eeprom_data, 1, false);
4965 }
4966
4967 /**
4968 * ixgbe_write_i2c_eeprom_E610 - Writes 8 bit EEPROM word over I2C interface
4969 * @hw: pointer to hardware structure
4970 * @byte_offset: EEPROM byte offset to write
4971 * @eeprom_data: value to write
4972 *
4973 * Performs byte write operation to SFP module's EEPROM over I2C interface.
4974 *
4975 * Return: the exit code of the operation.
4976 **/
ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw * hw,u8 byte_offset,u8 eeprom_data)4977 s32 ixgbe_write_i2c_eeprom_E610(struct ixgbe_hw *hw, u8 byte_offset,
4978 u8 eeprom_data)
4979 {
4980 return ixgbe_aci_sff_eeprom(hw, 0, IXGBE_I2C_EEPROM_DEV_ADDR,
4981 byte_offset, 0,
4982 IXGBE_ACI_SFF_NO_PAGE_BANK_UPDATE,
4983 &eeprom_data, 1, true);
4984 }
4985
4986 /**
4987 * ixgbe_check_overtemp_E610 - Check firmware-controlled PHYs for overtemp
4988 * @hw: pointer to hardware structure
4989 *
4990 * Get the link status and check if the PHY temperature alarm detected.
4991 *
4992 * Return: the exit code of the operation.
4993 */
ixgbe_check_overtemp_E610(struct ixgbe_hw * hw)4994 s32 ixgbe_check_overtemp_E610(struct ixgbe_hw *hw)
4995 {
4996 struct ixgbe_aci_cmd_get_link_status_data link_data = { 0 };
4997 struct ixgbe_aci_cmd_get_link_status *resp;
4998 struct ixgbe_aci_desc desc;
4999 s32 status = IXGBE_SUCCESS;
5000
5001 if (!hw)
5002 return IXGBE_ERR_PARAM;
5003
5004 ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
5005 resp = &desc.params.get_link_status;
5006 resp->cmd_flags = IXGBE_CPU_TO_LE16(IXGBE_ACI_LSE_NOP);
5007
5008 status = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
5009 if (status != IXGBE_SUCCESS)
5010 return status;
5011
5012 if (link_data.ext_info & IXGBE_ACI_LINK_PHY_TEMP_ALARM) {
5013 ERROR_REPORT1(IXGBE_ERROR_CAUTION,
5014 "PHY Temperature Alarm detected");
5015 status = IXGBE_ERR_OVERTEMP;
5016 }
5017
5018 return status;
5019 }
5020
5021 /**
5022 * ixgbe_set_phy_power_E610 - Control power for copper PHY
5023 * @hw: pointer to hardware structure
5024 * @on: true for on, false for off
5025 *
5026 * Set the power on/off of the PHY
5027 * by getting its capabilities and setting the appropriate
5028 * configuration parameters.
5029 *
5030 * Return: the exit code of the operation.
5031 */
ixgbe_set_phy_power_E610(struct ixgbe_hw * hw,bool on)5032 s32 ixgbe_set_phy_power_E610(struct ixgbe_hw *hw, bool on)
5033 {
5034 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5035 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5036 s32 status;
5037
5038 status = ixgbe_aci_get_phy_caps(hw, false,
5039 IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5040 if (status != IXGBE_SUCCESS)
5041 return status;
5042
5043 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5044
5045 if (on) {
5046 phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
5047 } else {
5048 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
5049 }
5050
5051 /* PHY is already in requested power mode */
5052 if (phy_caps.caps == phy_cfg.caps)
5053 return IXGBE_SUCCESS;
5054
5055 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
5056 phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
5057
5058 status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5059
5060 return status;
5061 }
5062
5063 /**
5064 * ixgbe_enter_lplu_E610 - Transition to low power states
5065 * @hw: pointer to hardware structure
5066 *
5067 * Configures Low Power Link Up on transition to low power states
5068 * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
5069 * X557 PHY immediately prior to entering LPLU.
5070 *
5071 * Return: the exit code of the operation.
5072 */
ixgbe_enter_lplu_E610(struct ixgbe_hw * hw)5073 s32 ixgbe_enter_lplu_E610(struct ixgbe_hw *hw)
5074 {
5075 struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = { 0 };
5076 struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = { 0 };
5077 s32 status;
5078
5079 status = ixgbe_aci_get_phy_caps(hw, false,
5080 IXGBE_ACI_REPORT_ACTIVE_CFG, &phy_caps);
5081 if (status != IXGBE_SUCCESS)
5082 return status;
5083
5084 ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
5085
5086 phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
5087
5088 status = ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
5089
5090 return status;
5091 }
5092
5093 /**
5094 * ixgbe_init_eeprom_params_E610 - Initialize EEPROM params
5095 * @hw: pointer to hardware structure
5096 *
5097 * Initializes the EEPROM parameters ixgbe_eeprom_info within the
5098 * ixgbe_hw struct in order to set up EEPROM access.
5099 *
5100 * Return: the exit code of the operation.
5101 */
ixgbe_init_eeprom_params_E610(struct ixgbe_hw * hw)5102 s32 ixgbe_init_eeprom_params_E610(struct ixgbe_hw *hw)
5103 {
5104 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
5105 u32 gens_stat;
5106 u8 sr_size;
5107
5108 if (eeprom->type == ixgbe_eeprom_uninitialized) {
5109 eeprom->type = ixgbe_flash;
5110
5111 gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
5112 sr_size = (gens_stat & GLNVM_GENS_SR_SIZE_M) >>
5113 GLNVM_GENS_SR_SIZE_S;
5114
5115 /* Switching to words (sr_size contains power of 2) */
5116 eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
5117
5118 DEBUGOUT2("Eeprom params: type = %d, size = %d\n",
5119 eeprom->type, eeprom->word_size);
5120 }
5121
5122 return IXGBE_SUCCESS;
5123 }
5124
5125 /**
5126 * ixgbe_read_ee_aci_E610 - Read EEPROM word using the admin command.
5127 * @hw: pointer to hardware structure
5128 * @offset: offset of word in the EEPROM to read
5129 * @data: word read from the EEPROM
5130 *
5131 * Reads a 16 bit word from the EEPROM using the ACI.
5132 * If the EEPROM params are not initialized, the function
5133 * initialize them before proceeding with reading.
5134 * The function acquires and then releases the NVM ownership.
5135 *
5136 * Return: the exit code of the operation.
5137 */
ixgbe_read_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 * data)5138 s32 ixgbe_read_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 *data)
5139 {
5140 s32 status;
5141
5142 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5143 status = ixgbe_init_eeprom_params(hw);
5144 if (status)
5145 return status;
5146 }
5147
5148 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5149 if (status)
5150 return status;
5151
5152 status = ixgbe_read_sr_word_aci(hw, offset, data);
5153 ixgbe_release_nvm(hw);
5154
5155 return status;
5156 }
5157
5158 /**
5159 * ixgbe_read_ee_aci_buffer_E610- Read EEPROM word(s) using admin commands.
5160 * @hw: pointer to hardware structure
5161 * @offset: offset of word in the EEPROM to read
5162 * @words: number of words
5163 * @data: word(s) read from the EEPROM
5164 *
5165 * Reads a 16 bit word(s) from the EEPROM using the ACI.
5166 * If the EEPROM params are not initialized, the function
5167 * initialize them before proceeding with reading.
5168 * The function acquires and then releases the NVM ownership.
5169 *
5170 * Return: the exit code of the operation.
5171 */
ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5172 s32 ixgbe_read_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5173 u16 words, u16 *data)
5174 {
5175 s32 status;
5176
5177 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5178 status = ixgbe_init_eeprom_params(hw);
5179 if (status)
5180 return status;
5181 }
5182
5183 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5184 if (status)
5185 return status;
5186
5187 status = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
5188 ixgbe_release_nvm(hw);
5189
5190 return status;
5191 }
5192
5193 /**
5194 * ixgbe_write_ee_aci_E610 - Write EEPROM word using the admin command.
5195 * @hw: pointer to hardware structure
5196 * @offset: offset of word in the EEPROM to write
5197 * @data: word write to the EEPROM
5198 *
5199 * Write a 16 bit word to the EEPROM using the ACI.
5200 * If the EEPROM params are not initialized, the function
5201 * initialize them before proceeding with writing.
5202 * The function acquires and then releases the NVM ownership.
5203 *
5204 * Return: the exit code of the operation.
5205 */
ixgbe_write_ee_aci_E610(struct ixgbe_hw * hw,u16 offset,u16 data)5206 s32 ixgbe_write_ee_aci_E610(struct ixgbe_hw *hw, u16 offset, u16 data)
5207 {
5208 s32 status;
5209
5210 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5211 status = ixgbe_init_eeprom_params(hw);
5212 if (status)
5213 return status;
5214 }
5215
5216 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5217 if (status)
5218 return status;
5219
5220 status = ixgbe_write_sr_word_aci(hw, (u32)offset, &data);
5221 ixgbe_release_nvm(hw);
5222
5223 return status;
5224 }
5225
5226 /**
5227 * ixgbe_write_ee_aci_buffer_E610 - Write EEPROM word(s) using admin commands.
5228 * @hw: pointer to hardware structure
5229 * @offset: offset of word in the EEPROM to write
5230 * @words: number of words
5231 * @data: word(s) write to the EEPROM
5232 *
5233 * Write a 16 bit word(s) to the EEPROM using the ACI.
5234 * If the EEPROM params are not initialized, the function
5235 * initialize them before proceeding with writing.
5236 * The function acquires and then releases the NVM ownership.
5237 *
5238 * Return: the exit code of the operation.
5239 */
ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw * hw,u16 offset,u16 words,u16 * data)5240 s32 ixgbe_write_ee_aci_buffer_E610(struct ixgbe_hw *hw, u16 offset,
5241 u16 words, u16 *data)
5242 {
5243 s32 status;
5244
5245 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5246 status = ixgbe_init_eeprom_params(hw);
5247 if (status)
5248 return status;
5249 }
5250
5251 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5252 if (status)
5253 return status;
5254
5255 status = ixgbe_write_sr_buf_aci(hw, (u32)offset, words, data);
5256 ixgbe_release_nvm(hw);
5257
5258 return status;
5259 }
5260
5261 /**
5262 * ixgbe_calc_eeprom_checksum_E610 - Calculates and returns the checksum
5263 * @hw: pointer to hardware structure
5264 *
5265 * Calculate SW Checksum that covers the whole 64kB shadow RAM
5266 * except the VPD and PCIe ALT Auto-load modules. The structure and size of VPD
5267 * is customer specific and unknown. Therefore, this function skips all maximum
5268 * possible size of VPD (1kB).
5269 * If the EEPROM params are not initialized, the function
5270 * initializes them before proceeding.
5271 * The function acquires and then releases the NVM ownership.
5272 *
5273 * Return: the negative error code on error, or the 16-bit checksum
5274 */
ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw * hw)5275 s32 ixgbe_calc_eeprom_checksum_E610(struct ixgbe_hw *hw)
5276 {
5277 bool nvm_acquired = false;
5278 u16 pcie_alt_module = 0;
5279 u16 checksum_local = 0;
5280 u16 checksum = 0;
5281 u16 vpd_module;
5282 void *vmem;
5283 s32 status;
5284 u16 *data;
5285 u16 i;
5286
5287 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5288 status = ixgbe_init_eeprom_params(hw);
5289 if (status)
5290 return status;
5291 }
5292
5293 vmem = ixgbe_calloc(hw, IXGBE_SR_SECTOR_SIZE_IN_WORDS, sizeof(u16));
5294 if (!vmem)
5295 return IXGBE_ERR_OUT_OF_MEM;
5296 data = (u16 *)vmem;
5297 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5298 if (status)
5299 goto ixgbe_calc_sr_checksum_exit;
5300 nvm_acquired = true;
5301
5302 /* read pointer to VPD area */
5303 status = ixgbe_read_sr_word_aci(hw, E610_SR_VPD_PTR, &vpd_module);
5304 if (status)
5305 goto ixgbe_calc_sr_checksum_exit;
5306
5307 /* read pointer to PCIe Alt Auto-load module */
5308 status = ixgbe_read_sr_word_aci(hw, E610_SR_PCIE_ALT_AUTO_LOAD_PTR,
5309 &pcie_alt_module);
5310 if (status)
5311 goto ixgbe_calc_sr_checksum_exit;
5312
5313 /* Calculate SW checksum that covers the whole 64kB shadow RAM
5314 * except the VPD and PCIe ALT Auto-load modules
5315 */
5316 for (i = 0; i < hw->eeprom.word_size; i++) {
5317 /* Read SR page */
5318 if ((i % IXGBE_SR_SECTOR_SIZE_IN_WORDS) == 0) {
5319 u16 words = IXGBE_SR_SECTOR_SIZE_IN_WORDS;
5320
5321 status = ixgbe_read_sr_buf_aci(hw, i, &words, data);
5322 if (status != IXGBE_SUCCESS)
5323 goto ixgbe_calc_sr_checksum_exit;
5324 }
5325
5326 /* Skip Checksum word */
5327 if (i == E610_SR_SW_CHECKSUM_WORD)
5328 continue;
5329 /* Skip VPD module (convert byte size to word count) */
5330 if (i >= (u32)vpd_module &&
5331 i < ((u32)vpd_module + E610_SR_VPD_SIZE_WORDS))
5332 continue;
5333 /* Skip PCIe ALT module (convert byte size to word count) */
5334 if (i >= (u32)pcie_alt_module &&
5335 i < ((u32)pcie_alt_module + E610_SR_PCIE_ALT_SIZE_WORDS))
5336 continue;
5337
5338 checksum_local += data[i % IXGBE_SR_SECTOR_SIZE_IN_WORDS];
5339 }
5340
5341 checksum = (u16)IXGBE_SR_SW_CHECKSUM_BASE - checksum_local;
5342
5343 ixgbe_calc_sr_checksum_exit:
5344 if(nvm_acquired)
5345 ixgbe_release_nvm(hw);
5346 ixgbe_free(hw, vmem);
5347
5348 if(!status)
5349 return (s32)checksum;
5350 else
5351 return status;
5352 }
5353
5354 /**
5355 * ixgbe_update_eeprom_checksum_E610 - Updates the EEPROM checksum and flash
5356 * @hw: pointer to hardware structure
5357 *
5358 * After writing EEPROM to Shadow RAM, software sends the admin command
5359 * to recalculate and update EEPROM checksum and instructs the hardware
5360 * to update the flash.
5361 * If the EEPROM params are not initialized, the function
5362 * initialize them before proceeding.
5363 * The function acquires and then releases the NVM ownership.
5364 *
5365 * Return: the exit code of the operation.
5366 */
ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw * hw)5367 s32 ixgbe_update_eeprom_checksum_E610(struct ixgbe_hw *hw)
5368 {
5369 s32 status;
5370
5371 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5372 status = ixgbe_init_eeprom_params(hw);
5373 if (status)
5374 return status;
5375 }
5376
5377 status = ixgbe_nvm_recalculate_checksum(hw);
5378 if (status)
5379 return status;
5380 status = ixgbe_acquire_nvm(hw, IXGBE_RES_WRITE);
5381 if (status)
5382 return status;
5383
5384 status = ixgbe_nvm_write_activate(hw, IXGBE_ACI_NVM_ACTIV_REQ_EMPR,
5385 NULL);
5386 ixgbe_release_nvm(hw);
5387
5388 return status;
5389 }
5390
5391 /**
5392 * ixgbe_validate_eeprom_checksum_E610 - Validate EEPROM checksum
5393 * @hw: pointer to hardware structure
5394 * @checksum_val: calculated checksum
5395 *
5396 * Performs checksum calculation and validates the EEPROM checksum. If the
5397 * caller does not need checksum_val, the value can be NULL.
5398 * If the EEPROM params are not initialized, the function
5399 * initialize them before proceeding.
5400 * The function acquires and then releases the NVM ownership.
5401 *
5402 * Return: the exit code of the operation.
5403 */
ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw * hw,u16 * checksum_val)5404 s32 ixgbe_validate_eeprom_checksum_E610(struct ixgbe_hw *hw, u16 *checksum_val)
5405 {
5406 u32 status;
5407
5408 if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
5409 status = ixgbe_init_eeprom_params(hw);
5410 if (status)
5411 return status;
5412 }
5413
5414 status = ixgbe_nvm_validate_checksum(hw);
5415
5416 if (status)
5417 return status;
5418
5419 if (checksum_val) {
5420 u16 tmp_checksum;
5421 status = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
5422 if (status)
5423 return status;
5424
5425 status = ixgbe_read_sr_word_aci(hw, E610_SR_SW_CHECKSUM_WORD,
5426 &tmp_checksum);
5427 ixgbe_release_nvm(hw);
5428
5429 if (!status)
5430 *checksum_val = tmp_checksum;
5431 }
5432
5433 return status;
5434 }
5435
5436 /**
5437 * ixgbe_get_pfa_module_tlv - Reads sub module TLV from NVM PFA
5438 * @hw: pointer to hardware structure
5439 * @module_tlv: pointer to module TLV to return
5440 * @module_tlv_len: pointer to module TLV length to return
5441 * @module_type: module type requested
5442 *
5443 * Finds the requested sub module TLV type from the Preserved Field
5444 * Area (PFA) and returns the TLV pointer and length. The caller can
5445 * use these to read the variable length TLV value.
5446 *
5447 * Return: the exit code of the operation.
5448 */
ixgbe_get_pfa_module_tlv(struct ixgbe_hw * hw,u16 * module_tlv,u16 * module_tlv_len,u16 module_type)5449 static s32 ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
5450 u16 *module_tlv_len, u16 module_type)
5451 {
5452 u16 pfa_len, pfa_ptr, pfa_end_ptr;
5453 u16 next_tlv;
5454 s32 status;
5455
5456 status = ixgbe_read_ee_aci_E610(hw, E610_SR_PFA_PTR, &pfa_ptr);
5457 if (status != IXGBE_SUCCESS) {
5458 return status;
5459 }
5460 status = ixgbe_read_ee_aci_E610(hw, pfa_ptr, &pfa_len);
5461 if (status != IXGBE_SUCCESS) {
5462 return status;
5463 }
5464 /* Starting with first TLV after PFA length, iterate through the list
5465 * of TLVs to find the requested one.
5466 */
5467 next_tlv = pfa_ptr + 1;
5468 pfa_end_ptr = pfa_ptr + pfa_len;
5469 while (next_tlv < pfa_end_ptr) {
5470 u16 tlv_sub_module_type, tlv_len;
5471
5472 /* Read TLV type */
5473 status = ixgbe_read_ee_aci_E610(hw, next_tlv,
5474 &tlv_sub_module_type);
5475 if (status != IXGBE_SUCCESS) {
5476 break;
5477 }
5478 /* Read TLV length */
5479 status = ixgbe_read_ee_aci_E610(hw, next_tlv + 1, &tlv_len);
5480 if (status != IXGBE_SUCCESS) {
5481 break;
5482 }
5483 if (tlv_sub_module_type == module_type) {
5484 if (tlv_len) {
5485 *module_tlv = next_tlv;
5486 *module_tlv_len = tlv_len;
5487 return IXGBE_SUCCESS;
5488 }
5489 return IXGBE_ERR_INVAL_SIZE;
5490 }
5491 /* Check next TLV, i.e. current TLV pointer + length + 2 words
5492 * (for current TLV's type and length)
5493 */
5494 next_tlv = next_tlv + tlv_len + 2;
5495 }
5496 /* Module does not exist */
5497 return IXGBE_ERR_DOES_NOT_EXIST;
5498 }
5499
5500 /**
5501 * ixgbe_read_pba_string_E610 - Reads part number string from NVM
5502 * @hw: pointer to hardware structure
5503 * @pba_num: stores the part number string from the NVM
5504 * @pba_num_size: part number string buffer length
5505 *
5506 * Reads the part number string from the NVM.
5507 *
5508 * Return: the exit code of the operation.
5509 */
ixgbe_read_pba_string_E610(struct ixgbe_hw * hw,u8 * pba_num,u32 pba_num_size)5510 s32 ixgbe_read_pba_string_E610(struct ixgbe_hw *hw, u8 *pba_num,
5511 u32 pba_num_size)
5512 {
5513 u16 pba_tlv, pba_tlv_len;
5514 u16 pba_word, pba_size;
5515 s32 status;
5516 u16 i;
5517
5518 status = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
5519 E610_SR_PBA_BLOCK_PTR);
5520 if (status != IXGBE_SUCCESS) {
5521 return status;
5522 }
5523
5524 /* pba_size is the next word */
5525 status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2), &pba_size);
5526 if (status != IXGBE_SUCCESS) {
5527 return status;
5528 }
5529
5530 if (pba_tlv_len < pba_size) {
5531 return IXGBE_ERR_INVAL_SIZE;
5532 }
5533
5534 /* Subtract one to get PBA word count (PBA Size word is included in
5535 * total size)
5536 */
5537 pba_size--;
5538 if (pba_num_size < (((u32)pba_size * 2) + 1)) {
5539 return IXGBE_ERR_PARAM;
5540 }
5541
5542 for (i = 0; i < pba_size; i++) {
5543 status = ixgbe_read_ee_aci_E610(hw, (pba_tlv + 2 + 1) + i,
5544 &pba_word);
5545 if (status != IXGBE_SUCCESS) {
5546 return status;
5547 }
5548
5549 pba_num[(i * 2)] = (pba_word >> 8) & 0xFF;
5550 pba_num[(i * 2) + 1] = pba_word & 0xFF;
5551 }
5552 pba_num[(pba_size * 2)] = '\0';
5553
5554 return status;
5555 }
5556