xref: /linux/drivers/net/ethernet/intel/ixgbe/ixgbe_e610.c (revision b803c4a4f78834b31ebfbbcea350473333760559)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2024 Intel Corporation. */
3 
4 #include "ixgbe_common.h"
5 #include "ixgbe_e610.h"
6 #include "ixgbe_x550.h"
7 #include "ixgbe_type.h"
8 #include "ixgbe_x540.h"
9 #include "ixgbe_mbx.h"
10 #include "ixgbe_phy.h"
11 
12 /**
13  * ixgbe_should_retry_aci_send_cmd_execute - decide if ACI command should
14  * be resent
15  * @opcode: ACI opcode
16  *
17  * Check if ACI command should be sent again depending on the provided opcode.
18  * It may happen when CSR is busy during link state changes.
19  *
20  * Return: true if the sending command routine should be repeated,
21  * otherwise false.
22  */
23 static bool ixgbe_should_retry_aci_send_cmd_execute(u16 opcode)
24 {
25 	switch (opcode) {
26 	case ixgbe_aci_opc_disable_rxen:
27 	case ixgbe_aci_opc_get_phy_caps:
28 	case ixgbe_aci_opc_get_link_status:
29 	case ixgbe_aci_opc_get_link_topo:
30 		return true;
31 	}
32 
33 	return false;
34 }
35 
36 /**
37  * ixgbe_aci_send_cmd_execute - execute sending FW Admin Command to FW Admin
38  * Command Interface
39  * @hw: pointer to the HW struct
40  * @desc: descriptor describing the command
41  * @buf: buffer to use for indirect commands (NULL for direct commands)
42  * @buf_size: size of buffer for indirect commands (0 for direct commands)
43  *
44  * Admin Command is sent using CSR by setting descriptor and buffer in specific
45  * registers.
46  *
47  * Return: the exit code of the operation.
48  * * - 0 - success.
49  * * - -EIO - CSR mechanism is not enabled.
50  * * - -EBUSY - CSR mechanism is busy.
51  * * - -EINVAL - buf_size is too big or
52  * invalid argument buf or buf_size.
53  * * - -ETIME - Admin Command X command timeout.
54  * * - -EIO - Admin Command X invalid state of HICR register or
55  * Admin Command failed because of bad opcode was returned or
56  * Admin Command failed with error Y.
57  */
58 static int ixgbe_aci_send_cmd_execute(struct ixgbe_hw *hw,
59 				      struct ixgbe_aci_desc *desc,
60 				      void *buf, u16 buf_size)
61 {
62 	u16 opcode, buf_tail_size = buf_size % 4;
63 	u32 *raw_desc = (u32 *)desc;
64 	u32 hicr, i, buf_tail = 0;
65 	bool valid_buf = false;
66 
67 	hw->aci.last_status = IXGBE_ACI_RC_OK;
68 
69 	/* It's necessary to check if mechanism is enabled */
70 	hicr = IXGBE_READ_REG(hw, IXGBE_PF_HICR);
71 
72 	if (!(hicr & IXGBE_PF_HICR_EN))
73 		return -EIO;
74 
75 	if (hicr & IXGBE_PF_HICR_C) {
76 		hw->aci.last_status = IXGBE_ACI_RC_EBUSY;
77 		return -EBUSY;
78 	}
79 
80 	opcode = le16_to_cpu(desc->opcode);
81 
82 	if (buf_size > IXGBE_ACI_MAX_BUFFER_SIZE)
83 		return -EINVAL;
84 
85 	if (buf)
86 		desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_BUF);
87 
88 	if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_BUF)) {
89 		if ((buf && !buf_size) ||
90 		    (!buf && buf_size))
91 			return -EINVAL;
92 		if (buf && buf_size)
93 			valid_buf = true;
94 	}
95 
96 	if (valid_buf) {
97 		if (buf_tail_size)
98 			memcpy(&buf_tail, buf + buf_size - buf_tail_size,
99 			       buf_tail_size);
100 
101 		if (((buf_size + 3) & ~0x3) > IXGBE_ACI_LG_BUF)
102 			desc->flags |= cpu_to_le16(IXGBE_ACI_FLAG_LB);
103 
104 		desc->datalen = cpu_to_le16(buf_size);
105 
106 		if (desc->flags & cpu_to_le16(IXGBE_ACI_FLAG_RD)) {
107 			for (i = 0; i < buf_size / 4; i++)
108 				IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), ((u32 *)buf)[i]);
109 			if (buf_tail_size)
110 				IXGBE_WRITE_REG(hw, IXGBE_PF_HIBA(i), buf_tail);
111 		}
112 	}
113 
114 	/* Descriptor is written to specific registers */
115 	for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++)
116 		IXGBE_WRITE_REG(hw, IXGBE_PF_HIDA(i), raw_desc[i]);
117 
118 	/* SW has to set PF_HICR.C bit and clear PF_HICR.SV and
119 	 * PF_HICR_EV
120 	 */
121 	hicr = (IXGBE_READ_REG(hw, IXGBE_PF_HICR) | IXGBE_PF_HICR_C) &
122 	       ~(IXGBE_PF_HICR_SV | IXGBE_PF_HICR_EV);
123 	IXGBE_WRITE_REG(hw, IXGBE_PF_HICR, hicr);
124 
125 #define MAX_SLEEP_RESP_US 1000
126 #define MAX_TMOUT_RESP_SYNC_US 100000000
127 
128 	/* Wait for sync Admin Command response */
129 	read_poll_timeout(IXGBE_READ_REG, hicr,
130 			  (hicr & IXGBE_PF_HICR_SV) ||
131 			  !(hicr & IXGBE_PF_HICR_C),
132 			  MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_SYNC_US, true, hw,
133 			  IXGBE_PF_HICR);
134 
135 #define MAX_TMOUT_RESP_ASYNC_US 150000000
136 
137 	/* Wait for async Admin Command response */
138 	read_poll_timeout(IXGBE_READ_REG, hicr,
139 			  (hicr & IXGBE_PF_HICR_EV) ||
140 			  !(hicr & IXGBE_PF_HICR_C),
141 			  MAX_SLEEP_RESP_US, MAX_TMOUT_RESP_ASYNC_US, true, hw,
142 			  IXGBE_PF_HICR);
143 
144 	/* Read sync Admin Command response */
145 	if ((hicr & IXGBE_PF_HICR_SV)) {
146 		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
147 			raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA(i));
148 			raw_desc[i] = raw_desc[i];
149 		}
150 	}
151 
152 	/* Read async Admin Command response */
153 	if ((hicr & IXGBE_PF_HICR_EV) && !(hicr & IXGBE_PF_HICR_C)) {
154 		for (i = 0; i < IXGBE_ACI_DESC_SIZE_IN_DWORDS; i++) {
155 			raw_desc[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIDA_2(i));
156 			raw_desc[i] = raw_desc[i];
157 		}
158 	}
159 
160 	/* Handle timeout and invalid state of HICR register */
161 	if (hicr & IXGBE_PF_HICR_C)
162 		return -ETIME;
163 
164 	if (!(hicr & IXGBE_PF_HICR_SV) && !(hicr & IXGBE_PF_HICR_EV))
165 		return -EIO;
166 
167 	/* For every command other than 0x0014 treat opcode mismatch
168 	 * as an error. Response to 0x0014 command read from HIDA_2
169 	 * is a descriptor of an event which is expected to contain
170 	 * different opcode than the command.
171 	 */
172 	if (desc->opcode != cpu_to_le16(opcode) &&
173 	    opcode != ixgbe_aci_opc_get_fw_event)
174 		return -EIO;
175 
176 	if (desc->retval) {
177 		hw->aci.last_status = (enum ixgbe_aci_err)
178 			le16_to_cpu(desc->retval);
179 		return -EIO;
180 	}
181 
182 	/* Write a response values to a buf */
183 	if (valid_buf) {
184 		for (i = 0; i < buf_size / 4; i++)
185 			((u32 *)buf)[i] = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
186 		if (buf_tail_size) {
187 			buf_tail = IXGBE_READ_REG(hw, IXGBE_PF_HIBA(i));
188 			memcpy(buf + buf_size - buf_tail_size, &buf_tail,
189 			       buf_tail_size);
190 		}
191 	}
192 
193 	return 0;
194 }
195 
196 /**
197  * ixgbe_aci_send_cmd - send FW Admin Command to FW Admin Command Interface
198  * @hw: pointer to the HW struct
199  * @desc: descriptor describing the command
200  * @buf: buffer to use for indirect commands (NULL for direct commands)
201  * @buf_size: size of buffer for indirect commands (0 for direct commands)
202  *
203  * Helper function to send FW Admin Commands to the FW Admin Command Interface.
204  *
205  * Retry sending the FW Admin Command multiple times to the FW ACI
206  * if the EBUSY Admin Command error is returned.
207  *
208  * Return: the exit code of the operation.
209  */
210 int ixgbe_aci_send_cmd(struct ixgbe_hw *hw, struct ixgbe_aci_desc *desc,
211 		       void *buf, u16 buf_size)
212 {
213 	u16 opcode = le16_to_cpu(desc->opcode);
214 	struct ixgbe_aci_desc desc_cpy;
215 	enum ixgbe_aci_err last_status;
216 	u8 idx = 0, *buf_cpy = NULL;
217 	bool is_cmd_for_retry;
218 	unsigned long timeout;
219 	int err;
220 
221 	is_cmd_for_retry = ixgbe_should_retry_aci_send_cmd_execute(opcode);
222 	if (is_cmd_for_retry) {
223 		if (buf) {
224 			buf_cpy = kmalloc(buf_size, GFP_KERNEL);
225 			if (!buf_cpy)
226 				return -ENOMEM;
227 			*buf_cpy = *(u8 *)buf;
228 		}
229 		desc_cpy = *desc;
230 	}
231 
232 	timeout = jiffies + msecs_to_jiffies(IXGBE_ACI_SEND_TIMEOUT_MS);
233 	do {
234 		mutex_lock(&hw->aci.lock);
235 		err = ixgbe_aci_send_cmd_execute(hw, desc, buf, buf_size);
236 		last_status = hw->aci.last_status;
237 		mutex_unlock(&hw->aci.lock);
238 
239 		if (!is_cmd_for_retry || !err ||
240 		    last_status != IXGBE_ACI_RC_EBUSY)
241 			break;
242 
243 		if (buf)
244 			memcpy(buf, buf_cpy, buf_size);
245 		*desc = desc_cpy;
246 
247 		msleep(IXGBE_ACI_SEND_DELAY_TIME_MS);
248 	} while (++idx < IXGBE_ACI_SEND_MAX_EXECUTE &&
249 		 time_before(jiffies, timeout));
250 
251 	kfree(buf_cpy);
252 
253 	return err;
254 }
255 
256 /**
257  * ixgbe_aci_check_event_pending - check if there are any pending events
258  * @hw: pointer to the HW struct
259  *
260  * Determine if there are any pending events.
261  *
262  * Return: true if there are any currently pending events
263  * otherwise false.
264  */
265 bool ixgbe_aci_check_event_pending(struct ixgbe_hw *hw)
266 {
267 	u32 ep_bit_mask = hw->bus.func ? GL_FWSTS_EP_PF1 : GL_FWSTS_EP_PF0;
268 	u32 fwsts = IXGBE_READ_REG(hw, GL_FWSTS);
269 
270 	return (fwsts & ep_bit_mask) ? true : false;
271 }
272 
/**
 * ixgbe_aci_get_event - get an event from ACI
 * @hw: pointer to the HW struct
 * @e: event information structure
 * @pending: optional flag signaling that there are more pending events
 *
 * Obtain an event from ACI and return its content
 * through 'e' using ACI command (0x0014).
 * Provide information if there are more events
 * to retrieve through 'pending'.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_aci_get_event(struct ixgbe_hw *hw, struct ixgbe_aci_event *e,
			bool *pending)
{
	struct ixgbe_aci_desc desc;
	int err;

	/* A message buffer is mandatory whenever a non-zero length is given */
	if (!e || (!e->msg_buf && e->buf_len))
		return -EINVAL;

	/* Hold the ACI lock across the check-and-fetch sequence so another
	 * caller cannot consume the event between the two steps.
	 */
	mutex_lock(&hw->aci.lock);

	/* Check if there are any events pending */
	if (!ixgbe_aci_check_event_pending(hw)) {
		err = -ENOENT;
		goto aci_get_event_exit;
	}

	/* Obtain pending event */
	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_fw_event);
	err = ixgbe_aci_send_cmd_execute(hw, &desc, e->msg_buf, e->buf_len);
	if (err)
		goto aci_get_event_exit;

	/* Returned 0x0014 opcode indicates that no event was obtained */
	if (desc.opcode == cpu_to_le16(ixgbe_aci_opc_get_fw_event)) {
		err = -ENOENT;
		goto aci_get_event_exit;
	}

	/* Determine size of event data, capped at the caller's buffer size */
	e->msg_len = min_t(u16, le16_to_cpu(desc.datalen), e->buf_len);
	/* Write event descriptor to event info structure */
	memcpy(&e->desc, &desc, sizeof(e->desc));

	/* Check if there are any further events pending */
	if (pending)
		*pending = ixgbe_aci_check_event_pending(hw);

aci_get_event_exit:
	mutex_unlock(&hw->aci.lock);

	return err;
}
329 
330 /**
331  * ixgbe_fill_dflt_direct_cmd_desc - fill ACI descriptor with default values.
332  * @desc: pointer to the temp descriptor (non DMA mem)
333  * @opcode: the opcode can be used to decide which flags to turn off or on
334  *
335  * Helper function to fill the descriptor desc with default values
336  * and the provided opcode.
337  */
338 void ixgbe_fill_dflt_direct_cmd_desc(struct ixgbe_aci_desc *desc, u16 opcode)
339 {
340 	/* Zero out the desc. */
341 	memset(desc, 0, sizeof(*desc));
342 	desc->opcode = cpu_to_le16(opcode);
343 	desc->flags = cpu_to_le16(IXGBE_ACI_FLAG_SI);
344 }
345 
346 /**
347  * ixgbe_aci_get_fw_ver - Get the firmware version
348  * @hw: pointer to the HW struct
349  *
350  * Get the firmware version using ACI command (0x0001).
351  *
352  * Return: the exit code of the operation.
353  */
354 static int ixgbe_aci_get_fw_ver(struct ixgbe_hw *hw)
355 {
356 	struct ixgbe_aci_cmd_get_ver *resp;
357 	struct ixgbe_aci_desc desc;
358 	int err;
359 
360 	resp = &desc.params.get_ver;
361 
362 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_ver);
363 
364 	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
365 	if (!err) {
366 		hw->fw_branch = resp->fw_branch;
367 		hw->fw_maj_ver = resp->fw_major;
368 		hw->fw_min_ver = resp->fw_minor;
369 		hw->fw_patch = resp->fw_patch;
370 		hw->fw_build = le32_to_cpu(resp->fw_build);
371 		hw->api_branch = resp->api_branch;
372 		hw->api_maj_ver = resp->api_major;
373 		hw->api_min_ver = resp->api_minor;
374 		hw->api_patch = resp->api_patch;
375 	}
376 
377 	return err;
378 }
379 
380 /**
381  * ixgbe_aci_req_res - request a common resource
382  * @hw: pointer to the HW struct
383  * @res: resource ID
384  * @access: access type
385  * @sdp_number: resource number
386  * @timeout: the maximum time in ms that the driver may hold the resource
387  *
388  * Requests a common resource using the ACI command (0x0008).
389  * Specifies the maximum time the driver may hold the resource.
390  * If the requested resource is currently occupied by some other driver,
391  * a busy return value is returned and the timeout field value indicates the
392  * maximum time the current owner has to free it.
393  *
394  * Return: the exit code of the operation.
395  */
396 static int ixgbe_aci_req_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
397 			     enum ixgbe_aci_res_access_type access,
398 			     u8 sdp_number, u32 *timeout)
399 {
400 	struct ixgbe_aci_cmd_req_res *cmd_resp;
401 	struct ixgbe_aci_desc desc;
402 	int err;
403 
404 	cmd_resp = &desc.params.res_owner;
405 
406 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_req_res);
407 
408 	cmd_resp->res_id = cpu_to_le16(res);
409 	cmd_resp->access_type = cpu_to_le16(access);
410 	cmd_resp->res_number = cpu_to_le32(sdp_number);
411 	cmd_resp->timeout = cpu_to_le32(*timeout);
412 	*timeout = 0;
413 
414 	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
415 
416 	/* If the resource is held by some other driver, the command completes
417 	 * with a busy return value and the timeout field indicates the maximum
418 	 * time the current owner of the resource has to free it.
419 	 */
420 	if (!err || hw->aci.last_status == IXGBE_ACI_RC_EBUSY)
421 		*timeout = le32_to_cpu(cmd_resp->timeout);
422 
423 	return err;
424 }
425 
426 /**
427  * ixgbe_aci_release_res - release a common resource using ACI
428  * @hw: pointer to the HW struct
429  * @res: resource ID
430  * @sdp_number: resource number
431  *
432  * Release a common resource using ACI command (0x0009).
433  *
434  * Return: the exit code of the operation.
435  */
436 static int ixgbe_aci_release_res(struct ixgbe_hw *hw,
437 				 enum ixgbe_aci_res_ids res, u8 sdp_number)
438 {
439 	struct ixgbe_aci_cmd_req_res *cmd;
440 	struct ixgbe_aci_desc desc;
441 
442 	cmd = &desc.params.res_owner;
443 
444 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_release_res);
445 
446 	cmd->res_id = cpu_to_le16(res);
447 	cmd->res_number = cpu_to_le32(sdp_number);
448 
449 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
450 }
451 
/**
 * ixgbe_acquire_res - acquire the ownership of a resource
 * @hw: pointer to the HW structure
 * @res: resource ID
 * @access: access type (read or write)
 * @timeout: timeout in milliseconds
 *
 * Make an attempt to acquire the ownership of a resource using
 * the ixgbe_aci_req_res to utilize ACI.
 * In case if some other driver has previously acquired the resource and
 * performed any necessary updates, the -EALREADY is returned,
 * and the caller does not obtain the resource and has no further work to do.
 * If needed, the function will poll until the current lock owner timeouts.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_acquire_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res,
		      enum ixgbe_aci_res_access_type access, u32 timeout)
{
#define IXGBE_RES_POLLING_DELAY_MS	10
	u32 delay = IXGBE_RES_POLLING_DELAY_MS;
	/* res_timeout is an in/out parameter: in = how long we want to hold
	 * the resource, out = the FW-reported timeout of the current owner.
	 */
	u32 res_timeout = timeout;
	u32 retry_timeout;
	int err;

	err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);

	/* A return code of -EALREADY means that another driver has
	 * previously acquired the resource and performed any necessary updates;
	 * in this case the caller does not obtain the resource and has no
	 * further work to do.
	 */
	if (err == -EALREADY)
		return err;

	/* If necessary, poll until the current lock owner timeouts.
	 * Set retry_timeout to the timeout value reported by the FW in the
	 * response to the "Request Resource Ownership" (0x0008) Admin Command
	 * as it indicates the maximum time the current owner of the resource
	 * is allowed to hold it.
	 */
	retry_timeout = res_timeout;
	while (err && retry_timeout && res_timeout) {
		msleep(delay);
		/* Count down the remaining budget; saturate at zero so the
		 * loop terminates even if delay does not divide it evenly.
		 */
		retry_timeout = (retry_timeout > delay) ?
			retry_timeout - delay : 0;
		err = ixgbe_aci_req_res(hw, res, access, 0, &res_timeout);

		/* Success - lock acquired.
		 * -EALREADY - lock free, no work to do.
		 */
		if (!err || err == -EALREADY)
			break;
	}

	return err;
}
509 
510 /**
511  * ixgbe_release_res - release a common resource
512  * @hw: pointer to the HW structure
513  * @res: resource ID
514  *
515  * Release a common resource using ixgbe_aci_release_res.
516  */
517 void ixgbe_release_res(struct ixgbe_hw *hw, enum ixgbe_aci_res_ids res)
518 {
519 	u32 total_delay = 0;
520 	int err;
521 
522 	err = ixgbe_aci_release_res(hw, res, 0);
523 
524 	/* There are some rare cases when trying to release the resource
525 	 * results in an admin command timeout, so handle them correctly.
526 	 */
527 	while (err == -ETIME &&
528 	       total_delay < IXGBE_ACI_RELEASE_RES_TIMEOUT) {
529 		usleep_range(1000, 1500);
530 		err = ixgbe_aci_release_res(hw, res, 0);
531 		total_delay++;
532 	}
533 }
534 
/**
 * ixgbe_parse_e610_caps - Parse common device/function capabilities
 * @hw: pointer to the HW struct
 * @caps: pointer to common capabilities structure
 * @elem: the capability element to parse
 * @prefix: message prefix for tracing capabilities (currently unused here)
 *
 * Given a capability element, extract relevant details into the common
 * capability structure.
 *
 * Return: true if the capability matches one of the common capability ids,
 * false otherwise.
 */
static bool ixgbe_parse_e610_caps(struct ixgbe_hw *hw,
				  struct ixgbe_hw_caps *caps,
				  struct ixgbe_aci_cmd_list_caps_elem *elem,
				  const char *prefix)
{
	/* Capability element fields arrive little-endian from firmware */
	u32 logical_id = le32_to_cpu(elem->logical_id);
	u32 phys_id = le32_to_cpu(elem->phys_id);
	u32 number = le32_to_cpu(elem->number);
	u16 cap = le16_to_cpu(elem->cap);

	switch (cap) {
	case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
		caps->valid_functions = number;
		break;
	case IXGBE_ACI_CAPS_SRIOV:
		/* number == 1 indicates the feature is supported */
		caps->sr_iov_1_1 = (number == 1);
		break;
	case IXGBE_ACI_CAPS_VMDQ:
		caps->vmdq = (number == 1);
		break;
	case IXGBE_ACI_CAPS_DCB:
		caps->dcb = (number == 1);
		caps->active_tc_bitmap = logical_id;
		caps->maxtc = phys_id;
		break;
	case IXGBE_ACI_CAPS_RSS:
		caps->rss_table_size = number;
		caps->rss_table_entry_width = logical_id;
		break;
	case IXGBE_ACI_CAPS_RXQS:
		caps->num_rxq = number;
		caps->rxq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_TXQS:
		caps->num_txq = number;
		caps->txq_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_MSIX:
		caps->num_msix_vectors = number;
		caps->msix_vector_first_id = phys_id;
		break;
	case IXGBE_ACI_CAPS_NVM_VER:
		/* Recognized but carries no data to record */
		break;
	case IXGBE_ACI_CAPS_PENDING_NVM_VER:
		caps->nvm_update_pending_nvm = true;
		break;
	case IXGBE_ACI_CAPS_PENDING_OROM_VER:
		caps->nvm_update_pending_orom = true;
		break;
	case IXGBE_ACI_CAPS_PENDING_NET_VER:
		caps->nvm_update_pending_netlist = true;
		break;
	case IXGBE_ACI_CAPS_NVM_MGMT:
		caps->nvm_unified_update =
			(number & IXGBE_NVM_MGMT_UNIFIED_UPD_SUPPORT) ?
			true : false;
		break;
	case IXGBE_ACI_CAPS_MAX_MTU:
		caps->max_mtu = number;
		break;
	case IXGBE_ACI_CAPS_PCIE_RESET_AVOIDANCE:
		caps->pcie_reset_avoidance = (number > 0);
		break;
	case IXGBE_ACI_CAPS_POST_UPDATE_RESET_RESTRICT:
		caps->reset_restrict_support = (number == 1);
		break;
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG1:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG2:
	case IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG3:
	{
		/* The four image capability ids are contiguous, so the
		 * image index is the offset from IMG0.
		 */
		u8 index = cap - IXGBE_ACI_CAPS_EXT_TOPO_DEV_IMG0;

		caps->ext_topo_dev_img_ver_high[index] = number;
		caps->ext_topo_dev_img_ver_low[index] = logical_id;
		caps->ext_topo_dev_img_part_num[index] =
			FIELD_GET(IXGBE_EXT_TOPO_DEV_IMG_PART_NUM_M, phys_id);
		caps->ext_topo_dev_img_load_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_LOAD_EN) != 0;
		caps->ext_topo_dev_img_prog_en[index] =
			(phys_id & IXGBE_EXT_TOPO_DEV_IMG_PROG_EN) != 0;
		break;
	}
	default:
		/* Not one of the recognized common capabilities */
		return false;
	}

	return true;
}
638 
639 /**
640  * ixgbe_parse_valid_functions_cap - Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS caps
641  * @hw: pointer to the HW struct
642  * @dev_p: pointer to device capabilities structure
643  * @cap: capability element to parse
644  *
645  * Parse IXGBE_ACI_CAPS_VALID_FUNCTIONS for device capabilities.
646  */
647 static void
648 ixgbe_parse_valid_functions_cap(struct ixgbe_hw *hw,
649 				struct ixgbe_hw_dev_caps *dev_p,
650 				struct ixgbe_aci_cmd_list_caps_elem *cap)
651 {
652 	dev_p->num_funcs = hweight32(le32_to_cpu(cap->number));
653 }
654 
655 /**
656  * ixgbe_parse_vf_dev_caps - Parse IXGBE_ACI_CAPS_VF device caps
657  * @hw: pointer to the HW struct
658  * @dev_p: pointer to device capabilities structure
659  * @cap: capability element to parse
660  *
661  * Parse IXGBE_ACI_CAPS_VF for device capabilities.
662  */
663 static void ixgbe_parse_vf_dev_caps(struct ixgbe_hw *hw,
664 				    struct ixgbe_hw_dev_caps *dev_p,
665 				    struct ixgbe_aci_cmd_list_caps_elem *cap)
666 {
667 	dev_p->num_vfs_exposed = le32_to_cpu(cap->number);
668 }
669 
670 /**
671  * ixgbe_parse_vsi_dev_caps - Parse IXGBE_ACI_CAPS_VSI device caps
672  * @hw: pointer to the HW struct
673  * @dev_p: pointer to device capabilities structure
674  * @cap: capability element to parse
675  *
676  * Parse IXGBE_ACI_CAPS_VSI for device capabilities.
677  */
678 static void ixgbe_parse_vsi_dev_caps(struct ixgbe_hw *hw,
679 				     struct ixgbe_hw_dev_caps *dev_p,
680 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
681 {
682 	dev_p->num_vsi_allocd_to_host = le32_to_cpu(cap->number);
683 }
684 
685 /**
686  * ixgbe_parse_fdir_dev_caps - Parse IXGBE_ACI_CAPS_FD device caps
687  * @hw: pointer to the HW struct
688  * @dev_p: pointer to device capabilities structure
689  * @cap: capability element to parse
690  *
691  * Parse IXGBE_ACI_CAPS_FD for device capabilities.
692  */
693 static void ixgbe_parse_fdir_dev_caps(struct ixgbe_hw *hw,
694 				      struct ixgbe_hw_dev_caps *dev_p,
695 				      struct ixgbe_aci_cmd_list_caps_elem *cap)
696 {
697 	dev_p->num_flow_director_fltr = le32_to_cpu(cap->number);
698 }
699 
700 /**
701  * ixgbe_parse_dev_caps - Parse device capabilities
702  * @hw: pointer to the HW struct
703  * @dev_p: pointer to device capabilities structure
704  * @buf: buffer containing the device capability records
705  * @cap_count: the number of capabilities
706  *
707  * Helper device to parse device (0x000B) capabilities list. For
708  * capabilities shared between device and function, this relies on
709  * ixgbe_parse_e610_caps.
710  *
711  * Loop through the list of provided capabilities and extract the relevant
712  * data into the device capabilities structured.
713  */
714 static void ixgbe_parse_dev_caps(struct ixgbe_hw *hw,
715 				 struct ixgbe_hw_dev_caps *dev_p,
716 				 void *buf, u32 cap_count)
717 {
718 	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
719 	u32 i;
720 
721 	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
722 
723 	memset(dev_p, 0, sizeof(*dev_p));
724 
725 	for (i = 0; i < cap_count; i++) {
726 		u16 cap = le16_to_cpu(cap_resp[i].cap);
727 
728 		ixgbe_parse_e610_caps(hw, &dev_p->common_cap, &cap_resp[i],
729 				      "dev caps");
730 
731 		switch (cap) {
732 		case IXGBE_ACI_CAPS_VALID_FUNCTIONS:
733 			ixgbe_parse_valid_functions_cap(hw, dev_p,
734 							&cap_resp[i]);
735 			break;
736 		case IXGBE_ACI_CAPS_VF:
737 			ixgbe_parse_vf_dev_caps(hw, dev_p, &cap_resp[i]);
738 			break;
739 		case IXGBE_ACI_CAPS_VSI:
740 			ixgbe_parse_vsi_dev_caps(hw, dev_p, &cap_resp[i]);
741 			break;
742 		case  IXGBE_ACI_CAPS_FD:
743 			ixgbe_parse_fdir_dev_caps(hw, dev_p, &cap_resp[i]);
744 			break;
745 		default:
746 			/* Don't list common capabilities as unknown */
747 			break;
748 		}
749 	}
750 }
751 
752 /**
753  * ixgbe_parse_vf_func_caps - Parse IXGBE_ACI_CAPS_VF function caps
754  * @hw: pointer to the HW struct
755  * @func_p: pointer to function capabilities structure
756  * @cap: pointer to the capability element to parse
757  *
758  * Extract function capabilities for IXGBE_ACI_CAPS_VF.
759  */
760 static void ixgbe_parse_vf_func_caps(struct ixgbe_hw *hw,
761 				     struct ixgbe_hw_func_caps *func_p,
762 				     struct ixgbe_aci_cmd_list_caps_elem *cap)
763 {
764 	func_p->num_allocd_vfs = le32_to_cpu(cap->number);
765 	func_p->vf_base_id = le32_to_cpu(cap->logical_id);
766 }
767 
768 /**
769  * ixgbe_get_num_per_func - determine number of resources per PF
770  * @hw: pointer to the HW structure
771  * @max: value to be evenly split between each PF
772  *
773  * Determine the number of valid functions by going through the bitmap returned
774  * from parsing capabilities and use this to calculate the number of resources
775  * per PF based on the max value passed in.
776  *
777  * Return: the number of resources per PF or 0, if no PH are available.
778  */
779 static u32 ixgbe_get_num_per_func(struct ixgbe_hw *hw, u32 max)
780 {
781 #define IXGBE_CAPS_VALID_FUNCS_M	GENMASK(7, 0)
782 	u8 funcs = hweight8(hw->dev_caps.common_cap.valid_functions &
783 			    IXGBE_CAPS_VALID_FUNCS_M);
784 
785 	return funcs ? (max / funcs) : 0;
786 }
787 
788 /**
789  * ixgbe_parse_vsi_func_caps - Parse IXGBE_ACI_CAPS_VSI function caps
790  * @hw: pointer to the HW struct
791  * @func_p: pointer to function capabilities structure
792  * @cap: pointer to the capability element to parse
793  *
794  * Extract function capabilities for IXGBE_ACI_CAPS_VSI.
795  */
796 static void ixgbe_parse_vsi_func_caps(struct ixgbe_hw *hw,
797 				      struct ixgbe_hw_func_caps *func_p,
798 				      struct ixgbe_aci_cmd_list_caps_elem *cap)
799 {
800 	func_p->guar_num_vsi = ixgbe_get_num_per_func(hw, IXGBE_MAX_VSI);
801 }
802 
803 /**
804  * ixgbe_parse_func_caps - Parse function capabilities
805  * @hw: pointer to the HW struct
806  * @func_p: pointer to function capabilities structure
807  * @buf: buffer containing the function capability records
808  * @cap_count: the number of capabilities
809  *
810  * Helper function to parse function (0x000A) capabilities list. For
811  * capabilities shared between device and function, this relies on
812  * ixgbe_parse_e610_caps.
813  *
814  * Loop through the list of provided capabilities and extract the relevant
815  * data into the function capabilities structured.
816  */
817 static void ixgbe_parse_func_caps(struct ixgbe_hw *hw,
818 				  struct ixgbe_hw_func_caps *func_p,
819 				  void *buf, u32 cap_count)
820 {
821 	struct ixgbe_aci_cmd_list_caps_elem *cap_resp;
822 	u32 i;
823 
824 	cap_resp = (struct ixgbe_aci_cmd_list_caps_elem *)buf;
825 
826 	memset(func_p, 0, sizeof(*func_p));
827 
828 	for (i = 0; i < cap_count; i++) {
829 		u16 cap = le16_to_cpu(cap_resp[i].cap);
830 
831 		ixgbe_parse_e610_caps(hw, &func_p->common_cap,
832 				      &cap_resp[i], "func caps");
833 
834 		switch (cap) {
835 		case IXGBE_ACI_CAPS_VF:
836 			ixgbe_parse_vf_func_caps(hw, func_p, &cap_resp[i]);
837 			break;
838 		case IXGBE_ACI_CAPS_VSI:
839 			ixgbe_parse_vsi_func_caps(hw, func_p, &cap_resp[i]);
840 			break;
841 		default:
842 			/* Don't list common capabilities as unknown */
843 			break;
844 		}
845 	}
846 }
847 
848 /**
849  * ixgbe_aci_list_caps - query function/device capabilities
850  * @hw: pointer to the HW struct
851  * @buf: a buffer to hold the capabilities
852  * @buf_size: size of the buffer
853  * @cap_count: if not NULL, set to the number of capabilities reported
854  * @opc: capabilities type to discover, device or function
855  *
856  * Get the function (0x000A) or device (0x000B) capabilities description from
857  * firmware and store it in the buffer.
858  *
859  * If the cap_count pointer is not NULL, then it is set to the number of
860  * capabilities firmware will report. Note that if the buffer size is too
861  * small, it is possible the command will return -ENOMEM. The
862  * cap_count will still be updated in this case. It is recommended that the
863  * buffer size be set to IXGBE_ACI_MAX_BUFFER_SIZE (the largest possible
864  * buffer that firmware could return) to avoid this.
865  *
866  * Return: the exit code of the operation.
867  * Exit code of -ENOMEM means the buffer size is too small.
868  */
869 int ixgbe_aci_list_caps(struct ixgbe_hw *hw, void *buf, u16 buf_size,
870 			u32 *cap_count, enum ixgbe_aci_opc opc)
871 {
872 	struct ixgbe_aci_cmd_list_caps *cmd;
873 	struct ixgbe_aci_desc desc;
874 	int err;
875 
876 	cmd = &desc.params.get_cap;
877 
878 	if (opc != ixgbe_aci_opc_list_func_caps &&
879 	    opc != ixgbe_aci_opc_list_dev_caps)
880 		return -EINVAL;
881 
882 	ixgbe_fill_dflt_direct_cmd_desc(&desc, opc);
883 	err = ixgbe_aci_send_cmd(hw, &desc, buf, buf_size);
884 
885 	if (cap_count)
886 		*cap_count = le32_to_cpu(cmd->count);
887 
888 	return err;
889 }
890 
891 /**
892  * ixgbe_discover_dev_caps - Read and extract device capabilities
893  * @hw: pointer to the hardware structure
894  * @dev_caps: pointer to device capabilities structure
895  *
896  * Read the device capabilities and extract them into the dev_caps structure
897  * for later use.
898  *
899  * Return: the exit code of the operation.
900  */
901 int ixgbe_discover_dev_caps(struct ixgbe_hw *hw,
902 			    struct ixgbe_hw_dev_caps *dev_caps)
903 {
904 	u32 cap_count;
905 	u8 *cbuf;
906 	int err;
907 
908 	cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
909 	if (!cbuf)
910 		return -ENOMEM;
911 
912 	/* Although the driver doesn't know the number of capabilities the
913 	 * device will return, we can simply send a 4KB buffer, the maximum
914 	 * possible size that firmware can return.
915 	 */
916 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
917 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
918 
919 	err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
920 				  &cap_count,
921 				  ixgbe_aci_opc_list_dev_caps);
922 	if (!err)
923 		ixgbe_parse_dev_caps(hw, dev_caps, cbuf, cap_count);
924 
925 	kfree(cbuf);
926 
927 	return 0;
928 }
929 
930 /**
931  * ixgbe_discover_func_caps - Read and extract function capabilities
932  * @hw: pointer to the hardware structure
933  * @func_caps: pointer to function capabilities structure
934  *
935  * Read the function capabilities and extract them into the func_caps structure
936  * for later use.
937  *
938  * Return: the exit code of the operation.
939  */
940 int ixgbe_discover_func_caps(struct ixgbe_hw *hw,
941 			     struct ixgbe_hw_func_caps *func_caps)
942 {
943 	u32 cap_count;
944 	u8 *cbuf;
945 	int err;
946 
947 	cbuf = kzalloc(IXGBE_ACI_MAX_BUFFER_SIZE, GFP_KERNEL);
948 	if (!cbuf)
949 		return -ENOMEM;
950 
951 	/* Although the driver doesn't know the number of capabilities the
952 	 * device will return, we can simply send a 4KB buffer, the maximum
953 	 * possible size that firmware can return.
954 	 */
955 	cap_count = IXGBE_ACI_MAX_BUFFER_SIZE /
956 		    sizeof(struct ixgbe_aci_cmd_list_caps_elem);
957 
958 	err = ixgbe_aci_list_caps(hw, cbuf, IXGBE_ACI_MAX_BUFFER_SIZE,
959 				  &cap_count,
960 				  ixgbe_aci_opc_list_func_caps);
961 	if (!err)
962 		ixgbe_parse_func_caps(hw, func_caps, cbuf, cap_count);
963 
964 	kfree(cbuf);
965 
966 	return 0;
967 }
968 
969 /**
970  * ixgbe_get_caps - get info about the HW
971  * @hw: pointer to the hardware structure
972  *
973  * Retrieve both device and function capabilities.
974  *
975  * Return: the exit code of the operation.
976  */
977 int ixgbe_get_caps(struct ixgbe_hw *hw)
978 {
979 	int err;
980 
981 	err = ixgbe_discover_dev_caps(hw, &hw->dev_caps);
982 	if (err)
983 		return err;
984 
985 	return ixgbe_discover_func_caps(hw, &hw->func_caps);
986 }
987 
988 /**
989  * ixgbe_aci_disable_rxen - disable RX
990  * @hw: pointer to the HW struct
991  *
992  * Request a safe disable of Receive Enable using ACI command (0x000C).
993  *
994  * Return: the exit code of the operation.
995  */
996 int ixgbe_aci_disable_rxen(struct ixgbe_hw *hw)
997 {
998 	struct ixgbe_aci_cmd_disable_rxen *cmd;
999 	struct ixgbe_aci_desc desc;
1000 
1001 	cmd = &desc.params.disable_rxen;
1002 
1003 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_disable_rxen);
1004 
1005 	cmd->lport_num = hw->bus.func;
1006 
1007 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1008 }
1009 
1010 /**
1011  * ixgbe_aci_get_phy_caps - returns PHY capabilities
1012  * @hw: pointer to the HW struct
1013  * @qual_mods: report qualified modules
1014  * @report_mode: report mode capabilities
1015  * @pcaps: structure for PHY capabilities to be filled
1016  *
1017  * Returns the various PHY capabilities supported on the Port
1018  * using ACI command (0x0600).
1019  *
1020  * Return: the exit code of the operation.
1021  */
1022 int ixgbe_aci_get_phy_caps(struct ixgbe_hw *hw, bool qual_mods, u8 report_mode,
1023 			   struct ixgbe_aci_cmd_get_phy_caps_data *pcaps)
1024 {
1025 	struct ixgbe_aci_cmd_get_phy_caps *cmd;
1026 	u16 pcaps_size = sizeof(*pcaps);
1027 	struct ixgbe_aci_desc desc;
1028 	int err;
1029 
1030 	cmd = &desc.params.get_phy;
1031 
1032 	if (!pcaps || (report_mode & ~IXGBE_ACI_REPORT_MODE_M))
1033 		return -EINVAL;
1034 
1035 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_phy_caps);
1036 
1037 	if (qual_mods)
1038 		cmd->param0 |= cpu_to_le16(IXGBE_ACI_GET_PHY_RQM);
1039 
1040 	cmd->param0 |= cpu_to_le16(report_mode);
1041 	err = ixgbe_aci_send_cmd(hw, &desc, pcaps, pcaps_size);
1042 	if (!err && report_mode == IXGBE_ACI_REPORT_TOPO_CAP_MEDIA) {
1043 		hw->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
1044 		hw->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
1045 		memcpy(hw->link.link_info.module_type, &pcaps->module_type,
1046 		       sizeof(hw->link.link_info.module_type));
1047 	}
1048 
1049 	return err;
1050 }
1051 
1052 /**
1053  * ixgbe_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
1054  * @caps: PHY ability structure to copy data from
1055  * @cfg: PHY configuration structure to copy data to
1056  *
1057  * Helper function to copy data from PHY capabilities data structure
1058  * to PHY configuration data structure
1059  */
void ixgbe_copy_phy_caps_to_cfg(struct ixgbe_aci_cmd_get_phy_caps_data *caps,
				struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
{
	/* Tolerate NULL on either side; nothing to copy in that case. */
	if (!caps || !cfg)
		return;

	/* Zero the whole destination first - this also clears any padding,
	 * which matters because cfg is later passed verbatim to firmware
	 * (see ixgbe_aci_set_phy_cfg()).
	 */
	memset(cfg, 0, sizeof(*cfg));
	cfg->phy_type_low = caps->phy_type_low;
	cfg->phy_type_high = caps->phy_type_high;
	cfg->caps = caps->caps;
	cfg->low_power_ctrl_an = caps->low_power_ctrl_an;
	cfg->eee_cap = caps->eee_cap;
	cfg->eeer_value = caps->eeer_value;
	/* Note the field-name asymmetry: link_fec_options -> link_fec_opt. */
	cfg->link_fec_opt = caps->link_fec_options;
	cfg->module_compliance_enforcement =
		caps->module_compliance_enforcement;
}
1077 
1078 /**
1079  * ixgbe_aci_set_phy_cfg - set PHY configuration
1080  * @hw: pointer to the HW struct
1081  * @cfg: structure with PHY configuration data to be set
1082  *
1083  * Set the various PHY configuration parameters supported on the Port
1084  * using ACI command (0x0601).
1085  * One or more of the Set PHY config parameters may be ignored in an MFP
1086  * mode as the PF may not have the privilege to set some of the PHY Config
1087  * parameters.
1088  *
1089  * Return: the exit code of the operation.
1090  */
1091 int ixgbe_aci_set_phy_cfg(struct ixgbe_hw *hw,
1092 			  struct ixgbe_aci_cmd_set_phy_cfg_data *cfg)
1093 {
1094 	struct ixgbe_aci_desc desc;
1095 	int err;
1096 
1097 	if (!cfg)
1098 		return -EINVAL;
1099 
1100 	/* Ensure that only valid bits of cfg->caps can be turned on. */
1101 	cfg->caps &= IXGBE_ACI_PHY_ENA_VALID_MASK;
1102 
1103 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_phy_cfg);
1104 	desc.params.set_phy.lport_num = hw->bus.func;
1105 	desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
1106 
1107 	err = ixgbe_aci_send_cmd(hw, &desc, cfg, sizeof(*cfg));
1108 	if (!err)
1109 		hw->phy.curr_user_phy_cfg = *cfg;
1110 
1111 	return err;
1112 }
1113 
1114 /**
1115  * ixgbe_aci_set_link_restart_an - set up link and restart AN
1116  * @hw: pointer to the HW struct
1117  * @ena_link: if true: enable link, if false: disable link
1118  *
1119  * Function sets up the link and restarts the Auto-Negotiation over the link.
1120  *
1121  * Return: the exit code of the operation.
1122  */
1123 int ixgbe_aci_set_link_restart_an(struct ixgbe_hw *hw, bool ena_link)
1124 {
1125 	struct ixgbe_aci_cmd_restart_an *cmd;
1126 	struct ixgbe_aci_desc desc;
1127 
1128 	cmd = &desc.params.restart_an;
1129 
1130 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_restart_an);
1131 
1132 	cmd->cmd_flags = IXGBE_ACI_RESTART_AN_LINK_RESTART;
1133 	cmd->lport_num = hw->bus.func;
1134 	if (ena_link)
1135 		cmd->cmd_flags |= IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1136 	else
1137 		cmd->cmd_flags &= ~IXGBE_ACI_RESTART_AN_LINK_ENABLE;
1138 
1139 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1140 }
1141 
1142 /**
1143  * ixgbe_is_media_cage_present - check if media cage is present
1144  * @hw: pointer to the HW struct
1145  *
1146  * Identify presence of media cage using the ACI command (0x06E0).
1147  *
1148  * Return: true if media cage is present, else false. If no cage, then
1149  * media type is backplane or BASE-T.
1150  */
1151 static bool ixgbe_is_media_cage_present(struct ixgbe_hw *hw)
1152 {
1153 	struct ixgbe_aci_cmd_get_link_topo *cmd;
1154 	struct ixgbe_aci_desc desc;
1155 
1156 	cmd = &desc.params.get_link_topo;
1157 
1158 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
1159 
1160 	cmd->addr.topo_params.node_type_ctx =
1161 		FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_CTX_M,
1162 			   IXGBE_ACI_LINK_TOPO_NODE_CTX_PORT);
1163 
1164 	/* Set node type. */
1165 	cmd->addr.topo_params.node_type_ctx |=
1166 		FIELD_PREP(IXGBE_ACI_LINK_TOPO_NODE_TYPE_M,
1167 			   IXGBE_ACI_LINK_TOPO_NODE_TYPE_CAGE);
1168 
1169 	/* Node type cage can be used to determine if cage is present. If AQC
1170 	 * returns error (ENOENT), then no cage present. If no cage present then
1171 	 * connection type is backplane or BASE-T.
1172 	 */
1173 	return !ixgbe_aci_get_netlist_node(hw, cmd, NULL, NULL);
1174 }
1175 
1176 /**
1177  * ixgbe_get_media_type_from_phy_type - Gets media type based on phy type
1178  * @hw: pointer to the HW struct
1179  *
1180  * Try to identify the media type based on the phy type.
1181  * If more than one media type, the ixgbe_media_type_unknown is returned.
1182  * First, phy_type_low is checked, then phy_type_high.
1183  * If none are identified, the ixgbe_media_type_unknown is returned
1184  *
1185  * Return: type of a media based on phy type in form of enum.
1186  */
static enum ixgbe_media_type
ixgbe_get_media_type_from_phy_type(struct ixgbe_hw *hw)
{
	struct ixgbe_link_status *hw_link_info;

	if (!hw)
		return ixgbe_media_type_unknown;

	hw_link_info = &hw->link.link_info;
	if (hw_link_info->phy_type_low && hw_link_info->phy_type_high)
		/* If more than one media type is selected, report unknown */
		return ixgbe_media_type_unknown;

	if (hw_link_info->phy_type_low) {
		/* 1G SGMII is a special case where some DA cable PHYs
		 * may show this as an option when it really shouldn't
		 * be since SGMII is meant to be between a MAC and a PHY
		 * in a backplane. Try to detect this case and handle it
		 */
		if (hw_link_info->phy_type_low == IXGBE_PHY_TYPE_LOW_1G_SGMII &&
		    (hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE ||
		    hw_link_info->module_type[IXGBE_ACI_MOD_TYPE_IDENT] ==
		    IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE))
			return ixgbe_media_type_da;

		/* Map the reported PHY type onto a media class. */
		switch (hw_link_info->phy_type_low) {
		case IXGBE_PHY_TYPE_LOW_1000BASE_SX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_LX:
		case IXGBE_PHY_TYPE_LOW_10GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_LR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_SR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_LR:
			return ixgbe_media_type_fiber;
		/* AOC/ACC assemblies are also reported as fiber media. */
		case IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
		case IXGBE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
			return ixgbe_media_type_fiber;
		case IXGBE_PHY_TYPE_LOW_100BASE_TX:
		case IXGBE_PHY_TYPE_LOW_1000BASE_T:
		case IXGBE_PHY_TYPE_LOW_2500BASE_T:
		case IXGBE_PHY_TYPE_LOW_5GBASE_T:
		case IXGBE_PHY_TYPE_LOW_10GBASE_T:
		case IXGBE_PHY_TYPE_LOW_25GBASE_T:
			return ixgbe_media_type_copper;
		case IXGBE_PHY_TYPE_LOW_10G_SFI_DA:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR_S:
		case IXGBE_PHY_TYPE_LOW_25GBASE_CR1:
			return ixgbe_media_type_da;
		/* 25G AUI C2C is AUI only when a media cage is present,
		 * otherwise it is treated like the backplane types below.
		 */
		case IXGBE_PHY_TYPE_LOW_25G_AUI_C2C:
			if (ixgbe_is_media_cage_present(hw))
				return ixgbe_media_type_aui;
			fallthrough;
		case IXGBE_PHY_TYPE_LOW_1000BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_KX:
		case IXGBE_PHY_TYPE_LOW_2500BASE_X:
		case IXGBE_PHY_TYPE_LOW_5GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1:
		case IXGBE_PHY_TYPE_LOW_10G_SFI_C2C:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR1:
		case IXGBE_PHY_TYPE_LOW_25GBASE_KR_S:
			return ixgbe_media_type_backplane;
		}
	} else {
		/* Only phy_type_high is set; the single recognized type
		 * here is 10BASE-T copper.
		 */
		switch (hw_link_info->phy_type_high) {
		case IXGBE_PHY_TYPE_HIGH_10BASE_T:
			return ixgbe_media_type_copper;
		}
	}
	return ixgbe_media_type_unknown;
}
1259 
1260 /**
1261  * ixgbe_update_link_info - update status of the HW network link
1262  * @hw: pointer to the HW struct
1263  *
1264  * Update the status of the HW network link.
1265  *
1266  * Return: the exit code of the operation.
1267  */
1268 int ixgbe_update_link_info(struct ixgbe_hw *hw)
1269 {
1270 	struct ixgbe_aci_cmd_get_phy_caps_data *pcaps;
1271 	struct ixgbe_link_status *li;
1272 	int err;
1273 
1274 	if (!hw)
1275 		return -EINVAL;
1276 
1277 	li = &hw->link.link_info;
1278 
1279 	err = ixgbe_aci_get_link_info(hw, true, NULL);
1280 	if (err)
1281 		return err;
1282 
1283 	if (!(li->link_info & IXGBE_ACI_MEDIA_AVAILABLE))
1284 		return 0;
1285 
1286 	pcaps =	kzalloc(sizeof(*pcaps), GFP_KERNEL);
1287 	if (!pcaps)
1288 		return -ENOMEM;
1289 
1290 	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
1291 				     pcaps);
1292 
1293 	if (!err)
1294 		memcpy(li->module_type, &pcaps->module_type,
1295 		       sizeof(li->module_type));
1296 
1297 	kfree(pcaps);
1298 
1299 	return err;
1300 }
1301 
1302 /**
1303  * ixgbe_get_link_status - get status of the HW network link
1304  * @hw: pointer to the HW struct
1305  * @link_up: pointer to bool (true/false = linkup/linkdown)
1306  *
1307  * Variable link_up is true if link is up, false if link is down.
1308  * The variable link_up is invalid if status is non zero. As a
1309  * result of this call, link status reporting becomes enabled
1310  *
1311  * Return: the exit code of the operation.
1312  */
1313 int ixgbe_get_link_status(struct ixgbe_hw *hw, bool *link_up)
1314 {
1315 	if (!hw || !link_up)
1316 		return -EINVAL;
1317 
1318 	if (hw->link.get_link_info) {
1319 		int err = ixgbe_update_link_info(hw);
1320 
1321 		if (err)
1322 			return err;
1323 	}
1324 
1325 	*link_up = hw->link.link_info.link_info & IXGBE_ACI_LINK_UP;
1326 
1327 	return 0;
1328 }
1329 
1330 /**
1331  * ixgbe_aci_get_link_info - get the link status
1332  * @hw: pointer to the HW struct
1333  * @ena_lse: enable/disable LinkStatusEvent reporting
1334  * @link: pointer to link status structure - optional
1335  *
1336  * Get the current Link Status using ACI command (0x607).
1337  * The current link can be optionally provided to update
1338  * the status.
1339  *
1340  * Return: the link status of the adapter.
1341  */
int ixgbe_aci_get_link_info(struct ixgbe_hw *hw, bool ena_lse,
			    struct ixgbe_link_status *link)
{
	struct ixgbe_aci_cmd_get_link_status_data link_data = {};
	struct ixgbe_aci_cmd_get_link_status *resp;
	struct ixgbe_link_status *li_old, *li;
	struct ixgbe_fc_info *hw_fc_info;
	struct ixgbe_aci_desc desc;
	bool tx_pause, rx_pause;
	u8 cmd_flags;
	int err;

	if (!hw)
		return -EINVAL;

	li_old = &hw->link.link_info_old;
	li = &hw->link.link_info;
	hw_fc_info = &hw->fc;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_status);
	/* The LSE flag asks FW to enable/disable link status events. */
	cmd_flags = (ena_lse) ? IXGBE_ACI_LSE_ENA : IXGBE_ACI_LSE_DIS;
	resp = &desc.params.get_link_status;
	resp->cmd_flags = cpu_to_le16(cmd_flags);
	resp->lport_num = hw->bus.func;

	err = ixgbe_aci_send_cmd(hw, &desc, &link_data, sizeof(link_data));
	if (err)
		return err;

	/* Save off old link status information. */
	*li_old = *li;

	/* Update current link status information, converting the
	 * little-endian wire format to host order field by field.
	 */
	li->link_speed = le16_to_cpu(link_data.link_speed);
	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
	li->link_info = link_data.link_info;
	li->link_cfg_err = link_data.link_cfg_err;
	li->an_info = link_data.an_info;
	li->ext_info = link_data.ext_info;
	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
	li->fec_info = link_data.cfg & IXGBE_ACI_FEC_MASK;
	li->topo_media_conflict = link_data.topo_media_conflict;
	li->pacing = link_data.cfg & (IXGBE_ACI_CFG_PACING_M |
				      IXGBE_ACI_CFG_PACING_TYPE_M);

	/* Update fc info: derive the current FC mode from the negotiated
	 * TX/RX pause bits reported by FW.
	 */
	tx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_TX);
	rx_pause = !!(link_data.an_info & IXGBE_ACI_LINK_PAUSE_RX);
	if (tx_pause && rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_full;
	else if (tx_pause)
		hw_fc_info->current_mode = ixgbe_fc_tx_pause;
	else if (rx_pause)
		hw_fc_info->current_mode = ixgbe_fc_rx_pause;
	else
		hw_fc_info->current_mode = ixgbe_fc_none;

	/* FW echoes back whether LSE actually ended up enabled. */
	li->lse_ena = !!(le16_to_cpu(resp->cmd_flags) &
			 IXGBE_ACI_LSE_IS_ENABLED);

	/* Save link status information. */
	if (link)
		*link = *li;

	/* Flag cleared so calling functions don't call AQ again. */
	hw->link.get_link_info = false;

	return 0;
}
1412 
1413 /**
1414  * ixgbe_aci_set_event_mask - set event mask
1415  * @hw: pointer to the HW struct
1416  * @port_num: port number of the physical function
1417  * @mask: event mask to be set
1418  *
1419  * Set the event mask using ACI command (0x0613).
1420  *
1421  * Return: the exit code of the operation.
1422  */
1423 int ixgbe_aci_set_event_mask(struct ixgbe_hw *hw, u8 port_num, u16 mask)
1424 {
1425 	struct ixgbe_aci_cmd_set_event_mask *cmd;
1426 	struct ixgbe_aci_desc desc;
1427 
1428 	cmd = &desc.params.set_event_mask;
1429 
1430 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_event_mask);
1431 
1432 	cmd->lport_num = port_num;
1433 
1434 	cmd->event_mask = cpu_to_le16(mask);
1435 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1436 }
1437 
1438 /**
1439  * ixgbe_configure_lse - enable/disable link status events
1440  * @hw: pointer to the HW struct
1441  * @activate: true for enable lse, false otherwise
1442  * @mask: event mask to be set; a set bit means deactivation of the
1443  * corresponding event
1444  *
1445  * Set the event mask and then enable or disable link status events
1446  *
1447  * Return: the exit code of the operation.
1448  */
1449 int ixgbe_configure_lse(struct ixgbe_hw *hw, bool activate, u16 mask)
1450 {
1451 	int err;
1452 
1453 	err = ixgbe_aci_set_event_mask(hw, (u8)hw->bus.func, mask);
1454 	if (err)
1455 		return err;
1456 
1457 	/* Enabling link status events generation by fw. */
1458 	return ixgbe_aci_get_link_info(hw, activate, NULL);
1459 }
1460 
1461 /**
1462  * ixgbe_start_hw_e610 - Prepare hardware for Tx/Rx
1463  * @hw: pointer to hardware structure
1464  *
1465  * Get firmware version and start the hardware using the generic
1466  * start_hw() and ixgbe_start_hw_gen2() functions.
1467  *
1468  * Return: the exit code of the operation.
1469  */
static int ixgbe_start_hw_e610(struct ixgbe_hw *hw)
{
	int err;

	/* Cache the running FW version, then run the generic init. */
	err = ixgbe_aci_get_fw_ver(hw);
	if (!err)
		err = ixgbe_start_hw_generic(hw);
	if (err)
		return err;

	ixgbe_start_hw_gen2(hw);

	return 0;
}
1486 
1487 /**
1488  * ixgbe_aci_set_port_id_led - set LED value for the given port
1489  * @hw: pointer to the HW struct
1490  * @orig_mode: set LED original mode
1491  *
1492  * Set LED value for the given port (0x06E9)
1493  *
1494  * Return: the exit code of the operation.
1495  */
1496 int ixgbe_aci_set_port_id_led(struct ixgbe_hw *hw, bool orig_mode)
1497 {
1498 	struct ixgbe_aci_cmd_set_port_id_led *cmd;
1499 	struct ixgbe_aci_desc desc;
1500 
1501 	cmd = &desc.params.set_port_id_led;
1502 
1503 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_set_port_id_led);
1504 
1505 	cmd->lport_num = (u8)hw->bus.func;
1506 	cmd->lport_num_valid = IXGBE_ACI_PORT_ID_PORT_NUM_VALID;
1507 
1508 	if (orig_mode)
1509 		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_ORIG;
1510 	else
1511 		cmd->ident_mode = IXGBE_ACI_PORT_IDENT_LED_BLINK;
1512 
1513 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
1514 }
1515 
1516 /**
1517  * ixgbe_get_media_type_e610 - Gets media type
1518  * @hw: pointer to the HW struct
1519  *
1520  * In order to get the media type, the function gets PHY
1521  * capabilities and later on use them to identify the PHY type
1522  * checking phy_type_high and phy_type_low.
1523  *
1524  * Return: the type of media in form of ixgbe_media_type enum
1525  * or ixgbe_media_type_unknown in case of an error.
1526  */
enum ixgbe_media_type ixgbe_get_media_type_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	int rc;

	/* Refresh cached link status/PHY types before classifying. */
	rc = ixgbe_update_link_info(hw);
	if (rc)
		return ixgbe_media_type_unknown;

	/* If there is no link but PHY (dongle) is available SW should use
	 * Get PHY Caps admin command instead of Get Link Status, find most
	 * significant bit that is set in PHY types reported by the command
	 * and use it to discover media type.
	 */
	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP) &&
	    (hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE)) {
		int highest_bit;

		/* Get PHY Capabilities */
		rc = ixgbe_aci_get_phy_caps(hw, false,
					    IXGBE_ACI_REPORT_TOPO_CAP_MEDIA,
					    &pcaps);
		if (rc)
			return ixgbe_media_type_unknown;

		/* Prefer phy_type_high; keep exactly one bit set overall so
		 * that ixgbe_get_media_type_from_phy_type() sees a single
		 * unambiguous PHY type.
		 */
		highest_bit = fls64(le64_to_cpu(pcaps.phy_type_high));
		if (highest_bit) {
			hw->link.link_info.phy_type_high =
				BIT_ULL(highest_bit - 1);
			hw->link.link_info.phy_type_low = 0;
		} else {
			highest_bit = fls64(le64_to_cpu(pcaps.phy_type_low));
			if (highest_bit) {
				hw->link.link_info.phy_type_low =
					BIT_ULL(highest_bit - 1);
				hw->link.link_info.phy_type_high = 0;
			}
		}
	}

	/* Based on link status or search above try to discover media type. */
	hw->phy.media_type = ixgbe_get_media_type_from_phy_type(hw);

	return hw->phy.media_type;
}
1572 
1573 /**
1574  * ixgbe_setup_link_e610 - Set up link
1575  * @hw: pointer to hardware structure
1576  * @speed: new link speed
1577  * @autoneg_wait: true when waiting for completion is needed
1578  *
1579  * Set up the link with the specified speed.
1580  *
1581  * Return: the exit code of the operation.
1582  */
1583 int ixgbe_setup_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed speed,
1584 			  bool autoneg_wait)
1585 {
1586 	/* Simply request FW to perform proper PHY setup */
1587 	return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait);
1588 }
1589 
1590 /**
1591  * ixgbe_check_link_e610 - Determine link and speed status
1592  * @hw: pointer to hardware structure
1593  * @speed: pointer to link speed
1594  * @link_up: true when link is up
1595  * @link_up_wait_to_complete: bool used to wait for link up or not
1596  *
1597  * Determine if the link is up and the current link speed
1598  * using ACI command (0x0607).
1599  *
1600  * Return: the exit code of the operation.
1601  */
1602 int ixgbe_check_link_e610(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
1603 			  bool *link_up, bool link_up_wait_to_complete)
1604 {
1605 	int err;
1606 	u32 i;
1607 
1608 	if (!speed || !link_up)
1609 		return -EINVAL;
1610 
1611 	/* Set get_link_info flag to ensure that fresh
1612 	 * link information will be obtained from FW
1613 	 * by sending Get Link Status admin command.
1614 	 */
1615 	hw->link.get_link_info = true;
1616 
1617 	/* Update link information in adapter context. */
1618 	err = ixgbe_get_link_status(hw, link_up);
1619 	if (err)
1620 		return err;
1621 
1622 	/* Wait for link up if it was requested. */
1623 	if (link_up_wait_to_complete && !(*link_up)) {
1624 		for (i = 0; i < hw->mac.max_link_up_time; i++) {
1625 			msleep(100);
1626 			hw->link.get_link_info = true;
1627 			err = ixgbe_get_link_status(hw, link_up);
1628 			if (err)
1629 				return err;
1630 			if (*link_up)
1631 				break;
1632 		}
1633 	}
1634 
1635 	/* Use link information in adapter context updated by the call
1636 	 * to ixgbe_get_link_status() to determine current link speed.
1637 	 * Link speed information is valid only when link up was
1638 	 * reported by FW.
1639 	 */
1640 	if (*link_up) {
1641 		switch (hw->link.link_info.link_speed) {
1642 		case IXGBE_ACI_LINK_SPEED_10MB:
1643 			*speed = IXGBE_LINK_SPEED_10_FULL;
1644 			break;
1645 		case IXGBE_ACI_LINK_SPEED_100MB:
1646 			*speed = IXGBE_LINK_SPEED_100_FULL;
1647 			break;
1648 		case IXGBE_ACI_LINK_SPEED_1000MB:
1649 			*speed = IXGBE_LINK_SPEED_1GB_FULL;
1650 			break;
1651 		case IXGBE_ACI_LINK_SPEED_2500MB:
1652 			*speed = IXGBE_LINK_SPEED_2_5GB_FULL;
1653 			break;
1654 		case IXGBE_ACI_LINK_SPEED_5GB:
1655 			*speed = IXGBE_LINK_SPEED_5GB_FULL;
1656 			break;
1657 		case IXGBE_ACI_LINK_SPEED_10GB:
1658 			*speed = IXGBE_LINK_SPEED_10GB_FULL;
1659 			break;
1660 		default:
1661 			*speed = IXGBE_LINK_SPEED_UNKNOWN;
1662 			break;
1663 		}
1664 	} else {
1665 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
1666 	}
1667 
1668 	return 0;
1669 }
1670 
1671 /**
1672  * ixgbe_get_link_capabilities_e610 - Determine link capabilities
1673  * @hw: pointer to hardware structure
1674  * @speed: pointer to link speed
1675  * @autoneg: true when autoneg or autotry is enabled
1676  *
1677  * Determine speed and AN parameters of a link.
1678  *
1679  * Return: the exit code of the operation.
1680  */
1681 int ixgbe_get_link_capabilities_e610(struct ixgbe_hw *hw,
1682 				     ixgbe_link_speed *speed,
1683 				     bool *autoneg)
1684 {
1685 	if (!speed || !autoneg)
1686 		return -EINVAL;
1687 
1688 	*autoneg = true;
1689 	*speed = hw->phy.speeds_supported;
1690 
1691 	return 0;
1692 }
1693 
1694 /**
1695  * ixgbe_cfg_phy_fc - Configure PHY Flow Control (FC) data based on FC mode
1696  * @hw: pointer to hardware structure
1697  * @cfg: PHY configuration data to set FC mode
1698  * @req_mode: FC mode to configure
1699  *
1700  * Configures PHY Flow Control according to the provided configuration.
1701  *
1702  * Return: the exit code of the operation.
1703  */
1704 int ixgbe_cfg_phy_fc(struct ixgbe_hw *hw,
1705 		     struct ixgbe_aci_cmd_set_phy_cfg_data *cfg,
1706 		     enum ixgbe_fc_mode req_mode)
1707 {
1708 	u8 pause_mask = 0x0;
1709 
1710 	if (!cfg)
1711 		return -EINVAL;
1712 
1713 	switch (req_mode) {
1714 	case ixgbe_fc_full:
1715 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
1716 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
1717 		break;
1718 	case ixgbe_fc_rx_pause:
1719 		pause_mask |= IXGBE_ACI_PHY_EN_RX_LINK_PAUSE;
1720 		break;
1721 	case ixgbe_fc_tx_pause:
1722 		pause_mask |= IXGBE_ACI_PHY_EN_TX_LINK_PAUSE;
1723 		break;
1724 	default:
1725 		break;
1726 	}
1727 
1728 	/* Clear the old pause settings. */
1729 	cfg->caps &= ~(IXGBE_ACI_PHY_EN_TX_LINK_PAUSE |
1730 		IXGBE_ACI_PHY_EN_RX_LINK_PAUSE);
1731 
1732 	/* Set the new capabilities. */
1733 	cfg->caps |= pause_mask;
1734 
1735 	return 0;
1736 }
1737 
1738 /**
1739  * ixgbe_setup_fc_e610 - Set up flow control
1740  * @hw: pointer to hardware structure
1741  *
1742  * Set up flow control. This has to be done during init time.
1743  *
1744  * Return: the exit code of the operation.
1745  */
1746 int ixgbe_setup_fc_e610(struct ixgbe_hw *hw)
1747 {
1748 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps = {};
1749 	struct ixgbe_aci_cmd_set_phy_cfg_data cfg = {};
1750 	int err;
1751 
1752 	/* Get the current PHY config */
1753 	err = ixgbe_aci_get_phy_caps(hw, false,
1754 				     IXGBE_ACI_REPORT_ACTIVE_CFG, &pcaps);
1755 	if (err)
1756 		return err;
1757 
1758 	ixgbe_copy_phy_caps_to_cfg(&pcaps, &cfg);
1759 
1760 	/* Configure the set PHY data */
1761 	err = ixgbe_cfg_phy_fc(hw, &cfg, hw->fc.requested_mode);
1762 	if (err)
1763 		return err;
1764 
1765 	/* If the capabilities have changed, then set the new config */
1766 	if (cfg.caps != pcaps.caps) {
1767 		cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
1768 
1769 		err = ixgbe_aci_set_phy_cfg(hw, &cfg);
1770 		if (err)
1771 			return err;
1772 	}
1773 
1774 	return err;
1775 }
1776 
1777 /**
1778  * ixgbe_fc_autoneg_e610 - Configure flow control
1779  * @hw: pointer to hardware structure
1780  *
1781  * Configure Flow Control.
1782  */
1783 void ixgbe_fc_autoneg_e610(struct ixgbe_hw *hw)
1784 {
1785 	int err;
1786 
1787 	/* Get current link err.
1788 	 * Current FC mode will be stored in the hw context.
1789 	 */
1790 	err = ixgbe_aci_get_link_info(hw, false, NULL);
1791 	if (err)
1792 		goto no_autoneg;
1793 
1794 	/* Check if the link is up */
1795 	if (!(hw->link.link_info.link_info & IXGBE_ACI_LINK_UP))
1796 		goto no_autoneg;
1797 
1798 	/* Check if auto-negotiation has completed */
1799 	if (!(hw->link.link_info.an_info & IXGBE_ACI_AN_COMPLETED))
1800 		goto no_autoneg;
1801 
1802 	hw->fc.fc_was_autonegged = true;
1803 	return;
1804 
1805 no_autoneg:
1806 	hw->fc.fc_was_autonegged = false;
1807 	hw->fc.current_mode = hw->fc.requested_mode;
1808 }
1809 
1810 /**
1811  * ixgbe_disable_rx_e610 - Disable RX unit
1812  * @hw: pointer to hardware structure
1813  *
1814  * Disable RX DMA unit on E610 with use of ACI command (0x000C).
1815  *
1816  * Return: the exit code of the operation.
1817  */
void ixgbe_disable_rx_e610(struct ixgbe_hw *hw)
{
	u32 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	u32 pfdtxgswc;
	int err;

	/* RX already disabled - nothing to do. */
	if (!(rxctrl & IXGBE_RXCTRL_RXEN))
		return;

	/* Turn off VT loopback and record its previous state in
	 * hw->mac.set_lben (presumably restored when RX is re-enabled -
	 * the enable path is not visible in this file).
	 */
	pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
	if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) {
		pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN;
		IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc);
		hw->mac.set_lben = true;
	} else {
		hw->mac.set_lben = false;
	}

	/* Request a safe RX disable from FW via the ACI. */
	err = ixgbe_aci_disable_rxen(hw);

	/* If we fail - disable RX using register write */
	if (err) {
		rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
		if (rxctrl & IXGBE_RXCTRL_RXEN) {
			rxctrl &= ~IXGBE_RXCTRL_RXEN;
			IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl);
		}
	}
}
1847 
1848 /**
1849  * ixgbe_fw_recovery_mode_e610 - Check FW NVM recovery mode
1850  * @hw: pointer to hardware structure
1851  *
1852  * Check FW NVM recovery mode by reading the value of
1853  * the dedicated register.
1854  *
1855  * Return: true if FW is in recovery mode, otherwise false.
1856  */
1857 static bool ixgbe_fw_recovery_mode_e610(struct ixgbe_hw *hw)
1858 {
1859 	u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
1860 
1861 	return !!(fwsm & IXGBE_GL_MNG_FWSM_RECOVERY_M);
1862 }
1863 
1864 /**
1865  * ixgbe_fw_rollback_mode_e610 - Check FW NVM rollback mode
1866  * @hw: pointer to hardware structure
1867  *
1868  * Check FW NVM rollback mode by reading the value of
1869  * the dedicated register.
1870  *
1871  * Return: true if FW is in rollback mode, otherwise false.
1872  */
1873 static bool ixgbe_fw_rollback_mode_e610(struct ixgbe_hw *hw)
1874 {
1875 	u32 fwsm = IXGBE_READ_REG(hw, IXGBE_GL_MNG_FWSM);
1876 
1877 	return !!(fwsm & IXGBE_GL_MNG_FWSM_ROLLBACK_M);
1878 }
1879 
1880 /**
1881  * ixgbe_init_phy_ops_e610 - PHY specific init
1882  * @hw: pointer to hardware structure
1883  *
1884  * Initialize any function pointers that were not able to be
1885  * set during init_shared_code because the PHY type was not known.
1886  *
1887  * Return: the exit code of the operation.
1888  */
1889 int ixgbe_init_phy_ops_e610(struct ixgbe_hw *hw)
1890 {
1891 	struct ixgbe_mac_info *mac = &hw->mac;
1892 	struct ixgbe_phy_info *phy = &hw->phy;
1893 
1894 	if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper)
1895 		phy->ops.set_phy_power = ixgbe_set_phy_power_e610;
1896 	else
1897 		phy->ops.set_phy_power = NULL;
1898 
1899 	/* Identify the PHY */
1900 	return phy->ops.identify(hw);
1901 }
1902 
1903 /**
1904  * ixgbe_identify_phy_e610 - Identify PHY
1905  * @hw: pointer to hardware structure
1906  *
1907  * Determine PHY type, supported speeds and PHY ID.
1908  *
1909  * Return: the exit code of the operation.
1910  */
int ixgbe_identify_phy_e610(struct ixgbe_hw *hw)
{
	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
	u64 phy_type_low, phy_type_high;
	int err;

	/* Set PHY type - E610 PHYs are managed by firmware. */
	hw->phy.type = ixgbe_phy_fw;

	err = ixgbe_aci_get_phy_caps(hw, false,
				     IXGBE_ACI_REPORT_TOPO_CAP_MEDIA, &pcaps);
	if (err)
		return err;

	if (!(pcaps.module_compliance_enforcement &
	      IXGBE_ACI_MOD_ENFORCE_STRICT_MODE)) {
		/* Handle lenient mode: re-query the caps without the media
		 * constraint.
		 */
		err = ixgbe_aci_get_phy_caps(hw, false,
					     IXGBE_ACI_REPORT_TOPO_CAP_NO_MEDIA,
					     &pcaps);
		if (err)
			return err;
	}

	/* Determine supported speeds from the reported PHY type bitmaps. */
	hw->phy.speeds_supported = IXGBE_LINK_SPEED_UNKNOWN;
	phy_type_high = le64_to_cpu(pcaps.phy_type_high);
	phy_type_low = le64_to_cpu(pcaps.phy_type_low);

	if (phy_type_high & IXGBE_PHY_TYPE_HIGH_10BASE_T ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10M_SGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10_FULL;
	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_100BASE_TX ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_100M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_100M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL;
	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_T  ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_SX ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_LX ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1000BASE_KX ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_1G_SGMII    ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_1G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL;
	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_T       ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_DA      ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_SR      ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_LR      ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1  ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_10G_SFI_C2C     ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_10G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL;

	/* 2.5 and 5 Gbps link speeds must be excluded from the
	 * auto-negotiation set used during driver initialization due to
	 * compatibility issues with certain switches. Those issues do not
	 * exist in case of E610 2.5G SKU device (0x57b1).
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id != IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_T   ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_X   ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_2500BASE_KX  ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_SGMII ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_2500M_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_2_5GB_FULL;

	/* For the 2.5G SKU the advertised set is taken after 2.5G has been
	 * added to the supported speeds.
	 */
	if (!hw->phy.autoneg_advertised &&
	    hw->device_id == IXGBE_DEV_ID_E610_2_5G_T)
		hw->phy.autoneg_advertised = hw->phy.speeds_supported;

	if (phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_T  ||
	    phy_type_low  & IXGBE_PHY_TYPE_LOW_5GBASE_KR ||
	    phy_type_high & IXGBE_PHY_TYPE_HIGH_5G_USXGMII)
		hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL;

	/* Set PHY ID from the first 4 bytes of the reported OUI. */
	memcpy(&hw->phy.id, pcaps.phy_id_oui, sizeof(u32));

	hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_10_FULL |
				       IXGBE_LINK_SPEED_100_FULL |
				       IXGBE_LINK_SPEED_1GB_FULL;
	hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported;

	return 0;
}
1999 
2000 /**
2001  * ixgbe_identify_module_e610 - Identify SFP module type
2002  * @hw: pointer to hardware structure
2003  *
2004  * Identify the SFP module type.
2005  *
2006  * Return: the exit code of the operation.
2007  */
2008 int ixgbe_identify_module_e610(struct ixgbe_hw *hw)
2009 {
2010 	bool media_available;
2011 	u8 module_type;
2012 	int err;
2013 
2014 	err = ixgbe_update_link_info(hw);
2015 	if (err)
2016 		return err;
2017 
2018 	media_available =
2019 		(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE);
2020 
2021 	if (media_available) {
2022 		hw->phy.sfp_type = ixgbe_sfp_type_unknown;
2023 
2024 		/* Get module type from hw context updated by
2025 		 * ixgbe_update_link_info()
2026 		 */
2027 		module_type = hw->link.link_info.module_type[IXGBE_ACI_MOD_TYPE_IDENT];
2028 
2029 		if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_PASSIVE) ||
2030 		    (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_SFP_PLUS_CU_ACTIVE)) {
2031 			hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
2032 		} else if (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_SR) {
2033 			hw->phy.sfp_type = ixgbe_sfp_type_sr;
2034 		} else if ((module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LR) ||
2035 			   (module_type & IXGBE_ACI_MOD_TYPE_BYTE1_10G_BASE_LRM)) {
2036 			hw->phy.sfp_type = ixgbe_sfp_type_lr;
2037 		}
2038 	} else {
2039 		hw->phy.sfp_type = ixgbe_sfp_type_not_present;
2040 		return -ENOENT;
2041 	}
2042 
2043 	return 0;
2044 }
2045 
2046 /**
2047  * ixgbe_setup_phy_link_e610 - Sets up firmware-controlled PHYs
2048  * @hw: pointer to hardware structure
2049  *
2050  * Set the parameters for the firmware-controlled PHYs.
2051  *
2052  * Return: the exit code of the operation.
2053  */
2054 int ixgbe_setup_phy_link_e610(struct ixgbe_hw *hw)
2055 {
2056 	struct ixgbe_aci_cmd_get_phy_caps_data pcaps;
2057 	struct ixgbe_aci_cmd_set_phy_cfg_data pcfg;
2058 	u8 rmode = IXGBE_ACI_REPORT_TOPO_CAP_MEDIA;
2059 	u64 sup_phy_type_low, sup_phy_type_high;
2060 	u64 phy_type_low = 0, phy_type_high = 0;
2061 	int err;
2062 
2063 	err = ixgbe_aci_get_link_info(hw, false, NULL);
2064 	if (err)
2065 		return err;
2066 
2067 	/* If media is not available get default config. */
2068 	if (!(hw->link.link_info.link_info & IXGBE_ACI_MEDIA_AVAILABLE))
2069 		rmode = IXGBE_ACI_REPORT_DFLT_CFG;
2070 
2071 	err = ixgbe_aci_get_phy_caps(hw, false, rmode, &pcaps);
2072 	if (err)
2073 		return err;
2074 
2075 	sup_phy_type_low = le64_to_cpu(pcaps.phy_type_low);
2076 	sup_phy_type_high = le64_to_cpu(pcaps.phy_type_high);
2077 
2078 	/* Get Active configuration to avoid unintended changes. */
2079 	err = ixgbe_aci_get_phy_caps(hw, false, IXGBE_ACI_REPORT_ACTIVE_CFG,
2080 				     &pcaps);
2081 	if (err)
2082 		return err;
2083 
2084 	ixgbe_copy_phy_caps_to_cfg(&pcaps, &pcfg);
2085 
2086 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) {
2087 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10BASE_T;
2088 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10M_SGMII;
2089 	}
2090 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) {
2091 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_100BASE_TX;
2092 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_100M_SGMII;
2093 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_100M_USXGMII;
2094 	}
2095 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
2096 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_T;
2097 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_SX;
2098 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_LX;
2099 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1000BASE_KX;
2100 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_1G_SGMII;
2101 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_1G_USXGMII;
2102 	}
2103 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) {
2104 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_T;
2105 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_X;
2106 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_2500BASE_KX;
2107 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_SGMII;
2108 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_2500M_USXGMII;
2109 	}
2110 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) {
2111 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_T;
2112 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_5GBASE_KR;
2113 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_5G_USXGMII;
2114 	}
2115 	if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) {
2116 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_T;
2117 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_DA;
2118 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_SR;
2119 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_LR;
2120 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10GBASE_KR_CR1;
2121 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_AOC_ACC;
2122 		phy_type_low  |= IXGBE_PHY_TYPE_LOW_10G_SFI_C2C;
2123 		phy_type_high |= IXGBE_PHY_TYPE_HIGH_10G_USXGMII;
2124 	}
2125 
2126 	/* Mask the set values to avoid requesting unsupported link types. */
2127 	phy_type_low &= sup_phy_type_low;
2128 	pcfg.phy_type_low = cpu_to_le64(phy_type_low);
2129 	phy_type_high &= sup_phy_type_high;
2130 	pcfg.phy_type_high = cpu_to_le64(phy_type_high);
2131 
2132 	if (pcfg.phy_type_high != pcaps.phy_type_high ||
2133 	    pcfg.phy_type_low != pcaps.phy_type_low ||
2134 	    pcfg.caps != pcaps.caps) {
2135 		pcfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
2136 		pcfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
2137 
2138 		err = ixgbe_aci_set_phy_cfg(hw, &pcfg);
2139 		if (err)
2140 			return err;
2141 	}
2142 
2143 	return 0;
2144 }
2145 
2146 /**
2147  * ixgbe_set_phy_power_e610 - Control power for copper PHY
2148  * @hw: pointer to hardware structure
2149  * @on: true for on, false for off
2150  *
2151  * Set the power on/off of the PHY
2152  * by getting its capabilities and setting the appropriate
2153  * configuration parameters.
2154  *
2155  * Return: the exit code of the operation.
2156  */
2157 int ixgbe_set_phy_power_e610(struct ixgbe_hw *hw, bool on)
2158 {
2159 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
2160 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
2161 	int err;
2162 
2163 	err = ixgbe_aci_get_phy_caps(hw, false,
2164 				     IXGBE_ACI_REPORT_ACTIVE_CFG,
2165 				     &phy_caps);
2166 	if (err)
2167 		return err;
2168 
2169 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
2170 
2171 	if (on)
2172 		phy_cfg.caps &= ~IXGBE_ACI_PHY_ENA_LOW_POWER;
2173 	else
2174 		phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LOW_POWER;
2175 
2176 	/* PHY is already in requested power mode. */
2177 	if (phy_caps.caps == phy_cfg.caps)
2178 		return 0;
2179 
2180 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_LINK;
2181 	phy_cfg.caps |= IXGBE_ACI_PHY_ENA_AUTO_LINK_UPDT;
2182 
2183 	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
2184 }
2185 
2186 /**
2187  * ixgbe_enter_lplu_e610 - Transition to low power states
2188  * @hw: pointer to hardware structure
2189  *
2190  * Configures Low Power Link Up on transition to low power states
2191  * (from D0 to non-D0). Link is required to enter LPLU so avoid resetting the
2192  * X557 PHY immediately prior to entering LPLU.
2193  *
2194  * Return: the exit code of the operation.
2195  */
2196 int ixgbe_enter_lplu_e610(struct ixgbe_hw *hw)
2197 {
2198 	struct ixgbe_aci_cmd_get_phy_caps_data phy_caps = {};
2199 	struct ixgbe_aci_cmd_set_phy_cfg_data phy_cfg = {};
2200 	int err;
2201 
2202 	err = ixgbe_aci_get_phy_caps(hw, false,
2203 				     IXGBE_ACI_REPORT_ACTIVE_CFG,
2204 				     &phy_caps);
2205 	if (err)
2206 		return err;
2207 
2208 	ixgbe_copy_phy_caps_to_cfg(&phy_caps, &phy_cfg);
2209 
2210 	phy_cfg.low_power_ctrl_an |= IXGBE_ACI_PHY_EN_D3COLD_LOW_POWER_AUTONEG;
2211 
2212 	return ixgbe_aci_set_phy_cfg(hw, &phy_cfg);
2213 }
2214 
2215 /**
2216  * ixgbe_init_eeprom_params_e610 - Initialize EEPROM params
2217  * @hw: pointer to hardware structure
2218  *
2219  * Initialize the EEPROM parameters ixgbe_eeprom_info within the ixgbe_hw
2220  * struct in order to set up EEPROM access.
2221  *
2222  * Return: the operation exit code.
2223  */
2224 int ixgbe_init_eeprom_params_e610(struct ixgbe_hw *hw)
2225 {
2226 	struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
2227 	u32 gens_stat;
2228 	u8 sr_size;
2229 
2230 	if (eeprom->type != ixgbe_eeprom_uninitialized)
2231 		return 0;
2232 
2233 	eeprom->type = ixgbe_flash;
2234 
2235 	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
2236 	sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
2237 
2238 	/* Switching to words (sr_size contains power of 2). */
2239 	eeprom->word_size = BIT(sr_size) * IXGBE_SR_WORDS_IN_1KB;
2240 
2241 	hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", eeprom->type,
2242 	       eeprom->word_size);
2243 
2244 	return 0;
2245 }
2246 
2247 /**
2248  * ixgbe_aci_get_netlist_node - get a node handle
2249  * @hw: pointer to the hw struct
2250  * @cmd: get_link_topo AQ structure
2251  * @node_part_number: output node part number if node found
2252  * @node_handle: output node handle parameter if node found
2253  *
2254  * Get the netlist node and assigns it to
2255  * the provided handle using ACI command (0x06E0).
2256  *
2257  * Return: the exit code of the operation.
2258  */
2259 int ixgbe_aci_get_netlist_node(struct ixgbe_hw *hw,
2260 			       struct ixgbe_aci_cmd_get_link_topo *cmd,
2261 			       u8 *node_part_number, u16 *node_handle)
2262 {
2263 	struct ixgbe_aci_desc desc;
2264 
2265 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_get_link_topo);
2266 	desc.params.get_link_topo = *cmd;
2267 
2268 	if (ixgbe_aci_send_cmd(hw, &desc, NULL, 0))
2269 		return -EOPNOTSUPP;
2270 
2271 	if (node_handle)
2272 		*node_handle =
2273 			le16_to_cpu(desc.params.get_link_topo.addr.handle);
2274 	if (node_part_number)
2275 		*node_part_number = desc.params.get_link_topo.node_part_num;
2276 
2277 	return 0;
2278 }
2279 
2280 /**
2281  * ixgbe_acquire_nvm - Generic request for acquiring the NVM ownership
2282  * @hw: pointer to the HW structure
2283  * @access: NVM access type (read or write)
2284  *
2285  * Request NVM ownership.
2286  *
2287  * Return: the exit code of the operation.
2288  */
2289 int ixgbe_acquire_nvm(struct ixgbe_hw *hw,
2290 		      enum ixgbe_aci_res_access_type access)
2291 {
2292 	u32 fla;
2293 
2294 	/* Skip if we are in blank NVM programming mode */
2295 	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
2296 	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
2297 		return 0;
2298 
2299 	return ixgbe_acquire_res(hw, IXGBE_NVM_RES_ID, access,
2300 				 IXGBE_NVM_TIMEOUT);
2301 }
2302 
2303 /**
2304  * ixgbe_release_nvm - Generic request for releasing the NVM ownership
2305  * @hw: pointer to the HW structure
2306  *
2307  * Release NVM ownership.
2308  */
2309 void ixgbe_release_nvm(struct ixgbe_hw *hw)
2310 {
2311 	u32 fla;
2312 
2313 	/* Skip if we are in blank NVM programming mode */
2314 	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
2315 	if ((fla & IXGBE_GLNVM_FLA_LOCKED_M) == 0)
2316 		return;
2317 
2318 	ixgbe_release_res(hw, IXGBE_NVM_RES_ID);
2319 }
2320 
2321 /**
2322  * ixgbe_aci_read_nvm - read NVM
2323  * @hw: pointer to the HW struct
2324  * @module_typeid: module pointer location in words from the NVM beginning
2325  * @offset: byte offset from the module beginning
2326  * @length: length of the section to be read (in bytes from the offset)
2327  * @data: command buffer (size [bytes] = length)
2328  * @last_command: tells if this is the last command in a series
2329  * @read_shadow_ram: tell if this is a shadow RAM read
2330  *
2331  * Read the NVM using ACI command (0x0701).
2332  *
2333  * Return: the exit code of the operation.
2334  */
2335 int ixgbe_aci_read_nvm(struct ixgbe_hw *hw, u16 module_typeid, u32 offset,
2336 		       u16 length, void *data, bool last_command,
2337 		       bool read_shadow_ram)
2338 {
2339 	struct ixgbe_aci_cmd_nvm *cmd;
2340 	struct ixgbe_aci_desc desc;
2341 
2342 	if (offset > IXGBE_ACI_NVM_MAX_OFFSET)
2343 		return -EINVAL;
2344 
2345 	cmd = &desc.params.nvm;
2346 
2347 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_read);
2348 
2349 	if (!read_shadow_ram && module_typeid == IXGBE_ACI_NVM_START_POINT)
2350 		cmd->cmd_flags |= IXGBE_ACI_NVM_FLASH_ONLY;
2351 
2352 	/* If this is the last command in a series, set the proper flag. */
2353 	if (last_command)
2354 		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2355 	cmd->module_typeid = cpu_to_le16(module_typeid);
2356 	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
2357 	cmd->offset_high = (offset >> 16) & 0xFF;
2358 	cmd->length = cpu_to_le16(length);
2359 
2360 	return ixgbe_aci_send_cmd(hw, &desc, data, length);
2361 }
2362 
2363 /**
2364  * ixgbe_aci_erase_nvm - erase NVM sector
2365  * @hw: pointer to the HW struct
2366  * @module_typeid: module pointer location in words from the NVM beginning
2367  *
2368  * Erase the NVM sector using the ACI command (0x0702).
2369  *
2370  * Return: the exit code of the operation.
2371  */
2372 int ixgbe_aci_erase_nvm(struct ixgbe_hw *hw, u16 module_typeid)
2373 {
2374 	struct ixgbe_aci_cmd_nvm *cmd;
2375 	struct ixgbe_aci_desc desc;
2376 	__le16 len;
2377 	int err;
2378 
2379 	/* Read a length value from SR, so module_typeid is equal to 0,
2380 	 * calculate offset where module size is placed from bytes to words
2381 	 * set last command and read from SR values to true.
2382 	 */
2383 	err = ixgbe_aci_read_nvm(hw, 0, 2 * module_typeid + 2, 2, &len, true,
2384 				 true);
2385 	if (err)
2386 		return err;
2387 
2388 	cmd = &desc.params.nvm;
2389 
2390 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_erase);
2391 
2392 	cmd->module_typeid = cpu_to_le16(module_typeid);
2393 	cmd->length = len;
2394 	cmd->offset_low = 0;
2395 	cmd->offset_high = 0;
2396 
2397 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2398 }
2399 
2400 /**
2401  * ixgbe_aci_update_nvm - update NVM
2402  * @hw: pointer to the HW struct
2403  * @module_typeid: module pointer location in words from the NVM beginning
2404  * @offset: byte offset from the module beginning
2405  * @length: length of the section to be written (in bytes from the offset)
2406  * @data: command buffer (size [bytes] = length)
2407  * @last_command: tells if this is the last command in a series
2408  * @command_flags: command parameters
2409  *
2410  * Update the NVM using the ACI command (0x0703).
2411  *
2412  * Return: the exit code of the operation.
2413  */
2414 int ixgbe_aci_update_nvm(struct ixgbe_hw *hw, u16 module_typeid,
2415 			 u32 offset, u16 length, void *data,
2416 			 bool last_command, u8 command_flags)
2417 {
2418 	struct ixgbe_aci_cmd_nvm *cmd;
2419 	struct ixgbe_aci_desc desc;
2420 
2421 	cmd = &desc.params.nvm;
2422 
2423 	/* In offset the highest byte must be zeroed. */
2424 	if (offset & 0xFF000000)
2425 		return -EINVAL;
2426 
2427 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_write);
2428 
2429 	cmd->cmd_flags |= command_flags;
2430 
2431 	/* If this is the last command in a series, set the proper flag. */
2432 	if (last_command)
2433 		cmd->cmd_flags |= IXGBE_ACI_NVM_LAST_CMD;
2434 	cmd->module_typeid = cpu_to_le16(module_typeid);
2435 	cmd->offset_low = cpu_to_le16(offset & 0xFFFF);
2436 	cmd->offset_high = FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_U_MASK, offset);
2437 	cmd->length = cpu_to_le16(length);
2438 
2439 	desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);
2440 
2441 	return ixgbe_aci_send_cmd(hw, &desc, data, length);
2442 }
2443 
2444 /**
2445  * ixgbe_nvm_write_activate - NVM activate write
2446  * @hw: pointer to the HW struct
2447  * @cmd_flags: flags for write activate command
2448  * @response_flags: response indicators from firmware
2449  *
2450  * Update the control word with the required banks' validity bits
2451  * and dumps the Shadow RAM to flash using ACI command (0x0707).
2452  *
2453  * cmd_flags controls which banks to activate, the preservation level to use
2454  * when activating the NVM bank, and whether an EMP reset is required for
2455  * activation.
2456  *
2457  * Note that the 16bit cmd_flags value is split between two separate 1 byte
2458  * flag values in the descriptor.
2459  *
2460  * On successful return of the firmware command, the response_flags variable
2461  * is updated with the flags reported by firmware indicating certain status,
2462  * such as whether EMP reset is enabled.
2463  *
2464  * Return: the exit code of the operation.
2465  */
2466 int ixgbe_nvm_write_activate(struct ixgbe_hw *hw, u16 cmd_flags,
2467 			     u8 *response_flags)
2468 {
2469 	struct ixgbe_aci_cmd_nvm *cmd;
2470 	struct ixgbe_aci_desc desc;
2471 	s32 err;
2472 
2473 	cmd = &desc.params.nvm;
2474 	ixgbe_fill_dflt_direct_cmd_desc(&desc,
2475 					ixgbe_aci_opc_nvm_write_activate);
2476 
2477 	cmd->cmd_flags = (u8)(cmd_flags & 0xFF);
2478 	cmd->offset_high = (u8)FIELD_GET(IXGBE_ACI_NVM_OFFSET_HI_A_MASK,
2479 					 cmd_flags);
2480 
2481 	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2482 	if (!err && response_flags)
2483 		*response_flags = cmd->cmd_flags;
2484 
2485 	return err;
2486 }
2487 
2488 /**
2489  * ixgbe_nvm_validate_checksum - validate checksum
2490  * @hw: pointer to the HW struct
2491  *
2492  * Verify NVM PFA checksum validity using ACI command (0x0706).
2493  * If the checksum verification failed, IXGBE_ERR_NVM_CHECKSUM is returned.
2494  * The function acquires and then releases the NVM ownership.
2495  *
2496  * Return: the exit code of the operation.
2497  */
2498 int ixgbe_nvm_validate_checksum(struct ixgbe_hw *hw)
2499 {
2500 	struct ixgbe_aci_cmd_nvm_checksum *cmd;
2501 	struct ixgbe_aci_desc desc;
2502 	int err;
2503 
2504 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2505 	if (err)
2506 		return err;
2507 
2508 	cmd = &desc.params.nvm_checksum;
2509 
2510 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_checksum);
2511 	cmd->flags = IXGBE_ACI_NVM_CHECKSUM_VERIFY;
2512 
2513 	err = ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
2514 
2515 	ixgbe_release_nvm(hw);
2516 
2517 	if (!err && cmd->checksum !=
2518 		cpu_to_le16(IXGBE_ACI_NVM_CHECKSUM_CORRECT)) {
2519 		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
2520 							     hw);
2521 
2522 		err = -EIO;
2523 		netdev_err(adapter->netdev, "Invalid Shadow Ram checksum");
2524 	}
2525 
2526 	return err;
2527 }
2528 
2529 /**
2530  * ixgbe_discover_flash_size - Discover the available flash size
2531  * @hw: pointer to the HW struct
2532  *
2533  * The device flash could be up to 16MB in size. However, it is possible that
2534  * the actual size is smaller. Use bisection to determine the accessible size
2535  * of flash memory.
2536  *
2537  * Return: the exit code of the operation.
2538  */
static int ixgbe_discover_flash_size(struct ixgbe_hw *hw)
{
	u32 min_size = 0, max_size = IXGBE_ACI_NVM_MAX_OFFSET + 1;
	int err;

	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
	if (err)
		return err;

	/* Bisect on the byte offset: a 1-byte read the firmware rejects
	 * with EINVAL lies beyond the end of flash, a read that succeeds
	 * lies within it. Invariant: min_size is readable (or 0),
	 * max_size is not.
	 */
	while ((max_size - min_size) > 1) {
		u32 offset = (max_size + min_size) / 2;
		u32 len = 1;
		u8 data;

		err = ixgbe_read_flat_nvm(hw, offset, &len, &data, false);
		if (err == -EIO &&
		    hw->aci.last_status == IXGBE_ACI_RC_EINVAL) {
			/* Offset is past the end of flash - shrink range. */
			err = 0;
			max_size = offset;
		} else if (!err) {
			/* Offset is readable - raise the lower bound. */
			min_size = offset;
		} else {
			/* an unexpected error occurred */
			goto err_read_flat_nvm;
		}
	}

	/* On loop exit max_size is the first unreadable offset, i.e. the
	 * flash size. The success path intentionally falls through the
	 * label below so the NVM is always released.
	 */
	hw->flash.flash_size = max_size;

err_read_flat_nvm:
	ixgbe_release_nvm(hw);

	return err;
}
2573 
2574 /**
2575  * ixgbe_read_sr_base_address - Read the value of a Shadow RAM pointer word
2576  * @hw: pointer to the HW structure
2577  * @offset: the word offset of the Shadow RAM word to read
2578  * @pointer: pointer value read from Shadow RAM
2579  *
2580  * Read the given Shadow RAM word, and convert it to a pointer value specified
2581  * in bytes. This function assumes the specified offset is a valid pointer
2582  * word.
2583  *
2584  * Each pointer word specifies whether it is stored in word size or 4KB
2585  * sector size by using the highest bit. The reported pointer value will be in
2586  * bytes, intended for flat NVM reads.
2587  *
2588  * Return: the exit code of the operation.
2589  */
2590 static int ixgbe_read_sr_base_address(struct ixgbe_hw *hw, u16 offset,
2591 				      u32 *pointer)
2592 {
2593 	u16 value;
2594 	int err;
2595 
2596 	err = ixgbe_read_ee_aci_e610(hw, offset, &value);
2597 	if (err)
2598 		return err;
2599 
2600 	/* Determine if the pointer is in 4KB or word units */
2601 	if (value & IXGBE_SR_NVM_PTR_4KB_UNITS)
2602 		*pointer = (value & ~IXGBE_SR_NVM_PTR_4KB_UNITS) * SZ_4K;
2603 	else
2604 		*pointer = value * sizeof(u16);
2605 
2606 	return 0;
2607 }
2608 
2609 /**
2610  * ixgbe_read_sr_area_size - Read an area size from a Shadow RAM word
2611  * @hw: pointer to the HW structure
2612  * @offset: the word offset of the Shadow RAM to read
2613  * @size: size value read from the Shadow RAM
2614  *
2615  * Read the given Shadow RAM word, and convert it to an area size value
2616  * specified in bytes. This function assumes the specified offset is a valid
2617  * area size word.
2618  *
2619  * Each area size word is specified in 4KB sector units. This function reports
2620  * the size in bytes, intended for flat NVM reads.
2621  *
2622  * Return: the exit code of the operation.
2623  */
2624 static int ixgbe_read_sr_area_size(struct ixgbe_hw *hw, u16 offset, u32 *size)
2625 {
2626 	u16 value;
2627 	int err;
2628 
2629 	err = ixgbe_read_ee_aci_e610(hw, offset, &value);
2630 	if (err)
2631 		return err;
2632 
2633 	/* Area sizes are always specified in 4KB units */
2634 	*size = value * SZ_4K;
2635 
2636 	return 0;
2637 }
2638 
2639 /**
2640  * ixgbe_determine_active_flash_banks - Discover active bank for each module
2641  * @hw: pointer to the HW struct
2642  *
2643  * Read the Shadow RAM control word and determine which banks are active for
2644  * the NVM, OROM, and Netlist modules. Also read and calculate the associated
2645  * pointer and size. These values are then cached into the ixgbe_flash_info
2646  * structure for later use in order to calculate the correct offset to read
2647  * from the active module.
2648  *
2649  * Return: the exit code of the operation.
2650  */
2651 static int ixgbe_determine_active_flash_banks(struct ixgbe_hw *hw)
2652 {
2653 	struct ixgbe_bank_info *banks = &hw->flash.banks;
2654 	u16 ctrl_word;
2655 	int err;
2656 
2657 	err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_NVM_CTRL_WORD,
2658 				     &ctrl_word);
2659 	if (err)
2660 		return err;
2661 
2662 	if (FIELD_GET(IXGBE_SR_CTRL_WORD_1_M, ctrl_word) !=
2663 	    IXGBE_SR_CTRL_WORD_VALID)
2664 		return -ENODATA;
2665 
2666 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NVM_BANK))
2667 		banks->nvm_bank = IXGBE_1ST_FLASH_BANK;
2668 	else
2669 		banks->nvm_bank = IXGBE_2ND_FLASH_BANK;
2670 
2671 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_OROM_BANK))
2672 		banks->orom_bank = IXGBE_1ST_FLASH_BANK;
2673 	else
2674 		banks->orom_bank = IXGBE_2ND_FLASH_BANK;
2675 
2676 	if (!(ctrl_word & IXGBE_SR_CTRL_WORD_NETLIST_BANK))
2677 		banks->netlist_bank = IXGBE_1ST_FLASH_BANK;
2678 	else
2679 		banks->netlist_bank = IXGBE_2ND_FLASH_BANK;
2680 
2681 	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
2682 					 &banks->nvm_ptr);
2683 	if (err)
2684 		return err;
2685 
2686 	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NVM_BANK_SIZE,
2687 				      &banks->nvm_size);
2688 	if (err)
2689 		return err;
2690 
2691 	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
2692 					 &banks->orom_ptr);
2693 	if (err)
2694 		return err;
2695 
2696 	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_OROM_BANK_SIZE,
2697 				      &banks->orom_size);
2698 	if (err)
2699 		return err;
2700 
2701 	err = ixgbe_read_sr_base_address(hw, IXGBE_E610_SR_NETLIST_BANK_PTR,
2702 					 &banks->netlist_ptr);
2703 	if (err)
2704 		return err;
2705 
2706 	err = ixgbe_read_sr_area_size(hw, IXGBE_E610_SR_NETLIST_BANK_SIZE,
2707 				      &banks->netlist_size);
2708 
2709 	return err;
2710 }
2711 
2712 /**
2713  * ixgbe_get_flash_bank_offset - Get offset into requested flash bank
2714  * @hw: pointer to the HW structure
2715  * @bank: whether to read from the active or inactive flash bank
2716  * @module: the module to read from
2717  *
2718  * Based on the module, lookup the module offset from the beginning of the
2719  * flash.
2720  *
2721  * Return: the flash offset. Note that a value of zero is invalid and must be
2722  * treated as an error.
2723  */
2724 static int ixgbe_get_flash_bank_offset(struct ixgbe_hw *hw,
2725 				       enum ixgbe_bank_select bank,
2726 				       u16 module)
2727 {
2728 	struct ixgbe_bank_info *banks = &hw->flash.banks;
2729 	enum ixgbe_flash_bank active_bank;
2730 	bool second_bank_active;
2731 	u32 offset, size;
2732 
2733 	switch (module) {
2734 	case IXGBE_E610_SR_1ST_NVM_BANK_PTR:
2735 		offset = banks->nvm_ptr;
2736 		size = banks->nvm_size;
2737 		active_bank = banks->nvm_bank;
2738 		break;
2739 	case IXGBE_E610_SR_1ST_OROM_BANK_PTR:
2740 		offset = banks->orom_ptr;
2741 		size = banks->orom_size;
2742 		active_bank = banks->orom_bank;
2743 		break;
2744 	case IXGBE_E610_SR_NETLIST_BANK_PTR:
2745 		offset = banks->netlist_ptr;
2746 		size = banks->netlist_size;
2747 		active_bank = banks->netlist_bank;
2748 		break;
2749 	default:
2750 		return 0;
2751 	}
2752 
2753 	switch (active_bank) {
2754 	case IXGBE_1ST_FLASH_BANK:
2755 		second_bank_active = false;
2756 		break;
2757 	case IXGBE_2ND_FLASH_BANK:
2758 		second_bank_active = true;
2759 		break;
2760 	default:
2761 		return 0;
2762 	}
2763 
2764 	/* The second flash bank is stored immediately following the first
2765 	 * bank. Based on whether the 1st or 2nd bank is active, and whether
2766 	 * we want the active or inactive bank, calculate the desired offset.
2767 	 */
2768 	switch (bank) {
2769 	case IXGBE_ACTIVE_FLASH_BANK:
2770 		return offset + (second_bank_active ? size : 0);
2771 	case IXGBE_INACTIVE_FLASH_BANK:
2772 		return offset + (second_bank_active ? 0 : size);
2773 	}
2774 
2775 	return 0;
2776 }
2777 
2778 /**
2779  * ixgbe_read_flash_module - Read a word from one of the main NVM modules
2780  * @hw: pointer to the HW structure
2781  * @bank: which bank of the module to read
2782  * @module: the module to read
2783  * @offset: the offset into the module in bytes
2784  * @data: storage for the word read from the flash
2785  * @length: bytes of data to read
2786  *
2787  * Read data from the specified flash module. The bank parameter indicates
2788  * whether or not to read from the active bank or the inactive bank of that
2789  * module.
2790  *
2791  * The word will be read using flat NVM access, and relies on the
2792  * hw->flash.banks data being setup by ixgbe_determine_active_flash_banks()
2793  * during initialization.
2794  *
2795  * Return: the exit code of the operation.
2796  */
2797 static int ixgbe_read_flash_module(struct ixgbe_hw *hw,
2798 				   enum ixgbe_bank_select bank,
2799 				   u16 module, u32 offset, u8 *data, u32 length)
2800 {
2801 	u32 start;
2802 	int err;
2803 
2804 	start = ixgbe_get_flash_bank_offset(hw, bank, module);
2805 	if (!start)
2806 		return -EINVAL;
2807 
2808 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
2809 	if (err)
2810 		return err;
2811 
2812 	err = ixgbe_read_flat_nvm(hw, start + offset, &length, data, false);
2813 
2814 	ixgbe_release_nvm(hw);
2815 
2816 	return err;
2817 }
2818 
2819 /**
2820  * ixgbe_read_nvm_module - Read from the active main NVM module
2821  * @hw: pointer to the HW structure
2822  * @bank: whether to read from active or inactive NVM module
2823  * @offset: offset into the NVM module to read, in words
2824  * @data: storage for returned word value
2825  *
2826  * Read the specified word from the active NVM module. This includes the CSS
2827  * header at the start of the NVM module.
2828  *
2829  * Return: the exit code of the operation.
2830  */
2831 static int ixgbe_read_nvm_module(struct ixgbe_hw *hw,
2832 				 enum ixgbe_bank_select bank,
2833 				 u32 offset, u16 *data)
2834 {
2835 	__le16 data_local;
2836 	int err;
2837 
2838 	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_NVM_BANK_PTR,
2839 				      offset * sizeof(data_local),
2840 				      (u8 *)&data_local,
2841 				      sizeof(data_local));
2842 	if (!err)
2843 		*data = le16_to_cpu(data_local);
2844 
2845 	return err;
2846 }
2847 
2848 /**
2849  * ixgbe_read_netlist_module - Read data from the netlist module area
2850  * @hw: pointer to the HW structure
2851  * @bank: whether to read from the active or inactive module
2852  * @offset: offset into the netlist to read from
2853  * @data: storage for returned word value
2854  *
2855  * Read a word from the specified netlist bank.
2856  *
2857  * Return: the exit code of the operation.
2858  */
2859 static int ixgbe_read_netlist_module(struct ixgbe_hw *hw,
2860 				     enum ixgbe_bank_select bank,
2861 				     u32 offset, u16 *data)
2862 {
2863 	__le16 data_local;
2864 	int err;
2865 
2866 	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
2867 				      offset * sizeof(data_local),
2868 				      (u8 *)&data_local, sizeof(data_local));
2869 	if (!err)
2870 		*data = le16_to_cpu(data_local);
2871 
2872 	return err;
2873 }
2874 
2875 /**
2876  * ixgbe_read_orom_module - Read from the active Option ROM module
2877  * @hw: pointer to the HW structure
2878  * @bank: whether to read from active or inactive OROM module
2879  * @offset: offset into the OROM module to read, in words
2880  * @data: storage for returned word value
2881  *
2882  * Read the specified word from the active Option ROM module of the flash.
2883  * Note that unlike the NVM module, the CSS data is stored at the end of the
2884  * module instead of at the beginning.
2885  *
2886  * Return: the exit code of the operation.
2887  */
2888 static int ixgbe_read_orom_module(struct ixgbe_hw *hw,
2889 				  enum ixgbe_bank_select bank,
2890 				  u32 offset, u16 *data)
2891 {
2892 	__le16 data_local;
2893 	int err;
2894 
2895 	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_1ST_OROM_BANK_PTR,
2896 				      offset * sizeof(data_local),
2897 				      (u8 *)&data_local, sizeof(data_local));
2898 	if (!err)
2899 		*data = le16_to_cpu(data_local);
2900 
2901 	return err;
2902 }
2903 
2904 /**
2905  * ixgbe_get_nvm_css_hdr_len - Read the CSS header length
2906  * @hw: pointer to the HW struct
2907  * @bank: whether to read from the active or inactive flash bank
2908  * @hdr_len: storage for header length in words
2909  *
2910  * Read the CSS header length from the NVM CSS header and add the
2911  * Authentication header size, and then convert to words.
2912  *
2913  * Return: the exit code of the operation.
2914  */
2915 static int ixgbe_get_nvm_css_hdr_len(struct ixgbe_hw *hw,
2916 				     enum ixgbe_bank_select bank,
2917 				     u32 *hdr_len)
2918 {
2919 	u16 hdr_len_l, hdr_len_h;
2920 	u32 hdr_len_dword;
2921 	int err;
2922 
2923 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_L,
2924 				    &hdr_len_l);
2925 	if (err)
2926 		return err;
2927 
2928 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_HDR_LEN_H,
2929 				    &hdr_len_h);
2930 	if (err)
2931 		return err;
2932 
2933 	/* CSS header length is in DWORD, so convert to words and add
2934 	 * authentication header size.
2935 	 */
2936 	hdr_len_dword = (hdr_len_h << 16) | hdr_len_l;
2937 	*hdr_len = hdr_len_dword * 2 + IXGBE_NVM_AUTH_HEADER_LEN;
2938 
2939 	return 0;
2940 }
2941 
2942 /**
2943  * ixgbe_read_nvm_sr_copy - Read a word from the Shadow RAM copy
2944  * @hw: pointer to the HW structure
2945  * @bank: whether to read from the active or inactive NVM module
2946  * @offset: offset into the Shadow RAM copy to read, in words
2947  * @data: storage for returned word value
2948  *
2949  * Read the specified word from the copy of the Shadow RAM found in the
2950  * specified NVM module.
2951  *
2952  * Return: the exit code of the operation.
2953  */
2954 static int ixgbe_read_nvm_sr_copy(struct ixgbe_hw *hw,
2955 				  enum ixgbe_bank_select bank,
2956 				  u32 offset, u16 *data)
2957 {
2958 	u32 hdr_len;
2959 	int err;
2960 
2961 	err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
2962 	if (err)
2963 		return err;
2964 
2965 	hdr_len = round_up(hdr_len, IXGBE_HDR_LEN_ROUNDUP);
2966 
2967 	return ixgbe_read_nvm_module(hw, bank, hdr_len + offset, data);
2968 }
2969 
2970 /**
2971  * ixgbe_get_nvm_srev - Read the security revision from the NVM CSS header
2972  * @hw: pointer to the HW struct
2973  * @bank: whether to read from the active or inactive flash bank
2974  * @srev: storage for security revision
2975  *
2976  * Read the security revision out of the CSS header of the active NVM module
2977  * bank.
2978  *
2979  * Return: the exit code of the operation.
2980  */
2981 static int ixgbe_get_nvm_srev(struct ixgbe_hw *hw,
2982 			      enum ixgbe_bank_select bank, u32 *srev)
2983 {
2984 	u16 srev_l, srev_h;
2985 	int err;
2986 
2987 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_L, &srev_l);
2988 	if (err)
2989 		return err;
2990 
2991 	err = ixgbe_read_nvm_module(hw, bank, IXGBE_NVM_CSS_SREV_H, &srev_h);
2992 	if (err)
2993 		return err;
2994 
2995 	*srev = (srev_h << 16) | srev_l;
2996 
2997 	return 0;
2998 }
2999 
3000 /**
3001  * ixgbe_get_orom_civd_data - Get the combo version information from Option ROM
3002  * @hw: pointer to the HW struct
3003  * @bank: whether to read from the active or inactive flash module
3004  * @civd: storage for the Option ROM CIVD data.
3005  *
3006  * Searches through the Option ROM flash contents to locate the CIVD data for
3007  * the image.
3008  *
3009  * Return: the exit code of the operation.
3010  */
3011 static int
3012 ixgbe_get_orom_civd_data(struct ixgbe_hw *hw, enum ixgbe_bank_select bank,
3013 			 struct ixgbe_orom_civd_info *civd)
3014 {
3015 	struct ixgbe_orom_civd_info tmp;
3016 	u32 offset;
3017 	int err;
3018 
3019 	/* The CIVD section is located in the Option ROM aligned to 512 bytes.
3020 	 * The first 4 bytes must contain the ASCII characters "$CIV".
3021 	 * A simple modulo 256 sum of all of the bytes of the structure must
3022 	 * equal 0.
3023 	 */
3024 	for (offset = 0; (offset + SZ_512) <= hw->flash.banks.orom_size;
3025 	     offset += SZ_512) {
3026 		u8 sum = 0;
3027 		u32 i;
3028 
3029 		err = ixgbe_read_flash_module(hw, bank,
3030 					      IXGBE_E610_SR_1ST_OROM_BANK_PTR,
3031 					      offset,
3032 					      (u8 *)&tmp, sizeof(tmp));
3033 		if (err)
3034 			return err;
3035 
3036 		/* Skip forward until we find a matching signature */
3037 		if (memcmp(IXGBE_OROM_CIV_SIGNATURE, tmp.signature,
3038 			   sizeof(tmp.signature)))
3039 			continue;
3040 
3041 		/* Verify that the simple checksum is zero */
3042 		for (i = 0; i < sizeof(tmp); i++)
3043 			sum += ((u8 *)&tmp)[i];
3044 
3045 		if (sum)
3046 			return -EDOM;
3047 
3048 		*civd = tmp;
3049 		return 0;
3050 	}
3051 
3052 	return -ENODATA;
3053 }
3054 
3055 /**
3056  * ixgbe_get_orom_srev - Read the security revision from the OROM CSS header
3057  * @hw: pointer to the HW struct
3058  * @bank: whether to read from active or inactive flash module
3059  * @srev: storage for security revision
3060  *
3061  * Read the security revision out of the CSS header of the active OROM module
3062  * bank.
3063  *
3064  * Return: the exit code of the operation.
3065  */
3066 static int ixgbe_get_orom_srev(struct ixgbe_hw *hw,
3067 			       enum ixgbe_bank_select bank,
3068 			       u32 *srev)
3069 {
3070 	u32 orom_size_word = hw->flash.banks.orom_size / 2;
3071 	u32 css_start, hdr_len;
3072 	u16 srev_l, srev_h;
3073 	int err;
3074 
3075 	err = ixgbe_get_nvm_css_hdr_len(hw, bank, &hdr_len);
3076 	if (err)
3077 		return err;
3078 
3079 	if (orom_size_word < hdr_len)
3080 		return -EINVAL;
3081 
3082 	/* Calculate how far into the Option ROM the CSS header starts. Note
3083 	 * that ixgbe_read_orom_module takes a word offset.
3084 	 */
3085 	css_start = orom_size_word - hdr_len;
3086 	err = ixgbe_read_orom_module(hw, bank,
3087 				     css_start + IXGBE_NVM_CSS_SREV_L,
3088 				     &srev_l);
3089 	if (err)
3090 		return err;
3091 
3092 	err = ixgbe_read_orom_module(hw, bank,
3093 				     css_start + IXGBE_NVM_CSS_SREV_H,
3094 				     &srev_h);
3095 	if (err)
3096 		return err;
3097 
3098 	*srev = srev_h << 16 | srev_l;
3099 
3100 	return 0;
3101 }
3102 
3103 /**
3104  * ixgbe_get_orom_ver_info - Read Option ROM version information
3105  * @hw: pointer to the HW struct
3106  * @bank: whether to read from the active or inactive flash module
3107  * @orom: pointer to Option ROM info structure
3108  *
3109  * Read Option ROM version and security revision from the Option ROM flash
3110  * section.
3111  *
3112  * Return: the exit code of the operation.
3113  */
3114 static int ixgbe_get_orom_ver_info(struct ixgbe_hw *hw,
3115 				   enum ixgbe_bank_select bank,
3116 				   struct ixgbe_orom_info *orom)
3117 {
3118 	struct ixgbe_orom_civd_info civd;
3119 	u32 combo_ver;
3120 	int err;
3121 
3122 	err = ixgbe_get_orom_civd_data(hw, bank, &civd);
3123 	if (err)
3124 		return err;
3125 
3126 	combo_ver = le32_to_cpu(civd.combo_ver);
3127 
3128 	orom->major = (u8)FIELD_GET(IXGBE_OROM_VER_MASK, combo_ver);
3129 	orom->patch = (u8)FIELD_GET(IXGBE_OROM_VER_PATCH_MASK, combo_ver);
3130 	orom->build = (u16)FIELD_GET(IXGBE_OROM_VER_BUILD_MASK, combo_ver);
3131 
3132 	return ixgbe_get_orom_srev(hw, bank, &orom->srev);
3133 }
3134 
3135 /**
3136  * ixgbe_get_inactive_orom_ver - Read Option ROM version from the inactive bank
3137  * @hw: pointer to the HW structure
3138  * @orom: storage for Option ROM version information
3139  *
3140  * Read the Option ROM version and security revision data for the inactive
3141  * section of flash. Used to access version data for a pending update that has
3142  * not yet been activated.
3143  *
3144  * Return: the exit code of the operation.
3145  */
3146 int ixgbe_get_inactive_orom_ver(struct ixgbe_hw *hw,
3147 				struct ixgbe_orom_info *orom)
3148 {
3149 	return ixgbe_get_orom_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, orom);
3150 }
3151 
3152 /**
3153  * ixgbe_get_nvm_ver_info - Read NVM version information
3154  * @hw: pointer to the HW struct
3155  * @bank: whether to read from the active or inactive flash bank
3156  * @nvm: pointer to NVM info structure
3157  *
3158  * Read the NVM EETRACK ID and map version of the main NVM image bank, filling
3159  * in the nvm info structure.
3160  *
3161  * Return: the exit code of the operation.
3162  */
3163 static int ixgbe_get_nvm_ver_info(struct ixgbe_hw *hw,
3164 				  enum ixgbe_bank_select bank,
3165 				  struct ixgbe_nvm_info *nvm)
3166 {
3167 	u16 eetrack_lo, eetrack_hi, ver;
3168 	int err;
3169 
3170 	err = ixgbe_read_nvm_sr_copy(hw, bank,
3171 				     IXGBE_E610_SR_NVM_DEV_STARTER_VER, &ver);
3172 	if (err)
3173 		return err;
3174 
3175 	nvm->major = FIELD_GET(IXGBE_E610_NVM_VER_HI_MASK, ver);
3176 	nvm->minor = FIELD_GET(IXGBE_E610_NVM_VER_LO_MASK, ver);
3177 
3178 	err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_LO,
3179 				     &eetrack_lo);
3180 	if (err)
3181 		return err;
3182 
3183 	err = ixgbe_read_nvm_sr_copy(hw, bank, IXGBE_E610_SR_NVM_EETRACK_HI,
3184 				     &eetrack_hi);
3185 	if (err)
3186 		return err;
3187 
3188 	nvm->eetrack = (eetrack_hi << 16) | eetrack_lo;
3189 
3190 	ixgbe_get_nvm_srev(hw, bank, &nvm->srev);
3191 
3192 	return 0;
3193 }
3194 
3195 /**
3196  * ixgbe_get_inactive_nvm_ver - Read Option ROM version from the inactive bank
3197  * @hw: pointer to the HW structure
3198  * @nvm: storage for Option ROM version information
3199  *
3200  * Read the NVM EETRACK ID, Map version, and security revision of the
3201  * inactive NVM bank. Used to access version data for a pending update that
3202  * has not yet been activated.
3203  *
3204  * Return: the exit code of the operation.
3205  */
3206 int ixgbe_get_inactive_nvm_ver(struct ixgbe_hw *hw, struct ixgbe_nvm_info *nvm)
3207 {
3208 	return ixgbe_get_nvm_ver_info(hw, IXGBE_INACTIVE_FLASH_BANK, nvm);
3209 }
3210 
3211 /**
3212  * ixgbe_get_active_nvm_ver - Read Option ROM version from the active bank
3213  * @hw: pointer to the HW structure
3214  * @nvm: storage for Option ROM version information
3215  *
3216  * Reads the NVM EETRACK ID, Map version, and security revision of the
3217  * active NVM bank.
3218  *
3219  * Return: the exit code of the operation.
3220  */
3221 static int ixgbe_get_active_nvm_ver(struct ixgbe_hw *hw,
3222 				    struct ixgbe_nvm_info *nvm)
3223 {
3224 	return ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK, nvm);
3225 }
3226 
3227 /**
3228  * ixgbe_get_netlist_info - Read the netlist version information
3229  * @hw: pointer to the HW struct
3230  * @bank: whether to read from the active or inactive flash bank
3231  * @netlist: pointer to netlist version info structure
3232  *
3233  * Get the netlist version information from the requested bank. Reads the Link
3234  * Topology section to find the Netlist ID block and extract the relevant
3235  * information into the netlist version structure.
3236  *
3237  * Return: the exit code of the operation.
3238  */
3239 static int ixgbe_get_netlist_info(struct ixgbe_hw *hw,
3240 				  enum ixgbe_bank_select bank,
3241 				  struct ixgbe_netlist_info *netlist)
3242 {
3243 	u16 module_id, length, node_count, i;
3244 	u16 *id_blk;
3245 	int err;
3246 
3247 	err = ixgbe_read_netlist_module(hw, bank, IXGBE_NETLIST_TYPE_OFFSET,
3248 					&module_id);
3249 	if (err)
3250 		return err;
3251 
3252 	if (module_id != IXGBE_NETLIST_LINK_TOPO_MOD_ID)
3253 		return -EIO;
3254 
3255 	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_MODULE_LEN,
3256 					&length);
3257 	if (err)
3258 		return err;
3259 
3260 	/* Sanity check that we have at least enough words to store the
3261 	 * netlist ID block.
3262 	 */
3263 	if (length < IXGBE_NETLIST_ID_BLK_SIZE)
3264 		return -EIO;
3265 
3266 	err = ixgbe_read_netlist_module(hw, bank, IXGBE_LINK_TOPO_NODE_COUNT,
3267 					&node_count);
3268 	if (err)
3269 		return err;
3270 
3271 	node_count &= IXGBE_LINK_TOPO_NODE_COUNT_M;
3272 
3273 	id_blk = kcalloc(IXGBE_NETLIST_ID_BLK_SIZE, sizeof(*id_blk), GFP_KERNEL);
3274 	if (!id_blk)
3275 		return -ENOMEM;
3276 
3277 	/* Read out the entire Netlist ID Block at once. */
3278 	err = ixgbe_read_flash_module(hw, bank, IXGBE_E610_SR_NETLIST_BANK_PTR,
3279 				      IXGBE_NETLIST_ID_BLK_OFFSET(node_count) *
3280 				      sizeof(*id_blk), (u8 *)id_blk,
3281 				      IXGBE_NETLIST_ID_BLK_SIZE *
3282 				      sizeof(*id_blk));
3283 	if (err)
3284 		goto free_id_blk;
3285 
3286 	for (i = 0; i < IXGBE_NETLIST_ID_BLK_SIZE; i++)
3287 		id_blk[i] = le16_to_cpu(((__le16 *)id_blk)[i]);
3288 
3289 	netlist->major = id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_HIGH] << 16 |
3290 			 id_blk[IXGBE_NETLIST_ID_BLK_MAJOR_VER_LOW];
3291 	netlist->minor = id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_HIGH] << 16 |
3292 			 id_blk[IXGBE_NETLIST_ID_BLK_MINOR_VER_LOW];
3293 	netlist->type = id_blk[IXGBE_NETLIST_ID_BLK_TYPE_HIGH] << 16 |
3294 			id_blk[IXGBE_NETLIST_ID_BLK_TYPE_LOW];
3295 	netlist->rev = id_blk[IXGBE_NETLIST_ID_BLK_REV_HIGH] << 16 |
3296 		       id_blk[IXGBE_NETLIST_ID_BLK_REV_LOW];
3297 	netlist->cust_ver = id_blk[IXGBE_NETLIST_ID_BLK_CUST_VER];
3298 	/* Read the left most 4 bytes of SHA */
3299 	netlist->hash = id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(15)] << 16 |
3300 			id_blk[IXGBE_NETLIST_ID_BLK_SHA_HASH_WORD(14)];
3301 
3302 free_id_blk:
3303 	kfree(id_blk);
3304 	return err;
3305 }
3306 
3307 /**
3308  * ixgbe_get_inactive_netlist_ver - Read netlist version from the inactive bank
3309  * @hw: pointer to the HW struct
3310  * @netlist: pointer to netlist version info structure
3311  *
3312  * Read the netlist version data from the inactive netlist bank. Used to
3313  * extract version data of a pending flash update in order to display the
3314  * version data.
3315  *
3316  * Return: the exit code of the operation.
3317  */
3318 int ixgbe_get_inactive_netlist_ver(struct ixgbe_hw *hw,
3319 				   struct ixgbe_netlist_info *netlist)
3320 {
3321 	return ixgbe_get_netlist_info(hw, IXGBE_INACTIVE_FLASH_BANK, netlist);
3322 }
3323 
3324 /**
3325  * ixgbe_get_flash_data - get flash data
3326  * @hw: pointer to the HW struct
3327  *
3328  * Read and populate flash data such as Shadow RAM size,
3329  * max_timeout and blank_nvm_mode
3330  *
3331  * Return: the exit code of the operation.
3332  */
3333 int ixgbe_get_flash_data(struct ixgbe_hw *hw)
3334 {
3335 	struct ixgbe_flash_info *flash = &hw->flash;
3336 	u32 fla, gens_stat;
3337 	u8 sr_size;
3338 	int err;
3339 
3340 	/* The SR size is stored regardless of the NVM programming mode
3341 	 * as the blank mode may be used in the factory line.
3342 	 */
3343 	gens_stat = IXGBE_READ_REG(hw, GLNVM_GENS);
3344 	sr_size = FIELD_GET(GLNVM_GENS_SR_SIZE_M, gens_stat);
3345 
3346 	/* Switching to words (sr_size contains power of 2) */
3347 	flash->sr_words = BIT(sr_size) * (SZ_1K / sizeof(u16));
3348 
3349 	/* Check if we are in the normal or blank NVM programming mode */
3350 	fla = IXGBE_READ_REG(hw, IXGBE_GLNVM_FLA);
3351 	if (fla & IXGBE_GLNVM_FLA_LOCKED_M) {
3352 		flash->blank_nvm_mode = false;
3353 	} else {
3354 		flash->blank_nvm_mode = true;
3355 		return -EIO;
3356 	}
3357 
3358 	err = ixgbe_discover_flash_size(hw);
3359 	if (err)
3360 		return err;
3361 
3362 	err = ixgbe_determine_active_flash_banks(hw);
3363 	if (err)
3364 		return err;
3365 
3366 	err = ixgbe_get_nvm_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3367 				     &flash->nvm);
3368 	if (err)
3369 		return err;
3370 
3371 	err = ixgbe_get_orom_ver_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3372 				      &flash->orom);
3373 	if (err)
3374 		return err;
3375 
3376 	err = ixgbe_get_netlist_info(hw, IXGBE_ACTIVE_FLASH_BANK,
3377 				     &flash->netlist);
3378 	return err;
3379 }
3380 
3381 /**
3382  * ixgbe_aci_nvm_update_empr - update NVM using EMPR
3383  * @hw: pointer to the HW struct
3384  *
3385  * Force EMP reset using ACI command (0x0709). This command allows SW to
3386  * request an EMPR to activate new FW.
3387  *
3388  * Return: the exit code of the operation.
3389  */
3390 int ixgbe_aci_nvm_update_empr(struct ixgbe_hw *hw)
3391 {
3392 	struct ixgbe_aci_desc desc;
3393 
3394 	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_update_empr);
3395 
3396 	return ixgbe_aci_send_cmd(hw, &desc, NULL, 0);
3397 }
3398 
/**
 * ixgbe_nvm_set_pkg_data - NVM set package data
 * @hw: pointer to the HW struct
 * @del_pkg_data_flag: If is set then the current pkg_data store by FW
 *		       is deleted.
 *		       If bit is set to 1, then buffer should be size 0.
 * @data: pointer to buffer
 * @length: length of the buffer
 *
 * Set package data using ACI command (0x070A).
 * This command is equivalent to the reception of
 * a PLDM FW Update GetPackageData cmd. This command should be sent
 * as part of the NVM update as the first cmd in the flow.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_nvm_set_pkg_data(struct ixgbe_hw *hw, bool del_pkg_data_flag,
			   u8 *data, u16 length)
{
	struct ixgbe_aci_cmd_nvm_pkg_data *cmd;
	struct ixgbe_aci_desc desc;

	/* A non-empty buffer requires a valid data pointer. */
	if (length != 0 && !data)
		return -EINVAL;

	cmd = &desc.params.pkg_data;

	ixgbe_fill_dflt_direct_cmd_desc(&desc, ixgbe_aci_opc_nvm_pkg_data);
	/* RD flag: the buffer carries data from host to FW. */
	desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);

	if (del_pkg_data_flag)
		cmd->cmd_flags |= IXGBE_ACI_NVM_PKG_DELETE;

	return ixgbe_aci_send_cmd(hw, &desc, data, length);
}
3433 
/**
 * ixgbe_nvm_pass_component_tbl - NVM pass component table
 * @hw: pointer to the HW struct
 * @data: pointer to buffer
 * @length: length of the buffer
 * @transfer_flag: parameter for determining stage of the update
 * @comp_response: a pointer to the response from the 0x070B ACI.
 * @comp_response_code: a pointer to the response code from the 0x070B ACI.
 *
 * Pass component table using ACI command (0x070B). This command is equivalent
 * to the reception of a PLDM FW Update PassComponentTable cmd.
 * This command should be sent once per component. It can be only sent after
 * Set Package Data cmd and before actual update. FW will assume these
 * commands are going to be sent until the TransferFlag is set to End or
 * StartAndEnd.
 *
 * Return: the exit code of the operation.
 */
int ixgbe_nvm_pass_component_tbl(struct ixgbe_hw *hw, u8 *data, u16 length,
				 u8 transfer_flag, u8 *comp_response,
				 u8 *comp_response_code)
{
	struct ixgbe_aci_cmd_nvm_pass_comp_tbl *cmd;
	struct ixgbe_aci_desc desc;
	int err;

	/* All output pointers and the table buffer are mandatory. */
	if (!data || !comp_response || !comp_response_code)
		return -EINVAL;

	cmd = &desc.params.pass_comp_tbl;

	ixgbe_fill_dflt_direct_cmd_desc(&desc,
					ixgbe_aci_opc_nvm_pass_component_tbl);
	/* RD flag: the buffer carries data from host to FW. */
	desc.flags |= cpu_to_le16(IXGBE_ACI_FLAG_RD);

	cmd->transfer_flag = transfer_flag;
	err = ixgbe_aci_send_cmd(hw, &desc, data, length);
	/* FW writes its verdict back into the descriptor on success. */
	if (!err) {
		*comp_response = cmd->component_response;
		*comp_response_code = cmd->component_response_code;
	}

	return err;
}
3477 
3478 /**
3479  * ixgbe_read_sr_word_aci - Reads Shadow RAM via ACI
3480  * @hw: pointer to the HW structure
3481  * @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF)
3482  * @data: word read from the Shadow RAM
3483  *
3484  * Reads one 16 bit word from the Shadow RAM using ixgbe_read_flat_nvm.
3485  *
3486  * Return: the exit code of the operation.
3487  */
3488 int ixgbe_read_sr_word_aci(struct ixgbe_hw  *hw, u16 offset, u16 *data)
3489 {
3490 	u32 bytes = sizeof(u16);
3491 	u16 data_local;
3492 	int err;
3493 
3494 	err = ixgbe_read_flat_nvm(hw, offset * sizeof(u16), &bytes,
3495 				  (u8 *)&data_local, true);
3496 	if (err)
3497 		return err;
3498 
3499 	*data = data_local;
3500 	return 0;
3501 }
3502 
3503 /**
3504  * ixgbe_read_flat_nvm - Read portion of NVM by flat offset
3505  * @hw: pointer to the HW struct
3506  * @offset: offset from beginning of NVM
3507  * @length: (in) number of bytes to read; (out) number of bytes actually read
3508  * @data: buffer to return data in (sized to fit the specified length)
3509  * @read_shadow_ram: if true, read from shadow RAM instead of NVM
3510  *
3511  * Reads a portion of the NVM, as a flat memory space. This function correctly
3512  * breaks read requests across Shadow RAM sectors, prevents Shadow RAM size
3513  * from being exceeded in case of Shadow RAM read requests and ensures that no
3514  * single read request exceeds the maximum 4KB read for a single admin command.
3515  *
3516  * Returns an error code on failure. Note that the data pointer may be
3517  * partially updated if some reads succeed before a failure.
3518  *
3519  * Return: the exit code of the operation.
3520  */
3521 int ixgbe_read_flat_nvm(struct ixgbe_hw  *hw, u32 offset, u32 *length,
3522 			u8 *data, bool read_shadow_ram)
3523 {
3524 	u32 inlen = *length;
3525 	u32 bytes_read = 0;
3526 	bool last_cmd;
3527 	int err;
3528 
3529 	/* Verify the length of the read if this is for the Shadow RAM */
3530 	if (read_shadow_ram && ((offset + inlen) >
3531 				(hw->eeprom.word_size * 2u)))
3532 		return -EINVAL;
3533 
3534 	do {
3535 		u32 read_size, sector_offset;
3536 
3537 		/* ixgbe_aci_read_nvm cannot read more than 4KB at a time.
3538 		 * Additionally, a read from the Shadow RAM may not cross over
3539 		 * a sector boundary. Conveniently, the sector size is also 4KB.
3540 		 */
3541 		sector_offset = offset % IXGBE_ACI_MAX_BUFFER_SIZE;
3542 		read_size = min_t(u32,
3543 				  IXGBE_ACI_MAX_BUFFER_SIZE - sector_offset,
3544 				  inlen - bytes_read);
3545 
3546 		last_cmd = !(bytes_read + read_size < inlen);
3547 
3548 		/* ixgbe_aci_read_nvm takes the length as a u16. Our read_size
3549 		 * is calculated using a u32, but the IXGBE_ACI_MAX_BUFFER_SIZE
3550 		 * maximum size guarantees that it will fit within the 2 bytes.
3551 		 */
3552 		err = ixgbe_aci_read_nvm(hw, IXGBE_ACI_NVM_START_POINT,
3553 					 offset, (u16)read_size,
3554 					 data + bytes_read, last_cmd,
3555 					 read_shadow_ram);
3556 		if (err)
3557 			break;
3558 
3559 		bytes_read += read_size;
3560 		offset += read_size;
3561 	} while (!last_cmd);
3562 
3563 	*length = bytes_read;
3564 	return err;
3565 }
3566 
3567 /**
3568  * ixgbe_read_sr_buf_aci - Read Shadow RAM buffer via ACI
3569  * @hw: pointer to the HW structure
3570  * @offset: offset of the Shadow RAM words to read (0x000000 - 0x001FFF)
3571  * @words: (in) number of words to read; (out) number of words actually read
3572  * @data: words read from the Shadow RAM
3573  *
3574  * Read 16 bit words (data buf) from the Shadow RAM. Acquire/release the NVM
3575  * ownership.
3576  *
3577  * Return: the operation exit code.
3578  */
3579 int ixgbe_read_sr_buf_aci(struct ixgbe_hw *hw, u16 offset, u16 *words,
3580 			  u16 *data)
3581 {
3582 	u32 bytes = *words * 2;
3583 	int err;
3584 
3585 	err = ixgbe_read_flat_nvm(hw, offset * 2, &bytes, (u8 *)data, true);
3586 	if (err)
3587 		return err;
3588 
3589 	*words = bytes / 2;
3590 
3591 	for (int i = 0; i < *words; i++)
3592 		data[i] = le16_to_cpu(((__le16 *)data)[i]);
3593 
3594 	return 0;
3595 }
3596 
3597 /**
3598  * ixgbe_read_ee_aci_e610 - Read EEPROM word using the admin command.
3599  * @hw: pointer to hardware structure
3600  * @offset: offset of  word in the EEPROM to read
3601  * @data: word read from the EEPROM
3602  *
3603  * Reads a 16 bit word from the EEPROM using the ACI.
3604  * If the EEPROM params are not initialized, the function
3605  * initialize them before proceeding with reading.
3606  * The function acquires and then releases the NVM ownership.
3607  *
3608  * Return: the exit code of the operation.
3609  */
3610 int ixgbe_read_ee_aci_e610(struct ixgbe_hw *hw, u16 offset, u16 *data)
3611 {
3612 	int err;
3613 
3614 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
3615 		err = hw->eeprom.ops.init_params(hw);
3616 		if (err)
3617 			return err;
3618 	}
3619 
3620 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3621 	if (err)
3622 		return err;
3623 
3624 	err = ixgbe_read_sr_word_aci(hw, offset, data);
3625 	ixgbe_release_nvm(hw);
3626 
3627 	return err;
3628 }
3629 
3630 /**
3631  * ixgbe_read_ee_aci_buffer_e610 - Read EEPROM words via ACI
3632  * @hw: pointer to hardware structure
3633  * @offset: offset of words in the EEPROM to read
3634  * @words: number of words to read
3635  * @data: words to read from the EEPROM
3636  *
3637  * Read 16 bit words from the EEPROM via the ACI. Initialize the EEPROM params
3638  * prior to the read. Acquire/release the NVM ownership.
3639  *
3640  * Return: the operation exit code.
3641  */
3642 int ixgbe_read_ee_aci_buffer_e610(struct ixgbe_hw *hw, u16 offset,
3643 				  u16 words, u16 *data)
3644 {
3645 	int err;
3646 
3647 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
3648 		err = hw->eeprom.ops.init_params(hw);
3649 		if (err)
3650 			return err;
3651 	}
3652 
3653 	err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3654 	if (err)
3655 		return err;
3656 
3657 	err = ixgbe_read_sr_buf_aci(hw, offset, &words, data);
3658 	ixgbe_release_nvm(hw);
3659 
3660 	return err;
3661 }
3662 
3663 /**
3664  * ixgbe_validate_eeprom_checksum_e610 - Validate EEPROM checksum
3665  * @hw: pointer to hardware structure
3666  * @checksum_val: calculated checksum
3667  *
3668  * Performs checksum calculation and validates the EEPROM checksum. If the
3669  * caller does not need checksum_val, the value can be NULL.
3670  * If the EEPROM params are not initialized, the function
3671  * initialize them before proceeding.
3672  * The function acquires and then releases the NVM ownership.
3673  *
3674  * Return: the exit code of the operation.
3675  */
3676 int ixgbe_validate_eeprom_checksum_e610(struct ixgbe_hw *hw, u16 *checksum_val)
3677 {
3678 	int err;
3679 
3680 	if (hw->eeprom.type == ixgbe_eeprom_uninitialized) {
3681 		err = hw->eeprom.ops.init_params(hw);
3682 		if (err)
3683 			return err;
3684 	}
3685 
3686 	err = ixgbe_nvm_validate_checksum(hw);
3687 	if (err)
3688 		return err;
3689 
3690 	if (checksum_val) {
3691 		u16 tmp_checksum;
3692 
3693 		err = ixgbe_acquire_nvm(hw, IXGBE_RES_READ);
3694 		if (err)
3695 			return err;
3696 
3697 		err = ixgbe_read_sr_word_aci(hw, IXGBE_E610_SR_SW_CHECKSUM_WORD,
3698 					     &tmp_checksum);
3699 		ixgbe_release_nvm(hw);
3700 
3701 		if (!err)
3702 			*checksum_val = tmp_checksum;
3703 	}
3704 
3705 	return err;
3706 }
3707 
3708 /**
3709  * ixgbe_reset_hw_e610 - Perform hardware reset
3710  * @hw: pointer to hardware structure
3711  *
3712  * Resets the hardware by resetting the transmit and receive units, masks
3713  * and clears all interrupts, and performs a reset.
3714  *
3715  * Return: the exit code of the operation.
3716  */
3717 int ixgbe_reset_hw_e610(struct ixgbe_hw *hw)
3718 {
3719 	u32 swfw_mask = hw->phy.phy_semaphore_mask;
3720 	u32 ctrl, i;
3721 	int err;
3722 
3723 	/* Call adapter stop to disable tx/rx and clear interrupts */
3724 	err = hw->mac.ops.stop_adapter(hw);
3725 	if (err)
3726 		goto reset_hw_out;
3727 
3728 	/* Flush pending Tx transactions. */
3729 	ixgbe_clear_tx_pending(hw);
3730 
3731 	hw->phy.ops.init(hw);
3732 mac_reset_top:
3733 	err = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask);
3734 	if (err)
3735 		return -EBUSY;
3736 	ctrl = IXGBE_CTRL_RST;
3737 	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
3738 	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
3739 	IXGBE_WRITE_FLUSH(hw);
3740 	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
3741 
3742 	/* Poll for reset bit to self-clear indicating reset is complete */
3743 	for (i = 0; i < 10; i++) {
3744 		udelay(1);
3745 		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
3746 		if (!(ctrl & IXGBE_CTRL_RST_MASK))
3747 			break;
3748 	}
3749 
3750 	if (ctrl & IXGBE_CTRL_RST_MASK) {
3751 		struct ixgbe_adapter *adapter = container_of(hw, struct ixgbe_adapter,
3752 							     hw);
3753 
3754 		err = -EIO;
3755 		netdev_err(adapter->netdev, "Reset polling failed to complete.");
3756 	}
3757 
3758 	/* Double resets are required for recovery from certain error
3759 	 * conditions. Between resets, it is necessary to stall to allow time
3760 	 * for any pending HW events to complete.
3761 	 */
3762 	msleep(100);
3763 	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
3764 		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
3765 		goto mac_reset_top;
3766 	}
3767 
3768 	/* Set the Rx packet buffer size. */
3769 	IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), GENMASK(18, 17));
3770 
3771 	/* Store the permanent mac address */
3772 	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
3773 
3774 	/* Maximum number of Receive Address Registers. */
3775 #define IXGBE_MAX_NUM_RAR		128
3776 
3777 	/* Store MAC address from RAR0, clear receive address registers, and
3778 	 * clear the multicast table.  Also reset num_rar_entries to the
3779 	 * maximum number of Receive Address Registers, since we modify this
3780 	 * value when programming the SAN MAC address.
3781 	 */
3782 	hw->mac.num_rar_entries = IXGBE_MAX_NUM_RAR;
3783 	hw->mac.ops.init_rx_addrs(hw);
3784 
3785 	/* Initialize bus function number */
3786 	hw->mac.ops.set_lan_id(hw);
3787 
3788 reset_hw_out:
3789 	return err;
3790 }
3791 
3792 /**
3793  * ixgbe_get_pfa_module_tlv - Read sub module TLV from NVM PFA
3794  * @hw: pointer to hardware structure
3795  * @module_tlv: pointer to module TLV to return
3796  * @module_tlv_len: pointer to module TLV length to return
3797  * @module_type: module type requested
3798  *
3799  * Find the requested sub module TLV type from the Preserved Field
3800  * Area (PFA) and returns the TLV pointer and length. The caller can
3801  * use these to read the variable length TLV value.
3802  *
3803  * Return: the exit code of the operation.
3804  */
3805 static int ixgbe_get_pfa_module_tlv(struct ixgbe_hw *hw, u16 *module_tlv,
3806 				    u16 *module_tlv_len, u16 module_type)
3807 {
3808 	u16 pfa_len, pfa_ptr, pfa_end_ptr;
3809 	u16 next_tlv;
3810 	int err;
3811 
3812 	err = ixgbe_read_ee_aci_e610(hw, IXGBE_E610_SR_PFA_PTR, &pfa_ptr);
3813 	if (err)
3814 		return err;
3815 
3816 	err = ixgbe_read_ee_aci_e610(hw, pfa_ptr, &pfa_len);
3817 	if (err)
3818 		return err;
3819 
3820 	/* Starting with first TLV after PFA length, iterate through the list
3821 	 * of TLVs to find the requested one.
3822 	 */
3823 	next_tlv = pfa_ptr + 1;
3824 	pfa_end_ptr = pfa_ptr + pfa_len;
3825 	while (next_tlv < pfa_end_ptr) {
3826 		u16 tlv_sub_module_type, tlv_len;
3827 
3828 		/* Read TLV type */
3829 		err = ixgbe_read_ee_aci_e610(hw, next_tlv,
3830 					     &tlv_sub_module_type);
3831 		if (err)
3832 			break;
3833 
3834 		/* Read TLV length */
3835 		err = ixgbe_read_ee_aci_e610(hw, next_tlv + 1, &tlv_len);
3836 		if (err)
3837 			break;
3838 
3839 		if (tlv_sub_module_type == module_type) {
3840 			if (tlv_len) {
3841 				*module_tlv = next_tlv;
3842 				*module_tlv_len = tlv_len;
3843 				return 0;
3844 			}
3845 			return -EIO;
3846 		}
3847 		/* Check next TLV, i.e. current TLV pointer + length + 2 words
3848 		 * (for current TLV's type and length).
3849 		 */
3850 		next_tlv = next_tlv + tlv_len + 2;
3851 	}
3852 	/* Module does not exist */
3853 	return -ENODATA;
3854 }
3855 
3856 /**
3857  * ixgbe_read_pba_string_e610 - Read PBA string from NVM
3858  * @hw: pointer to hardware structure
3859  * @pba_num: stores the part number string from the NVM
3860  * @pba_num_size: part number string buffer length
3861  *
3862  * Read the part number string from the NVM.
3863  *
3864  * Return: the exit code of the operation.
3865  */
3866 static int ixgbe_read_pba_string_e610(struct ixgbe_hw *hw, u8 *pba_num,
3867 				      u32 pba_num_size)
3868 {
3869 	u16 pba_tlv, pba_tlv_len;
3870 	u16 pba_word, pba_size;
3871 	int err;
3872 
3873 	*pba_num = '\0';
3874 
3875 	err = ixgbe_get_pfa_module_tlv(hw, &pba_tlv, &pba_tlv_len,
3876 				       IXGBE_E610_SR_PBA_BLOCK_PTR);
3877 	if (err)
3878 		return err;
3879 
3880 	/* pba_size is the next word */
3881 	err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2), &pba_size);
3882 	if (err)
3883 		return err;
3884 
3885 	if (pba_tlv_len < pba_size)
3886 		return -EINVAL;
3887 
3888 	/* Subtract one to get PBA word count (PBA Size word is included in
3889 	 * total size).
3890 	 */
3891 	pba_size--;
3892 
3893 	if (pba_num_size < (((u32)pba_size * 2) + 1))
3894 		return -EINVAL;
3895 
3896 	for (u16 i = 0; i < pba_size; i++) {
3897 		err = ixgbe_read_ee_aci_e610(hw, (pba_tlv + 2 + 1) + i,
3898 					     &pba_word);
3899 		if (err)
3900 			return err;
3901 
3902 		pba_num[(i * 2)] = FIELD_GET(IXGBE_E610_SR_PBA_BLOCK_MASK,
3903 					     pba_word);
3904 		pba_num[(i * 2) + 1] = pba_word & 0xFF;
3905 	}
3906 
3907 	pba_num[(pba_size * 2)] = '\0';
3908 
3909 	return err;
3910 }
3911 
/* MAC operations for E610 devices: a mix of generic ixgbe handlers,
 * helpers shared with X540/X550 parts, and E610-specific routines that
 * use the FW Admin Command Interface (see the ACI helpers in this file).
 */
static const struct ixgbe_mac_operations mac_ops_e610 = {
	.init_hw			= ixgbe_init_hw_generic,
	.start_hw			= ixgbe_start_hw_e610,
	.clear_hw_cntrs			= ixgbe_clear_hw_cntrs_generic,
	.enable_rx_dma			= ixgbe_enable_rx_dma_generic,
	.get_mac_addr			= ixgbe_get_mac_addr_generic,
	.get_device_caps		= ixgbe_get_device_caps_generic,
	.stop_adapter			= ixgbe_stop_adapter_generic,
	.set_lan_id			= ixgbe_set_lan_id_multi_port_pcie,
	.set_rxpba			= ixgbe_set_rxpba_generic,
	.check_link			= ixgbe_check_link_e610,
	.blink_led_start		= ixgbe_blink_led_start_X540,
	.blink_led_stop			= ixgbe_blink_led_stop_X540,
	.set_rar			= ixgbe_set_rar_generic,
	.clear_rar			= ixgbe_clear_rar_generic,
	.set_vmdq			= ixgbe_set_vmdq_generic,
	.set_vmdq_san_mac		= ixgbe_set_vmdq_san_mac_generic,
	.clear_vmdq			= ixgbe_clear_vmdq_generic,
	.init_rx_addrs			= ixgbe_init_rx_addrs_generic,
	.update_mc_addr_list		= ixgbe_update_mc_addr_list_generic,
	.enable_mc			= ixgbe_enable_mc_generic,
	.disable_mc			= ixgbe_disable_mc_generic,
	.clear_vfta			= ixgbe_clear_vfta_generic,
	.set_vfta			= ixgbe_set_vfta_generic,
	.fc_enable			= ixgbe_fc_enable_generic,
	.set_fw_drv_ver			= ixgbe_set_fw_drv_ver_x550,
	.init_uta_tables		= ixgbe_init_uta_tables_generic,
	.set_mac_anti_spoofing		= ixgbe_set_mac_anti_spoofing,
	.set_vlan_anti_spoofing		= ixgbe_set_vlan_anti_spoofing,
	.set_source_address_pruning	=
				ixgbe_set_source_address_pruning_x550,
	.set_ethertype_anti_spoofing	=
				ixgbe_set_ethertype_anti_spoofing_x550,
	.disable_rx_buff		= ixgbe_disable_rx_buff_generic,
	.enable_rx_buff			= ixgbe_enable_rx_buff_generic,
	.enable_rx			= ixgbe_enable_rx_generic,
	.disable_rx			= ixgbe_disable_rx_e610,
	.led_on				= ixgbe_led_on_generic,
	.led_off			= ixgbe_led_off_generic,
	.init_led_link_act		= ixgbe_init_led_link_act_generic,
	.reset_hw			= ixgbe_reset_hw_e610,
	.get_fw_ver                     = ixgbe_aci_get_fw_ver,
	.get_media_type			= ixgbe_get_media_type_e610,
	.setup_link			= ixgbe_setup_link_e610,
	.fw_recovery_mode		= ixgbe_fw_recovery_mode_e610,
	.fw_rollback_mode		= ixgbe_fw_rollback_mode_e610,
	.get_nvm_ver			= ixgbe_get_active_nvm_ver,
	.get_link_capabilities		= ixgbe_get_link_capabilities_e610,
	.get_bus_info			= ixgbe_get_bus_info_generic,
	.acquire_swfw_sync		= ixgbe_acquire_swfw_sync_X540,
	.release_swfw_sync		= ixgbe_release_swfw_sync_X540,
	.init_swfw_sync			= ixgbe_init_swfw_sync_X540,
	.prot_autoc_read		= prot_autoc_read_generic,
	.prot_autoc_write		= prot_autoc_write_generic,
	.setup_fc			= ixgbe_setup_fc_e610,
	.fc_autoneg			= ixgbe_fc_autoneg_e610,
};
3969 
/* PHY operations for E610 devices; identification, link setup and LPLU
 * entry are E610-specific, link-speed setup reuses the generic helper.
 */
static const struct ixgbe_phy_operations phy_ops_e610 = {
	.init				= ixgbe_init_phy_ops_e610,
	.identify			= ixgbe_identify_phy_e610,
	.identify_sfp			= ixgbe_identify_module_e610,
	.setup_link_speed		= ixgbe_setup_phy_link_speed_generic,
	.setup_link			= ixgbe_setup_phy_link_e610,
	.enter_lplu			= ixgbe_enter_lplu_e610,
};
3978 
/* EEPROM/NVM operations for E610 devices; reads and checksum validation
 * go through the ACI-backed *_e610 helpers defined in this file.
 */
static const struct ixgbe_eeprom_operations eeprom_ops_e610 = {
	.read				= ixgbe_read_ee_aci_e610,
	.read_buffer			= ixgbe_read_ee_aci_buffer_e610,
	.validate_checksum		= ixgbe_validate_eeprom_checksum_e610,
	.read_pba_string		= ixgbe_read_pba_string_e610,
	.init_params			= ixgbe_init_eeprom_params_e610,
};
3986 
/* Top-level device description for E610 MACs: ties the E610 operation
 * tables above to the X540 invariants, the generic mailbox ops and the
 * x550em_a register-value table.
 */
const struct ixgbe_info ixgbe_e610_info = {
	.mac			= ixgbe_mac_e610,
	.get_invariants		= ixgbe_get_invariants_X540,
	.mac_ops		= &mac_ops_e610,
	.eeprom_ops		= &eeprom_ops_e610,
	.phy_ops		= &phy_ops_e610,
	.mbx_ops		= &mbx_ops_generic,
	.mvals			= ixgbe_mvals_x550em_a,
};
3996