xref: /linux/drivers/net/ethernet/intel/ixgbevf/vf.c (revision a55f7f5f29b32c2c53cc291899cf9b0c25a07f7c)
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2024 Intel Corporation. */
3 
4 #include "vf.h"
5 #include "ixgbevf.h"
6 
7 /* On Hyper-V, to reset, we need to read from this offset
8  * from the PCI config space. This is the mechanism used on
9  * Hyper-V to support PF/VF communication.
10  */
11 #define IXGBE_HV_RESET_OFFSET           0x201
12 
ixgbevf_write_msg_read_ack(struct ixgbe_hw * hw,u32 * msg,u32 * retmsg,u16 size)13 static inline s32 ixgbevf_write_msg_read_ack(struct ixgbe_hw *hw, u32 *msg,
14 					     u32 *retmsg, u16 size)
15 {
16 	s32 retval = ixgbevf_write_mbx(hw, msg, size);
17 
18 	if (retval)
19 		return retval;
20 
21 	return ixgbevf_poll_mbx(hw, retmsg, size);
22 }
23 
24 /**
25  *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
26  *  @hw: pointer to hardware structure
27  *
28  *  Starts the hardware by filling the bus info structure and media type, clears
29  *  all on chip counters, initializes receive address registers, multicast
30  *  table, VLAN filter table, calls routine to set up link and flow control
31  *  settings, and leaves transmit and receive units disabled and uninitialized
32  **/
ixgbevf_start_hw_vf(struct ixgbe_hw * hw)33 static s32 ixgbevf_start_hw_vf(struct ixgbe_hw *hw)
34 {
35 	/* Clear adapter stopped flag */
36 	hw->adapter_stopped = false;
37 
38 	return 0;
39 }
40 
41 /**
42  *  ixgbevf_init_hw_vf - virtual function hardware initialization
43  *  @hw: pointer to hardware structure
44  *
45  *  Initialize the hardware by resetting the hardware and then starting
46  *  the hardware
47  **/
ixgbevf_init_hw_vf(struct ixgbe_hw * hw)48 static s32 ixgbevf_init_hw_vf(struct ixgbe_hw *hw)
49 {
50 	s32 status = hw->mac.ops.start_hw(hw);
51 
52 	hw->mac.ops.get_mac_addr(hw, hw->mac.addr);
53 
54 	return status;
55 }
56 
/**
 *  ixgbevf_reset_hw_vf - Performs hardware reset
 *  @hw: pointer to hardware structure
 *
 *  Resets the hardware by resetting the transmit and receive units, masks and
 *  clears all interrupts.  On success the permanent MAC address and the
 *  multicast filter type are refreshed from the PF's reset reply.
 **/
static s32 ixgbevf_reset_hw_vf(struct ixgbe_hw *hw)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	u32 timeout = IXGBE_VF_INIT_TIMEOUT;
	u32 msgbuf[IXGBE_VF_PERMADDR_MSG_LEN];
	u8 *addr = (u8 *)(&msgbuf[1]);	/* MAC bytes start at word 1 of the reply */
	s32 ret_val;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	hw->mac.ops.stop_adapter(hw);

	/* reset the api version */
	hw->api_version = ixgbe_mbox_api_10;
	hw->mbx.ops.init_params(hw);
	/* fall back to the legacy mailbox ops until a new API is negotiated */
	memcpy(&hw->mbx.ops, &ixgbevf_mbx_ops_legacy,
	       sizeof(struct ixgbe_mbx_operations));

	/* trigger the VF function-level reset */
	IXGBE_WRITE_REG(hw, IXGBE_VFCTRL, IXGBE_CTRL_RST);
	IXGBE_WRITE_FLUSH(hw);

	/* we cannot reset while the RSTI / RSTD bits are asserted */
	while (!mbx->ops.check_for_rst(hw) && timeout) {
		timeout--;
		udelay(5);
	}

	if (!timeout)
		return IXGBE_ERR_RESET_FAILED;

	/* mailbox timeout can now become active */
	mbx->timeout = IXGBE_VF_MBX_INIT_TIMEOUT;

	msgbuf[0] = IXGBE_VF_RESET;
	ixgbevf_write_mbx(hw, msgbuf, 1);

	/* give the PF time to process the reset request */
	mdelay(10);

	/* set our "perm_addr" based on info provided by PF
	 * also set up the mc_filter_type which is piggy backed
	 * on the mac address in word 3
	 */
	ret_val = ixgbevf_poll_mbx(hw, msgbuf, IXGBE_VF_PERMADDR_MSG_LEN);
	if (ret_val)
		return ret_val;

	/* New versions of the PF may NACK the reset return message
	 * to indicate that no MAC address has yet been assigned for
	 * the VF.
	 */
	if (msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS) &&
	    msgbuf[0] != (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_FAILURE))
		return IXGBE_ERR_INVALID_MAC_ADDR;

	/* only trust the MAC bytes on an ACKed reply */
	if (msgbuf[0] == (IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_SUCCESS))
		ether_addr_copy(hw->mac.perm_addr, addr);

	hw->mac.mc_filter_type = msgbuf[IXGBE_VF_MC_TYPE_WORD];

	return 0;
}
124 
/**
 * ixgbevf_hv_reset_hw_vf - reset via Hyper-V
 * @hw: pointer to private hardware struct
 *
 * Hyper-V variant; the VF/PF communication is through the PCI
 * config space.  Reading the bytes at IXGBE_HV_RESET_OFFSET performs
 * the reset handshake and yields the permanent MAC address, one byte
 * per config-space offset.
 */
static s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw)
{
#if IS_ENABLED(CONFIG_PCI_MMCONFIG)
	struct ixgbevf_adapter *adapter = hw->back;
	int i;

	/* 6 bytes = one Ethernet address, stored straight into perm_addr */
	for (i = 0; i < 6; i++)
		pci_read_config_byte(adapter->pdev,
				     (i + IXGBE_HV_RESET_OFFSET),
				     &hw->mac.perm_addr[i]);
	return 0;
#else
	/* without MMCONFIG the extended config space is unreachable */
	pr_err("PCI_MMCONFIG needs to be enabled for Hyper-V\n");
	return -EOPNOTSUPP;
#endif
}
148 
/**
 *  ixgbevf_stop_hw_vf - Generic stop Tx/Rx units
 *  @hw: pointer to hardware structure
 *
 *  Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts,
 *  disables transmit and receive units. The adapter_stopped flag is used by
 *  the shared code and drivers to determine if the adapter is in a stopped
 *  state and should not touch the hardware.
 **/
static s32 ixgbevf_stop_hw_vf(struct ixgbe_hw *hw)
{
	u32 number_of_queues;
	u32 reg_val;
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	hw->adapter_stopped = true;

	/* Disable the receive unit by stopping each queue */
	number_of_queues = hw->mac.max_rx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
		if (reg_val & IXGBE_RXDCTL_ENABLE) {
			reg_val &= ~IXGBE_RXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(i), reg_val);
		}
	}

	/* make sure the Rx disables have reached the hardware */
	IXGBE_WRITE_FLUSH(hw);

	/* Clear interrupt mask to stop from interrupts being generated */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);

	/* Clear any pending interrupts (read-to-clear) */
	IXGBE_READ_REG(hw, IXGBE_VTEICR);

	/* Disable the transmit unit.  Each queue must be disabled. */
	number_of_queues = hw->mac.max_tx_queues;
	for (i = 0; i < number_of_queues; i++) {
		reg_val = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
		if (reg_val & IXGBE_TXDCTL_ENABLE) {
			reg_val &= ~IXGBE_TXDCTL_ENABLE;
			IXGBE_WRITE_REG(hw, IXGBE_VFTXDCTL(i), reg_val);
		}
	}

	return 0;
}
199 
200 /**
201  *  ixgbevf_mta_vector - Determines bit-vector in multicast table to set
202  *  @hw: pointer to hardware structure
203  *  @mc_addr: the multicast address
204  *
205  *  Extracts the 12 bits, from a multicast address, to determine which
206  *  bit-vector to set in the multicast table. The hardware uses 12 bits, from
207  *  incoming Rx multicast addresses, to determine the bit-vector to check in
208  *  the MTA. Which of the 4 combination, of 12-bits, the hardware uses is set
209  *  by the MO field of the MCSTCTRL. The MO field is set during initialization
210  *  to mc_filter_type.
211  **/
ixgbevf_mta_vector(struct ixgbe_hw * hw,u8 * mc_addr)212 static s32 ixgbevf_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr)
213 {
214 	u32 vector = 0;
215 
216 	switch (hw->mac.mc_filter_type) {
217 	case 0:   /* use bits [47:36] of the address */
218 		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
219 		break;
220 	case 1:   /* use bits [46:35] of the address */
221 		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
222 		break;
223 	case 2:   /* use bits [45:34] of the address */
224 		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
225 		break;
226 	case 3:   /* use bits [43:32] of the address */
227 		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
228 		break;
229 	default:  /* Invalid mc_filter_type */
230 		break;
231 	}
232 
233 	/* vector can only be 12-bits or boundary will be exceeded */
234 	vector &= 0xFFF;
235 	return vector;
236 }
237 
238 /**
239  *  ixgbevf_get_mac_addr_vf - Read device MAC address
240  *  @hw: pointer to the HW structure
241  *  @mac_addr: pointer to storage for retrieved MAC address
242  **/
ixgbevf_get_mac_addr_vf(struct ixgbe_hw * hw,u8 * mac_addr)243 static s32 ixgbevf_get_mac_addr_vf(struct ixgbe_hw *hw, u8 *mac_addr)
244 {
245 	ether_addr_copy(mac_addr, hw->mac.perm_addr);
246 
247 	return 0;
248 }
249 
ixgbevf_set_uc_addr_vf(struct ixgbe_hw * hw,u32 index,u8 * addr)250 static s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
251 {
252 	u32 msgbuf[3], msgbuf_chk;
253 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
254 	s32 ret_val;
255 
256 	memset(msgbuf, 0, sizeof(msgbuf));
257 	/* If index is one then this is the start of a new list and needs
258 	 * indication to the PF so it can do its own list management.
259 	 * If it is zero then that tells the PF to just clear all of
260 	 * this VF's macvlans and there is no new list.
261 	 */
262 	msgbuf[0] |= index << IXGBE_VT_MSGINFO_SHIFT;
263 	msgbuf[0] |= IXGBE_VF_SET_MACVLAN;
264 	msgbuf_chk = msgbuf[0];
265 
266 	if (addr)
267 		ether_addr_copy(msg_addr, addr);
268 
269 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
270 					     ARRAY_SIZE(msgbuf));
271 	if (!ret_val) {
272 		msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
273 
274 		if (msgbuf[0] == (msgbuf_chk | IXGBE_VT_MSGTYPE_FAILURE))
275 			return -ENOMEM;
276 	}
277 
278 	return ret_val;
279 }
280 
/**
 * ixgbevf_hv_set_uc_addr_vf - stub
 * @hw: unused
 * @index: unused
 * @addr: unused
 *
 * Hyper-V variant - there is no mailbox, so unicast filters cannot
 * be programmed; just a stub.
 */
static s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr)
{
	return -EOPNOTSUPP;
}
285 
/**
 * ixgbevf_get_reta_locked - get the RSS redirection table (RETA) contents.
 * @hw: pointer to hardware structure
 * @reta: buffer to fill with RETA contents.
 * @num_rx_queues: Number of Rx queues configured for this port
 *
 * The "reta" buffer should be big enough to contain 32 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_reta_locked(struct ixgbe_hw *hw, u32 *reta, int num_rx_queues)
{
	int err, i, j;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u32 *hw_reta = &msgbuf[1];	/* RETA data follows the msg-type word */
	u32 mask = 0;

	/* We have to use a mailbox for 82599 and x540 devices only.
	 * For these devices RETA has 128 entries.
	 * Also these VFs support up to 4 RSS queues. Therefore PF will compress
	 * 16 RETA entries in each DWORD giving 2 bits to each entry.
	 */
	int dwords = IXGBEVF_82599_RETA_SIZE / 16;

	/* We support the RSS querying for 82599 and x540 devices only.
	 * Thus return an error if API doesn't support RETA querying or querying
	 * is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_17:
	case ixgbe_mbox_api_16:
	case ixgbe_mbox_api_15:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		/* only pre-X550 VFs use the mailbox RETA query */
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RETA;

	err = ixgbevf_write_mbx(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is the msg-type word plus the compressed RETA dwords */
	err = ixgbevf_poll_mbx(hw, msgbuf, dwords + 1);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RETA | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;

	/* ixgbevf doesn't support more than 2 queues at the moment */
	if (num_rx_queues > 1)
		mask = 0x1;

	/* unpack the 2-bit entries; mask clamps them to the supported queues */
	for (i = 0; i < dwords; i++)
		for (j = 0; j < 16; j++)
			reta[i * 16 + j] = (hw_reta[i] >> (2 * j)) & mask;

	return 0;
}
364 
/**
 * ixgbevf_get_rss_key_locked - get the RSS Random Key
 * @hw: pointer to the HW structure
 * @rss_key: buffer to fill with RSS Hash Key contents.
 *
 * The "rss_key" buffer should be big enough to contain 10 registers.
 *
 * Returns: 0 on success.
 *          if API doesn't support this operation - (-EOPNOTSUPP).
 */
int ixgbevf_get_rss_key_locked(struct ixgbe_hw *hw, u8 *rss_key)
{
	int err;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];

	/* We currently support the RSS Random Key retrieval for 82599 and x540
	 * devices only.
	 *
	 * Thus return an error if API doesn't support RSS Random Key retrieval
	 * or if the operation is not supported for this device type.
	 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_17:
	case ixgbe_mbox_api_16:
	case ixgbe_mbox_api_15:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_12:
		/* only pre-X550 VFs use the mailbox key query */
		if (hw->mac.type < ixgbe_mac_X550_vf)
			break;
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_RSS_KEY;
	err = ixgbevf_write_mbx(hw, msgbuf, 1);

	if (err)
		return err;

	/* reply is 11 dwords: 1 msg-type word + 10 key words */
	err = ixgbevf_poll_mbx(hw, msgbuf, 11);

	if (err)
		return err;

	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;

	/* If the operation has been refused by a PF return -EPERM */
	if (msgbuf[0] == (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	/* If we didn't get an ACK there must have been
	 * some sort of mailbox error so we should treat it
	 * as such.
	 */
	if (msgbuf[0] != (IXGBE_VF_GET_RSS_KEY | IXGBE_VT_MSGTYPE_SUCCESS))
		return IXGBE_ERR_MBX;

	memcpy(rss_key, msgbuf + 1, IXGBEVF_RSS_HASH_KEY_SIZE);

	return 0;
}
428 
429 /**
430  *  ixgbevf_set_rar_vf - set device MAC address
431  *  @hw: pointer to hardware structure
432  *  @index: Receive address register to write
433  *  @addr: Address to put into receive address register
434  *  @vmdq: Unused in this implementation
435  **/
ixgbevf_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)436 static s32 ixgbevf_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
437 			      u32 vmdq)
438 {
439 	u32 msgbuf[3];
440 	u8 *msg_addr = (u8 *)(&msgbuf[1]);
441 	s32 ret_val;
442 
443 	memset(msgbuf, 0, sizeof(msgbuf));
444 	msgbuf[0] = IXGBE_VF_SET_MAC_ADDR;
445 	ether_addr_copy(msg_addr, addr);
446 
447 	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
448 					     ARRAY_SIZE(msgbuf));
449 	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
450 
451 	/* if nacked the address was rejected, use "perm_addr" */
452 	if (!ret_val &&
453 	    (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_FAILURE))) {
454 		ixgbevf_get_mac_addr_vf(hw, hw->mac.addr);
455 		return IXGBE_ERR_MBX;
456 	}
457 
458 	return ret_val;
459 }
460 
461 /**
462  *  ixgbevf_hv_set_rar_vf - set device MAC address Hyper-V variant
463  *  @hw: pointer to hardware structure
464  *  @index: Receive address register to write
465  *  @addr: Address to put into receive address register
466  *  @vmdq: Unused in this implementation
467  *
468  * We don't really allow setting the device MAC address. However,
469  * if the address being set is the permanent MAC address we will
470  * permit that.
471  **/
ixgbevf_hv_set_rar_vf(struct ixgbe_hw * hw,u32 index,u8 * addr,u32 vmdq)472 static s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr,
473 				 u32 vmdq)
474 {
475 	if (ether_addr_equal(addr, hw->mac.perm_addr))
476 		return 0;
477 
478 	return -EOPNOTSUPP;
479 }
480 
/**
 *  ixgbevf_update_mc_addr_list_vf - Update Multicast addresses
 *  @hw: pointer to the HW structure
 *  @netdev: pointer to net device structure
 *
 *  Updates the Multicast Table Array.
 **/
static s32 ixgbevf_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					  struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 msgbuf[IXGBE_VFMAILBOX_SIZE];
	u16 *vector_list = (u16 *)&msgbuf[1];	/* hash words follow the type word */
	u32 cnt, i;

	/* Each entry in the list uses 1 16 bit word.  We have 30
	 * 16 bit words available in our HW msg buffer (minus 1 for the
	 * msg type).  That's 30 hash values if we pack 'em right.  If
	 * there are more than 30 MC addresses to add then punt the
	 * extras for now and then add code to handle more than 30 later.
	 * It would be unusual for a server to request that many multi-cast
	 * addresses except for in large enterprise network environments.
	 */

	cnt = netdev_mc_count(netdev);
	if (cnt > 30)
		cnt = 30;
	msgbuf[0] = IXGBE_VF_SET_MULTICAST;
	msgbuf[0] |= cnt << IXGBE_VT_MSGINFO_SHIFT;

	i = 0;
	netdev_for_each_mc_addr(ha, netdev) {
		if (i == cnt)
			break;
		/* NOTE(review): link-local addresses are skipped here but are
		 * still included in the cnt announced above — confirm the PF
		 * tolerates fewer hash words than advertised.
		 */
		if (is_link_local_ether_addr(ha->addr))
			continue;

		vector_list[i++] = ixgbevf_mta_vector(hw, ha->addr);
	}

	return ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
			IXGBE_VFMAILBOX_SIZE);
}
524 
/**
 * ixgbevf_hv_update_mc_addr_list_vf - stub
 * @hw: unused
 * @netdev: unused
 *
 * Hyper-V variant - there is no mailbox to talk to the PF, so the
 * multicast list cannot be pushed; just a stub.
 */
static s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw,
					     struct net_device *netdev)
{
	return -EOPNOTSUPP;
}
537 
/**
 *  ixgbevf_update_xcast_mode - Update Multicast mode
 *  @hw: pointer to the HW structure
 *  @xcast_mode: new multicast mode
 *
 *  Updates the Multicast Mode of VF.  Returns 0 on success, -EOPNOTSUPP
 *  if the mailbox API is too old for the request, or -EPERM when the PF
 *  refuses the mode change.
 **/
static s32 ixgbevf_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	u32 msgbuf[2];
	s32 err;

	switch (hw->api_version) {
	case ixgbe_mbox_api_12:
		/* promisc introduced in 1.3 version */
		if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC)
			return -EOPNOTSUPP;
		fallthrough;
	case ixgbe_mbox_api_13:
	case ixgbe_mbox_api_14:
	case ixgbe_mbox_api_15:
	case ixgbe_mbox_api_16:
	case ixgbe_mbox_api_17:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
	msgbuf[1] = xcast_mode;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		return err;

	/* strip the CTS bit before checking for a NACK */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_FAILURE))
		return -EPERM;

	return 0;
}
580 
/**
 * ixgbevf_hv_update_xcast_mode - stub
 * @hw: unused
 * @xcast_mode: unused
 *
 * Hyper-V variant - no mailbox, so the multicast mode cannot be
 * changed; just a stub.
 */
static s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode)
{
	return -EOPNOTSUPP;
}
592 
593 /**
594  * ixgbevf_get_link_state_vf - Get VF link state from PF
595  * @hw: pointer to the HW structure
596  * @link_state: link state storage
597  *
598  * Returns state of the operation error or success.
599  */
ixgbevf_get_link_state_vf(struct ixgbe_hw * hw,bool * link_state)600 static s32 ixgbevf_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
601 {
602 	u32 msgbuf[2];
603 	s32 ret_val;
604 	s32 err;
605 
606 	msgbuf[0] = IXGBE_VF_GET_LINK_STATE;
607 	msgbuf[1] = 0x0;
608 
609 	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf, 2);
610 
611 	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
612 		ret_val = IXGBE_ERR_MBX;
613 	} else {
614 		ret_val = 0;
615 		*link_state = msgbuf[1];
616 	}
617 
618 	return ret_val;
619 }
620 
/**
 * ixgbevf_hv_get_link_state_vf - stub
 * @hw: unused
 * @link_state: unused
 *
 * Hyper-V variant; there is no mailbox communication.
 */
static s32 ixgbevf_hv_get_link_state_vf(struct ixgbe_hw *hw, bool *link_state)
{
	return -EOPNOTSUPP;
}
632 
/**
 * ixgbevf_get_pf_link_state - Get PF's link status
 * @hw: pointer to the HW structure
 * @speed: link speed
 * @link_up: indicate if link is up/down
 *
 * Ask PF to provide link_up state and speed of the link.
 *
 * Return: IXGBE_ERR_MBX in the case of mailbox error,
 * -EOPNOTSUPP if the op is not supported or 0 on success.
 */
static int ixgbevf_get_pf_link_state(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
				     bool *link_up)
{
	u32 msgbuf[3] = {};
	int err;

	/* the GET_PF_LINK_STATE op only exists from mailbox API 1.6 on */
	switch (hw->api_version) {
	case ixgbe_mbox_api_16:
	case ixgbe_mbox_api_17:
		break;
	default:
		return -EOPNOTSUPP;
	}

	msgbuf[0] = IXGBE_VF_GET_PF_LINK_STATE;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
		err = IXGBE_ERR_MBX;
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
		/* No need to set @link_up to false as it will be done by
		 * ixgbe_check_mac_link_vf().
		 */
	} else {
		/* reply: word 1 = speed, word 2 = link-up flag */
		*speed = msgbuf[1];
		*link_up = msgbuf[2];
	}

	return err;
}
675 
/**
 * ixgbevf_negotiate_features_vf - negotiate supported features with PF driver
 * @hw: pointer to the HW structure
 * @pf_features: bitmask of features supported by PF
 *
 * Return: IXGBE_ERR_MBX in the  case of mailbox error,
 * -EOPNOTSUPP if the op is not supported or 0 on success.
 */
static int ixgbevf_negotiate_features_vf(struct ixgbe_hw *hw, u32 *pf_features)
{
	u32 msgbuf[2] = {};
	int err;

	/* feature negotiation only exists in mailbox API 1.7 */
	switch (hw->api_version) {
	case ixgbe_mbox_api_17:
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* offer our supported feature set; the PF replies with the subset
	 * it agrees to in word 1
	 */
	msgbuf[0] = IXGBE_VF_FEATURES_NEGOTIATE;
	msgbuf[1] = IXGBEVF_SUPPORTED_FEATURES;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));

	if (err || (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE)) {
		/* on failure report no common features at all */
		err = IXGBE_ERR_MBX;
		*pf_features = 0x0;
	} else {
		*pf_features = msgbuf[1];
	}

	return err;
}
711 
/**
 * ixgbevf_hv_negotiate_features_vf - stub
 * @hw: unused
 * @pf_features: unused
 *
 * Hyper-V variant - no mailbox, so no features can be negotiated;
 * just a stub.
 */
static int ixgbevf_hv_negotiate_features_vf(struct ixgbe_hw *hw,
					    u32 *pf_features)
{
	return -EOPNOTSUPP;
}
717 
/**
 *  ixgbevf_set_vfta_vf - Set/Unset VLAN filter table address
 *  @hw: pointer to the HW structure
 *  @vlan: 12 bit VLAN ID
 *  @vind: unused by VF drivers
 *  @vlan_on: if true then set bit, else clear bit
 **/
static s32 ixgbevf_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
			       bool vlan_on)
{
	u32 msgbuf[2];
	s32 err;

	msgbuf[0] = IXGBE_VF_SET_VLAN;
	msgbuf[1] = vlan;
	/* Setting the 8 bit field MSG INFO to TRUE indicates "add" */
	msgbuf[0] |= vlan_on << IXGBE_VT_MSGINFO_SHIFT;

	err = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					 ARRAY_SIZE(msgbuf));
	if (err)
		goto mbx_err;

	/* remove extra bits from the message */
	msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
	msgbuf[0] &= ~(0xFF << IXGBE_VT_MSGINFO_SHIFT);

	/* anything but a clean ACK means the filter was not programmed */
	if (msgbuf[0] != (IXGBE_VF_SET_VLAN | IXGBE_VT_MSGTYPE_SUCCESS))
		err = IXGBE_ERR_INVALID_ARGUMENT;

mbx_err:
	return err;
}
751 
/**
 * ixgbe_read_vflinks - Read VFLINKS register
 * @hw: pointer to the HW structure
 * @speed: link speed
 * @link_up: indicate if link is up/down
 *
 * Get linkup status and link speed from the VFLINKS register.
 * @speed is only written when link is up.
 */
static void ixgbe_read_vflinks(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
			       bool *link_up)
{
	u32 vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

	/* if link status is down no point in checking to see if PF is up */
	if (!(vflinks & IXGBE_LINKS_UP)) {
		*link_up = false;
		return;
	}

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (hw->mac.type == ixgbe_mac_82599_vf) {
		/* re-sample 5 times, 100us apart; bail on the first "down" */
		for (int i = 0; i < 5; i++) {
			udelay(100);
			vflinks = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(vflinks & IXGBE_LINKS_UP)) {
				*link_up = false;
				return;
			}
		}
	}

	/* We reached this point so there's link */
	*link_up = true;

	/* decode the speed field of the last VFLINKS sample */
	switch (vflinks & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	default:
		*speed = IXGBE_LINK_SPEED_UNKNOWN;
	}
}
803 
/**
 * ixgbevf_hv_set_vfta_vf - stub
 * @hw: unused
 * @vlan: unused
 * @vind: unused
 * @vlan_on: unused
 *
 * Hyper-V variant - no mailbox, so VLAN filters cannot be programmed;
 * just a stub.
 */
static s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind,
				  bool vlan_on)
{
	return -EOPNOTSUPP;
}
816 
/**
 *  ixgbevf_setup_mac_link_vf - Setup MAC link settings
 *  @hw: pointer to hardware structure
 *  @speed: Unused in this implementation
 *  @autoneg: Unused in this implementation
 *  @autoneg_wait_to_complete: Unused in this implementation
 *
 *  Do nothing and return success.  VF drivers are not allowed to change
 *  global settings.  Maintained for driver compatibility.
 **/
static s32 ixgbevf_setup_mac_link_vf(struct ixgbe_hw *hw,
				     ixgbe_link_speed speed, bool autoneg,
				     bool autoneg_wait_to_complete)
{
	return 0;
}
833 
834 /**
835  *  ixgbevf_check_mac_link_vf - Get link/speed status
836  *  @hw: pointer to hardware structure
837  *  @speed: pointer to link speed
838  *  @link_up: true is link is up, false otherwise
839  *  @autoneg_wait_to_complete: unused
840  *
841  *  Reads the links register to determine if link is up and the current speed
842  **/
ixgbevf_check_mac_link_vf(struct ixgbe_hw * hw,ixgbe_link_speed * speed,bool * link_up,bool autoneg_wait_to_complete)843 static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
844 				     ixgbe_link_speed *speed,
845 				     bool *link_up,
846 				     bool autoneg_wait_to_complete)
847 {
848 	struct ixgbevf_adapter *adapter = hw->back;
849 	struct ixgbe_mbx_info *mbx = &hw->mbx;
850 	struct ixgbe_mac_info *mac = &hw->mac;
851 	s32 ret_val = 0;
852 	u32 in_msg = 0;
853 
854 	/* If we were hit with a reset drop the link */
855 	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
856 		mac->get_link_status = true;
857 
858 	if (!mac->get_link_status)
859 		goto out;
860 
861 	if (hw->mac.type == ixgbe_mac_e610_vf &&
862 	    hw->api_version >= ixgbe_mbox_api_16) {
863 		ret_val = ixgbevf_get_pf_link_state(hw, speed, link_up);
864 		if (ret_val)
865 			goto out;
866 	} else {
867 		ixgbe_read_vflinks(hw, speed, link_up);
868 		if (*link_up == false)
869 			goto out;
870 	}
871 
872 	/* if the read failed it could just be a mailbox collision, best wait
873 	 * until we are called again and don't report an error
874 	 */
875 	if (mbx->ops.read(hw, &in_msg, 1)) {
876 		if (adapter->pf_features & IXGBEVF_PF_SUP_ESX_MBX)
877 			mac->get_link_status = false;
878 		goto out;
879 	}
880 
881 	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
882 		/* msg is not CTS and is NACK we must have lost CTS status */
883 		if (in_msg & IXGBE_VT_MSGTYPE_FAILURE)
884 			ret_val = -1;
885 		goto out;
886 	}
887 
888 	/* the pf is talking, if we timed out in the past we reinit */
889 	if (!mbx->timeout) {
890 		ret_val = -1;
891 		goto out;
892 	}
893 
894 	/* if we passed all the tests above then the link is up and we no
895 	 * longer need to check for link
896 	 */
897 	mac->get_link_status = false;
898 
899 out:
900 	*link_up = !mac->get_link_status;
901 	return ret_val;
902 }
903 
/**
 * ixgbevf_hv_check_mac_link_vf - check link
 * @hw: pointer to private hardware struct
 * @speed: pointer to link speed
 * @link_up: true is link is up, false otherwise
 * @autoneg_wait_to_complete: unused
 *
 * Hyper-V variant; there is no mailbox communication, so link state
 * comes solely from the VFLINKS register.  @speed is only written
 * when its field decodes to a known value.
 */
static s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw,
					ixgbe_link_speed *speed,
					bool *link_up,
					bool autoneg_wait_to_complete)
{
	struct ixgbe_mbx_info *mbx = &hw->mbx;
	struct ixgbe_mac_info *mac = &hw->mac;
	u32 links_reg;

	/* If we were hit with a reset drop the link */
	if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
		mac->get_link_status = true;

	if (!mac->get_link_status)
		goto out;

	/* if link status is down no point in checking to see if pf is up */
	links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	if (!(links_reg & IXGBE_LINKS_UP))
		goto out;

	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
	 * before the link status is correct
	 */
	if (mac->type == ixgbe_mac_82599_vf) {
		int i;

		/* re-sample 5 times, 100us apart; bail on the first "down" */
		for (i = 0; i < 5; i++) {
			udelay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);

			if (!(links_reg & IXGBE_LINKS_UP))
				goto out;
		}
	}

	switch (links_reg & IXGBE_LINKS_SPEED_82599) {
	case IXGBE_LINKS_SPEED_10G_82599:
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_1G_82599:
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
		break;
	case IXGBE_LINKS_SPEED_100_82599:
		*speed = IXGBE_LINK_SPEED_100_FULL;
		break;
	}

	/* if we passed all the tests above then the link is up and we no
	 * longer need to check for link
	 */
	mac->get_link_status = false;

out:
	*link_up = !mac->get_link_status;
	return 0;
}
970 
/**
 *  ixgbevf_set_rlpml_vf - Set the maximum receive packet length
 *  @hw: pointer to the HW structure
 *  @max_size: value to assign to max frame size
 **/
static s32 ixgbevf_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 msgbuf[2];
	s32 ret_val;

	msgbuf[0] = IXGBE_VF_SET_LPE;
	msgbuf[1] = max_size;

	ret_val = ixgbevf_write_msg_read_ack(hw, msgbuf, msgbuf,
					     ARRAY_SIZE(msgbuf));
	if (ret_val)
		return ret_val;
	/* treat a reply as failure only when it both echoes the SET_LPE
	 * opcode bit and carries the FAILURE flag
	 */
	if ((msgbuf[0] & IXGBE_VF_SET_LPE) &&
	    (msgbuf[0] & IXGBE_VT_MSGTYPE_FAILURE))
		return IXGBE_ERR_MBX;

	return 0;
}
994 
/**
 * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length
 * @hw: pointer to the HW structure
 * @max_size: value to assign to max frame size
 * Hyper-V variant: programs the RLPML field of the first Rx queue's
 * VFRXDCTL register directly instead of going through the mailbox.
 **/
static s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size)
{
	u32 reg;

	/* If we are on Hyper-V, we implement this functionality
	 * differently.
	 */
	reg =  IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0));
	/* CRC == 4 */
	/* NOTE(review): the new length is OR-ed in without first clearing a
	 * previously programmed RLPML value — confirm the field is known to
	 * be zero (or only ever grows) when this is called.
	 */
	reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN);
	IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg);

	return 0;
}
1015 
1016 /**
1017  *  ixgbevf_negotiate_api_version_vf - Negotiate supported API version
1018  *  @hw: pointer to the HW structure
1019  *  @api: integer containing requested API version
1020  **/
ixgbevf_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)1021 static int ixgbevf_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
1022 {
1023 	int err;
1024 	u32 msg[3];
1025 
1026 	/* Negotiate the mailbox API version */
1027 	msg[0] = IXGBE_VF_API_NEGOTIATE;
1028 	msg[1] = api;
1029 	msg[2] = 0;
1030 
1031 	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
1032 	if (!err) {
1033 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
1034 
1035 		/* Store value and return 0 on success */
1036 		if (msg[0] == (IXGBE_VF_API_NEGOTIATE |
1037 			      IXGBE_VT_MSGTYPE_SUCCESS)) {
1038 			hw->api_version = api;
1039 			return 0;
1040 		}
1041 
1042 		err = IXGBE_ERR_INVALID_ARGUMENT;
1043 	}
1044 
1045 	return err;
1046 }
1047 
1048 /**
1049  *  ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version
1050  *  @hw: pointer to the HW structure
1051  *  @api: integer containing requested API version
1052  *  Hyper-V version - only ixgbe_mbox_api_10 supported.
1053  **/
ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw * hw,int api)1054 static int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api)
1055 {
1056 	/* Hyper-V only supports api version ixgbe_mbox_api_10 */
1057 	if (api != ixgbe_mbox_api_10)
1058 		return IXGBE_ERR_INVALID_ARGUMENT;
1059 
1060 	return 0;
1061 }
1062 
ixgbevf_get_queues(struct ixgbe_hw * hw,unsigned int * num_tcs,unsigned int * default_tc)1063 int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs,
1064 		       unsigned int *default_tc)
1065 {
1066 	int err;
1067 	u32 msg[5];
1068 
1069 	/* do nothing if API doesn't support ixgbevf_get_queues */
1070 	switch (hw->api_version) {
1071 	case ixgbe_mbox_api_11:
1072 	case ixgbe_mbox_api_12:
1073 	case ixgbe_mbox_api_13:
1074 	case ixgbe_mbox_api_14:
1075 	case ixgbe_mbox_api_15:
1076 	case ixgbe_mbox_api_16:
1077 	case ixgbe_mbox_api_17:
1078 		break;
1079 	default:
1080 		return 0;
1081 	}
1082 
1083 	/* Fetch queue configuration from the PF */
1084 	msg[0] = IXGBE_VF_GET_QUEUE;
1085 	msg[1] = msg[2] = msg[3] = msg[4] = 0;
1086 
1087 	err = ixgbevf_write_msg_read_ack(hw, msg, msg, ARRAY_SIZE(msg));
1088 	if (!err) {
1089 		msg[0] &= ~IXGBE_VT_MSGTYPE_CTS;
1090 
1091 		/* if we didn't get an ACK there must have been
1092 		 * some sort of mailbox error so we should treat it
1093 		 * as such
1094 		 */
1095 		if (msg[0] != (IXGBE_VF_GET_QUEUE | IXGBE_VT_MSGTYPE_SUCCESS))
1096 			return IXGBE_ERR_MBX;
1097 
1098 		/* record and validate values from message */
1099 		hw->mac.max_tx_queues = msg[IXGBE_VF_TX_QUEUES];
1100 		if (hw->mac.max_tx_queues == 0 ||
1101 		    hw->mac.max_tx_queues > IXGBE_VF_MAX_TX_QUEUES)
1102 			hw->mac.max_tx_queues = IXGBE_VF_MAX_TX_QUEUES;
1103 
1104 		hw->mac.max_rx_queues = msg[IXGBE_VF_RX_QUEUES];
1105 		if (hw->mac.max_rx_queues == 0 ||
1106 		    hw->mac.max_rx_queues > IXGBE_VF_MAX_RX_QUEUES)
1107 			hw->mac.max_rx_queues = IXGBE_VF_MAX_RX_QUEUES;
1108 
1109 		*num_tcs = msg[IXGBE_VF_TRANS_VLAN];
1110 		/* in case of unknown state assume we cannot tag frames */
1111 		if (*num_tcs > hw->mac.max_rx_queues)
1112 			*num_tcs = 1;
1113 
1114 		*default_tc = msg[IXGBE_VF_DEF_QUEUE];
1115 		/* default to queue 0 on out-of-bounds queue number */
1116 		if (*default_tc >= hw->mac.max_tx_queues)
1117 			*default_tc = 0;
1118 	}
1119 
1120 	return err;
1121 }
1122 
/* MAC operations used when the VF communicates with a PF driver over the
 * PF/VF mailbox (the normal SR-IOV case).
 */
static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_negotiate_api_version_vf,
	.negotiate_features	= ixgbevf_negotiate_features_vf,
	.set_rar		= ixgbevf_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_update_xcast_mode,
	.get_link_state		= ixgbevf_get_link_state_vf,
	.set_uc_addr		= ixgbevf_set_uc_addr_vf,
	.set_vfta		= ixgbevf_set_vfta_vf,
	.set_rlpml		= ixgbevf_set_rlpml_vf,
};
1141 
/* MAC operations for VFs running under Hyper-V, where there is no PF
 * mailbox; ops that would use it are replaced with _hv_ variants.
 */
static const struct ixgbe_mac_operations ixgbevf_hv_mac_ops = {
	.init_hw		= ixgbevf_init_hw_vf,
	.reset_hw		= ixgbevf_hv_reset_hw_vf,
	.start_hw		= ixgbevf_start_hw_vf,
	.get_mac_addr		= ixgbevf_get_mac_addr_vf,
	.stop_adapter		= ixgbevf_stop_hw_vf,
	.setup_link		= ixgbevf_setup_mac_link_vf,
	.check_link		= ixgbevf_hv_check_mac_link_vf,
	.negotiate_api_version	= ixgbevf_hv_negotiate_api_version_vf,
	.negotiate_features	= ixgbevf_hv_negotiate_features_vf,
	.set_rar		= ixgbevf_hv_set_rar_vf,
	.update_mc_addr_list	= ixgbevf_hv_update_mc_addr_list_vf,
	.update_xcast_mode	= ixgbevf_hv_update_xcast_mode,
	.get_link_state		= ixgbevf_hv_get_link_state_vf,
	.set_uc_addr		= ixgbevf_hv_set_uc_addr_vf,
	.set_vfta		= ixgbevf_hv_set_vfta_vf,
	.set_rlpml		= ixgbevf_hv_set_rlpml_vf,
};
1160 
1161 const struct ixgbevf_info ixgbevf_82599_vf_info = {
1162 	.mac = ixgbe_mac_82599_vf,
1163 	.mac_ops = &ixgbevf_mac_ops,
1164 };
1165 
1166 const struct ixgbevf_info ixgbevf_82599_vf_hv_info = {
1167 	.mac = ixgbe_mac_82599_vf,
1168 	.mac_ops = &ixgbevf_hv_mac_ops,
1169 };
1170 
1171 const struct ixgbevf_info ixgbevf_X540_vf_info = {
1172 	.mac = ixgbe_mac_X540_vf,
1173 	.mac_ops = &ixgbevf_mac_ops,
1174 };
1175 
1176 const struct ixgbevf_info ixgbevf_X540_vf_hv_info = {
1177 	.mac = ixgbe_mac_X540_vf,
1178 	.mac_ops = &ixgbevf_hv_mac_ops,
1179 };
1180 
1181 const struct ixgbevf_info ixgbevf_X550_vf_info = {
1182 	.mac = ixgbe_mac_X550_vf,
1183 	.mac_ops = &ixgbevf_mac_ops,
1184 };
1185 
1186 const struct ixgbevf_info ixgbevf_X550_vf_hv_info = {
1187 	.mac = ixgbe_mac_X550_vf,
1188 	.mac_ops = &ixgbevf_hv_mac_ops,
1189 };
1190 
1191 const struct ixgbevf_info ixgbevf_X550EM_x_vf_info = {
1192 	.mac = ixgbe_mac_X550EM_x_vf,
1193 	.mac_ops = &ixgbevf_mac_ops,
1194 };
1195 
1196 const struct ixgbevf_info ixgbevf_X550EM_x_vf_hv_info = {
1197 	.mac = ixgbe_mac_X550EM_x_vf,
1198 	.mac_ops = &ixgbevf_hv_mac_ops,
1199 };
1200 
1201 const struct ixgbevf_info ixgbevf_x550em_a_vf_info = {
1202 	.mac = ixgbe_mac_x550em_a_vf,
1203 	.mac_ops = &ixgbevf_mac_ops,
1204 };
1205 
1206 const struct ixgbevf_info ixgbevf_e610_vf_info = {
1207 	.mac                    = ixgbe_mac_e610_vf,
1208 	.mac_ops                = &ixgbevf_mac_ops,
1209 };
1210 
1211 const struct ixgbevf_info ixgbevf_e610_vf_hv_info = {
1212 	.mac            = ixgbe_mac_e610_vf,
1213 	.mac_ops        = &ixgbevf_hv_mac_ops,
1214 };
1215