1 /****************************************************************************** 2 3 Copyright (c) 2001-2009, Intel Corporation 4 All rights reserved. 5 6 Redistribution and use in source and binary forms, with or without 7 modification, are permitted provided that the following conditions are met: 8 9 1. Redistributions of source code must retain the above copyright notice, 10 this list of conditions and the following disclaimer. 11 12 2. Redistributions in binary form must reproduce the above copyright 13 notice, this list of conditions and the following disclaimer in the 14 documentation and/or other materials provided with the distribution. 15 16 3. Neither the name of the Intel Corporation nor the names of its 17 contributors may be used to endorse or promote products derived from 18 this software without specific prior written permission. 19 20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE 24 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 25 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 26 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 27 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 28 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 29 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 30 POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/
/*$FreeBSD$*/

#include "ixgbe_type.h"
#include "ixgbe_api.h"
#include "ixgbe_common.h"
#include "ixgbe_phy.h"

/*
 * Forward declarations for the 82599-specific routines that are installed
 * into the shared-code function tables by ixgbe_init_ops_82599() below.
 */
u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                                      ixgbe_link_speed *speed,
                                      bool *autoneg);
enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw);
s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed, bool autoneg,
                                     bool autoneg_wait_to_complete);
s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw,
                               ixgbe_link_speed *speed,
                               bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed,
                                     bool autoneg,
                                     bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw);
static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw,
                                               ixgbe_link_speed speed,
                                               bool autoneg,
                                               bool autoneg_wait_to_complete);
s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw);
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw);
s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw);
s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan,
                         u32 vind, bool vlan_on);
s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val);
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val);
s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw);
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw);
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval);
s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
                                        u16 *san_mac_offset);
s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr);
s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps);

/**
 *  ixgbe_init_mac_link_ops_82599 - Install 82599 link function pointers
 *  @hw: pointer to hardware structure
 *
 *  Selects the setup_link/setup_link_speed implementations: the
 *  multi-speed fiber variants when hw->phy.multispeed_fiber is set
 *  (dual speed SFP+ module detected), otherwise the plain 82599 MAC
 *  link routines.
 **/
void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;

	DEBUGFUNC("ixgbe_init_mac_link_ops_82599");

	if (hw->phy.multispeed_fiber) {
		/* Set up dual speed SFP+ support */
		mac->ops.setup_link =
		                 &ixgbe_setup_mac_link_multispeed_fiber;
		mac->ops.setup_link_speed =
		                 &ixgbe_setup_mac_link_speed_multispeed_fiber;
	} else {
		mac->ops.setup_link =
		                 &ixgbe_setup_mac_link_82599;
		mac->ops.setup_link_speed =
		                 &ixgbe_setup_mac_link_speed_82599;
	}
}

/**
 *  ixgbe_init_phy_ops_82599 - PHY/SFP specific init
 *  @hw: pointer to hardware structure
 *
 *  Initialize any function pointers that were not able to be
 *  set during init_shared_code because the PHY/SFP type was
 *  not known.  Perform the SFP init if necessary.
113 * 114 **/ 115 s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) 116 { 117 struct ixgbe_mac_info *mac = &hw->mac; 118 struct ixgbe_phy_info *phy = &hw->phy; 119 s32 ret_val = IXGBE_SUCCESS; 120 121 DEBUGFUNC("ixgbe_init_phy_ops_82599"); 122 123 /* Identify the PHY or SFP module */ 124 ret_val = phy->ops.identify(hw); 125 if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) 126 goto init_phy_ops_out; 127 128 /* Setup function pointers based on detected SFP module and speeds */ 129 ixgbe_init_mac_link_ops_82599(hw); 130 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) 131 hw->phy.ops.reset = NULL; 132 133 /* If copper media, overwrite with copper function pointers */ 134 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 135 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 136 mac->ops.setup_link_speed = 137 &ixgbe_setup_copper_link_speed_82599; 138 mac->ops.get_link_capabilities = 139 &ixgbe_get_copper_link_capabilities_generic; 140 } 141 142 /* Set necessary function pointers based on phy type */ 143 switch (hw->phy.type) { 144 case ixgbe_phy_tn: 145 phy->ops.check_link = &ixgbe_check_phy_link_tnx; 146 phy->ops.get_firmware_version = 147 &ixgbe_get_phy_firmware_version_tnx; 148 break; 149 case ixgbe_phy_aq: 150 phy->ops.get_firmware_version = 151 &ixgbe_get_phy_firmware_version_aq; 152 break; 153 default: 154 break; 155 } 156 init_phy_ops_out: 157 return ret_val; 158 } 159 160 s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 161 { 162 s32 ret_val = IXGBE_SUCCESS; 163 u16 list_offset, data_offset, data_value; 164 165 DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); 166 167 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 168 ixgbe_init_mac_link_ops_82599(hw); 169 170 hw->phy.ops.reset = NULL; 171 172 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, 173 &data_offset); 174 if (ret_val != IXGBE_SUCCESS) 175 goto setup_sfp_out; 176 177 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 178 while (data_value != 0xffff) { 179 
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); 180 IXGBE_WRITE_FLUSH(hw); 181 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 182 } 183 /* Now restart DSP */ 184 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000102); 185 IXGBE_WRITE_REG(hw, IXGBE_CORECTL, 0x00000b1d); 186 IXGBE_WRITE_FLUSH(hw); 187 } 188 189 setup_sfp_out: 190 return ret_val; 191 } 192 193 /** 194 * ixgbe_get_pcie_msix_count_82599 - Gets MSI-X vector count 195 * @hw: pointer to hardware structure 196 * 197 * Read PCIe configuration space, and get the MSI-X vector count from 198 * the capabilities table. 199 **/ 200 u32 ixgbe_get_pcie_msix_count_82599(struct ixgbe_hw *hw) 201 { 202 u32 msix_count = 64; 203 204 if (hw->mac.msix_vectors_from_pcie) { 205 msix_count = IXGBE_READ_PCIE_WORD(hw, 206 IXGBE_PCIE_MSIX_82599_CAPS); 207 msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK; 208 209 /* MSI-X count is zero-based in HW, so increment to give 210 * proper value */ 211 msix_count++; 212 } 213 214 return msix_count; 215 } 216 217 /** 218 * ixgbe_init_ops_82599 - Inits func ptrs and MAC type 219 * @hw: pointer to hardware structure 220 * 221 * Initialize the function pointers and assign the MAC type for 82599. 222 * Does not touch the hardware. 
 **/

s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw)
{
	struct ixgbe_mac_info *mac = &hw->mac;
	struct ixgbe_phy_info *phy = &hw->phy;
	s32 ret_val;

	/* NOTE(review): the result of the first call is overwritten by the
	 * second, so a failure from ixgbe_init_phy_ops_generic() is silently
	 * dropped -- confirm this is intentional. */
	ret_val = ixgbe_init_phy_ops_generic(hw);
	ret_val = ixgbe_init_ops_generic(hw);

	/* PHY */
	phy->ops.identify = &ixgbe_identify_phy_82599;
	phy->ops.init = &ixgbe_init_phy_ops_82599;

	/* MAC */
	mac->ops.reset_hw = &ixgbe_reset_hw_82599;
	mac->ops.get_media_type = &ixgbe_get_media_type_82599;
	mac->ops.get_supported_physical_layer =
	                            &ixgbe_get_supported_physical_layer_82599;
	mac->ops.enable_rx_dma = &ixgbe_enable_rx_dma_82599;
	mac->ops.read_analog_reg8 = &ixgbe_read_analog_reg8_82599;
	mac->ops.write_analog_reg8 = &ixgbe_write_analog_reg8_82599;
	mac->ops.start_hw = &ixgbe_start_hw_rev_1_82599;
	mac->ops.get_san_mac_addr = &ixgbe_get_san_mac_addr_82599;
	mac->ops.set_san_mac_addr = &ixgbe_set_san_mac_addr_82599;
	mac->ops.get_device_caps = &ixgbe_get_device_caps_82599;

	/* RAR, Multicast, VLAN */
	mac->ops.set_vmdq = &ixgbe_set_vmdq_82599;
	mac->ops.clear_vmdq = &ixgbe_clear_vmdq_82599;
	mac->ops.insert_mac_addr = &ixgbe_insert_mac_addr_82599;
	/* RAR 0 holds the MAC address; the search highwater starts above it */
	mac->rar_highwater = 1;
	mac->ops.set_vfta = &ixgbe_set_vfta_82599;
	mac->ops.clear_vfta = &ixgbe_clear_vfta_82599;
	mac->ops.init_uta_tables = &ixgbe_init_uta_tables_82599;
	mac->ops.setup_sfp = &ixgbe_setup_sfp_modules_82599;

	/* Link */
	mac->ops.get_link_capabilities = &ixgbe_get_link_capabilities_82599;
	mac->ops.check_link = &ixgbe_check_mac_link_82599;
	ixgbe_init_mac_link_ops_82599(hw);

	/* 82599 table and queue sizes */
	mac->mcft_size = 128;
	mac->vft_size = 128;
	mac->num_rar_entries = 128;
	mac->max_tx_queues = 128;
	mac->max_rx_queues = 128;
	mac->max_msix_vectors = ixgbe_get_pcie_msix_count_82599(hw);

	return ret_val;
}

/**
 *  ixgbe_get_link_capabilities_82599 - Determines link capabilities
 *  @hw: pointer to hardware
structure 279 * @speed: pointer to link speed 280 * @negotiation: TRUE when autoneg or autotry is enabled 281 * 282 * Determines the link capabilities by reading the AUTOC register. 283 **/ 284 s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, 285 ixgbe_link_speed *speed, 286 bool *negotiation) 287 { 288 s32 status = IXGBE_SUCCESS; 289 u32 autoc = 0; 290 291 /* 292 * Determine link capabilities based on the stored value of AUTOC, 293 * which represents EEPROM defaults. If AUTOC value has not 294 * been stored, use the current register values. 295 */ 296 if (hw->mac.orig_link_settings_stored) 297 autoc = hw->mac.orig_autoc; 298 else 299 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 300 301 switch (autoc & IXGBE_AUTOC_LMS_MASK) { 302 case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: 303 *speed = IXGBE_LINK_SPEED_1GB_FULL; 304 *negotiation = FALSE; 305 break; 306 307 case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: 308 *speed = IXGBE_LINK_SPEED_10GB_FULL; 309 *negotiation = FALSE; 310 break; 311 312 case IXGBE_AUTOC_LMS_1G_AN: 313 *speed = IXGBE_LINK_SPEED_1GB_FULL; 314 *negotiation = TRUE; 315 break; 316 317 case IXGBE_AUTOC_LMS_10G_SERIAL: 318 *speed = IXGBE_LINK_SPEED_10GB_FULL; 319 *negotiation = FALSE; 320 break; 321 322 case IXGBE_AUTOC_LMS_KX4_KX_KR: 323 case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: 324 *speed = IXGBE_LINK_SPEED_UNKNOWN; 325 if (autoc & IXGBE_AUTOC_KR_SUPP) 326 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 327 if (autoc & IXGBE_AUTOC_KX4_SUPP) 328 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 329 if (autoc & IXGBE_AUTOC_KX_SUPP) 330 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 331 *negotiation = TRUE; 332 break; 333 334 case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: 335 *speed = IXGBE_LINK_SPEED_100_FULL; 336 if (autoc & IXGBE_AUTOC_KR_SUPP) 337 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 338 if (autoc & IXGBE_AUTOC_KX4_SUPP) 339 *speed |= IXGBE_LINK_SPEED_10GB_FULL; 340 if (autoc & IXGBE_AUTOC_KX_SUPP) 341 *speed |= IXGBE_LINK_SPEED_1GB_FULL; 342 *negotiation = TRUE; 343 break; 344 345 case 
IXGBE_AUTOC_LMS_SGMII_1G_100M: 346 *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; 347 *negotiation = FALSE; 348 break; 349 350 default: 351 status = IXGBE_ERR_LINK_SETUP; 352 goto out; 353 break; 354 } 355 356 if (hw->phy.multispeed_fiber) { 357 *speed |= IXGBE_LINK_SPEED_10GB_FULL | 358 IXGBE_LINK_SPEED_1GB_FULL; 359 *negotiation = TRUE; 360 } 361 362 out: 363 return status; 364 } 365 366 /** 367 * ixgbe_get_media_type_82599 - Get media type 368 * @hw: pointer to hardware structure 369 * 370 * Returns the media type (fiber, copper, backplane) 371 **/ 372 enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) 373 { 374 enum ixgbe_media_type media_type; 375 376 /* Detect if there is a copper PHY attached. */ 377 if (hw->phy.type == ixgbe_phy_cu_unknown || 378 hw->phy.type == ixgbe_phy_tn || 379 hw->phy.type == ixgbe_phy_aq) { 380 media_type = ixgbe_media_type_copper; 381 goto out; 382 } 383 384 switch (hw->device_id) { 385 case IXGBE_DEV_ID_82599_KX4: 386 /* Default device ID is mezzanine card KX/KX4 */ 387 media_type = ixgbe_media_type_backplane; 388 break; 389 case IXGBE_DEV_ID_82599_SFP: 390 media_type = ixgbe_media_type_fiber; 391 break; 392 case IXGBE_DEV_ID_82599_CX4: 393 media_type = ixgbe_media_type_fiber; 394 break; 395 default: 396 media_type = ixgbe_media_type_unknown; 397 break; 398 } 399 out: 400 return media_type; 401 } 402 403 /** 404 * ixgbe_setup_mac_link_82599 - Setup MAC link settings 405 * @hw: pointer to hardware structure 406 * 407 * Configures link settings based on values in the ixgbe_hw struct. 408 * Restarts the link. Performs autonegotiation if needed. 
409 **/ 410 s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw) 411 { 412 u32 autoc_reg; 413 u32 links_reg; 414 u32 i; 415 s32 status = IXGBE_SUCCESS; 416 417 /* Restart link */ 418 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); 419 autoc_reg |= IXGBE_AUTOC_AN_RESTART; 420 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); 421 422 /* Only poll for autoneg to complete if specified to do so */ 423 if (hw->phy.autoneg_wait_to_complete) { 424 if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == 425 IXGBE_AUTOC_LMS_KX4_KX_KR || 426 (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 427 IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN 428 || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == 429 IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 430 links_reg = 0; /* Just in case Autoneg time = 0 */ 431 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 432 links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); 433 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 434 break; 435 msec_delay(100); 436 } 437 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 438 status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; 439 DEBUGOUT("Autoneg did not complete.\n"); 440 } 441 } 442 } 443 444 /* Add delay to filter out noises during initial link setup */ 445 msec_delay(50); 446 447 return status; 448 } 449 450 /** 451 * ixgbe_setup_mac_link_multispeed_fiber - Setup MAC link settings 452 * @hw: pointer to hardware structure 453 * 454 * Configures link settings based on values in the ixgbe_hw struct. 455 * Restarts the link for multi-speed fiber at 1G speed, if link 456 * fails at 10G. 457 * Performs autonegotiation if needed. 
 **/
s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_82599_AUTONEG;
	DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber");

	/* Try all 82599 autoneg speeds, with autoneg on and waiting for
	 * completion (TRUE/true are the autoneg and wait flags). */
	status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
	                                              link_speed, TRUE, true);
	return status;
}

/**
 *  ixgbe_setup_mac_link_speed_multispeed_fiber - Set MAC link speed
 *  @hw: pointer to hardware structure
 *  @speed: new link speed
 *  @autoneg: TRUE if autonegotiation enabled
 *  @autoneg_wait_to_complete: TRUE when waiting for completion is needed
 *
 *  Set the link speed in the AUTOC register and restarts link.
 *  Tries 10Gb first, then falls back to 1Gb, toggling the module's
 *  rate-select pin (SDP5) before each attempt.  On total failure it
 *  recurses once to reconfigure the highest speed that was tried.
 **/
s32 ixgbe_setup_mac_link_speed_multispeed_fiber(struct ixgbe_hw *hw,
                                     ixgbe_link_speed speed, bool autoneg,
                                     bool autoneg_wait_to_complete)
{
	s32 status = IXGBE_SUCCESS;
	ixgbe_link_speed link_speed;
	ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN;
	u32 speedcnt = 0;
	u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
	bool link_up = FALSE;
	bool negotiation;

	/* Mask off requested but non-supported speeds */
	status = ixgbe_get_link_capabilities(hw, &link_speed, &negotiation);
	if (status != IXGBE_SUCCESS)
		goto out;

	speed &= link_speed;

	/*
	 * Try each speed one by one, highest priority first.  We do this in
	 * software because 10gb fiber doesn't support speed autonegotiation.
	 */
	if (speed & IXGBE_LINK_SPEED_10GB_FULL) {
		speedcnt++;
		highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up)
			goto out;

		/* Set hardware SDP's (drive SDP5 high for the 10G rate) */
		esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5);
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);

		/* Allow module to change analog characteristics (1G->10G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_speed_82599(
		        hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg,
		        autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	if (speed & IXGBE_LINK_SPEED_1GB_FULL) {
		speedcnt++;
		if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN)
			highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL;

		/* If we already have link at this speed, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up)
			goto out;

		/* Set hardware SDP's (drive SDP5 low for the 1G rate) */
		esdp_reg &= ~IXGBE_ESDP_SDP5;
		esdp_reg |= IXGBE_ESDP_SDP5_DIR;
		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);

		/* Allow module to change analog characteristics (10G->1G) */
		msec_delay(40);

		status = ixgbe_setup_mac_link_speed_82599(
		        hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg,
		        autoneg_wait_to_complete);
		if (status != IXGBE_SUCCESS)
			goto out;

		msec_delay(100);

		/* If we have link, just jump out */
		status = ixgbe_check_link(hw, &link_speed, &link_up, FALSE);
		if (status != IXGBE_SUCCESS)
			goto out;

		if (link_up)
			goto out;
	}

	/*
	 * We didn't get link.  Configure back to the highest speed we tried,
	 * (if there was more than one).  We call ourselves back with just the
	 * single highest speed that the user requested.
	 */
	if (speedcnt > 1)
		status = ixgbe_setup_mac_link_speed_multispeed_fiber(hw,
		        highest_link_speed, autoneg, autoneg_wait_to_complete);

out:
	return status;
}

/**
 *  ixgbe_check_mac_link_82599 - Determine link and speed status
 *  @hw: pointer to hardware structure
 *  @speed: pointer to link speed
 *  @link_up: TRUE when link is up
 *  @link_up_wait_to_complete: bool used to wait for link up or not
 *
 *  Reads the links register to determine if link is up and the current speed
 **/
s32 ixgbe_check_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                               bool *link_up, bool link_up_wait_to_complete)
{
	u32 links_reg;
	u32 i;

	links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
	if (link_up_wait_to_complete) {
		/* Poll up to IXGBE_LINK_UP_TIME iterations, 100ms apart */
		for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
			if (links_reg & IXGBE_LINKS_UP) {
				*link_up = TRUE;
				break;
			} else {
				*link_up = FALSE;
			}
			msec_delay(100);
			links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
		}
	} else {
		if (links_reg & IXGBE_LINKS_UP)
			*link_up = TRUE;
		else
			*link_up = FALSE;
	}

	/* Decode the speed field of the LINKS register */
	if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	    IXGBE_LINKS_SPEED_10G_82599)
		*speed = IXGBE_LINK_SPEED_10GB_FULL;
	else if ((links_reg & IXGBE_LINKS_SPEED_82599) ==
	         IXGBE_LINKS_SPEED_1G_82599)
		*speed = IXGBE_LINK_SPEED_1GB_FULL;
	else
		*speed = IXGBE_LINK_SPEED_100_FULL;

	/* if link is down, zero out the current_mode */
	if (*link_up == FALSE) {
		hw->fc.current_mode = ixgbe_fc_none;
		hw->fc.fc_was_autonegged = FALSE;
	}

	return IXGBE_SUCCESS;
}

/**
 *  ixgbe_setup_mac_link_speed_82599 - Set
MAC link speed 643 * @hw: pointer to hardware structure 644 * @speed: new link speed 645 * @autoneg: TRUE if autonegotiation enabled 646 * @autoneg_wait_to_complete: TRUE when waiting for completion is needed 647 * 648 * Set the link speed in the AUTOC register and restarts link. 649 **/ 650 s32 ixgbe_setup_mac_link_speed_82599(struct ixgbe_hw *hw, 651 ixgbe_link_speed speed, bool autoneg, 652 bool autoneg_wait_to_complete) 653 { 654 s32 status = IXGBE_SUCCESS; 655 u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); 656 u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); 657 u32 orig_autoc = 0; 658 u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; 659 u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; 660 u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; 661 u32 links_reg; 662 u32 i; 663 ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; 664 665 /* Check to see if speed passed in is supported. */ 666 status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); 667 if (status != IXGBE_SUCCESS) 668 goto out; 669 670 speed &= link_capabilities; 671 672 /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/ 673 if (hw->mac.orig_link_settings_stored) 674 orig_autoc = hw->mac.orig_autoc; 675 else 676 orig_autoc = autoc; 677 678 679 if (speed == IXGBE_LINK_SPEED_UNKNOWN) { 680 status = IXGBE_ERR_LINK_SETUP; 681 } else if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 682 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 683 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 684 /* Set KX4/KX/KR support according to speed requested */ 685 autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP); 686 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 687 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) 688 autoc |= IXGBE_AUTOC_KX4_SUPP; 689 if (orig_autoc & IXGBE_AUTOC_KR_SUPP) 690 autoc |= IXGBE_AUTOC_KR_SUPP; 691 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 692 autoc |= IXGBE_AUTOC_KX_SUPP; 693 } else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) && 694 
(link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN || 695 link_mode == IXGBE_AUTOC_LMS_1G_AN)) { 696 /* Switch from 1G SFI to 10G SFI if requested */ 697 if ((speed == IXGBE_LINK_SPEED_10GB_FULL) && 698 (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) { 699 autoc &= ~IXGBE_AUTOC_LMS_MASK; 700 autoc |= IXGBE_AUTOC_LMS_10G_SERIAL; 701 } 702 } else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) && 703 (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) { 704 /* Switch from 10G SFI to 1G SFI if requested */ 705 if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && 706 (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { 707 autoc &= ~IXGBE_AUTOC_LMS_MASK; 708 if (autoneg) 709 autoc |= IXGBE_AUTOC_LMS_1G_AN; 710 else 711 autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; 712 } 713 } 714 715 if (status == IXGBE_SUCCESS) { 716 /* Restart link */ 717 autoc |= IXGBE_AUTOC_AN_RESTART; 718 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); 719 720 /* Only poll for autoneg to complete if specified to do so */ 721 if (autoneg_wait_to_complete) { 722 if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR || 723 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || 724 link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { 725 links_reg = 0; /*Just in case Autoneg time=0*/ 726 for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { 727 links_reg = 728 IXGBE_READ_REG(hw, IXGBE_LINKS); 729 if (links_reg & IXGBE_LINKS_KX_AN_COMP) 730 break; 731 msec_delay(100); 732 } 733 if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { 734 status = 735 IXGBE_ERR_AUTONEG_NOT_COMPLETE; 736 DEBUGOUT("Autoneg did not complete.\n"); 737 } 738 } 739 } 740 741 /* Add delay to filter out noises during initial link setup */ 742 msec_delay(50); 743 } 744 745 out: 746 return status; 747 } 748 749 /** 750 * ixgbe_setup_copper_link_82599 - Setup copper link settings 751 * @hw: pointer to hardware structure 752 * 753 * Restarts the link on PHY and then MAC. Performs autonegotiation if needed. 
754 **/ 755 static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw) 756 { 757 s32 status; 758 759 /* Restart autonegotiation on PHY */ 760 status = hw->phy.ops.setup_link(hw); 761 762 /* Set up MAC */ 763 ixgbe_setup_mac_link_82599(hw); 764 765 return status; 766 } 767 768 /** 769 * ixgbe_setup_copper_link_speed_82599 - Set the PHY autoneg advertised field 770 * @hw: pointer to hardware structure 771 * @speed: new link speed 772 * @autoneg: TRUE if autonegotiation enabled 773 * @autoneg_wait_to_complete: TRUE if waiting is needed to complete 774 * 775 * Restarts link on PHY and MAC based on settings passed in. 776 **/ 777 static s32 ixgbe_setup_copper_link_speed_82599(struct ixgbe_hw *hw, 778 ixgbe_link_speed speed, 779 bool autoneg, 780 bool autoneg_wait_to_complete) 781 { 782 s32 status; 783 784 /* Setup the PHY according to input speed */ 785 status = hw->phy.ops.setup_link_speed(hw, speed, autoneg, 786 autoneg_wait_to_complete); 787 /* Set up MAC */ 788 ixgbe_setup_mac_link_82599(hw); 789 790 return status; 791 } 792 /** 793 * ixgbe_reset_hw_82599 - Perform hardware reset 794 * @hw: pointer to hardware structure 795 * 796 * Resets the hardware by resetting the transmit and receive units, masks 797 * and clears all interrupts, perform a PHY reset, and perform a link (MAC) 798 * reset. 799 **/ 800 s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) 801 { 802 s32 status = IXGBE_SUCCESS; 803 u32 ctrl, ctrl_ext; 804 u32 i; 805 u32 autoc; 806 u32 autoc2; 807 808 /* Call adapter stop to disable tx/rx and clear interrupts */ 809 hw->mac.ops.stop_adapter(hw); 810 811 /* PHY ops must be identified and initialized prior to reset */ 812 813 /* Identify PHY and related function pointers */ 814 status = hw->phy.ops.init(hw); 815 816 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 817 goto reset_hw_out; 818 819 820 /* Setup SFP module if there is one present. 
*/ 821 if (hw->phy.sfp_setup_needed) { 822 status = hw->mac.ops.setup_sfp(hw); 823 hw->phy.sfp_setup_needed = FALSE; 824 } 825 826 if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) 827 goto reset_hw_out; 828 829 /* Reset PHY */ 830 if (hw->phy.reset_disable == FALSE && hw->phy.ops.reset != NULL) 831 hw->phy.ops.reset(hw); 832 833 /* 834 * Prevent the PCI-E bus from from hanging by disabling PCI-E master 835 * access and verify no pending requests before reset 836 */ 837 status = ixgbe_disable_pcie_master(hw); 838 if (status != IXGBE_SUCCESS) { 839 status = IXGBE_ERR_MASTER_REQUESTS_PENDING; 840 DEBUGOUT("PCI-E Master disable polling has failed.\n"); 841 } 842 843 /* 844 * Issue global reset to the MAC. This needs to be a SW reset. 845 * If link reset is used, it might reset the MAC when mng is using it 846 */ 847 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 848 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST)); 849 IXGBE_WRITE_FLUSH(hw); 850 851 /* Poll for reset bit to self-clear indicating reset is complete */ 852 for (i = 0; i < 10; i++) { 853 usec_delay(1); 854 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); 855 if (!(ctrl & IXGBE_CTRL_RST)) 856 break; 857 } 858 if (ctrl & IXGBE_CTRL_RST) { 859 status = IXGBE_ERR_RESET_FAILED; 860 DEBUGOUT("Reset polling failed to complete.\n"); 861 } 862 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */ 863 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); 864 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; 865 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext); 866 867 msec_delay(50); 868 869 870 871 /* 872 * Store the original AUTOC/AUTOC2 values if they have not been 873 * stored off yet. Otherwise restore the stored original 874 * values since the reset operation sets back to defaults. 
 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == FALSE) {
		/* First reset: remember the power-on link configuration */
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = TRUE;
	} else {
		/* Later resets: restore the saved AUTOC and restart autoneg */
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
			                IXGBE_AUTOC_AN_RESTART));

		/* Only the upper (link-mode) bits of AUTOC2 are restored */
		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
			           IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
		                    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

reset_hw_out:
	return status;
}

/**
 * ixgbe_insert_mac_addr_82599 - Find a RAR for this mac address
 * @hw: pointer to hardware structure
 * @addr: Address to put into receive address register
 * @vmdq: VMDq pool to assign
 *
 * Puts an ethernet address into a receive address register, or
 * finds the rar that it is already in; adds to the pool list.
 * Returns the RAR index used on success, or IXGBE_ERR_INVALID_MAC_ADDR
 * when the RAR table is full.
 **/
s32 ixgbe_insert_mac_addr_82599(struct ixgbe_hw *hw, u8 *addr, u32 vmdq)
{
	static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF;
	u32 first_empty_rar = NO_EMPTY_RAR_FOUND;
	u32 rar;
	u32 rar_low, rar_high;
	u32 addr_low, addr_high;

	/* swap bytes for HW little endian */
	addr_low = addr[0] | (addr[1] << 8)
	                   | (addr[2] << 16)
	                   | (addr[3] << 24);
	addr_high = addr[4] | (addr[5] << 8);

	/*
	 * Either find the mac_id in rar or find the first empty space.
	 * rar_highwater points to just after the highest currently used
	 * rar in order to shorten the search.  It grows when we add a new
	 * rar to the top.
	 */
	for (rar = 0; rar < hw->mac.rar_highwater; rar++) {
		rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar));

		/* RAH_AV clear means this slot is unused */
		if (((IXGBE_RAH_AV & rar_high) == 0)
		    && first_empty_rar == NO_EMPTY_RAR_FOUND) {
			first_empty_rar = rar;
		} else if ((rar_high & 0xFFFF) == addr_high) {
			/* high halves match; confirm with the low word */
			rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar));
			if (rar_low == addr_low)
				break; /* found it already in the rars */
		}
	}

	if (rar < hw->mac.rar_highwater) {
		/* already there so just add to the pool bits */
		ixgbe_set_vmdq(hw, rar, vmdq);
	} else if (first_empty_rar != NO_EMPTY_RAR_FOUND) {
		/* stick it into first empty RAR slot we found */
		rar = first_empty_rar;
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
	} else if (rar == hw->mac.rar_highwater) {
		/* add it to the top of the list and inc the highwater mark */
		ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV);
		hw->mac.rar_highwater++;
	} else if (rar >= hw->mac.num_rar_entries) {
		return IXGBE_ERR_INVALID_MAC_ADDR;
	}

	/*
	 * If we found rar[0], make sure the default pool bit (we use pool 0)
	 * remains cleared to be sure default pool packets will get delivered
	 */
	if (rar == 0)
		ixgbe_clear_vmdq(hw, rar, 0);

	return rar;
}

/**
 * ixgbe_clear_vmdq_82599 - Disassociate a VMDq pool index from a rx address
 * @hw: pointer to hardware struct
 * @rar:
receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar
 **/
s32 ixgbe_clear_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar_lo, mpsar_hi;
	u32 rar_entries = hw->mac.num_rar_entries;

	if (rar < rar_entries) {
		mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
		mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));

		/* no pool bits set for this rar - nothing to clear */
		if (!mpsar_lo && !mpsar_hi)
			goto done;

		if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
			/* drop every pool association at once */
			if (mpsar_lo) {
				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 0);
				mpsar_lo = 0;
			}
			if (mpsar_hi) {
				IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0);
				mpsar_hi = 0;
			}
		} else if (vmdq < 32) {
			/* pools 0-31 live in MPSAR_LO */
			mpsar_lo &= ~(1 << vmdq);
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar_lo);
		} else {
			/* pools 32-63 live in MPSAR_HI */
			mpsar_hi &= ~(1 << (vmdq - 32));
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar_hi);
		}

		/* was that the last pool using this rar? */
		if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
			hw->mac.ops.clear_rar(hw, rar);
	} else {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
	}

done:
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vmdq_82599 - Associate a VMDq pool index with a rx address
 * @hw: pointer to hardware struct
 * @rar: receive address register index to associate with a VMDq index
 * @vmdq: VMDq pool index
 **/
s32 ixgbe_set_vmdq_82599(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
{
	u32 mpsar;
	u32 rar_entries = hw->mac.num_rar_entries;

	if (rar < rar_entries) {
		/* set the pool bit: MPSAR_LO for pools 0-31, _HI for 32-63 */
		if (vmdq < 32) {
			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar));
			mpsar |= 1 << vmdq;
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), mpsar);
		} else {
			mpsar = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
			mpsar |= 1 << (vmdq - 32);
			IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar);
		}
	} else {
		DEBUGOUT1("RAR index %d is out of range.\n", rar);
	}
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_vfta_82599 - Set VLAN filter table
 * @hw: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VFVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VFVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
s32 ixgbe_set_vfta_82599(struct ixgbe_hw *hw, u32 vlan, u32 vind,
                         bool vlan_on)
{
	u32 regindex;
	u32 bitindex;
	u32 bits;
	u32 first_empty_slot;

	/* VLAN ids are 12 bits wide */
	if (vlan > 4095)
		return IXGBE_ERR_PARAM;

	/*
	 * this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if vind is set
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 *    bits[11-5]: which register
	 *    bits[4-0]:  which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	bits = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex));
	if (vlan_on)
		bits |= (1 << bitindex);
	else
		bits &= ~(1 << bitindex);
	IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits);

	/* Part 2
	 * If the vind is set
	 *   Either vlan_on
	 *     make sure the vlan is in VLVF
	 *     set the vind bit in the matching VLVFB
	 *   Or !vlan_on
	 *     clear the pool bit and possibly the vind
	 */
	if (vind) {
		/* find the vlanid or the first empty slot */
		first_empty_slot = 0;

		/* slot 0 is skipped and doubles as the "none found" value */
		for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) {
			bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex));
			if (!bits && !first_empty_slot)
				first_empty_slot = regindex;
			else if ((bits & 0x0FFF) == vlan)
				break;
		}

		if (regindex >= IXGBE_VLVF_ENTRIES) {
			if (first_empty_slot)
				regindex = first_empty_slot;
			else {
				DEBUGOUT("No space in VLVF.\n");
				goto out;
			}
		}

		if (vlan_on) {
			/* set the pool bit */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
				                IXGBE_VLVFB(regindex*2));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
				                IXGBE_VLVFB(regindex*2), bits);
			} else {
				bits = IXGBE_READ_REG(hw,
				                IXGBE_VLVFB((regindex*2)+1));
				bits |= (1 << vind);
				IXGBE_WRITE_REG(hw,
				                IXGBE_VLVFB((regindex*2)+1), bits);
			}
		} else {
			/*
			 * clear the pool bit; afterwards "bits" holds the OR
			 * of both VLVFB halves so we know whether any pool
			 * still references this VLVF entry
			 */
			if (vind < 32) {
				bits = IXGBE_READ_REG(hw,
				                IXGBE_VLVFB(regindex*2));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
				                IXGBE_VLVFB(regindex*2), bits);
				bits |= IXGBE_READ_REG(hw,
				                IXGBE_VLVFB((regindex*2)+1));
			} else {
				bits = IXGBE_READ_REG(hw,
				                IXGBE_VLVFB((regindex*2)+1));
				bits &= ~(1 << vind);
				IXGBE_WRITE_REG(hw,
				                IXGBE_VLVFB((regindex*2)+1), bits);
				bits |= IXGBE_READ_REG(hw,
				                IXGBE_VLVFB(regindex*2));
			}
		}

		/* keep the VLVF entry only while some pool still uses it */
		if (bits)
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex),
			                (IXGBE_VLVF_VIEN | vlan));
		else
			IXGBE_WRITE_REG(hw, IXGBE_VLVF(regindex), 0);
	}

out:
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_clear_vfta_82599 - Clear VLAN filter table
 * @hw: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the
 * filter.
 **/
s32 ixgbe_clear_vfta_82599(struct ixgbe_hw *hw)
{
	u32 offset;

	for (offset = 0; offset < hw->mac.vft_size; offset++)
		IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0);

	for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) {
		IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0);
		IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0);
	}

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_init_uta_tables_82599 - Initialize the Unicast Table Array
 * @hw: pointer to hardware structure
1206 **/ 1207 s32 ixgbe_init_uta_tables_82599(struct ixgbe_hw *hw) 1208 { 1209 int i; 1210 DEBUGOUT(" Clearing UTA\n"); 1211 1212 for (i = 0; i < 128; i++) 1213 IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); 1214 1215 return IXGBE_SUCCESS; 1216 } 1217 1218 /** 1219 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. 1220 * @hw: pointer to hardware structure 1221 **/ 1222 s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) 1223 { 1224 u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); 1225 fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; 1226 IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); 1227 IXGBE_WRITE_FLUSH(hw); 1228 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1229 1230 return IXGBE_SUCCESS; 1231 } 1232 1233 #define IXGBE_FDIR_INIT_DONE_POLL 10 1234 /** 1235 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters 1236 * @hw: pointer to hardware structure 1237 * @pballoc: which mode to allocate filters with 1238 **/ 1239 s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc) 1240 { 1241 u32 fdirctrl = 0; 1242 u32 pbsize; 1243 int i; 1244 1245 /* 1246 * Before enabling Flow Director, the Rx Packet Buffer size 1247 * must be reduced. The new value is the current size minus 1248 * flow director memory usage size. 1249 */ 1250 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); 1251 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 1252 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); 1253 1254 /* 1255 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1256 * intialized to zero for non DCB mode otherwise actual total RX PB 1257 * would be bigger than programmed and filter space would run into 1258 * the PB 0 region. 
1259 */ 1260 for (i = 1; i < 8; i++) 1261 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 1262 1263 /* Send interrupt when 64 filters are left */ 1264 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; 1265 1266 /* Set the maximum length per hash bucket to 0xA filters */ 1267 fdirctrl |= 0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT; 1268 1269 switch (pballoc) { 1270 case IXGBE_FDIR_PBALLOC_64K: 1271 /* 8k - 1 signature filters */ 1272 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; 1273 break; 1274 case IXGBE_FDIR_PBALLOC_128K: 1275 /* 16k - 1 signature filters */ 1276 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; 1277 break; 1278 case IXGBE_FDIR_PBALLOC_256K: 1279 /* 32k - 1 signature filters */ 1280 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; 1281 break; 1282 default: 1283 /* bad value */ 1284 return IXGBE_ERR_CONFIG; 1285 }; 1286 1287 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1288 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1289 1290 1291 /* Prime the keys for hashing */ 1292 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1293 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); 1294 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, 1295 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); 1296 1297 /* 1298 * Poll init-done after we write the register. Estimated times: 1299 * 10G: PBALLOC = 11b, timing is 60us 1300 * 1G: PBALLOC = 11b, timing is 600us 1301 * 100M: PBALLOC = 11b, timing is 6ms 1302 * 1303 * Multiple these timings by 4 if under full Rx load 1304 * 1305 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1306 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1307 * this might not finish in our poll time, but we can live with that 1308 * for now. 
1309 */ 1310 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1311 IXGBE_WRITE_FLUSH(hw); 1312 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1313 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1314 IXGBE_FDIRCTRL_INIT_DONE) 1315 break; 1316 msec_delay(1); 1317 } 1318 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1319 DEBUGOUT("Flow Director Signature poll time exceeded!\n"); 1320 1321 return IXGBE_SUCCESS; 1322 } 1323 1324 /** 1325 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters 1326 * @hw: pointer to hardware structure 1327 * @pballoc: which mode to allocate filters with 1328 **/ 1329 s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc) 1330 { 1331 u32 fdirctrl = 0; 1332 u32 pbsize; 1333 int i; 1334 1335 /* 1336 * Before enabling Flow Director, the Rx Packet Buffer size 1337 * must be reduced. The new value is the current size minus 1338 * flow director memory usage size. 1339 */ 1340 1341 pbsize = (1 << (IXGBE_FDIR_PBALLOC_SIZE_SHIFT + pballoc)); 1342 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 1343 (IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(0)) - pbsize)); 1344 1345 /* 1346 * The defaults in the HW for RX PB 1-7 are not zero and so should be 1347 * intialized to zero for non DCB mode otherwise actual total RX PB 1348 * would be bigger than programmed and filter space would run into 1349 * the PB 0 region. 
1350 */ 1351 for (i = 1; i < 8; i++) 1352 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); 1353 1354 /* Send interrupt when 64 filters are left */ 1355 fdirctrl |= 4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT; 1356 1357 switch (pballoc) { 1358 case IXGBE_FDIR_PBALLOC_64K: 1359 /* 2k - 1 perfect filters */ 1360 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_64K; 1361 break; 1362 case IXGBE_FDIR_PBALLOC_128K: 1363 /* 4k - 1 perfect filters */ 1364 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_128K; 1365 break; 1366 case IXGBE_FDIR_PBALLOC_256K: 1367 /* 8k - 1 perfect filters */ 1368 fdirctrl |= IXGBE_FDIRCTRL_PBALLOC_256K; 1369 break; 1370 default: 1371 /* bad value */ 1372 return IXGBE_ERR_CONFIG; 1373 }; 1374 1375 /* Turn perfect match filtering on */ 1376 fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH; 1377 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS; 1378 1379 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1380 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1381 1382 /* Prime the keys for hashing */ 1383 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1384 IXGBE_HTONL(IXGBE_ATR_BUCKET_HASH_KEY)); 1385 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, 1386 IXGBE_HTONL(IXGBE_ATR_SIGNATURE_HASH_KEY)); 1387 1388 /* 1389 * Poll init-done after we write the register. Estimated times: 1390 * 10G: PBALLOC = 11b, timing is 60us 1391 * 1G: PBALLOC = 11b, timing is 600us 1392 * 100M: PBALLOC = 11b, timing is 6ms 1393 * 1394 * Multiple these timings by 4 if under full Rx load 1395 * 1396 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for 1397 * 1 msec per poll time. If we're at line rate and drop to 100M, then 1398 * this might not finish in our poll time, but we can live with that 1399 * for now. 
1400 */ 1401 1402 /* Set the maximum length per hash bucket to 0xA filters */ 1403 fdirctrl |= (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT); 1404 1405 IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl); 1406 IXGBE_WRITE_FLUSH(hw); 1407 for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { 1408 if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & 1409 IXGBE_FDIRCTRL_INIT_DONE) 1410 break; 1411 msec_delay(1); 1412 } 1413 if (i >= IXGBE_FDIR_INIT_DONE_POLL) 1414 DEBUGOUT("Flow Director Perfect poll time exceeded!\n"); 1415 1416 return IXGBE_SUCCESS; 1417 } 1418 1419 1420 /** 1421 * ixgbe_atr_compute_hash_82599 - Compute the hashes for SW ATR 1422 * @stream: input bitstream to compute the hash on 1423 * @key: 32-bit hash key 1424 **/ 1425 u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, u32 key) 1426 { 1427 /* 1428 * The algorithm is as follows: 1429 * Hash[15:0] = Sum { S[n] x K[n+16] }, n = 0...350 1430 * where Sum {A[n]}, n = 0...n is bitwise XOR of A[0], A[1]...A[n] 1431 * and A[n] x B[n] is bitwise AND between same length strings 1432 * 1433 * K[n] is 16 bits, defined as: 1434 * for n modulo 32 >= 15, K[n] = K[n % 32 : (n % 32) - 15] 1435 * for n modulo 32 < 15, K[n] = 1436 * K[(n % 32:0) | (31:31 - (14 - (n % 32)))] 1437 * 1438 * S[n] is 16 bits, defined as: 1439 * for n >= 15, S[n] = S[n:n - 15] 1440 * for n < 15, S[n] = S[(n:0) | (350:350 - (14 - n))] 1441 * 1442 * To simplify for programming, the algorithm is implemented 1443 * in software this way: 1444 * 1445 * Key[31:0], Stream[335:0] 1446 * 1447 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times 1448 * int_key[350:0] = tmp_key[351:1] 1449 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321] 1450 * 1451 * hash[15:0] = 0; 1452 * for (i = 0; i < 351; i++) { 1453 * if (int_key[i]) 1454 * hash ^= int_stream[(i + 15):i]; 1455 * } 1456 */ 1457 1458 union { 1459 u32 key[11]; 1460 u8 key_stream[44]; 1461 } tmp_key; 1462 1463 u8 *stream = (u8 *)atr_input; 1464 u8 int_key[44]; /* upper-most 
bit unused */ 1465 u8 hash_str[46]; /* upper-most 2 bits unused */ 1466 u16 hash_result = 0; 1467 u16 tmp = 0; 1468 int i, j, k, h; 1469 1470 memset(&tmp_key, 0, sizeof(tmp_key)); 1471 /* First load the temporary key stream */ 1472 for (i = 0; i < 11; i++) 1473 tmp_key.key[i] = key; 1474 1475 /* 1476 * Set the interim key for the hashing. Bit 352 is unused, so we must 1477 * shift and compensate when building the key. 1478 */ 1479 int_key[0] = tmp_key.key_stream[0] >> 1; 1480 for (i = 1, j = 0; i < 44; i++) { 1481 int_key[i] = (tmp_key.key_stream[j] & 0x1) << 7; 1482 j++; 1483 int_key[i] |= tmp_key.key_stream[j] >> 1; 1484 } 1485 1486 /* 1487 * Set the interim bit string for the hashing. Bits 368 and 367 are 1488 * unused, so shift and compensate when building the string. 1489 */ 1490 hash_str[0] = (stream[40] & 0x7f) >> 1; 1491 for (i = 1, j = 40; i < 46; i++) { 1492 hash_str[i] = (stream[j] & 0x1) << 7; 1493 j++; 1494 if (j > 41) 1495 j = 0; 1496 hash_str[i] |= stream[j] >> 1; 1497 } 1498 1499 /* 1500 * Now compute the hash. i is the index into hash_str, j is into our 1501 * key stream, k is counting the number of bits, and h interates within 1502 * each byte. 1503 */ 1504 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) { 1505 for (h = 0; h < 8 && k < 351; h++, k++) { 1506 if ((int_key[j] >> h) & 0x1) { 1507 /* 1508 * Key bit is set, XOR in the current 16-bit 1509 * string. 
Example of processing: 1510 * h = 0, 1511 * tmp = (hash_str[i - 2] & 0 << 16) | 1512 * (hash_str[i - 1] & 0xff << 8) | 1513 * (hash_str[i] & 0xff >> 0) 1514 * So tmp = hash_str[15 + k:k], since the 1515 * i + 2 clause rolls off the 16-bit value 1516 * h = 7, 1517 * tmp = (hash_str[i - 2] & 0x7f << 9) | 1518 * (hash_str[i - 1] & 0xff << 1) | 1519 * (hash_str[i] & 0x80 >> 7) 1520 */ 1521 tmp = ((hash_str[i] & (0xff << h)) >> h); 1522 tmp |= ((hash_str[i - 1] & 0xff) << (8 - h)); 1523 tmp |= (hash_str[i - 2] & (0xff >> (8 - h))) 1524 << (16 - h); 1525 hash_result ^= tmp; 1526 } 1527 } 1528 } 1529 1530 return hash_result; 1531 } 1532 1533 /** 1534 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream 1535 * @input: input stream to modify 1536 * @vlan: the VLAN id to load 1537 **/ 1538 s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan) 1539 { 1540 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8; 1541 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff; 1542 1543 return IXGBE_SUCCESS; 1544 } 1545 1546 /** 1547 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address 1548 * @input: input stream to modify 1549 * @src_addr: the IP address to load 1550 **/ 1551 s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr) 1552 { 1553 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24; 1554 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] = 1555 (src_addr >> 16) & 0xff; 1556 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] = 1557 (src_addr >> 8) & 0xff; 1558 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff; 1559 1560 return IXGBE_SUCCESS; 1561 } 1562 1563 /** 1564 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address 1565 * @input: input stream to modify 1566 * @dst_addr: the IP address to load 1567 **/ 1568 s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr) 1569 { 1570 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr 
	    >> 24;
	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
	    (dst_addr >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
	    (dst_addr >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_atr_set_src_ipv6_82599 - Sets the source IPv6 address
 * @input: input stream to modify
 * @src_addr_1: the first 4 bytes of the IP address to load
 * @src_addr_2: the second 4 bytes of the IP address to load
 * @src_addr_3: the third 4 bytes of the IP address to load
 * @src_addr_4: the fourth 4 bytes of the IP address to load
 *
 * Each 32-bit chunk is stored little-endian into the byte stream;
 * src_addr_4 occupies the lowest stream offsets.
 **/
s32 ixgbe_atr_set_src_ipv6_82599(struct ixgbe_atr_input *input,
                                 u32 src_addr_1, u32 src_addr_2,
                                 u32 src_addr_3, u32 src_addr_4)
{
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET] = src_addr_4 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] =
	    (src_addr_4 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] =
	    (src_addr_4 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] = src_addr_4 >> 24;

	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4] = src_addr_3 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] =
	    (src_addr_3 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] =
	    (src_addr_3 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] = src_addr_3 >> 24;

	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8] = src_addr_2 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] =
	    (src_addr_2 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] =
	    (src_addr_2 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] = src_addr_2 >> 24;

	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12] = src_addr_1 & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] =
	    (src_addr_1 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] =
	    (src_addr_1 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] = src_addr_1 >> 24;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_atr_set_dst_ipv6_82599 - Sets the destination IPv6 address
 * @input: input stream to modify
 * @dst_addr_1: the first 4 bytes of the IP address to load
 * @dst_addr_2: the second 4 bytes of the IP address to load
 * @dst_addr_3: the third 4 bytes of the IP address to load
 * @dst_addr_4: the fourth 4 bytes of the IP address to load
 **/
s32 ixgbe_atr_set_dst_ipv6_82599(struct ixgbe_atr_input *input,
                                 u32 dst_addr_1, u32 dst_addr_2,
                                 u32 dst_addr_3, u32 dst_addr_4)
{
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET] = dst_addr_4 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] =
	    (dst_addr_4 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] =
	    (dst_addr_4 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] = dst_addr_4 >> 24;

	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4] = dst_addr_3 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] =
	    (dst_addr_3 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] =
	    (dst_addr_3 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] = dst_addr_3 >> 24;

	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8] = dst_addr_2 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] =
	    (dst_addr_2 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] =
	    (dst_addr_2 >> 16) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] = dst_addr_2 >> 24;

	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12] = dst_addr_1 & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] =
	    (dst_addr_1 >> 8) & 0xff;
	input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] =
	    (dst_addr_1 >> 16) & 0xff;
input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] = dst_addr_1 >> 24; 1662 1663 return IXGBE_SUCCESS; 1664 } 1665 1666 /** 1667 * ixgbe_atr_set_src_port_82599 - Sets the source port 1668 * @input: input stream to modify 1669 * @src_port: the source port to load 1670 **/ 1671 s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port) 1672 { 1673 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8; 1674 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff; 1675 1676 return IXGBE_SUCCESS; 1677 } 1678 1679 /** 1680 * ixgbe_atr_set_dst_port_82599 - Sets the destination port 1681 * @input: input stream to modify 1682 * @dst_port: the destination port to load 1683 **/ 1684 s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port) 1685 { 1686 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8; 1687 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff; 1688 1689 return IXGBE_SUCCESS; 1690 } 1691 1692 /** 1693 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes 1694 * @input: input stream to modify 1695 * @flex_bytes: the flexible bytes to load 1696 **/ 1697 s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte) 1698 { 1699 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8; 1700 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff; 1701 1702 return IXGBE_SUCCESS; 1703 } 1704 1705 /** 1706 * ixgbe_atr_set_vm_pool_82599 - Sets the Virtual Machine pool 1707 * @input: input stream to modify 1708 * @vm_pool: the Virtual Machine pool to load 1709 **/ 1710 s32 ixgbe_atr_set_vm_pool_82599(struct ixgbe_atr_input *input, u8 vm_pool) 1711 { 1712 input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET] = vm_pool; 1713 1714 return IXGBE_SUCCESS; 1715 } 1716 1717 /** 1718 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type 1719 * @input: input stream to modify 1720 * @l4type: the layer 4 type value to load 1721 **/ 1722 s32 
ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type) 1723 { 1724 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type; 1725 1726 return IXGBE_SUCCESS; 1727 } 1728 1729 /** 1730 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream 1731 * @input: input stream to search 1732 * @vlan: the VLAN id to load 1733 **/ 1734 s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan) 1735 { 1736 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET]; 1737 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8; 1738 1739 return IXGBE_SUCCESS; 1740 } 1741 1742 /** 1743 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address 1744 * @input: input stream to search 1745 * @src_addr: the IP address to load 1746 **/ 1747 s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input, u32 *src_addr) 1748 { 1749 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET]; 1750 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8; 1751 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16; 1752 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24; 1753 1754 return IXGBE_SUCCESS; 1755 } 1756 1757 /** 1758 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address 1759 * @input: input stream to search 1760 * @dst_addr: the IP address to load 1761 **/ 1762 s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 *dst_addr) 1763 { 1764 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; 1765 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; 1766 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; 1767 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; 1768 1769 return IXGBE_SUCCESS; 1770 } 1771 1772 /** 1773 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address 1774 * @input: input stream to search 1775 * @src_addr_1: the first 4 bytes of the IP address to load 1776 * @src_addr_2: the second 4 bytes of 
the IP address to load 1777 * @src_addr_3: the third 4 bytes of the IP address to load 1778 * @src_addr_4: the fourth 4 bytes of the IP address to load 1779 **/ 1780 s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input, 1781 u32 *src_addr_1, u32 *src_addr_2, 1782 u32 *src_addr_3, u32 *src_addr_4) 1783 { 1784 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12]; 1785 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8; 1786 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16; 1787 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24; 1788 1789 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8]; 1790 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8; 1791 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16; 1792 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24; 1793 1794 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4]; 1795 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8; 1796 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16; 1797 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24; 1798 1799 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET]; 1800 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8; 1801 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16; 1802 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24; 1803 1804 return IXGBE_SUCCESS; 1805 } 1806 1807 /** 1808 * ixgbe_atr_get_dst_ipv6_82599 - Gets the destination IPv6 address 1809 * @input: input stream to search 1810 * @dst_addr_1: the first 4 bytes of the IP address to load 1811 * @dst_addr_2: the second 4 bytes of the IP address to load 1812 * @dst_addr_3: the third 4 bytes of the IP address to load 1813 * @dst_addr_4: the fourth 4 bytes of the IP address to load 1814 **/ 1815 s32 ixgbe_atr_get_dst_ipv6_82599(struct 
ixgbe_atr_input *input, 1816 u32 *dst_addr_1, u32 *dst_addr_2, 1817 u32 *dst_addr_3, u32 *dst_addr_4) 1818 { 1819 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 12]; 1820 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 13] << 8; 1821 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 14] << 16; 1822 *dst_addr_1 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 15] << 24; 1823 1824 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 8]; 1825 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 9] << 8; 1826 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 10] << 16; 1827 *dst_addr_2 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 11] << 24; 1828 1829 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 4]; 1830 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 5] << 8; 1831 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 6] << 16; 1832 *dst_addr_3 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 7] << 24; 1833 1834 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET]; 1835 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 1] << 8; 1836 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 2] << 16; 1837 *dst_addr_4 = input->byte_stream[IXGBE_ATR_DST_IPV6_OFFSET + 3] << 24; 1838 1839 return IXGBE_SUCCESS; 1840 } 1841 1842 /** 1843 * ixgbe_atr_get_src_port_82599 - Gets the source port 1844 * @input: input stream to modify 1845 * @src_port: the source port to load 1846 * 1847 * Even though the input is given in big-endian, the FDIRPORT registers 1848 * expect the ports to be programmed in little-endian. Hence the need to swap 1849 * endianness when retrieving the data. This can be confusing since the 1850 * internal hash engine expects it to be big-endian. 
1851 **/ 1852 s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, u16 *src_port) 1853 { 1854 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; 1855 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; 1856 1857 return IXGBE_SUCCESS; 1858 } 1859 1860 /** 1861 * ixgbe_atr_get_dst_port_82599 - Gets the destination port 1862 * @input: input stream to modify 1863 * @dst_port: the destination port to load 1864 * 1865 * Even though the input is given in big-endian, the FDIRPORT registers 1866 * expect the ports to be programmed in little-endian. Hence the need to swap 1867 * endianness when retrieving the data. This can be confusing since the 1868 * internal hash engine expects it to be big-endian. 1869 **/ 1870 s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input, u16 *dst_port) 1871 { 1872 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8; 1873 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1]; 1874 1875 return IXGBE_SUCCESS; 1876 } 1877 1878 /** 1879 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes 1880 * @input: input stream to modify 1881 * @flex_bytes: the flexible bytes to load 1882 **/ 1883 s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input, u16 *flex_byte) 1884 { 1885 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET]; 1886 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8; 1887 1888 return IXGBE_SUCCESS; 1889 } 1890 1891 /** 1892 * ixgbe_atr_get_vm_pool_82599 - Gets the Virtual Machine pool 1893 * @input: input stream to modify 1894 * @vm_pool: the Virtual Machine pool to load 1895 **/ 1896 s32 ixgbe_atr_get_vm_pool_82599(struct ixgbe_atr_input *input, u8 *vm_pool) 1897 { 1898 *vm_pool = input->byte_stream[IXGBE_ATR_VM_POOL_OFFSET]; 1899 1900 return IXGBE_SUCCESS; 1901 } 1902 1903 /** 1904 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1905 * @input: input stream to modify 1906 * @l4type: the layer 4 type value to load 1907 **/ 
s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, u8 *l4type)
{
	*l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET];

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_add_signature_filter_82599 - Adds a signature hash filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @queue: queue index to direct traffic to
 *
 * Computes the bucket and signature hashes over @input and programs the
 * filter with a single 64-bit write covering the FDIRHASH/FDIRCMD pair.
 * Returns IXGBE_ERR_CONFIG (without touching the hardware) if the l4type
 * field of the stream is not TCP/UDP/SCTP.
 **/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
                                          struct ixgbe_atr_input *input,
                                          u8 queue)
{
	u64 fdirhashcmd;
	u64 fdircmd;
	u32 fdirhash;
	u16 bucket_hash, sig_hash;
	u8 l4type;

	bucket_hash = ixgbe_atr_compute_hash_82599(input,
	                                           IXGBE_ATR_BUCKET_HASH_KEY);

	/* bucket_hash is only 15 bits */
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	sig_hash = ixgbe_atr_compute_hash_82599(input,
	                                        IXGBE_ATR_SIGNATURE_HASH_KEY);

	/* Get the l4type in order to program FDIRCMD properly */
	/* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
	ixgbe_atr_get_l4type_82599(input, &l4type);

	/*
	 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
	 * is for FDIRCMD.  Then do a 64-bit register write from FDIRHASH.
	 */
	fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;

	fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
	           IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN);

	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
	case IXGBE_ATR_L4TYPE_TCP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
		break;
	case IXGBE_ATR_L4TYPE_UDP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
		break;
	case IXGBE_ATR_L4TYPE_SCTP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
		break;
	default:
		/* Bail out before any hardware access on a bad l4type */
		DEBUGOUT(" Error on l4type input\n");
		return IXGBE_ERR_CONFIG;
	}

	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK)
		fdircmd |= IXGBE_FDIRCMD_IPV6;

	fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT);
	fdirhashcmd = ((fdircmd << 32) | fdirhash);

	DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, fdirhash & 0x7FFF7FFF);
	IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
 * @hw: pointer to hardware structure
 * @input: input bitstream
 * @soft_id: software index placed in the upper half of FDIRHASH
 * @queue: queue index to direct traffic to
 *
 * Note that the caller to this function must lock before calling, since the
 * hardware writes must be protected from one another.
 **/
s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
                                        struct ixgbe_atr_input *input,
                                        u16 soft_id,
                                        u8 queue)
{
	u32 fdircmd = 0;
	u32 fdirhash;
	u32 src_ipv4, dst_ipv4;
	u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4;
	u16 src_port, dst_port, vlan_id, flex_bytes;
	u16 bucket_hash;
	u8 l4type;

	/* Get our input values */
	ixgbe_atr_get_l4type_82599(input, &l4type);

	/*
	 * Check l4type formatting, and bail out before we touch the hardware
	 * if there's a configuration issue
	 */
	switch (l4type & IXGBE_ATR_L4TYPE_MASK) {
	case IXGBE_ATR_L4TYPE_TCP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
		break;
	case IXGBE_ATR_L4TYPE_UDP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
		break;
	case IXGBE_ATR_L4TYPE_SCTP:
		fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
		break;
	default:
		DEBUGOUT(" Error on l4type input\n");
		return IXGBE_ERR_CONFIG;
	}

	bucket_hash = ixgbe_atr_compute_hash_82599(input,
	                                           IXGBE_ATR_BUCKET_HASH_KEY);

	/* bucket_hash is only 15 bits */
	bucket_hash &= IXGBE_ATR_HASH_MASK;

	/* Pull the remaining match fields out of the input stream */
	ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
	ixgbe_atr_get_src_port_82599(input, &src_port);
	ixgbe_atr_get_dst_port_82599(input, &dst_port);
	ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);

	fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;

	/* Now figure out if we're IPv4 or IPv6 */
	if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
		/* IPv6 */
		ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
		                             &src_ipv6_3, &src_ipv6_4);

		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
		/* The last 4 bytes is the same register as IPv4 */
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);

		fdircmd |= IXGBE_FDIRCMD_IPV6;
		fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
	} else {
		/* IPv4 */
		ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);

	}

	ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
	                (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
	                (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));

	fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW;
	fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE;
	fdircmd |= IXGBE_FDIRCMD_LAST;
	fdircmd |= IXGBE_FDIRCMD_QUEUE_EN;
	fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;

	/*
	 * NOTE(review): FDIRHASH is written before FDIRCMD — presumably the
	 * FDIRCMD write triggers the filter add, so keep this order.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	/* Issue the command, flush it to hardware, then give it 10us to
	 * complete before reading the result back out of CORECTL */
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
	                (reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	usec_delay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	/* Only the low byte of CORECTL carries the register value */
	*val = (u8)core_ctl;

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: Omer analog register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	/* Register number goes in bits 15:8, the value in the low byte */
	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	/* Allow the write to settle before the caller issues another access */
	usec_delay(10);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_start_hw_rev_1_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function.
 * Then performs revision-specific operations:
 * Clears the rate limiter registers.
 **/
s32 ixgbe_start_hw_rev_1_82599(struct ixgbe_hw *hw)
{
	u32 q_num;
	s32 ret_val = IXGBE_SUCCESS;

	ret_val = ixgbe_start_hw_generic(hw);

	/* Clear the rate limiters: select each Tx queue via RTTDQSEL, then
	 * zero its rate-control register */
	for (q_num = 0; q_num < hw->mac.max_tx_queues; q_num++) {
		IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, q_num);
		IXGBE_WRITE_REG(hw, IXGBE_RTTBCNRC, 0);
	}
	IXGBE_WRITE_FLUSH(hw);

	/* NOTE(review): ret_val from start_hw_generic is returned even though
	 * the rate limiters are cleared regardless of its success */
	return ret_val;
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If PHY already detected, maintains current PHY type in hw struct,
 * otherwise executes the PHY detection routine.  Falls back to SFP module
 * identification when no copper PHY is found.
 **/
s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;

	/* Detect PHY if not unknown - returns success if already detected. */
	status = ixgbe_identify_phy_generic(hw);
	if (status != IXGBE_SUCCESS)
		status = ixgbe_identify_sfp_module_generic(hw);
	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		status = IXGBE_SUCCESS;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		status = IXGBE_ERR_SFP_NOT_SUPPORTED;

	return status;
}

/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 * Copper PHYs are queried over MDIO; otherwise the AUTOC/AUTOC2 link mode
 * fields are decoded, with SFP identification as the final fallback.
 **/
u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;
	u8 comp_codes_10g = 0;

	hw->phy.ops.identify(hw);

	/* Copper PHY present: report abilities straight from the PHY's
	 * extended-ability MDIO register */
	if (hw->phy.type == ixgbe_phy_tn ||
	    hw->phy.type == ixgbe_phy_aq ||
	    hw->phy.type == ixgbe_phy_cu_unknown) {
		hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY,
		                     IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability);
		if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	}

	/* No copper PHY: decode the link mode select field of AUTOC */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		} else
			/* SFI mode so read SFP module */
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		goto out;
		break;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
		break;
	default:
		goto out;
		break;
	}

sfp_check:
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode - we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed */
	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		goto out;

	switch (hw->phy.type) {
	case ixgbe_phy_tw_tyco:
	case ixgbe_phy_tw_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		/* Optical module: distinguish SR/LR via the SFF compliance
		 * codes in the module EEPROM */
		hw->phy.ops.read_i2c_eeprom(hw,
		    IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599.  Disables the security engine and
 * polls until it reports ready before enabling Rx, per the silicon errata
 * workaround described below, then re-enables the security path.
 **/
s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
#define IXGBE_MAX_SECRX_POLL 30
	int i;
	int secrxreg;

	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg |= IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	/* Poll (up to 30 x 10us) for the security path to drain */
	for (i = 0; i < IXGBE_MAX_SECRX_POLL; i++) {
		secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
		if (secrxreg & IXGBE_SECRXSTAT_SECRX_RDY)
			break;
		else
			/* Use interrupt-safe sleep just in case */
			usec_delay(10);
	}

	/* For informational purposes only */
	if (i >= IXGBE_MAX_SECRX_POLL)
		DEBUGOUT("Rx unit being enabled before security "
		         "path fully disabled.  Continuing with init.\n");

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);
	secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS;
	IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg);
	IXGBE_WRITE_FLUSH(hw);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_device_caps_82599 - Get additional device capabilities
 * @hw: pointer to hardware structure
 * @device_caps: the EEPROM word with the extra device capabilities
 *
 * This function will read the EEPROM location for the device capabilities,
 * and return the word through device_caps.
 **/
s32 ixgbe_get_device_caps_82599(struct ixgbe_hw *hw, u16 *device_caps)
{
	/* NOTE(review): the EEPROM read's return value is ignored here */
	hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_san_mac_addr_offset_82599 - SAN MAC address offset for 82599
 * @hw: pointer to hardware structure
 * @san_mac_offset: SAN MAC address offset
 *
 * This function will read the EEPROM location for the SAN MAC address
 * pointer, and returns the value at that location.  This is used in both
 * get and set mac_addr routines.
 **/
s32 ixgbe_get_san_mac_addr_offset_82599(struct ixgbe_hw *hw,
                                        u16 *san_mac_offset)
{
	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.
	 */
	hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, san_mac_offset);

	return IXGBE_SUCCESS;
}

/**
 * ixgbe_get_san_mac_addr_82599 - SAN MAC address retrieval for 82599
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address (6 bytes, filled with 0xFF when no
 *                address is present in the EEPROM)
 *
 * Reads the SAN MAC address from the EEPROM, if it's available.  This is
 * per-port, so set_lan_id() must be called before reading the addresses.
 * set_lan_id() is called by identify_sfp(), but this cannot be relied
 * upon for non-SFP connections, so we must call it here.
 **/
s32 ixgbe_get_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	u16 san_mac_data, san_mac_offset;
	u8 i;

	/*
	 * First read the EEPROM pointer to see if the MAC addresses are
	 * available.  If they're not, no point in calling set_lan_id() here.
	 */
	ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);

	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
		/*
		 * No addresses available in this EEPROM.  It's not an
		 * error though, so just wipe the local address and return.
		 */
		for (i = 0; i < 6; i++)
			san_mac_addr[i] = 0xFF;

		goto san_mac_addr_out;
	}

	/* make sure we know which port we need to program */
	hw->mac.ops.set_lan_id(hw);
	/* apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
	                 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
	/* Each EEPROM word holds two address bytes, low byte first */
	for (i = 0; i < 3; i++) {
		hw->eeprom.ops.read(hw, san_mac_offset, &san_mac_data);
		san_mac_addr[i * 2] = (u8)(san_mac_data);
		san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8);
		san_mac_offset++;
	}

san_mac_addr_out:
	return IXGBE_SUCCESS;
}

/**
 * ixgbe_set_san_mac_addr_82599 - Write the SAN MAC address to the EEPROM
 * @hw: pointer to hardware structure
 * @san_mac_addr: SAN MAC address (6 bytes)
 *
 * Write a SAN MAC address to the EEPROM.  Returns
 * IXGBE_ERR_NO_SAN_ADDR_PTR when the EEPROM has no SAN address pointer.
 **/
s32 ixgbe_set_san_mac_addr_82599(struct ixgbe_hw *hw, u8 *san_mac_addr)
{
	s32 status = IXGBE_SUCCESS;
	u16 san_mac_data, san_mac_offset;
	u8 i;

	/* Look for SAN mac address pointer.  If not defined, return */
	ixgbe_get_san_mac_addr_offset_82599(hw, &san_mac_offset);

	if ((san_mac_offset == 0) || (san_mac_offset == 0xFFFF)) {
		status = IXGBE_ERR_NO_SAN_ADDR_PTR;
		goto san_mac_addr_out;
	}

	/* Make sure we know which port we need to write */
	hw->mac.ops.set_lan_id(hw);
	/* Apply the port offset to the address offset */
	(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
	                 (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);

	/* Pack two address bytes per EEPROM word, mirroring the get routine */
	for (i = 0; i < 3; i++) {
		san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8);
		san_mac_data |= (u16)(san_mac_addr[i * 2]);
		hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data);
		san_mac_offset++;
	}

san_mac_addr_out:
	return status;
}