1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2005 - 2016 Broadcom 4 * All rights reserved. 5 * 6 * Contact Information: 7 * linux-drivers@emulex.com 8 * 9 * Emulex 10 * 3333 Susan Street 11 * Costa Mesa, CA 92626 12 */ 13 14 #include <linux/module.h> 15 #include "be.h" 16 #include "be_cmds.h" 17 18 const char * const be_misconfig_evt_port_state[] = { 19 "Physical Link is functional", 20 "Optics faulted/incorrectly installed/not installed - Reseat optics. If issue not resolved, replace.", 21 "Optics of two types installed – Remove one optic or install matching pair of optics.", 22 "Incompatible optics – Replace with compatible optics for card to function.", 23 "Unqualified optics – Replace with Avago optics for Warranty and Technical Support.", 24 "Uncertified optics – Replace with Avago-certified optics to enable link operation." 25 }; 26 27 static char *be_port_misconfig_evt_severity[] = { 28 "KERN_WARN", 29 "KERN_INFO", 30 "KERN_ERR", 31 "KERN_WARN" 32 }; 33 34 static char *phy_state_oper_desc[] = { 35 "Link is non-operational", 36 "Link is operational", 37 "" 38 }; 39 40 static struct be_cmd_priv_map cmd_priv_map[] = { 41 { 42 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 43 CMD_SUBSYSTEM_ETH, 44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 46 }, 47 { 48 OPCODE_COMMON_GET_FLOW_CONTROL, 49 CMD_SUBSYSTEM_COMMON, 50 BE_PRIV_LNKQUERY | BE_PRIV_VHADM | 51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 52 }, 53 { 54 OPCODE_COMMON_SET_FLOW_CONTROL, 55 CMD_SUBSYSTEM_COMMON, 56 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 57 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 58 }, 59 { 60 OPCODE_ETH_GET_PPORT_STATS, 61 CMD_SUBSYSTEM_ETH, 62 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 63 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 64 }, 65 { 66 OPCODE_COMMON_GET_PHY_DETAILS, 67 CMD_SUBSYSTEM_COMMON, 68 BE_PRIV_LNKMGMT | BE_PRIV_VHADM | 69 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 70 }, 71 { 72 OPCODE_LOWLEVEL_HOST_DDR_DMA, 73 CMD_SUBSYSTEM_LOWLEVEL, 74 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 75 }, 76 { 77 OPCODE_LOWLEVEL_LOOPBACK_TEST, 78 CMD_SUBSYSTEM_LOWLEVEL, 79 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 80 }, 81 { 82 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, 83 CMD_SUBSYSTEM_LOWLEVEL, 84 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC 85 }, 86 { 87 OPCODE_COMMON_SET_HSW_CONFIG, 88 CMD_SUBSYSTEM_COMMON, 89 BE_PRIV_DEVCFG | BE_PRIV_VHADM | 90 BE_PRIV_DEVSEC 91 }, 92 { 93 OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES, 94 CMD_SUBSYSTEM_COMMON, 95 BE_PRIV_DEVCFG 96 } 97 }; 98 99 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem) 100 { 101 int i; 102 int num_entries = ARRAY_SIZE(cmd_priv_map); 103 u32 cmd_privileges = adapter->cmd_privileges; 104 105 for (i = 0; i < num_entries; i++) 106 if (opcode == cmd_priv_map[i].opcode && 107 subsystem == cmd_priv_map[i].subsystem) 108 if (!(cmd_privileges & cmd_priv_map[i].priv_mask)) 109 return false; 110 111 return true; 112 } 113 114 static inline void *embedded_payload(struct be_mcc_wrb *wrb) 115 { 116 return wrb->payload.embedded_payload; 117 } 118 119 static int be_mcc_notify(struct be_adapter *adapter) 120 { 121 struct be_queue_info *mccq = &adapter->mcc_obj.q; 122 u32 val = 0; 123 124 if (be_check_error(adapter, BE_ERROR_ANY)) 125 return -EIO; 126 127 val |= mccq->id & DB_MCCQ_RING_ID_MASK; 128 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; 129 130 wmb(); 131 iowrite32(val, adapter->db + DB_MCCQ_OFFSET); 132 133 return 0; 134 } 135 136 /* To check if valid bit is set, check the entire word as we don't know 137 * the endianness of the data (old entry is host endian while a new entry is 138 * little endian) 139 */ 140 static 
inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) 141 { 142 u32 flags; 143 144 if (compl->flags != 0) { 145 flags = le32_to_cpu(compl->flags); 146 if (flags & CQE_FLAGS_VALID_MASK) { 147 compl->flags = flags; 148 return true; 149 } 150 } 151 return false; 152 } 153 154 /* Need to reset the entire word that houses the valid bit */ 155 static inline void be_mcc_compl_use(struct be_mcc_compl *compl) 156 { 157 compl->flags = 0; 158 } 159 160 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1) 161 { 162 unsigned long addr; 163 164 addr = tag1; 165 addr = ((addr << 16) << 16) | tag0; 166 return (void *)addr; 167 } 168 169 static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status) 170 { 171 if (base_status == MCC_STATUS_NOT_SUPPORTED || 172 base_status == MCC_STATUS_ILLEGAL_REQUEST || 173 addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES || 174 addl_status == MCC_ADDL_STATUS_INSUFFICIENT_VLANS || 175 (opcode == OPCODE_COMMON_WRITE_FLASHROM && 176 (base_status == MCC_STATUS_ILLEGAL_FIELD || 177 addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH))) 178 return true; 179 else 180 return false; 181 } 182 183 /* Place holder for all the async MCC cmds wherein the caller is not in a busy 184 * loop (has not issued be_mcc_notify_wait()) 185 */ 186 static void be_async_cmd_process(struct be_adapter *adapter, 187 struct be_mcc_compl *compl, 188 struct be_cmd_resp_hdr *resp_hdr) 189 { 190 enum mcc_base_status base_status = base_status(compl->status); 191 u8 opcode = 0, subsystem = 0; 192 193 if (resp_hdr) { 194 opcode = resp_hdr->opcode; 195 subsystem = resp_hdr->subsystem; 196 } 197 198 if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST && 199 subsystem == CMD_SUBSYSTEM_LOWLEVEL) { 200 complete(&adapter->et_cmd_compl); 201 return; 202 } 203 204 if (opcode == OPCODE_LOWLEVEL_SET_LOOPBACK_MODE && 205 subsystem == CMD_SUBSYSTEM_LOWLEVEL) { 206 complete(&adapter->et_cmd_compl); 207 return; 208 } 209 210 if ((opcode == OPCODE_COMMON_WRITE_FLASHROM || 211 opcode == OPCODE_COMMON_WRITE_OBJECT) && 212 subsystem == CMD_SUBSYSTEM_COMMON) { 213 adapter->flash_status = compl->status; 214 complete(&adapter->et_cmd_compl); 215 return; 216 } 217 218 if ((opcode == OPCODE_ETH_GET_STATISTICS || 219 opcode == OPCODE_ETH_GET_PPORT_STATS) && 220 subsystem == CMD_SUBSYSTEM_ETH && 221 base_status == MCC_STATUS_SUCCESS) { 222 be_parse_stats(adapter); 223 adapter->stats_cmd_sent = false; 224 return; 225 } 226 227 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES && 228 subsystem == CMD_SUBSYSTEM_COMMON) { 229 if (base_status == MCC_STATUS_SUCCESS) { 230 struct be_cmd_resp_get_cntl_addnl_attribs *resp = 231 (void *)resp_hdr; 232 adapter->hwmon_info.be_on_die_temp = 233 resp->on_die_temperature; 234 } else { 235 adapter->be_get_temp_freq = 0; 236 adapter->hwmon_info.be_on_die_temp = 237 BE_INVALID_DIE_TEMP; 238 } 239 return; 240 } 241 } 242 243 static int be_mcc_compl_process(struct be_adapter *adapter, 244 struct be_mcc_compl *compl) 245 { 246 enum mcc_base_status base_status; 247 enum mcc_addl_status addl_status; 248 struct be_cmd_resp_hdr *resp_hdr; 249 u8 opcode = 0, subsystem = 0; 250 251 /* Just swap the status to host endian; mcc tag is opaquely copied 252 * from mcc_wrb 253 */ 254 be_dws_le_to_cpu(compl, 4); 255 256 base_status = base_status(compl->status); 257 addl_status = addl_status(compl->status); 258 259 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1); 260 if (resp_hdr) { 261 opcode = resp_hdr->opcode; 262 subsystem = resp_hdr->subsystem; 263 } 264 265 
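	/* Descriptive note, derived from be_async_cmd_process() above: the
	 * call below lets the asynchronously-issued commands (loopback test,
	 * flashrom/object writes, stats refresh, die-temperature query) act
	 * on this completion (waking up waiters or caching the result)
	 * before the status is checked and logged.
	 */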
be_async_cmd_process(adapter, compl, resp_hdr); 266 267 if (base_status != MCC_STATUS_SUCCESS && 268 !be_skip_err_log(opcode, base_status, addl_status)) { 269 if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST || 270 addl_status == MCC_ADDL_STATUS_INSUFFICIENT_PRIVILEGES) { 271 dev_warn(&adapter->pdev->dev, 272 "VF is not privileged to issue opcode %d-%d\n", 273 opcode, subsystem); 274 } else { 275 dev_err(&adapter->pdev->dev, 276 "opcode %d-%d failed:status %d-%d\n", 277 opcode, subsystem, base_status, addl_status); 278 } 279 } 280 return compl->status; 281 } 282 283 /* Link state evt is a string of bytes; no need for endian swapping */ 284 static void be_async_link_state_process(struct be_adapter *adapter, 285 struct be_mcc_compl *compl) 286 { 287 struct be_async_event_link_state *evt = 288 (struct be_async_event_link_state *)compl; 289 290 /* When link status changes, link speed must be re-queried from FW */ 291 adapter->phy.link_speed = -1; 292 293 /* On BEx the FW does not send a separate link status 294 * notification for physical and logical link. 295 * On other chips just process the logical link 296 * status notification 297 */ 298 if (!BEx_chip(adapter) && 299 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK)) 300 return; 301 302 /* For the initial link status do not rely on the ASYNC event as 303 * it may not be received in some cases. 304 */ 305 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) 306 be_link_status_update(adapter, 307 evt->port_link_status & LINK_STATUS_MASK); 308 } 309 310 static void be_async_port_misconfig_event_process(struct be_adapter *adapter, 311 struct be_mcc_compl *compl) 312 { 313 struct be_async_event_misconfig_port *evt = 314 (struct be_async_event_misconfig_port *)compl; 315 u32 sfp_misconfig_evt_word1 = le32_to_cpu(evt->event_data_word1); 316 u32 sfp_misconfig_evt_word2 = le32_to_cpu(evt->event_data_word2); 317 u8 phy_oper_state = PHY_STATE_OPER_MSG_NONE; 318 struct device *dev = &adapter->pdev->dev; 319 u8 msg_severity = DEFAULT_MSG_SEVERITY; 320 u8 phy_state_info; 321 u8 new_phy_state; 322 323 new_phy_state = 324 (sfp_misconfig_evt_word1 >> (adapter->hba_port_num * 8)) & 0xff; 325 326 if (new_phy_state == adapter->phy_state) 327 return; 328 329 adapter->phy_state = new_phy_state; 330 331 /* for older fw that doesn't populate link effect data */ 332 if (!sfp_misconfig_evt_word2) 333 goto log_message; 334 335 phy_state_info = 336 (sfp_misconfig_evt_word2 >> (adapter->hba_port_num * 8)) & 0xff; 337 338 if (phy_state_info & PHY_STATE_INFO_VALID) { 339 msg_severity = (phy_state_info & PHY_STATE_MSG_SEVERITY) >> 1; 340 341 if (be_phy_unqualified(new_phy_state)) 342 phy_oper_state = (phy_state_info & PHY_STATE_OPER); 343 } 344 345 log_message: 346 /* Log an error message that would allow a user to determine 347 * whether the SFPs have an issue 348 */ 349 if (be_phy_state_unknown(new_phy_state)) 350 dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, 351 "Port %c: Unrecognized Optics state: 0x%x. %s", 352 adapter->port_name, 353 new_phy_state, 354 phy_state_oper_desc[phy_oper_state]); 355 else 356 dev_printk(be_port_misconfig_evt_severity[msg_severity], dev, 357 "Port %c: %s %s", 358 adapter->port_name, 359 be_misconfig_evt_port_state[new_phy_state], 360 phy_state_oper_desc[phy_oper_state]); 361 362 /* Log Vendor name and part no. 
if a misconfigured SFP is detected */ 363 if (be_phy_misconfigured(new_phy_state)) 364 adapter->flags |= BE_FLAGS_PHY_MISCONFIGURED; 365 } 366 367 /* Grp5 CoS Priority evt */ 368 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, 369 struct be_mcc_compl *compl) 370 { 371 struct be_async_event_grp5_cos_priority *evt = 372 (struct be_async_event_grp5_cos_priority *)compl; 373 374 if (evt->valid) { 375 adapter->vlan_prio_bmap = evt->available_priority_bmap; 376 adapter->recommended_prio_bits = 377 evt->reco_default_priority << VLAN_PRIO_SHIFT; 378 } 379 } 380 381 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */ 382 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, 383 struct be_mcc_compl *compl) 384 { 385 struct be_async_event_grp5_qos_link_speed *evt = 386 (struct be_async_event_grp5_qos_link_speed *)compl; 387 388 if (adapter->phy.link_speed >= 0 && 389 evt->physical_port == adapter->port_num) 390 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10; 391 } 392 393 /*Grp5 PVID evt*/ 394 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, 395 struct be_mcc_compl *compl) 396 { 397 struct be_async_event_grp5_pvid_state *evt = 398 (struct be_async_event_grp5_pvid_state *)compl; 399 400 if (evt->enabled) { 401 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; 402 dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid); 403 } else { 404 adapter->pvid = 0; 405 } 406 } 407 408 #define MGMT_ENABLE_MASK 0x4 409 static void be_async_grp5_fw_control_process(struct be_adapter *adapter, 410 struct be_mcc_compl *compl) 411 { 412 struct be_async_fw_control *evt = (struct be_async_fw_control *)compl; 413 u32 evt_dw1 = le32_to_cpu(evt->event_data_word1); 414 415 if (evt_dw1 & MGMT_ENABLE_MASK) { 416 adapter->flags |= BE_FLAGS_OS2BMC; 417 adapter->bmc_filt_mask = le32_to_cpu(evt->event_data_word2); 418 } else { 419 adapter->flags &= ~BE_FLAGS_OS2BMC; 420 } 421 } 422 423 static void be_async_grp5_evt_process(struct be_adapter *adapter, 424 struct be_mcc_compl *compl) 425 { 426 u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) & 427 ASYNC_EVENT_TYPE_MASK; 428 429 switch (event_type) { 430 case ASYNC_EVENT_COS_PRIORITY: 431 be_async_grp5_cos_priority_process(adapter, compl); 432 break; 433 case ASYNC_EVENT_QOS_SPEED: 434 be_async_grp5_qos_speed_process(adapter, compl); 435 break; 436 case ASYNC_EVENT_PVID_STATE: 437 be_async_grp5_pvid_state_process(adapter, compl); 438 break; 439 /* Async event to disable/enable os2bmc and/or mac-learning */ 440 case ASYNC_EVENT_FW_CONTROL: 441 be_async_grp5_fw_control_process(adapter, compl); 442 break; 443 default: 444 break; 445 } 446 } 447 448 static void be_async_dbg_evt_process(struct be_adapter *adapter, 449 struct be_mcc_compl *cmp) 450 { 451 u8 event_type = 0; 452 struct be_async_event_qnq *evt = (struct be_async_event_qnq *)cmp; 453 454 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & 455 ASYNC_EVENT_TYPE_MASK; 456 457 switch (event_type) { 458 case ASYNC_DEBUG_EVENT_TYPE_QNQ: 459 if (evt->valid) 460 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag); 461 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD; 462 break; 463 default: 464 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n", 465 event_type); 466 break; 467 } 468 } 469 470 static void be_async_sliport_evt_process(struct be_adapter *adapter, 471 struct be_mcc_compl *cmp) 472 { 473 u8 event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) & 474 ASYNC_EVENT_TYPE_MASK; 475 476 if (event_type == 
ASYNC_EVENT_PORT_MISCONFIG) 477 be_async_port_misconfig_event_process(adapter, cmp); 478 } 479 480 static inline bool is_link_state_evt(u32 flags) 481 { 482 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 483 ASYNC_EVENT_CODE_LINK_STATE; 484 } 485 486 static inline bool is_grp5_evt(u32 flags) 487 { 488 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 489 ASYNC_EVENT_CODE_GRP_5; 490 } 491 492 static inline bool is_dbg_evt(u32 flags) 493 { 494 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 495 ASYNC_EVENT_CODE_QNQ; 496 } 497 498 static inline bool is_sliport_evt(u32 flags) 499 { 500 return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) == 501 ASYNC_EVENT_CODE_SLIPORT; 502 } 503 504 static void be_mcc_event_process(struct be_adapter *adapter, 505 struct be_mcc_compl *compl) 506 { 507 if (is_link_state_evt(compl->flags)) 508 be_async_link_state_process(adapter, compl); 509 else if (is_grp5_evt(compl->flags)) 510 be_async_grp5_evt_process(adapter, compl); 511 else if (is_dbg_evt(compl->flags)) 512 be_async_dbg_evt_process(adapter, compl); 513 else if (is_sliport_evt(compl->flags)) 514 be_async_sliport_evt_process(adapter, compl); 515 } 516 517 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) 518 { 519 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; 520 struct be_mcc_compl *compl = queue_tail_node(mcc_cq); 521 522 if (be_mcc_compl_is_new(compl)) { 523 queue_tail_inc(mcc_cq); 524 return compl; 525 } 526 return NULL; 527 } 528 529 void be_async_mcc_enable(struct be_adapter *adapter) 530 { 531 spin_lock_bh(&adapter->mcc_cq_lock); 532 533 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); 534 adapter->mcc_obj.rearm_cq = true; 535 536 spin_unlock_bh(&adapter->mcc_cq_lock); 537 } 538 539 void be_async_mcc_disable(struct be_adapter *adapter) 540 { 541 spin_lock_bh(&adapter->mcc_cq_lock); 542 543 adapter->mcc_obj.rearm_cq = false; 544 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0); 545 546 spin_unlock_bh(&adapter->mcc_cq_lock); 547 } 548 549 int be_process_mcc(struct be_adapter *adapter) 550 { 551 struct be_mcc_compl *compl; 552 int num = 0, status = 0; 553 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 554 555 spin_lock(&adapter->mcc_cq_lock); 556 557 while ((compl = be_mcc_compl_get(adapter))) { 558 if (compl->flags & CQE_FLAGS_ASYNC_MASK) { 559 be_mcc_event_process(adapter, compl); 560 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { 561 status = be_mcc_compl_process(adapter, compl); 562 atomic_dec(&mcc_obj->q.used); 563 } 564 be_mcc_compl_use(compl); 565 num++; 566 } 567 568 if (num) 569 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); 570 571 spin_unlock(&adapter->mcc_cq_lock); 572 return status; 573 } 574 575 /* Wait till no more pending mcc requests are present */ 576 static int be_mcc_wait_compl(struct be_adapter *adapter) 577 { 578 #define mcc_timeout 120000 /* 12s timeout */ 579 int i, status = 0; 580 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 581 582 for (i = 0; i < mcc_timeout; i++) { 583 if (be_check_error(adapter, BE_ERROR_ANY)) 584 return -EIO; 585 586 local_bh_disable(); 587 status = be_process_mcc(adapter); 588 local_bh_enable(); 589 590 if (atomic_read(&mcc_obj->q.used) == 0) 591 break; 592 udelay(100); 593 } 594 if (i == mcc_timeout) { 595 dev_err(&adapter->pdev->dev, "FW not responding\n"); 596 be_set_error(adapter, BE_ERROR_FW); 597 return -EIO; 598 } 599 return status; 600 } 601 602 /* Notify MCC requests and wait for completion */ 603 
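/* Note on how the caller gets its command status back (descriptive only,
 * derived from the code below and from fill_wrb_tags()): the address of the
 * request header was stashed in wrb->tag0/tag1 when the WRB was prepared, so
 * once the MCC completes, be_decode_resp_hdr() rebuilds that pointer and the
 * base and additional status fields of the response are folded into the
 * single int return value.
 */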
static int be_mcc_notify_wait(struct be_adapter *adapter) 604 { 605 int status; 606 struct be_mcc_wrb *wrb; 607 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; 608 u32 index = mcc_obj->q.head; 609 struct be_cmd_resp_hdr *resp; 610 611 index_dec(&index, mcc_obj->q.len); 612 wrb = queue_index_node(&mcc_obj->q, index); 613 614 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1); 615 616 status = be_mcc_notify(adapter); 617 if (status) 618 goto out; 619 620 status = be_mcc_wait_compl(adapter); 621 if (status == -EIO) 622 goto out; 623 624 status = (resp->base_status | 625 ((resp->addl_status & CQE_ADDL_STATUS_MASK) << 626 CQE_ADDL_STATUS_SHIFT)); 627 out: 628 return status; 629 } 630 631 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) 632 { 633 int msecs = 0; 634 u32 ready; 635 636 do { 637 if (be_check_error(adapter, BE_ERROR_ANY)) 638 return -EIO; 639 640 ready = ioread32(db); 641 if (ready == 0xffffffff) 642 return -1; 643 644 ready &= MPU_MAILBOX_DB_RDY_MASK; 645 if (ready) 646 break; 647 648 if (msecs > 4000) { 649 dev_err(&adapter->pdev->dev, "FW not responding\n"); 650 be_set_error(adapter, BE_ERROR_FW); 651 be_detect_error(adapter); 652 return -1; 653 } 654 655 msleep(1); 656 msecs++; 657 } while (true); 658 659 return 0; 660 } 661 662 /* Insert the mailbox address into the doorbell in two steps 663 * Polls on the mbox doorbell till a command completion (or a timeout) occurs 664 */ 665 static int be_mbox_notify_wait(struct be_adapter *adapter) 666 { 667 int status; 668 u32 val = 0; 669 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; 670 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 671 struct be_mcc_mailbox *mbox = mbox_mem->va; 672 struct be_mcc_compl *compl = &mbox->compl; 673 674 /* wait for ready to be set */ 675 status = be_mbox_db_ready_wait(adapter, db); 676 if (status != 0) 677 return status; 678 679 val |= MPU_MAILBOX_DB_HI_MASK; 680 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ 681 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; 682 iowrite32(val, db); 683 684 /* wait for ready to be set */ 685 status = be_mbox_db_ready_wait(adapter, db); 686 if (status != 0) 687 return status; 688 689 val = 0; 690 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ 691 val |= (u32)(mbox_mem->dma >> 4) << 2; 692 iowrite32(val, db); 693 694 status = be_mbox_db_ready_wait(adapter, db); 695 if (status != 0) 696 return status; 697 698 /* A cq entry has been made now */ 699 if (be_mcc_compl_is_new(compl)) { 700 status = be_mcc_compl_process(adapter, &mbox->compl); 701 be_mcc_compl_use(compl); 702 if (status) 703 return status; 704 } else { 705 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); 706 return -1; 707 } 708 return 0; 709 } 710 711 u16 be_POST_stage_get(struct be_adapter *adapter) 712 { 713 u32 sem; 714 715 if (BEx_chip(adapter)) 716 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx); 717 else 718 pci_read_config_dword(adapter->pdev, 719 SLIPORT_SEMAPHORE_OFFSET_SH, &sem); 720 721 return sem & POST_STAGE_MASK; 722 } 723 724 static int lancer_wait_ready(struct be_adapter *adapter) 725 { 726 #define SLIPORT_READY_TIMEOUT 30 727 u32 sliport_status; 728 int i; 729 730 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) { 731 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 732 if (sliport_status & SLIPORT_STATUS_RDY_MASK) 733 return 0; 734 735 if (sliport_status & SLIPORT_STATUS_ERR_MASK && 736 !(sliport_status & SLIPORT_STATUS_RN_MASK)) 737 return -EIO; 738 739 msleep(1000); 740 } 741 742 return 
sliport_status ? : -1; 743 } 744 745 int be_fw_wait_ready(struct be_adapter *adapter) 746 { 747 u16 stage; 748 int status, timeout = 0; 749 struct device *dev = &adapter->pdev->dev; 750 751 if (lancer_chip(adapter)) { 752 status = lancer_wait_ready(adapter); 753 if (status) { 754 stage = status; 755 goto err; 756 } 757 return 0; 758 } 759 760 do { 761 /* There's no means to poll POST state on BE2/3 VFs */ 762 if (BEx_chip(adapter) && be_virtfn(adapter)) 763 return 0; 764 765 stage = be_POST_stage_get(adapter); 766 if (stage == POST_STAGE_ARMFW_RDY) 767 return 0; 768 769 dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout); 770 if (msleep_interruptible(2000)) { 771 dev_err(dev, "Waiting for POST aborted\n"); 772 return -EINTR; 773 } 774 timeout += 2; 775 } while (timeout < 60); 776 777 err: 778 dev_err(dev, "POST timeout; stage=%#x\n", stage); 779 return -ETIMEDOUT; 780 } 781 782 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) 783 { 784 return &wrb->payload.sgl[0]; 785 } 786 787 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr) 788 { 789 wrb->tag0 = addr & 0xFFFFFFFF; 790 wrb->tag1 = upper_32_bits(addr); 791 } 792 793 /* Don't touch the hdr after it's prepared */ 794 /* mem will be NULL for embedded commands */ 795 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, 796 u8 subsystem, u8 opcode, int cmd_len, 797 struct be_mcc_wrb *wrb, 798 struct be_dma_mem *mem) 799 { 800 struct be_sge *sge; 801 802 req_hdr->opcode = opcode; 803 req_hdr->subsystem = subsystem; 804 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); 805 req_hdr->version = 0; 806 fill_wrb_tags(wrb, (ulong)req_hdr); 807 wrb->payload_length = cmd_len; 808 if (mem) { 809 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << 810 MCC_WRB_SGE_CNT_SHIFT; 811 sge = nonembedded_sgl(wrb); 812 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); 813 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); 814 sge->len = cpu_to_le32(mem->size); 815 } else 816 wrb->embedded |= MCC_WRB_EMBEDDED_MASK; 817 be_dws_cpu_to_le(wrb, 8); 818 } 819 820 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, 821 struct be_dma_mem *mem) 822 { 823 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); 824 u64 dma = (u64)mem->dma; 825 826 for (i = 0; i < buf_pages; i++) { 827 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); 828 pages[i].hi = cpu_to_le32(upper_32_bits(dma)); 829 dma += PAGE_SIZE_4K; 830 } 831 } 832 833 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) 834 { 835 struct be_dma_mem *mbox_mem = &adapter->mbox_mem; 836 struct be_mcc_wrb *wrb = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; 837 838 memset(wrb, 0, sizeof(*wrb)); 839 return wrb; 840 } 841 842 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) 843 { 844 struct be_queue_info *mccq = &adapter->mcc_obj.q; 845 struct be_mcc_wrb *wrb; 846 847 if (!mccq->created) 848 return NULL; 849 850 if (atomic_read(&mccq->used) >= mccq->len) 851 return NULL; 852 853 wrb = queue_head_node(mccq); 854 queue_head_inc(mccq); 855 atomic_inc(&mccq->used); 856 memset(wrb, 0, sizeof(*wrb)); 857 return wrb; 858 } 859 860 static bool use_mcc(struct be_adapter *adapter) 861 { 862 return adapter->mcc_obj.q.created; 863 } 864 865 /* Must be used only in process context */ 866 static int be_cmd_lock(struct be_adapter *adapter) 867 { 868 if (use_mcc(adapter)) { 869 spin_lock_bh(&adapter->mcc_lock); 870 return 0; 871 } else { 872 return 
mutex_lock_interruptible(&adapter->mbox_lock); 873 } 874 } 875 876 /* Must be used only in process context */ 877 static void be_cmd_unlock(struct be_adapter *adapter) 878 { 879 if (use_mcc(adapter)) 880 return spin_unlock_bh(&adapter->mcc_lock); 881 else 882 return mutex_unlock(&adapter->mbox_lock); 883 } 884 885 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter, 886 struct be_mcc_wrb *wrb) 887 { 888 struct be_mcc_wrb *dest_wrb; 889 890 if (use_mcc(adapter)) { 891 dest_wrb = wrb_from_mccq(adapter); 892 if (!dest_wrb) 893 return NULL; 894 } else { 895 dest_wrb = wrb_from_mbox(adapter); 896 } 897 898 memcpy(dest_wrb, wrb, sizeof(*wrb)); 899 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK)) 900 fill_wrb_tags(dest_wrb, (ulong)embedded_payload(wrb)); 901 902 return dest_wrb; 903 } 904 905 /* Must be used only in process context */ 906 static int be_cmd_notify_wait(struct be_adapter *adapter, 907 struct be_mcc_wrb *wrb) 908 { 909 struct be_mcc_wrb *dest_wrb; 910 int status; 911 912 status = be_cmd_lock(adapter); 913 if (status) 914 return status; 915 916 dest_wrb = be_cmd_copy(adapter, wrb); 917 if (!dest_wrb) { 918 status = -EBUSY; 919 goto unlock; 920 } 921 922 if (use_mcc(adapter)) 923 status = be_mcc_notify_wait(adapter); 924 else 925 status = be_mbox_notify_wait(adapter); 926 927 if (!status) 928 memcpy(wrb, dest_wrb, sizeof(*wrb)); 929 930 unlock: 931 be_cmd_unlock(adapter); 932 return status; 933 } 934 935 /* Tell fw we're about to start firing cmds by writing a 936 * special pattern across the wrb hdr; uses mbox 937 */ 938 int be_cmd_fw_init(struct be_adapter *adapter) 939 { 940 u8 *wrb; 941 int status; 942 943 if (lancer_chip(adapter)) 944 return 0; 945 946 if (mutex_lock_interruptible(&adapter->mbox_lock)) 947 return -1; 948 949 wrb = (u8 *)wrb_from_mbox(adapter); 950 *wrb++ = 0xFF; 951 *wrb++ = 0x12; 952 *wrb++ = 0x34; 953 *wrb++ = 0xFF; 954 *wrb++ = 0xFF; 955 *wrb++ = 0x56; 956 *wrb++ = 0x78; 957 *wrb = 0xFF; 958 959 status = be_mbox_notify_wait(adapter); 960 961 mutex_unlock(&adapter->mbox_lock); 962 return status; 963 } 964 965 /* Tell fw we're done with firing cmds by writing a 966 * special pattern across the wrb hdr; uses mbox 967 */ 968 int be_cmd_fw_clean(struct be_adapter *adapter) 969 { 970 u8 *wrb; 971 int status; 972 973 if (lancer_chip(adapter)) 974 return 0; 975 976 if (mutex_lock_interruptible(&adapter->mbox_lock)) 977 return -1; 978 979 wrb = (u8 *)wrb_from_mbox(adapter); 980 *wrb++ = 0xFF; 981 *wrb++ = 0xAA; 982 *wrb++ = 0xBB; 983 *wrb++ = 0xFF; 984 *wrb++ = 0xFF; 985 *wrb++ = 0xCC; 986 *wrb++ = 0xDD; 987 *wrb = 0xFF; 988 989 status = be_mbox_notify_wait(adapter); 990 991 mutex_unlock(&adapter->mbox_lock); 992 return status; 993 } 994 995 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo) 996 { 997 struct be_mcc_wrb *wrb; 998 struct be_cmd_req_eq_create *req; 999 struct be_dma_mem *q_mem = &eqo->q.dma_mem; 1000 int status, ver = 0; 1001 1002 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1003 return -1; 1004 1005 wrb = wrb_from_mbox(adapter); 1006 req = embedded_payload(wrb); 1007 1008 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1009 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, 1010 NULL); 1011 1012 /* Support for EQ_CREATEv2 available only SH-R onwards */ 1013 if (!(BEx_chip(adapter) || lancer_chip(adapter))) 1014 ver = 2; 1015 1016 req->hdr.version = ver; 1017 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1018 1019 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); 1020 
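	/* Context encoding note (derived from the two statements below): the
	 * EQ uses 4-byte entries (size field 0) and the ring length is
	 * programmed as log2(len / 256), e.g. a 1024-entry EQ encodes as 2.
	 */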
/* 4byte eqe*/ 1021 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); 1022 AMAP_SET_BITS(struct amap_eq_context, count, req->context, 1023 __ilog2_u32(eqo->q.len / 256)); 1024 be_dws_cpu_to_le(req->context, sizeof(req->context)); 1025 1026 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1027 1028 status = be_mbox_notify_wait(adapter); 1029 if (!status) { 1030 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); 1031 1032 eqo->q.id = le16_to_cpu(resp->eq_id); 1033 eqo->msix_idx = 1034 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx; 1035 eqo->q.created = true; 1036 } 1037 1038 mutex_unlock(&adapter->mbox_lock); 1039 return status; 1040 } 1041 1042 /* Use MCC */ 1043 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, 1044 bool permanent, u32 if_handle, u32 pmac_id) 1045 { 1046 struct be_mcc_wrb *wrb; 1047 struct be_cmd_req_mac_query *req; 1048 int status; 1049 1050 spin_lock_bh(&adapter->mcc_lock); 1051 1052 wrb = wrb_from_mccq(adapter); 1053 if (!wrb) { 1054 status = -EBUSY; 1055 goto err; 1056 } 1057 req = embedded_payload(wrb); 1058 1059 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1060 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, 1061 NULL); 1062 req->type = MAC_ADDRESS_TYPE_NETWORK; 1063 if (permanent) { 1064 req->permanent = 1; 1065 } else { 1066 req->if_id = cpu_to_le16((u16)if_handle); 1067 req->pmac_id = cpu_to_le32(pmac_id); 1068 req->permanent = 0; 1069 } 1070 1071 status = be_mcc_notify_wait(adapter); 1072 if (!status) { 1073 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); 1074 1075 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 1076 } 1077 1078 err: 1079 spin_unlock_bh(&adapter->mcc_lock); 1080 return status; 1081 } 1082 1083 /* Uses synchronous MCCQ */ 1084 int be_cmd_pmac_add(struct be_adapter *adapter, const u8 *mac_addr, 1085 u32 if_id, u32 *pmac_id, u32 domain) 1086 { 1087 struct be_mcc_wrb *wrb; 1088 struct be_cmd_req_pmac_add *req; 1089 int status; 1090 1091 spin_lock_bh(&adapter->mcc_lock); 1092 1093 wrb = wrb_from_mccq(adapter); 1094 if (!wrb) { 1095 status = -EBUSY; 1096 goto err; 1097 } 1098 req = embedded_payload(wrb); 1099 1100 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1101 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, 1102 NULL); 1103 1104 req->hdr.domain = domain; 1105 req->if_id = cpu_to_le32(if_id); 1106 memcpy(req->mac_address, mac_addr, ETH_ALEN); 1107 1108 status = be_mcc_notify_wait(adapter); 1109 if (!status) { 1110 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); 1111 1112 *pmac_id = le32_to_cpu(resp->pmac_id); 1113 } 1114 1115 err: 1116 spin_unlock_bh(&adapter->mcc_lock); 1117 1118 if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST) 1119 status = -EPERM; 1120 1121 return status; 1122 } 1123 1124 /* Uses synchronous MCCQ */ 1125 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) 1126 { 1127 struct be_mcc_wrb *wrb; 1128 struct be_cmd_req_pmac_del *req; 1129 int status; 1130 1131 if (pmac_id == -1) 1132 return 0; 1133 1134 spin_lock_bh(&adapter->mcc_lock); 1135 1136 wrb = wrb_from_mccq(adapter); 1137 if (!wrb) { 1138 status = -EBUSY; 1139 goto err; 1140 } 1141 req = embedded_payload(wrb); 1142 1143 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1144 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), 1145 wrb, NULL); 1146 1147 req->hdr.domain = dom; 1148 req->if_id = cpu_to_le32(if_id); 1149 req->pmac_id = cpu_to_le32(pmac_id); 1150 1151 status = be_mcc_notify_wait(adapter); 1152 1153 err: 1154 
spin_unlock_bh(&adapter->mcc_lock); 1155 return status; 1156 } 1157 1158 /* Uses Mbox */ 1159 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, 1160 struct be_queue_info *eq, bool no_delay, int coalesce_wm) 1161 { 1162 struct be_mcc_wrb *wrb; 1163 struct be_cmd_req_cq_create *req; 1164 struct be_dma_mem *q_mem = &cq->dma_mem; 1165 void *ctxt; 1166 int status; 1167 1168 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1169 return -1; 1170 1171 wrb = wrb_from_mbox(adapter); 1172 req = embedded_payload(wrb); 1173 ctxt = &req->context; 1174 1175 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1176 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, 1177 NULL); 1178 1179 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1180 1181 if (BEx_chip(adapter)) { 1182 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, 1183 coalesce_wm); 1184 AMAP_SET_BITS(struct amap_cq_context_be, nodelay, 1185 ctxt, no_delay); 1186 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, 1187 __ilog2_u32(cq->len / 256)); 1188 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); 1189 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); 1190 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); 1191 } else { 1192 req->hdr.version = 2; 1193 req->page_size = 1; /* 1 for 4K */ 1194 1195 /* coalesce-wm field in this cmd is not relevant to Lancer. 1196 * Lancer uses COMMON_MODIFY_CQ to set this field 1197 */ 1198 if (!lancer_chip(adapter)) 1199 AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm, 1200 ctxt, coalesce_wm); 1201 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt, 1202 no_delay); 1203 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt, 1204 __ilog2_u32(cq->len / 256)); 1205 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1); 1206 AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1); 1207 AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id); 1208 } 1209 1210 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1211 1212 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1213 1214 status = be_mbox_notify_wait(adapter); 1215 if (!status) { 1216 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); 1217 1218 cq->id = le16_to_cpu(resp->cq_id); 1219 cq->created = true; 1220 } 1221 1222 mutex_unlock(&adapter->mbox_lock); 1223 1224 return status; 1225 } 1226 1227 static u32 be_encoded_q_len(int q_len) 1228 { 1229 u32 len_encoded = fls(q_len); /* log2(len) + 1 */ 1230 1231 if (len_encoded == 16) 1232 len_encoded = 0; 1233 return len_encoded; 1234 } 1235 1236 static int be_cmd_mccq_ext_create(struct be_adapter *adapter, 1237 struct be_queue_info *mccq, 1238 struct be_queue_info *cq) 1239 { 1240 struct be_mcc_wrb *wrb; 1241 struct be_cmd_req_mcc_ext_create *req; 1242 struct be_dma_mem *q_mem = &mccq->dma_mem; 1243 void *ctxt; 1244 int status; 1245 1246 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1247 return -1; 1248 1249 wrb = wrb_from_mbox(adapter); 1250 req = embedded_payload(wrb); 1251 ctxt = &req->context; 1252 1253 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1254 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, 1255 NULL); 1256 1257 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1258 if (BEx_chip(adapter)) { 1259 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1260 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1261 be_encoded_q_len(mccq->len)); 1262 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1263 } else { 1264 
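		/* Non-BEx chips (Lancer, Skyhawk) use the v1 layout: the CQ
		 * id is carried in the request itself, and the async-CQ
		 * id/valid bits are programmed through the v1 context fields
		 * below.
		 */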
req->hdr.version = 1; 1265 req->cq_id = cpu_to_le16(cq->id); 1266 1267 AMAP_SET_BITS(struct amap_mcc_context_v1, ring_size, ctxt, 1268 be_encoded_q_len(mccq->len)); 1269 AMAP_SET_BITS(struct amap_mcc_context_v1, valid, ctxt, 1); 1270 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_id, 1271 ctxt, cq->id); 1272 AMAP_SET_BITS(struct amap_mcc_context_v1, async_cq_valid, 1273 ctxt, 1); 1274 } 1275 1276 /* Subscribe to Link State, Sliport Event and Group 5 Events 1277 * (bits 1, 5 and 17 set) 1278 */ 1279 req->async_event_bitmap[0] = 1280 cpu_to_le32(BIT(ASYNC_EVENT_CODE_LINK_STATE) | 1281 BIT(ASYNC_EVENT_CODE_GRP_5) | 1282 BIT(ASYNC_EVENT_CODE_QNQ) | 1283 BIT(ASYNC_EVENT_CODE_SLIPORT)); 1284 1285 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1286 1287 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1288 1289 status = be_mbox_notify_wait(adapter); 1290 if (!status) { 1291 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 1292 1293 mccq->id = le16_to_cpu(resp->id); 1294 mccq->created = true; 1295 } 1296 mutex_unlock(&adapter->mbox_lock); 1297 1298 return status; 1299 } 1300 1301 static int be_cmd_mccq_org_create(struct be_adapter *adapter, 1302 struct be_queue_info *mccq, 1303 struct be_queue_info *cq) 1304 { 1305 struct be_mcc_wrb *wrb; 1306 struct be_cmd_req_mcc_create *req; 1307 struct be_dma_mem *q_mem = &mccq->dma_mem; 1308 void *ctxt; 1309 int status; 1310 1311 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1312 return -1; 1313 1314 wrb = wrb_from_mbox(adapter); 1315 req = embedded_payload(wrb); 1316 ctxt = &req->context; 1317 1318 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1319 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, 1320 NULL); 1321 1322 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 1323 1324 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); 1325 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 1326 be_encoded_q_len(mccq->len)); 1327 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); 1328 1329 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 1330 1331 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1332 1333 status = be_mbox_notify_wait(adapter); 1334 if (!status) { 1335 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); 1336 1337 mccq->id = le16_to_cpu(resp->id); 1338 mccq->created = true; 1339 } 1340 1341 mutex_unlock(&adapter->mbox_lock); 1342 return status; 1343 } 1344 1345 int be_cmd_mccq_create(struct be_adapter *adapter, 1346 struct be_queue_info *mccq, struct be_queue_info *cq) 1347 { 1348 int status; 1349 1350 status = be_cmd_mccq_ext_create(adapter, mccq, cq); 1351 if (status && BEx_chip(adapter)) { 1352 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " 1353 "or newer to avoid conflicting priorities between NIC " 1354 "and FCoE traffic"); 1355 status = be_cmd_mccq_org_create(adapter, mccq, cq); 1356 } 1357 return status; 1358 } 1359 1360 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo) 1361 { 1362 struct be_mcc_wrb wrb = {0}; 1363 struct be_cmd_req_eth_tx_create *req; 1364 struct be_queue_info *txq = &txo->q; 1365 struct be_queue_info *cq = &txo->cq; 1366 struct be_dma_mem *q_mem = &txq->dma_mem; 1367 int status, ver = 0; 1368 1369 req = embedded_payload(&wrb); 1370 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1371 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); 1372 1373 if (lancer_chip(adapter)) { 1374 req->hdr.version = 1; 1375 } else if (BEx_chip(adapter)) { 1376 if 
(adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) 1377 req->hdr.version = 2; 1378 } else { /* For SH */ 1379 req->hdr.version = 2; 1380 } 1381 1382 if (req->hdr.version > 0) 1383 req->if_id = cpu_to_le16(adapter->if_handle); 1384 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); 1385 req->ulp_num = BE_ULP1_NUM; 1386 req->type = BE_ETH_TX_RING_TYPE_STANDARD; 1387 req->cq_id = cpu_to_le16(cq->id); 1388 req->queue_size = be_encoded_q_len(txq->len); 1389 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1390 ver = req->hdr.version; 1391 1392 status = be_cmd_notify_wait(adapter, &wrb); 1393 if (!status) { 1394 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb); 1395 1396 txq->id = le16_to_cpu(resp->cid); 1397 if (ver == 2) 1398 txo->db_offset = le32_to_cpu(resp->db_offset); 1399 else 1400 txo->db_offset = DB_TXULP1_OFFSET; 1401 txq->created = true; 1402 } 1403 1404 return status; 1405 } 1406 1407 /* Uses MCC */ 1408 int be_cmd_rxq_create(struct be_adapter *adapter, 1409 struct be_queue_info *rxq, u16 cq_id, u16 frag_size, 1410 u32 if_id, u32 rss, u8 *rss_id) 1411 { 1412 struct be_mcc_wrb *wrb; 1413 struct be_cmd_req_eth_rx_create *req; 1414 struct be_dma_mem *q_mem = &rxq->dma_mem; 1415 int status; 1416 1417 spin_lock_bh(&adapter->mcc_lock); 1418 1419 wrb = wrb_from_mccq(adapter); 1420 if (!wrb) { 1421 status = -EBUSY; 1422 goto err; 1423 } 1424 req = embedded_payload(wrb); 1425 1426 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1427 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); 1428 1429 req->cq_id = cpu_to_le16(cq_id); 1430 req->frag_size = fls(frag_size) - 1; 1431 req->num_pages = 2; 1432 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 1433 req->interface_id = cpu_to_le32(if_id); 1434 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); 1435 req->rss_queue = cpu_to_le32(rss); 1436 1437 status = be_mcc_notify_wait(adapter); 1438 if (!status) { 1439 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); 1440 1441 rxq->id = le16_to_cpu(resp->id); 1442 rxq->created = true; 1443 *rss_id = resp->rss_id; 1444 } 1445 1446 err: 1447 spin_unlock_bh(&adapter->mcc_lock); 1448 return status; 1449 } 1450 1451 /* Generic destroyer function for all types of queues 1452 * Uses Mbox 1453 */ 1454 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 1455 int queue_type) 1456 { 1457 struct be_mcc_wrb *wrb; 1458 struct be_cmd_req_q_destroy *req; 1459 u8 subsys = 0, opcode = 0; 1460 int status; 1461 1462 if (mutex_lock_interruptible(&adapter->mbox_lock)) 1463 return -1; 1464 1465 wrb = wrb_from_mbox(adapter); 1466 req = embedded_payload(wrb); 1467 1468 switch (queue_type) { 1469 case QTYPE_EQ: 1470 subsys = CMD_SUBSYSTEM_COMMON; 1471 opcode = OPCODE_COMMON_EQ_DESTROY; 1472 break; 1473 case QTYPE_CQ: 1474 subsys = CMD_SUBSYSTEM_COMMON; 1475 opcode = OPCODE_COMMON_CQ_DESTROY; 1476 break; 1477 case QTYPE_TXQ: 1478 subsys = CMD_SUBSYSTEM_ETH; 1479 opcode = OPCODE_ETH_TX_DESTROY; 1480 break; 1481 case QTYPE_RXQ: 1482 subsys = CMD_SUBSYSTEM_ETH; 1483 opcode = OPCODE_ETH_RX_DESTROY; 1484 break; 1485 case QTYPE_MCCQ: 1486 subsys = CMD_SUBSYSTEM_COMMON; 1487 opcode = OPCODE_COMMON_MCC_DESTROY; 1488 break; 1489 default: 1490 BUG(); 1491 } 1492 1493 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, 1494 NULL); 1495 req->id = cpu_to_le16(q->id); 1496 1497 status = be_mbox_notify_wait(adapter); 1498 q->created = false; 1499 1500 mutex_unlock(&adapter->mbox_lock); 1501 return status; 1502 } 1503 
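/* Usage sketch, not part of the driver: callers are expected to use the
 * MCC-based be_cmd_rxq_destroy() below for RX queues once the MCC queue
 * exists, and the mailbox-based be_cmd_q_destroy() above for the other
 * queue types. A minimal illustration, with hypothetical rxo/txo objects
 * standing in for the driver's real per-queue structures:
 *
 *	if (rxo->q.created)
 *		be_cmd_rxq_destroy(adapter, &rxo->q);
 *	if (txo->q.created)
 *		be_cmd_q_destroy(adapter, &txo->q, QTYPE_TXQ);
 *
 * The real teardown ordering lives in be_main.c; this is only illustrative.
 */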
1504 /* Uses MCC */ 1505 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) 1506 { 1507 struct be_mcc_wrb *wrb; 1508 struct be_cmd_req_q_destroy *req; 1509 int status; 1510 1511 spin_lock_bh(&adapter->mcc_lock); 1512 1513 wrb = wrb_from_mccq(adapter); 1514 if (!wrb) { 1515 status = -EBUSY; 1516 goto err; 1517 } 1518 req = embedded_payload(wrb); 1519 1520 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1521 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); 1522 req->id = cpu_to_le16(q->id); 1523 1524 status = be_mcc_notify_wait(adapter); 1525 q->created = false; 1526 1527 err: 1528 spin_unlock_bh(&adapter->mcc_lock); 1529 return status; 1530 } 1531 1532 /* Create an rx filtering policy configuration on an i/f 1533 * Will use MBOX only if MCCQ has not been created. 1534 */ 1535 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, 1536 u32 *if_handle, u32 domain) 1537 { 1538 struct be_mcc_wrb wrb = {0}; 1539 struct be_cmd_req_if_create *req; 1540 int status; 1541 1542 req = embedded_payload(&wrb); 1543 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1544 OPCODE_COMMON_NTWK_INTERFACE_CREATE, 1545 sizeof(*req), &wrb, NULL); 1546 req->hdr.domain = domain; 1547 req->capability_flags = cpu_to_le32(cap_flags); 1548 req->enable_flags = cpu_to_le32(en_flags); 1549 req->pmac_invalid = true; 1550 1551 status = be_cmd_notify_wait(adapter, &wrb); 1552 if (!status) { 1553 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb); 1554 1555 *if_handle = le32_to_cpu(resp->interface_id); 1556 1557 /* Hack to retrieve VF's pmac-id on BE3 */ 1558 if (BE3_chip(adapter) && be_virtfn(adapter)) 1559 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id); 1560 } 1561 return status; 1562 } 1563 1564 /* Uses MCCQ if available else MBOX */ 1565 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) 1566 { 1567 struct be_mcc_wrb wrb = {0}; 1568 struct be_cmd_req_if_destroy *req; 1569 int status; 1570 1571 if (interface_id == -1) 1572 return 0; 1573 1574 req = embedded_payload(&wrb); 1575 1576 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1577 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, 1578 sizeof(*req), &wrb, NULL); 1579 req->hdr.domain = domain; 1580 req->interface_id = cpu_to_le32(interface_id); 1581 1582 status = be_cmd_notify_wait(adapter, &wrb); 1583 return status; 1584 } 1585 1586 /* Get stats is a non embedded command: the request is not embedded inside 1587 * WRB but is a separate dma memory block 1588 * Uses asynchronous MCC 1589 */ 1590 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) 1591 { 1592 struct be_mcc_wrb *wrb; 1593 struct be_cmd_req_hdr *hdr; 1594 int status = 0; 1595 1596 spin_lock_bh(&adapter->mcc_lock); 1597 1598 wrb = wrb_from_mccq(adapter); 1599 if (!wrb) { 1600 status = -EBUSY; 1601 goto err; 1602 } 1603 hdr = nonemb_cmd->va; 1604 1605 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, 1606 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, 1607 nonemb_cmd); 1608 1609 /* version 1 of the cmd is not supported only by BE2 */ 1610 if (BE2_chip(adapter)) 1611 hdr->version = 0; 1612 else if (BE3_chip(adapter) || lancer_chip(adapter)) 1613 hdr->version = 1; 1614 else 1615 hdr->version = 2; 1616 1617 status = be_mcc_notify(adapter); 1618 if (status) 1619 goto err; 1620 1621 adapter->stats_cmd_sent = true; 1622 1623 err: 1624 spin_unlock_bh(&adapter->mcc_lock); 1625 return status; 1626 } 1627 1628 /* Lancer Stats */ 1629 int lancer_cmd_get_pport_stats(struct be_adapter *adapter, 1630 
struct be_dma_mem *nonemb_cmd) 1631 { 1632 struct be_mcc_wrb *wrb; 1633 struct lancer_cmd_req_pport_stats *req; 1634 int status = 0; 1635 1636 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS, 1637 CMD_SUBSYSTEM_ETH)) 1638 return -EPERM; 1639 1640 spin_lock_bh(&adapter->mcc_lock); 1641 1642 wrb = wrb_from_mccq(adapter); 1643 if (!wrb) { 1644 status = -EBUSY; 1645 goto err; 1646 } 1647 req = nonemb_cmd->va; 1648 1649 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 1650 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, 1651 wrb, nonemb_cmd); 1652 1653 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); 1654 req->cmd_params.params.reset_stats = 0; 1655 1656 status = be_mcc_notify(adapter); 1657 if (status) 1658 goto err; 1659 1660 adapter->stats_cmd_sent = true; 1661 1662 err: 1663 spin_unlock_bh(&adapter->mcc_lock); 1664 return status; 1665 } 1666 1667 static int be_mac_to_link_speed(int mac_speed) 1668 { 1669 switch (mac_speed) { 1670 case PHY_LINK_SPEED_ZERO: 1671 return 0; 1672 case PHY_LINK_SPEED_10MBPS: 1673 return 10; 1674 case PHY_LINK_SPEED_100MBPS: 1675 return 100; 1676 case PHY_LINK_SPEED_1GBPS: 1677 return 1000; 1678 case PHY_LINK_SPEED_10GBPS: 1679 return 10000; 1680 case PHY_LINK_SPEED_20GBPS: 1681 return 20000; 1682 case PHY_LINK_SPEED_25GBPS: 1683 return 25000; 1684 case PHY_LINK_SPEED_40GBPS: 1685 return 40000; 1686 } 1687 return 0; 1688 } 1689 1690 /* Uses synchronous mcc 1691 * Returns link_speed in Mbps 1692 */ 1693 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed, 1694 u8 *link_status, u32 dom) 1695 { 1696 struct be_mcc_wrb *wrb; 1697 struct be_cmd_req_link_status *req; 1698 int status; 1699 1700 spin_lock_bh(&adapter->mcc_lock); 1701 1702 if (link_status) 1703 *link_status = LINK_DOWN; 1704 1705 wrb = wrb_from_mccq(adapter); 1706 if (!wrb) { 1707 status = -EBUSY; 1708 goto err; 1709 } 1710 req = embedded_payload(wrb); 1711 1712 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1713 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, 1714 sizeof(*req), wrb, NULL); 1715 1716 /* version 1 of the cmd is not supported only by BE2 */ 1717 if (!BE2_chip(adapter)) 1718 req->hdr.version = 1; 1719 1720 req->hdr.domain = dom; 1721 1722 status = be_mcc_notify_wait(adapter); 1723 if (!status) { 1724 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 1725 1726 if (link_speed) { 1727 *link_speed = resp->link_speed ? 
1728 le16_to_cpu(resp->link_speed) * 10 : 1729 be_mac_to_link_speed(resp->mac_speed); 1730 1731 if (!resp->logical_link_status) 1732 *link_speed = 0; 1733 } 1734 if (link_status) 1735 *link_status = resp->logical_link_status; 1736 } 1737 1738 err: 1739 spin_unlock_bh(&adapter->mcc_lock); 1740 return status; 1741 } 1742 1743 /* Uses synchronous mcc */ 1744 int be_cmd_get_die_temperature(struct be_adapter *adapter) 1745 { 1746 struct be_mcc_wrb *wrb; 1747 struct be_cmd_req_get_cntl_addnl_attribs *req; 1748 int status = 0; 1749 1750 spin_lock_bh(&adapter->mcc_lock); 1751 1752 wrb = wrb_from_mccq(adapter); 1753 if (!wrb) { 1754 status = -EBUSY; 1755 goto err; 1756 } 1757 req = embedded_payload(wrb); 1758 1759 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1760 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, 1761 sizeof(*req), wrb, NULL); 1762 1763 status = be_mcc_notify(adapter); 1764 err: 1765 spin_unlock_bh(&adapter->mcc_lock); 1766 return status; 1767 } 1768 1769 /* Uses synchronous mcc */ 1770 int be_cmd_get_fat_dump_len(struct be_adapter *adapter, u32 *dump_size) 1771 { 1772 struct be_mcc_wrb wrb = {0}; 1773 struct be_cmd_req_get_fat *req; 1774 int status; 1775 1776 req = embedded_payload(&wrb); 1777 1778 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1779 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), 1780 &wrb, NULL); 1781 req->fat_operation = cpu_to_le32(QUERY_FAT); 1782 status = be_cmd_notify_wait(adapter, &wrb); 1783 if (!status) { 1784 struct be_cmd_resp_get_fat *resp = embedded_payload(&wrb); 1785 1786 if (dump_size && resp->log_size) 1787 *dump_size = le32_to_cpu(resp->log_size) - 1788 sizeof(u32); 1789 } 1790 return status; 1791 } 1792 1793 int be_cmd_get_fat_dump(struct be_adapter *adapter, u32 buf_len, void *buf) 1794 { 1795 struct be_dma_mem get_fat_cmd; 1796 struct be_mcc_wrb *wrb; 1797 struct be_cmd_req_get_fat *req; 1798 u32 offset = 0, total_size, buf_size, 1799 log_offset = sizeof(u32), payload_len; 1800 int status; 1801 1802 if (buf_len == 0) 1803 return 0; 1804 1805 total_size = buf_len; 1806 1807 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60 * 1024; 1808 get_fat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 1809 get_fat_cmd.size, 1810 &get_fat_cmd.dma, GFP_ATOMIC); 1811 if (!get_fat_cmd.va) 1812 return -ENOMEM; 1813 1814 spin_lock_bh(&adapter->mcc_lock); 1815 1816 while (total_size) { 1817 buf_size = min(total_size, (u32)60 * 1024); 1818 total_size -= buf_size; 1819 1820 wrb = wrb_from_mccq(adapter); 1821 if (!wrb) { 1822 status = -EBUSY; 1823 goto err; 1824 } 1825 req = get_fat_cmd.va; 1826 1827 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; 1828 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 1829 OPCODE_COMMON_MANAGE_FAT, payload_len, 1830 wrb, &get_fat_cmd); 1831 1832 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); 1833 req->read_log_offset = cpu_to_le32(log_offset); 1834 req->read_log_length = cpu_to_le32(buf_size); 1835 req->data_buffer_size = cpu_to_le32(buf_size); 1836 1837 status = be_mcc_notify_wait(adapter); 1838 if (!status) { 1839 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; 1840 1841 memcpy(buf + offset, 1842 resp->data_buffer, 1843 le32_to_cpu(resp->read_log_length)); 1844 } else { 1845 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); 1846 goto err; 1847 } 1848 offset += buf_size; 1849 log_offset += buf_size; 1850 } 1851 err: 1852 spin_unlock_bh(&adapter->mcc_lock); 1853 dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size, 1854 get_fat_cmd.va, get_fat_cmd.dma); 1855 return status; 1856 } 
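/* Illustrative pairing of the two FAT helpers above (a sketch only; the
 * buffer handling shown is assumed rather than copied from the in-tree
 * caller, typically the ethtool register dump). On success, buf holds
 * dump_len bytes of FAT log:
 *
 *	u32 dump_len = 0;
 *	void *buf;
 *	int status;
 *
 *	if (!be_cmd_get_fat_dump_len(adapter, &dump_len) && dump_len) {
 *		buf = kvzalloc(dump_len, GFP_KERNEL);
 *		if (buf)
 *			status = be_cmd_get_fat_dump(adapter, dump_len, buf);
 *		kvfree(buf);
 *	}
 */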

/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
			       NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);

		strscpy(adapter->fw_ver, resp->firmware_version_string,
			sizeof(adapter->fw_ver));
		strscpy(adapter->fw_on_flash, resp->fw_on_flash_version_string,
			sizeof(adapter->fw_on_flash));
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

/* Set the EQ delay interval of the given EQs to the specified values
 * Uses async mcc
 */
static int __be_cmd_modify_eqd(struct be_adapter *adapter,
			       struct be_set_eqd *set_eqd, int num)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0, i;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
			       NULL);

	req->num_eq = cpu_to_le32(num);
	for (i = 0; i < num; i++) {
		req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id);
		req->set_eqd[i].phase = 0;
		req->set_eqd[i].delay_multiplier =
				cpu_to_le32(set_eqd[i].delay_multiplier);
	}

	status = be_mcc_notify(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
		      int num)
{
	int num_eqs, i = 0;

	while (num) {
		num_eqs = min(num, 8);
		__be_cmd_modify_eqd(adapter, &set_eqd[i], num_eqs);
		i += num_eqs;
		num -= num_eqs;
	}

	return 0;
}

/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
		       u32 num, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
			       wrb, NULL);
	req->hdr.domain = domain;

	req->interface_id = if_id;
	req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
	req->num_vlan = num;
	memcpy(req->normal_vlan, vtag_array,
	       req->num_vlan * sizeof(vtag_array[0]));

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

static int __be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
			       wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->if_flags_mask = cpu_to_le32(flags);
	req->if_flags = (value == ON) ? req->if_flags_mask : 0;

	if (flags & BE_IF_FLAGS_MULTICAST) {
		int i;

		/* Reset mcast promisc mode if it is already set, by setting
		 * the mask bit without setting the corresponding flags bit
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    be_if_cap_flags(adapter));
		req->mcast_num = cpu_to_le32(adapter->mc_count);
		for (i = 0; i < adapter->mc_count; i++)
			ether_addr_copy(req->mcast_mac[i].byte,
					adapter->mc_list[i].mac);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}

int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct device *dev = &adapter->pdev->dev;

	if ((flags & be_if_cap_flags(adapter)) != flags) {
		dev_warn(dev, "Cannot set rx filter flags 0x%x\n", flags);
		dev_warn(dev, "Interface is capable of 0x%x flags only\n",
			 be_if_cap_flags(adapter));
	}
	flags &= be_if_cap_flags(adapter);
	if (!flags)
		return -ENOTSUPP;

	return __be_cmd_rx_filter(adapter, flags, value);
}

/* Uses synchronous mcc */
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
			       wrb, NULL);

	req->hdr.version = 1;
	req->tx_flow_control = cpu_to_le16((u16)tx_fc);
	req->rx_flow_control = cpu_to_le16((u16)rx_fc);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);

	if (base_status(status) == MCC_STATUS_FEATURE_NOT_SUPPORTED)
		return -EOPNOTSUPP;

	return status;
}

/* Uses sync mcc */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
2095 req = embedded_payload(wrb); 2096 2097 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2098 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), 2099 wrb, NULL); 2100 2101 status = be_mcc_notify_wait(adapter); 2102 if (!status) { 2103 struct be_cmd_resp_get_flow_control *resp = 2104 embedded_payload(wrb); 2105 2106 *tx_fc = le16_to_cpu(resp->tx_flow_control); 2107 *rx_fc = le16_to_cpu(resp->rx_flow_control); 2108 } 2109 2110 err: 2111 spin_unlock_bh(&adapter->mcc_lock); 2112 return status; 2113 } 2114 2115 /* Uses mbox */ 2116 int be_cmd_query_fw_cfg(struct be_adapter *adapter) 2117 { 2118 struct be_mcc_wrb *wrb; 2119 struct be_cmd_req_query_fw_cfg *req; 2120 int status; 2121 2122 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2123 return -1; 2124 2125 wrb = wrb_from_mbox(adapter); 2126 req = embedded_payload(wrb); 2127 2128 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2129 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, 2130 sizeof(*req), wrb, NULL); 2131 2132 status = be_mbox_notify_wait(adapter); 2133 if (!status) { 2134 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); 2135 2136 adapter->port_num = le32_to_cpu(resp->phys_port); 2137 adapter->function_mode = le32_to_cpu(resp->function_mode); 2138 adapter->function_caps = le32_to_cpu(resp->function_caps); 2139 adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF; 2140 dev_info(&adapter->pdev->dev, 2141 "FW config: function_mode=0x%x, function_caps=0x%x\n", 2142 adapter->function_mode, adapter->function_caps); 2143 } 2144 2145 mutex_unlock(&adapter->mbox_lock); 2146 return status; 2147 } 2148 2149 /* Uses mbox */ 2150 int be_cmd_reset_function(struct be_adapter *adapter) 2151 { 2152 struct be_mcc_wrb *wrb; 2153 struct be_cmd_req_hdr *req; 2154 int status; 2155 2156 if (lancer_chip(adapter)) { 2157 iowrite32(SLI_PORT_CONTROL_IP_MASK, 2158 adapter->db + SLIPORT_CONTROL_OFFSET); 2159 status = lancer_wait_ready(adapter); 2160 if (status) 2161 dev_err(&adapter->pdev->dev, 2162 "Adapter in non recoverable error\n"); 2163 return status; 2164 } 2165 2166 if (mutex_lock_interruptible(&adapter->mbox_lock)) 2167 return -1; 2168 2169 wrb = wrb_from_mbox(adapter); 2170 req = embedded_payload(wrb); 2171 2172 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, 2173 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, 2174 NULL); 2175 2176 status = be_mbox_notify_wait(adapter); 2177 2178 mutex_unlock(&adapter->mbox_lock); 2179 return status; 2180 } 2181 2182 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, 2183 u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey) 2184 { 2185 struct be_mcc_wrb *wrb; 2186 struct be_cmd_req_rss_config *req; 2187 int status; 2188 2189 if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS)) 2190 return 0; 2191 2192 spin_lock_bh(&adapter->mcc_lock); 2193 2194 wrb = wrb_from_mccq(adapter); 2195 if (!wrb) { 2196 status = -EBUSY; 2197 goto err; 2198 } 2199 req = embedded_payload(wrb); 2200 2201 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 2202 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); 2203 2204 req->if_id = cpu_to_le32(adapter->if_handle); 2205 req->enable_rss = cpu_to_le16(rss_hash_opts); 2206 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); 2207 2208 if (!BEx_chip(adapter)) 2209 req->hdr.version = 1; 2210 2211 memcpy(req->cpu_table, rsstable, table_size); 2212 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN); 2213 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); 2214 2215 status = be_mcc_notify_wait(adapter); 2216 err: 2217 spin_unlock_bh(&adapter->mcc_lock); 2218 
return status; 2219 } 2220 2221 /* Uses sync mcc */ 2222 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, 2223 u8 bcn, u8 sts, u8 state) 2224 { 2225 struct be_mcc_wrb *wrb; 2226 struct be_cmd_req_enable_disable_beacon *req; 2227 int status; 2228 2229 spin_lock_bh(&adapter->mcc_lock); 2230 2231 wrb = wrb_from_mccq(adapter); 2232 if (!wrb) { 2233 status = -EBUSY; 2234 goto err; 2235 } 2236 req = embedded_payload(wrb); 2237 2238 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2239 OPCODE_COMMON_ENABLE_DISABLE_BEACON, 2240 sizeof(*req), wrb, NULL); 2241 2242 req->port_num = port_num; 2243 req->beacon_state = state; 2244 req->beacon_duration = bcn; 2245 req->status_duration = sts; 2246 2247 status = be_mcc_notify_wait(adapter); 2248 2249 err: 2250 spin_unlock_bh(&adapter->mcc_lock); 2251 return status; 2252 } 2253 2254 /* Uses sync mcc */ 2255 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) 2256 { 2257 struct be_mcc_wrb *wrb; 2258 struct be_cmd_req_get_beacon_state *req; 2259 int status; 2260 2261 spin_lock_bh(&adapter->mcc_lock); 2262 2263 wrb = wrb_from_mccq(adapter); 2264 if (!wrb) { 2265 status = -EBUSY; 2266 goto err; 2267 } 2268 req = embedded_payload(wrb); 2269 2270 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2271 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), 2272 wrb, NULL); 2273 2274 req->port_num = port_num; 2275 2276 status = be_mcc_notify_wait(adapter); 2277 if (!status) { 2278 struct be_cmd_resp_get_beacon_state *resp = 2279 embedded_payload(wrb); 2280 2281 *state = resp->beacon_state; 2282 } 2283 2284 err: 2285 spin_unlock_bh(&adapter->mcc_lock); 2286 return status; 2287 } 2288 2289 /* Uses sync mcc */ 2290 int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, 2291 u8 page_num, u32 off, u32 len, u8 *data) 2292 { 2293 struct be_dma_mem cmd; 2294 struct be_mcc_wrb *wrb; 2295 struct be_cmd_req_port_type *req; 2296 int status; 2297 2298 if (page_num > TR_PAGE_A2) 2299 return -EINVAL; 2300 2301 cmd.size = sizeof(struct be_cmd_resp_port_type); 2302 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 2303 GFP_ATOMIC); 2304 if (!cmd.va) { 2305 dev_err(&adapter->pdev->dev, "Memory allocation failed\n"); 2306 return -ENOMEM; 2307 } 2308 2309 spin_lock_bh(&adapter->mcc_lock); 2310 2311 wrb = wrb_from_mccq(adapter); 2312 if (!wrb) { 2313 status = -EBUSY; 2314 goto err; 2315 } 2316 req = cmd.va; 2317 2318 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2319 OPCODE_COMMON_READ_TRANSRECV_DATA, 2320 cmd.size, wrb, &cmd); 2321 2322 req->port = cpu_to_le32(adapter->hba_port_num); 2323 req->page_num = cpu_to_le32(page_num); 2324 status = be_mcc_notify_wait(adapter); 2325 if (!status && len > 0) { 2326 struct be_cmd_resp_port_type *resp = cmd.va; 2327 2328 memcpy(data, resp->page_data + off, len); 2329 } 2330 err: 2331 spin_unlock_bh(&adapter->mcc_lock); 2332 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 2333 return status; 2334 } 2335 2336 static int lancer_cmd_write_object(struct be_adapter *adapter, 2337 struct be_dma_mem *cmd, u32 data_size, 2338 u32 data_offset, const char *obj_name, 2339 u32 *data_written, u8 *change_status, 2340 u8 *addn_status) 2341 { 2342 struct be_mcc_wrb *wrb; 2343 struct lancer_cmd_req_write_object *req; 2344 struct lancer_cmd_resp_write_object *resp; 2345 void *ctxt = NULL; 2346 int status; 2347 2348 spin_lock_bh(&adapter->mcc_lock); 2349 adapter->flash_status = 0; 2350 2351 wrb = wrb_from_mccq(adapter); 2352 if (!wrb) { 2353 status = -EBUSY; 
2354 goto err_unlock; 2355 } 2356 2357 req = embedded_payload(wrb); 2358 2359 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2360 OPCODE_COMMON_WRITE_OBJECT, 2361 sizeof(struct lancer_cmd_req_write_object), wrb, 2362 NULL); 2363 2364 ctxt = &req->context; 2365 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2366 write_length, ctxt, data_size); 2367 2368 if (data_size == 0) 2369 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2370 eof, ctxt, 1); 2371 else 2372 AMAP_SET_BITS(struct amap_lancer_write_obj_context, 2373 eof, ctxt, 0); 2374 2375 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 2376 req->write_offset = cpu_to_le32(data_offset); 2377 strscpy(req->object_name, obj_name, sizeof(req->object_name)); 2378 req->descriptor_count = cpu_to_le32(1); 2379 req->buf_len = cpu_to_le32(data_size); 2380 req->addr_low = cpu_to_le32((cmd->dma + 2381 sizeof(struct lancer_cmd_req_write_object)) 2382 & 0xFFFFFFFF); 2383 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + 2384 sizeof(struct lancer_cmd_req_write_object))); 2385 2386 status = be_mcc_notify(adapter); 2387 if (status) 2388 goto err_unlock; 2389 2390 spin_unlock_bh(&adapter->mcc_lock); 2391 2392 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2393 msecs_to_jiffies(60000))) 2394 status = -ETIMEDOUT; 2395 else 2396 status = adapter->flash_status; 2397 2398 resp = embedded_payload(wrb); 2399 if (!status) { 2400 *data_written = le32_to_cpu(resp->actual_write_len); 2401 *change_status = resp->change_status; 2402 } else { 2403 *addn_status = resp->additional_status; 2404 } 2405 2406 return status; 2407 2408 err_unlock: 2409 spin_unlock_bh(&adapter->mcc_lock); 2410 return status; 2411 } 2412 2413 int be_cmd_query_cable_type(struct be_adapter *adapter) 2414 { 2415 u8 page_data[PAGE_DATA_LEN]; 2416 int status; 2417 2418 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2419 0, PAGE_DATA_LEN, page_data); 2420 if (!status) { 2421 switch (adapter->phy.interface_type) { 2422 case PHY_TYPE_QSFP: 2423 adapter->phy.cable_type = 2424 page_data[QSFP_PLUS_CABLE_TYPE_OFFSET]; 2425 break; 2426 case PHY_TYPE_SFP_PLUS_10GB: 2427 adapter->phy.cable_type = 2428 page_data[SFP_PLUS_CABLE_TYPE_OFFSET]; 2429 break; 2430 default: 2431 adapter->phy.cable_type = 0; 2432 break; 2433 } 2434 } 2435 return status; 2436 } 2437 2438 int be_cmd_query_sfp_info(struct be_adapter *adapter) 2439 { 2440 u8 page_data[PAGE_DATA_LEN]; 2441 int status; 2442 2443 status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, 2444 0, PAGE_DATA_LEN, page_data); 2445 if (!status) { 2446 strscpy(adapter->phy.vendor_name, page_data + 2447 SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); 2448 strscpy(adapter->phy.vendor_pn, 2449 page_data + SFP_VENDOR_PN_OFFSET, 2450 SFP_VENDOR_NAME_LEN - 1); 2451 } 2452 2453 return status; 2454 } 2455 2456 static int lancer_cmd_delete_object(struct be_adapter *adapter, 2457 const char *obj_name) 2458 { 2459 struct lancer_cmd_req_delete_object *req; 2460 struct be_mcc_wrb *wrb; 2461 int status; 2462 2463 spin_lock_bh(&adapter->mcc_lock); 2464 2465 wrb = wrb_from_mccq(adapter); 2466 if (!wrb) { 2467 status = -EBUSY; 2468 goto err; 2469 } 2470 2471 req = embedded_payload(wrb); 2472 2473 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2474 OPCODE_COMMON_DELETE_OBJECT, 2475 sizeof(*req), wrb, NULL); 2476 2477 strscpy(req->object_name, obj_name, sizeof(req->object_name)); 2478 2479 status = be_mcc_notify_wait(adapter); 2480 err: 2481 spin_unlock_bh(&adapter->mcc_lock); 2482 return status; 2483 } 2484 2485 int 
lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, 2486 u32 data_size, u32 data_offset, const char *obj_name, 2487 u32 *data_read, u32 *eof, u8 *addn_status) 2488 { 2489 struct be_mcc_wrb *wrb; 2490 struct lancer_cmd_req_read_object *req; 2491 struct lancer_cmd_resp_read_object *resp; 2492 int status; 2493 2494 spin_lock_bh(&adapter->mcc_lock); 2495 2496 wrb = wrb_from_mccq(adapter); 2497 if (!wrb) { 2498 status = -EBUSY; 2499 goto err_unlock; 2500 } 2501 2502 req = embedded_payload(wrb); 2503 2504 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2505 OPCODE_COMMON_READ_OBJECT, 2506 sizeof(struct lancer_cmd_req_read_object), wrb, 2507 NULL); 2508 2509 req->desired_read_len = cpu_to_le32(data_size); 2510 req->read_offset = cpu_to_le32(data_offset); 2511 strcpy(req->object_name, obj_name); 2512 req->descriptor_count = cpu_to_le32(1); 2513 req->buf_len = cpu_to_le32(data_size); 2514 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); 2515 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); 2516 2517 status = be_mcc_notify_wait(adapter); 2518 2519 resp = embedded_payload(wrb); 2520 if (!status) { 2521 *data_read = le32_to_cpu(resp->actual_read_len); 2522 *eof = le32_to_cpu(resp->eof); 2523 } else { 2524 *addn_status = resp->additional_status; 2525 } 2526 2527 err_unlock: 2528 spin_unlock_bh(&adapter->mcc_lock); 2529 return status; 2530 } 2531 2532 static int be_cmd_write_flashrom(struct be_adapter *adapter, 2533 struct be_dma_mem *cmd, u32 flash_type, 2534 u32 flash_opcode, u32 img_offset, u32 buf_size) 2535 { 2536 struct be_mcc_wrb *wrb; 2537 struct be_cmd_write_flashrom *req; 2538 int status; 2539 2540 spin_lock_bh(&adapter->mcc_lock); 2541 adapter->flash_status = 0; 2542 2543 wrb = wrb_from_mccq(adapter); 2544 if (!wrb) { 2545 status = -EBUSY; 2546 goto err_unlock; 2547 } 2548 req = cmd->va; 2549 2550 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2551 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, 2552 cmd); 2553 2554 req->params.op_type = cpu_to_le32(flash_type); 2555 if (flash_type == OPTYPE_OFFSET_SPECIFIED) 2556 req->params.offset = cpu_to_le32(img_offset); 2557 2558 req->params.op_code = cpu_to_le32(flash_opcode); 2559 req->params.data_buf_size = cpu_to_le32(buf_size); 2560 2561 status = be_mcc_notify(adapter); 2562 if (status) 2563 goto err_unlock; 2564 2565 spin_unlock_bh(&adapter->mcc_lock); 2566 2567 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 2568 msecs_to_jiffies(40000))) 2569 status = -ETIMEDOUT; 2570 else 2571 status = adapter->flash_status; 2572 2573 return status; 2574 2575 err_unlock: 2576 spin_unlock_bh(&adapter->mcc_lock); 2577 return status; 2578 } 2579 2580 static int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, 2581 u16 img_optype, u32 img_offset, u32 crc_offset) 2582 { 2583 struct be_cmd_read_flash_crc *req; 2584 struct be_mcc_wrb *wrb; 2585 int status; 2586 2587 spin_lock_bh(&adapter->mcc_lock); 2588 2589 wrb = wrb_from_mccq(adapter); 2590 if (!wrb) { 2591 status = -EBUSY; 2592 goto err; 2593 } 2594 req = embedded_payload(wrb); 2595 2596 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 2597 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), 2598 wrb, NULL); 2599 2600 req->params.op_type = cpu_to_le32(img_optype); 2601 if (img_optype == OPTYPE_OFFSET_SPECIFIED) 2602 req->params.offset = cpu_to_le32(img_offset + crc_offset); 2603 else 2604 req->params.offset = cpu_to_le32(crc_offset); 2605 2606 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); 2607 req->params.data_buf_size = 
cpu_to_le32(0x4); 2608 2609 status = be_mcc_notify_wait(adapter); 2610 if (!status) 2611 memcpy(flashed_crc, req->crc, 4); 2612 2613 err: 2614 spin_unlock_bh(&adapter->mcc_lock); 2615 return status; 2616 } 2617 2618 /* 2619 * Since the cookie is text, add a parsing-skipped space to keep it from 2620 * ever being matched on storage holding this source file. 2621 */ 2622 static const char flash_cookie[32] __nonstring = "*** SE FLAS" "H DIRECTORY *** "; 2623 2624 static bool phy_flashing_required(struct be_adapter *adapter) 2625 { 2626 return (adapter->phy.phy_type == PHY_TYPE_TN_8022 && 2627 adapter->phy.interface_type == PHY_TYPE_BASET_10GB); 2628 } 2629 2630 static bool is_comp_in_ufi(struct be_adapter *adapter, 2631 struct flash_section_info *fsec, int type) 2632 { 2633 int i = 0, img_type = 0; 2634 struct flash_section_info_g2 *fsec_g2 = NULL; 2635 2636 if (BE2_chip(adapter)) 2637 fsec_g2 = (struct flash_section_info_g2 *)fsec; 2638 2639 for (i = 0; i < MAX_FLASH_COMP; i++) { 2640 if (fsec_g2) 2641 img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type); 2642 else 2643 img_type = le32_to_cpu(fsec->fsec_entry[i].type); 2644 2645 if (img_type == type) 2646 return true; 2647 } 2648 return false; 2649 } 2650 2651 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter, 2652 int header_size, 2653 const struct firmware *fw) 2654 { 2655 struct flash_section_info *fsec = NULL; 2656 const u8 *p = fw->data; 2657 2658 p += header_size; 2659 while (p < (fw->data + fw->size)) { 2660 fsec = (struct flash_section_info *)p; 2661 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie))) 2662 return fsec; 2663 p += 32; 2664 } 2665 return NULL; 2666 } 2667 2668 static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p, 2669 u32 img_offset, u32 img_size, int hdr_size, 2670 u16 img_optype, bool *crc_match) 2671 { 2672 u32 crc_offset; 2673 int status; 2674 u8 crc[4]; 2675 2676 status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_offset, 2677 img_size - 4); 2678 if (status) 2679 return status; 2680 2681 crc_offset = hdr_size + img_offset + img_size - 4; 2682 2683 /* Skip flashing, if crc of flashed region matches */ 2684 if (!memcmp(crc, p + crc_offset, 4)) 2685 *crc_match = true; 2686 else 2687 *crc_match = false; 2688 2689 return status; 2690 } 2691 2692 static int be_flash(struct be_adapter *adapter, const u8 *img, 2693 struct be_dma_mem *flash_cmd, int optype, int img_size, 2694 u32 img_offset) 2695 { 2696 u32 flash_op, num_bytes, total_bytes = img_size, bytes_sent = 0; 2697 struct be_cmd_write_flashrom *req = flash_cmd->va; 2698 int status; 2699 2700 while (total_bytes) { 2701 num_bytes = min_t(u32, 32 * 1024, total_bytes); 2702 2703 total_bytes -= num_bytes; 2704 2705 if (!total_bytes) { 2706 if (optype == OPTYPE_PHY_FW) 2707 flash_op = FLASHROM_OPER_PHY_FLASH; 2708 else 2709 flash_op = FLASHROM_OPER_FLASH; 2710 } else { 2711 if (optype == OPTYPE_PHY_FW) 2712 flash_op = FLASHROM_OPER_PHY_SAVE; 2713 else 2714 flash_op = FLASHROM_OPER_SAVE; 2715 } 2716 2717 memcpy(req->data_buf, img, num_bytes); 2718 img += num_bytes; 2719 status = be_cmd_write_flashrom(adapter, flash_cmd, optype, 2720 flash_op, img_offset + 2721 bytes_sent, num_bytes); 2722 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST && 2723 optype == OPTYPE_PHY_FW) 2724 break; 2725 else if (status) 2726 return status; 2727 2728 bytes_sent += num_bytes; 2729 } 2730 return 0; 2731 } 2732 2733 #define NCSI_UPDATE_LOG "NCSI section update is not supported in FW ver %s\n" 2734 static bool 
be_fw_ncsi_supported(char *ver) 2735 { 2736 int v1[4] = {3, 102, 148, 0}; /* Min ver that supports NCSI FW */ 2737 int v2[4]; 2738 int i; 2739 2740 if (sscanf(ver, "%d.%d.%d.%d", &v2[0], &v2[1], &v2[2], &v2[3]) != 4) 2741 return false; 2742 2743 for (i = 0; i < 4; i++) { 2744 if (v1[i] < v2[i]) 2745 return true; 2746 else if (v1[i] > v2[i]) 2747 return false; 2748 } 2749 2750 return true; 2751 } 2752 2753 /* For BE2, BE3 and BE3-R */ 2754 static int be_flash_BEx(struct be_adapter *adapter, 2755 const struct firmware *fw, 2756 struct be_dma_mem *flash_cmd, int num_of_images) 2757 { 2758 int img_hdrs_size = (num_of_images * sizeof(struct image_hdr)); 2759 struct device *dev = &adapter->pdev->dev; 2760 struct flash_section_info *fsec = NULL; 2761 int status, i, filehdr_size, num_comp; 2762 const struct flash_comp *pflashcomp; 2763 bool crc_match; 2764 const u8 *p; 2765 2766 static const struct flash_comp gen3_flash_types[] = { 2767 { BE3_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, 2768 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, 2769 { BE3_REDBOOT_START, OPTYPE_REDBOOT, 2770 BE3_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE}, 2771 { BE3_ISCSI_BIOS_START, OPTYPE_BIOS, 2772 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI}, 2773 { BE3_PXE_BIOS_START, OPTYPE_PXE_BIOS, 2774 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE}, 2775 { BE3_FCOE_BIOS_START, OPTYPE_FCOE_BIOS, 2776 BE3_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE}, 2777 { BE3_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP, 2778 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI}, 2779 { BE3_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE, 2780 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE}, 2781 { BE3_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP, 2782 BE3_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE}, 2783 { BE3_NCSI_START, OPTYPE_NCSI_FW, 2784 BE3_NCSI_COMP_MAX_SIZE, IMAGE_NCSI}, 2785 { BE3_PHY_FW_START, OPTYPE_PHY_FW, 2786 BE3_PHY_FW_COMP_MAX_SIZE, IMAGE_FIRMWARE_PHY} 2787 }; 2788 2789 static const struct flash_comp gen2_flash_types[] = { 2790 { BE2_ISCSI_PRIMARY_IMAGE_START, OPTYPE_ISCSI_ACTIVE, 2791 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_ISCSI}, 2792 { BE2_REDBOOT_START, OPTYPE_REDBOOT, 2793 BE2_REDBOOT_COMP_MAX_SIZE, IMAGE_BOOT_CODE}, 2794 { BE2_ISCSI_BIOS_START, OPTYPE_BIOS, 2795 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_ISCSI}, 2796 { BE2_PXE_BIOS_START, OPTYPE_PXE_BIOS, 2797 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_PXE}, 2798 { BE2_FCOE_BIOS_START, OPTYPE_FCOE_BIOS, 2799 BE2_BIOS_COMP_MAX_SIZE, IMAGE_OPTION_ROM_FCOE}, 2800 { BE2_ISCSI_BACKUP_IMAGE_START, OPTYPE_ISCSI_BACKUP, 2801 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_ISCSI}, 2802 { BE2_FCOE_PRIMARY_IMAGE_START, OPTYPE_FCOE_FW_ACTIVE, 2803 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_FCOE}, 2804 { BE2_FCOE_BACKUP_IMAGE_START, OPTYPE_FCOE_FW_BACKUP, 2805 BE2_COMP_MAX_SIZE, IMAGE_FIRMWARE_BACKUP_FCOE} 2806 }; 2807 2808 if (BE3_chip(adapter)) { 2809 pflashcomp = gen3_flash_types; 2810 filehdr_size = sizeof(struct flash_file_hdr_g3); 2811 num_comp = ARRAY_SIZE(gen3_flash_types); 2812 } else { 2813 pflashcomp = gen2_flash_types; 2814 filehdr_size = sizeof(struct flash_file_hdr_g2); 2815 num_comp = ARRAY_SIZE(gen2_flash_types); 2816 img_hdrs_size = 0; 2817 } 2818 2819 /* Get flash section info*/ 2820 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 2821 if (!fsec) { 2822 dev_err(dev, "Invalid Cookie. 
FW image may be corrupted\n"); 2823 return -1; 2824 } 2825 for (i = 0; i < num_comp; i++) { 2826 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type)) 2827 continue; 2828 2829 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) && 2830 !be_fw_ncsi_supported(adapter->fw_ver)) { 2831 dev_info(dev, NCSI_UPDATE_LOG, adapter->fw_ver); 2832 continue; 2833 } 2834 2835 if (pflashcomp[i].optype == OPTYPE_PHY_FW && 2836 !phy_flashing_required(adapter)) 2837 continue; 2838 2839 if (pflashcomp[i].optype == OPTYPE_REDBOOT) { 2840 status = be_check_flash_crc(adapter, fw->data, 2841 pflashcomp[i].offset, 2842 pflashcomp[i].size, 2843 filehdr_size + 2844 img_hdrs_size, 2845 OPTYPE_REDBOOT, &crc_match); 2846 if (status) { 2847 dev_err(dev, 2848 "Could not get CRC for 0x%x region\n", 2849 pflashcomp[i].optype); 2850 continue; 2851 } 2852 2853 if (crc_match) 2854 continue; 2855 } 2856 2857 p = fw->data + filehdr_size + pflashcomp[i].offset + 2858 img_hdrs_size; 2859 if (p + pflashcomp[i].size > fw->data + fw->size) 2860 return -1; 2861 2862 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype, 2863 pflashcomp[i].size, 0); 2864 if (status) { 2865 dev_err(dev, "Flashing section type 0x%x failed\n", 2866 pflashcomp[i].img_type); 2867 return status; 2868 } 2869 } 2870 return 0; 2871 } 2872 2873 static u16 be_get_img_optype(struct flash_section_entry fsec_entry) 2874 { 2875 u32 img_type = le32_to_cpu(fsec_entry.type); 2876 u16 img_optype = le16_to_cpu(fsec_entry.optype); 2877 2878 if (img_optype != 0xFFFF) 2879 return img_optype; 2880 2881 switch (img_type) { 2882 case IMAGE_FIRMWARE_ISCSI: 2883 img_optype = OPTYPE_ISCSI_ACTIVE; 2884 break; 2885 case IMAGE_BOOT_CODE: 2886 img_optype = OPTYPE_REDBOOT; 2887 break; 2888 case IMAGE_OPTION_ROM_ISCSI: 2889 img_optype = OPTYPE_BIOS; 2890 break; 2891 case IMAGE_OPTION_ROM_PXE: 2892 img_optype = OPTYPE_PXE_BIOS; 2893 break; 2894 case IMAGE_OPTION_ROM_FCOE: 2895 img_optype = OPTYPE_FCOE_BIOS; 2896 break; 2897 case IMAGE_FIRMWARE_BACKUP_ISCSI: 2898 img_optype = OPTYPE_ISCSI_BACKUP; 2899 break; 2900 case IMAGE_NCSI: 2901 img_optype = OPTYPE_NCSI_FW; 2902 break; 2903 case IMAGE_FLASHISM_JUMPVECTOR: 2904 img_optype = OPTYPE_FLASHISM_JUMPVECTOR; 2905 break; 2906 case IMAGE_FIRMWARE_PHY: 2907 img_optype = OPTYPE_SH_PHY_FW; 2908 break; 2909 case IMAGE_REDBOOT_DIR: 2910 img_optype = OPTYPE_REDBOOT_DIR; 2911 break; 2912 case IMAGE_REDBOOT_CONFIG: 2913 img_optype = OPTYPE_REDBOOT_CONFIG; 2914 break; 2915 case IMAGE_UFI_DIR: 2916 img_optype = OPTYPE_UFI_DIR; 2917 break; 2918 default: 2919 break; 2920 } 2921 2922 return img_optype; 2923 } 2924 2925 static int be_flash_skyhawk(struct be_adapter *adapter, 2926 const struct firmware *fw, 2927 struct be_dma_mem *flash_cmd, int num_of_images) 2928 { 2929 int img_hdrs_size = num_of_images * sizeof(struct image_hdr); 2930 bool crc_match, old_fw_img, flash_offset_support = true; 2931 struct device *dev = &adapter->pdev->dev; 2932 struct flash_section_info *fsec = NULL; 2933 u32 img_offset, img_size, img_type; 2934 u16 img_optype, flash_optype; 2935 int status, i, filehdr_size; 2936 const u8 *p; 2937 2938 filehdr_size = sizeof(struct flash_file_hdr_g3); 2939 fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw); 2940 if (!fsec) { 2941 dev_err(dev, "Invalid Cookie. 
FW image may be corrupted\n"); 2942 return -EINVAL; 2943 } 2944 2945 retry_flash: 2946 for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) { 2947 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset); 2948 img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size); 2949 img_type = le32_to_cpu(fsec->fsec_entry[i].type); 2950 img_optype = be_get_img_optype(fsec->fsec_entry[i]); 2951 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF; 2952 2953 if (img_optype == 0xFFFF) 2954 continue; 2955 2956 if (flash_offset_support) 2957 flash_optype = OPTYPE_OFFSET_SPECIFIED; 2958 else 2959 flash_optype = img_optype; 2960 2961 /* Don't bother verifying CRC if an old FW image is being 2962 * flashed 2963 */ 2964 if (old_fw_img) 2965 goto flash; 2966 2967 status = be_check_flash_crc(adapter, fw->data, img_offset, 2968 img_size, filehdr_size + 2969 img_hdrs_size, flash_optype, 2970 &crc_match); 2971 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || 2972 base_status(status) == MCC_STATUS_ILLEGAL_FIELD) { 2973 /* The current FW image on the card does not support 2974 * OFFSET based flashing. Retry using older mechanism 2975 * of OPTYPE based flashing 2976 */ 2977 if (flash_optype == OPTYPE_OFFSET_SPECIFIED) { 2978 flash_offset_support = false; 2979 goto retry_flash; 2980 } 2981 2982 /* The current FW image on the card does not recognize 2983 * the new FLASH op_type. The FW download is partially 2984 * complete. Reboot the server now to enable FW image 2985 * to recognize the new FLASH op_type. To complete the 2986 * remaining process, download the same FW again after 2987 * the reboot. 2988 */ 2989 dev_err(dev, "Flash incomplete. Reset the server\n"); 2990 dev_err(dev, "Download FW image again after reset\n"); 2991 return -EAGAIN; 2992 } else if (status) { 2993 dev_err(dev, "Could not get CRC for 0x%x region\n", 2994 img_optype); 2995 return -EFAULT; 2996 } 2997 2998 if (crc_match) 2999 continue; 3000 3001 flash: 3002 p = fw->data + filehdr_size + img_offset + img_hdrs_size; 3003 if (p + img_size > fw->data + fw->size) 3004 return -1; 3005 3006 status = be_flash(adapter, p, flash_cmd, flash_optype, img_size, 3007 img_offset); 3008 3009 /* The current FW image on the card does not support OFFSET 3010 * based flashing. 
Retry using older mechanism of OPTYPE based 3011 * flashing 3012 */ 3013 if (base_status(status) == MCC_STATUS_ILLEGAL_FIELD && 3014 flash_optype == OPTYPE_OFFSET_SPECIFIED) { 3015 flash_offset_support = false; 3016 goto retry_flash; 3017 } 3018 3019 /* For old FW images ignore ILLEGAL_FIELD error or errors on 3020 * UFI_DIR region 3021 */ 3022 if (old_fw_img && 3023 (base_status(status) == MCC_STATUS_ILLEGAL_FIELD || 3024 (img_optype == OPTYPE_UFI_DIR && 3025 base_status(status) == MCC_STATUS_FAILED))) { 3026 continue; 3027 } else if (status) { 3028 dev_err(dev, "Flashing section type 0x%x failed\n", 3029 img_type); 3030 3031 switch (addl_status(status)) { 3032 case MCC_ADDL_STATUS_MISSING_SIGNATURE: 3033 dev_err(dev, 3034 "Digital signature missing in FW\n"); 3035 return -EINVAL; 3036 case MCC_ADDL_STATUS_INVALID_SIGNATURE: 3037 dev_err(dev, 3038 "Invalid digital signature in FW\n"); 3039 return -EINVAL; 3040 default: 3041 return -EFAULT; 3042 } 3043 } 3044 } 3045 return 0; 3046 } 3047 3048 int lancer_fw_download(struct be_adapter *adapter, 3049 const struct firmware *fw) 3050 { 3051 struct device *dev = &adapter->pdev->dev; 3052 struct be_dma_mem flash_cmd; 3053 const u8 *data_ptr = NULL; 3054 u8 *dest_image_ptr = NULL; 3055 size_t image_size = 0; 3056 u32 chunk_size = 0; 3057 u32 data_written = 0; 3058 u32 offset = 0; 3059 int status = 0; 3060 u8 add_status = 0; 3061 u8 change_status; 3062 3063 if (!IS_ALIGNED(fw->size, sizeof(u32))) { 3064 dev_err(dev, "FW image size should be multiple of 4\n"); 3065 return -EINVAL; 3066 } 3067 3068 flash_cmd.size = sizeof(struct lancer_cmd_req_write_object) 3069 + LANCER_FW_DOWNLOAD_CHUNK; 3070 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3071 GFP_KERNEL); 3072 if (!flash_cmd.va) 3073 return -ENOMEM; 3074 3075 dest_image_ptr = flash_cmd.va + 3076 sizeof(struct lancer_cmd_req_write_object); 3077 image_size = fw->size; 3078 data_ptr = fw->data; 3079 3080 while (image_size) { 3081 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK); 3082 3083 /* Copy the image chunk content. */ 3084 memcpy(dest_image_ptr, data_ptr, chunk_size); 3085 3086 status = lancer_cmd_write_object(adapter, &flash_cmd, 3087 chunk_size, offset, 3088 LANCER_FW_DOWNLOAD_LOCATION, 3089 &data_written, &change_status, 3090 &add_status); 3091 if (status) 3092 break; 3093 3094 offset += data_written; 3095 data_ptr += data_written; 3096 image_size -= data_written; 3097 } 3098 3099 if (!status) { 3100 /* Commit the FW written */ 3101 status = lancer_cmd_write_object(adapter, &flash_cmd, 3102 0, offset, 3103 LANCER_FW_DOWNLOAD_LOCATION, 3104 &data_written, &change_status, 3105 &add_status); 3106 } 3107 3108 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); 3109 if (status) { 3110 dev_err(dev, "Firmware load error\n"); 3111 return be_cmd_status(status); 3112 } 3113 3114 dev_info(dev, "Firmware flashed successfully\n"); 3115 3116 if (change_status == LANCER_FW_RESET_NEEDED) { 3117 dev_info(dev, "Resetting adapter to activate new FW\n"); 3118 status = lancer_physdev_ctrl(adapter, 3119 PHYSDEV_CONTROL_FW_RESET_MASK); 3120 if (status) { 3121 dev_err(dev, "Adapter busy, could not reset FW\n"); 3122 dev_err(dev, "Reboot server to activate new FW\n"); 3123 } 3124 } else if (change_status != LANCER_NO_RESET_NEEDED) { 3125 dev_info(dev, "Reboot server to activate new FW\n"); 3126 } 3127 3128 return 0; 3129 } 3130 3131 /* Check if the flash image file is compatible with the adapter that 3132 * is being flashed. 
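 * The check uses the first letter of the UFI build string (chip family)
 * and the asic_type_rev field, which is compared against the adapter's
 * asic_rev.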
3133 */ 3134 static bool be_check_ufi_compatibility(struct be_adapter *adapter, 3135 struct flash_file_hdr_g3 *fhdr) 3136 { 3137 if (!fhdr) { 3138 dev_err(&adapter->pdev->dev, "Invalid FW UFI file"); 3139 return false; 3140 } 3141 3142 /* First letter of the build version is used to identify 3143 * which chip this image file is meant for. 3144 */ 3145 switch (fhdr->build[0]) { 3146 case BLD_STR_UFI_TYPE_SH: 3147 if (!skyhawk_chip(adapter)) 3148 return false; 3149 break; 3150 case BLD_STR_UFI_TYPE_BE3: 3151 if (!BE3_chip(adapter)) 3152 return false; 3153 break; 3154 case BLD_STR_UFI_TYPE_BE2: 3155 if (!BE2_chip(adapter)) 3156 return false; 3157 break; 3158 default: 3159 return false; 3160 } 3161 3162 /* In BE3 FW images the "asic_type_rev" field doesn't track the 3163 * asic_rev of the chips it is compatible with. 3164 * When asic_type_rev is 0 the image is compatible only with 3165 * pre-BE3-R chips (asic_rev < 0x10) 3166 */ 3167 if (BEx_chip(adapter) && fhdr->asic_type_rev == 0) 3168 return adapter->asic_rev < 0x10; 3169 else 3170 return (fhdr->asic_type_rev >= adapter->asic_rev); 3171 } 3172 3173 int be_fw_download(struct be_adapter *adapter, const struct firmware *fw) 3174 { 3175 struct device *dev = &adapter->pdev->dev; 3176 struct flash_file_hdr_g3 *fhdr3; 3177 struct image_hdr *img_hdr_ptr; 3178 int status = 0, i, num_imgs; 3179 struct be_dma_mem flash_cmd; 3180 3181 fhdr3 = (struct flash_file_hdr_g3 *)fw->data; 3182 if (!be_check_ufi_compatibility(adapter, fhdr3)) { 3183 dev_err(dev, "Flash image is not compatible with adapter\n"); 3184 return -EINVAL; 3185 } 3186 3187 flash_cmd.size = sizeof(struct be_cmd_write_flashrom); 3188 flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma, 3189 GFP_KERNEL); 3190 if (!flash_cmd.va) 3191 return -ENOMEM; 3192 3193 num_imgs = le32_to_cpu(fhdr3->num_imgs); 3194 for (i = 0; i < num_imgs; i++) { 3195 img_hdr_ptr = (struct image_hdr *)(fw->data + 3196 (sizeof(struct flash_file_hdr_g3) + 3197 i * sizeof(struct image_hdr))); 3198 if (!BE2_chip(adapter) && 3199 le32_to_cpu(img_hdr_ptr->imageid) != 1) 3200 continue; 3201 3202 if (skyhawk_chip(adapter)) 3203 status = be_flash_skyhawk(adapter, fw, &flash_cmd, 3204 num_imgs); 3205 else 3206 status = be_flash_BEx(adapter, fw, &flash_cmd, 3207 num_imgs); 3208 } 3209 3210 dma_free_coherent(dev, flash_cmd.size, flash_cmd.va, flash_cmd.dma); 3211 if (!status) 3212 dev_info(dev, "Firmware flashed successfully\n"); 3213 3214 return status; 3215 } 3216 3217 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, 3218 struct be_dma_mem *nonemb_cmd) 3219 { 3220 struct be_mcc_wrb *wrb; 3221 struct be_cmd_req_acpi_wol_magic_config *req; 3222 int status; 3223 3224 spin_lock_bh(&adapter->mcc_lock); 3225 3226 wrb = wrb_from_mccq(adapter); 3227 if (!wrb) { 3228 status = -EBUSY; 3229 goto err; 3230 } 3231 req = nonemb_cmd->va; 3232 3233 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 3234 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), 3235 wrb, nonemb_cmd); 3236 memcpy(req->magic_mac, mac, ETH_ALEN); 3237 3238 status = be_mcc_notify_wait(adapter); 3239 3240 err: 3241 spin_unlock_bh(&adapter->mcc_lock); 3242 return status; 3243 } 3244 3245 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, 3246 u8 loopback_type, u8 enable) 3247 { 3248 struct be_mcc_wrb *wrb; 3249 struct be_cmd_req_set_lmode *req; 3250 int status; 3251 3252 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, 3253 CMD_SUBSYSTEM_LOWLEVEL)) 3254 return -EPERM; 3255 3256 spin_lock_bh(&adapter->mcc_lock); 
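/* SET_LOOPBACK_MODE completes asynchronously: the command is posted with
 * be_mcc_notify() and the caller waits on et_cmd_compl below (bounded by
 * SET_LB_MODE_TIMEOUT) rather than using be_mcc_notify_wait().
 */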
3257 3258 wrb = wrb_from_mccq(adapter); 3259 if (!wrb) { 3260 status = -EBUSY; 3261 goto err_unlock; 3262 } 3263 3264 req = embedded_payload(wrb); 3265 3266 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 3267 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), 3268 wrb, NULL); 3269 3270 req->src_port = port_num; 3271 req->dest_port = port_num; 3272 req->loopback_type = loopback_type; 3273 req->loopback_state = enable; 3274 3275 status = be_mcc_notify(adapter); 3276 if (status) 3277 goto err_unlock; 3278 3279 spin_unlock_bh(&adapter->mcc_lock); 3280 3281 if (!wait_for_completion_timeout(&adapter->et_cmd_compl, 3282 msecs_to_jiffies(SET_LB_MODE_TIMEOUT))) 3283 status = -ETIMEDOUT; 3284 3285 return status; 3286 3287 err_unlock: 3288 spin_unlock_bh(&adapter->mcc_lock); 3289 return status; 3290 } 3291 3292 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, 3293 u32 loopback_type, u32 pkt_size, u32 num_pkts, 3294 u64 pattern) 3295 { 3296 struct be_mcc_wrb *wrb; 3297 struct be_cmd_req_loopback_test *req; 3298 struct be_cmd_resp_loopback_test *resp; 3299 int status; 3300 3301 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_LOOPBACK_TEST, 3302 CMD_SUBSYSTEM_LOWLEVEL)) 3303 return -EPERM; 3304 3305 spin_lock_bh(&adapter->mcc_lock); 3306 3307 wrb = wrb_from_mccq(adapter); 3308 if (!wrb) { 3309 status = -EBUSY; 3310 goto err; 3311 } 3312 3313 req = embedded_payload(wrb); 3314 3315 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 3316 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, 3317 NULL); 3318 3319 req->hdr.timeout = cpu_to_le32(15); 3320 req->pattern = cpu_to_le64(pattern); 3321 req->src_port = cpu_to_le32(port_num); 3322 req->dest_port = cpu_to_le32(port_num); 3323 req->pkt_size = cpu_to_le32(pkt_size); 3324 req->num_pkts = cpu_to_le32(num_pkts); 3325 req->loopback_type = cpu_to_le32(loopback_type); 3326 3327 status = be_mcc_notify(adapter); 3328 if (status) 3329 goto err; 3330 3331 spin_unlock_bh(&adapter->mcc_lock); 3332 3333 wait_for_completion(&adapter->et_cmd_compl); 3334 resp = embedded_payload(wrb); 3335 status = le32_to_cpu(resp->status); 3336 3337 return status; 3338 err: 3339 spin_unlock_bh(&adapter->mcc_lock); 3340 return status; 3341 } 3342 3343 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, 3344 u32 byte_cnt, struct be_dma_mem *cmd) 3345 { 3346 struct be_mcc_wrb *wrb; 3347 struct be_cmd_req_ddrdma_test *req; 3348 int status; 3349 int i, j = 0; 3350 3351 if (!be_cmd_allowed(adapter, OPCODE_LOWLEVEL_HOST_DDR_DMA, 3352 CMD_SUBSYSTEM_LOWLEVEL)) 3353 return -EPERM; 3354 3355 spin_lock_bh(&adapter->mcc_lock); 3356 3357 wrb = wrb_from_mccq(adapter); 3358 if (!wrb) { 3359 status = -EBUSY; 3360 goto err; 3361 } 3362 req = cmd->va; 3363 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, 3364 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, 3365 cmd); 3366 3367 req->pattern = cpu_to_le64(pattern); 3368 req->byte_count = cpu_to_le32(byte_cnt); 3369 for (i = 0; i < byte_cnt; i++) { 3370 req->snd_buff[i] = (u8)(pattern >> (j * 8)); 3371 j++; 3372 if (j > 7) 3373 j = 0; 3374 } 3375 3376 status = be_mcc_notify_wait(adapter); 3377 3378 if (!status) { 3379 struct be_cmd_resp_ddrdma_test *resp; 3380 3381 resp = cmd->va; 3382 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || 3383 resp->snd_err) { 3384 status = -1; 3385 } 3386 } 3387 3388 err: 3389 spin_unlock_bh(&adapter->mcc_lock); 3390 return status; 3391 } 3392 3393 int be_cmd_get_seeprom_data(struct be_adapter *adapter, 3394 struct be_dma_mem *nonemb_cmd) 3395 { 3396 struct 
be_mcc_wrb *wrb; 3397 struct be_cmd_req_seeprom_read *req; 3398 int status; 3399 3400 spin_lock_bh(&adapter->mcc_lock); 3401 3402 wrb = wrb_from_mccq(adapter); 3403 if (!wrb) { 3404 status = -EBUSY; 3405 goto err; 3406 } 3407 req = nonemb_cmd->va; 3408 3409 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3410 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, 3411 nonemb_cmd); 3412 3413 status = be_mcc_notify_wait(adapter); 3414 3415 err: 3416 spin_unlock_bh(&adapter->mcc_lock); 3417 return status; 3418 } 3419 3420 int be_cmd_get_phy_info(struct be_adapter *adapter) 3421 { 3422 struct be_mcc_wrb *wrb; 3423 struct be_cmd_req_get_phy_info *req; 3424 struct be_dma_mem cmd; 3425 int status; 3426 3427 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS, 3428 CMD_SUBSYSTEM_COMMON)) 3429 return -EPERM; 3430 3431 spin_lock_bh(&adapter->mcc_lock); 3432 3433 wrb = wrb_from_mccq(adapter); 3434 if (!wrb) { 3435 status = -EBUSY; 3436 goto err; 3437 } 3438 cmd.size = sizeof(struct be_cmd_req_get_phy_info); 3439 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3440 GFP_ATOMIC); 3441 if (!cmd.va) { 3442 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 3443 status = -ENOMEM; 3444 goto err; 3445 } 3446 3447 req = cmd.va; 3448 3449 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3450 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), 3451 wrb, &cmd); 3452 3453 status = be_mcc_notify_wait(adapter); 3454 if (!status) { 3455 struct be_phy_info *resp_phy_info = 3456 cmd.va + sizeof(struct be_cmd_req_hdr); 3457 3458 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type); 3459 adapter->phy.interface_type = 3460 le16_to_cpu(resp_phy_info->interface_type); 3461 adapter->phy.auto_speeds_supported = 3462 le16_to_cpu(resp_phy_info->auto_speeds_supported); 3463 adapter->phy.fixed_speeds_supported = 3464 le16_to_cpu(resp_phy_info->fixed_speeds_supported); 3465 adapter->phy.misc_params = 3466 le32_to_cpu(resp_phy_info->misc_params); 3467 3468 if (BE2_chip(adapter)) { 3469 adapter->phy.fixed_speeds_supported = 3470 BE_SUPPORTED_SPEED_10GBPS | 3471 BE_SUPPORTED_SPEED_1GBPS; 3472 } 3473 } 3474 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 3475 err: 3476 spin_unlock_bh(&adapter->mcc_lock); 3477 return status; 3478 } 3479 3480 static int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) 3481 { 3482 struct be_mcc_wrb *wrb; 3483 struct be_cmd_req_set_qos *req; 3484 int status; 3485 3486 spin_lock_bh(&adapter->mcc_lock); 3487 3488 wrb = wrb_from_mccq(adapter); 3489 if (!wrb) { 3490 status = -EBUSY; 3491 goto err; 3492 } 3493 3494 req = embedded_payload(wrb); 3495 3496 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3497 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); 3498 3499 req->hdr.domain = domain; 3500 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); 3501 req->max_bps_nic = cpu_to_le32(bps); 3502 3503 status = be_mcc_notify_wait(adapter); 3504 3505 err: 3506 spin_unlock_bh(&adapter->mcc_lock); 3507 return status; 3508 } 3509 3510 int be_cmd_get_cntl_attributes(struct be_adapter *adapter) 3511 { 3512 struct be_mcc_wrb *wrb; 3513 struct be_cmd_req_cntl_attribs *req; 3514 struct be_cmd_resp_cntl_attribs *resp; 3515 int status, i; 3516 int payload_len = max(sizeof(*req), sizeof(*resp)); 3517 struct mgmt_controller_attrib *attribs; 3518 struct be_dma_mem attribs_cmd; 3519 u32 *serial_num; 3520 3521 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3522 return -1; 3523 3524 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); 3525 
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); 3526 attribs_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 3527 attribs_cmd.size, 3528 &attribs_cmd.dma, GFP_ATOMIC); 3529 if (!attribs_cmd.va) { 3530 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 3531 status = -ENOMEM; 3532 goto err; 3533 } 3534 3535 wrb = wrb_from_mbox(adapter); 3536 if (!wrb) { 3537 status = -EBUSY; 3538 goto err; 3539 } 3540 req = attribs_cmd.va; 3541 3542 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3543 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, 3544 wrb, &attribs_cmd); 3545 3546 status = be_mbox_notify_wait(adapter); 3547 if (!status) { 3548 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); 3549 adapter->hba_port_num = attribs->hba_attribs.phy_port; 3550 serial_num = attribs->hba_attribs.controller_serial_number; 3551 for (i = 0; i < CNTL_SERIAL_NUM_WORDS; i++) 3552 adapter->serial_num[i] = le32_to_cpu(serial_num[i]) & 3553 (BIT_MASK(16) - 1); 3554 /* For BEx, since GET_FUNC_CONFIG command is not 3555 * supported, we read funcnum here as a workaround. 3556 */ 3557 if (BEx_chip(adapter)) 3558 adapter->pf_num = attribs->hba_attribs.pci_funcnum; 3559 } 3560 3561 err: 3562 mutex_unlock(&adapter->mbox_lock); 3563 if (attribs_cmd.va) 3564 dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size, 3565 attribs_cmd.va, attribs_cmd.dma); 3566 return status; 3567 } 3568 3569 /* Uses mbox */ 3570 int be_cmd_req_native_mode(struct be_adapter *adapter) 3571 { 3572 struct be_mcc_wrb *wrb; 3573 struct be_cmd_req_set_func_cap *req; 3574 int status; 3575 3576 if (mutex_lock_interruptible(&adapter->mbox_lock)) 3577 return -1; 3578 3579 wrb = wrb_from_mbox(adapter); 3580 if (!wrb) { 3581 status = -EBUSY; 3582 goto err; 3583 } 3584 3585 req = embedded_payload(wrb); 3586 3587 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3588 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, 3589 sizeof(*req), wrb, NULL); 3590 3591 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | 3592 CAPABILITY_BE3_NATIVE_ERX_API); 3593 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); 3594 3595 status = be_mbox_notify_wait(adapter); 3596 if (!status) { 3597 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); 3598 3599 adapter->be3_native = le32_to_cpu(resp->cap_flags) & 3600 CAPABILITY_BE3_NATIVE_ERX_API; 3601 if (!adapter->be3_native) 3602 dev_warn(&adapter->pdev->dev, 3603 "adapter not in advanced mode\n"); 3604 } 3605 err: 3606 mutex_unlock(&adapter->mbox_lock); 3607 return status; 3608 } 3609 3610 /* Get privilege(s) for a function */ 3611 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege, 3612 u32 domain) 3613 { 3614 struct be_mcc_wrb *wrb; 3615 struct be_cmd_req_get_fn_privileges *req; 3616 int status; 3617 3618 spin_lock_bh(&adapter->mcc_lock); 3619 3620 wrb = wrb_from_mccq(adapter); 3621 if (!wrb) { 3622 status = -EBUSY; 3623 goto err; 3624 } 3625 3626 req = embedded_payload(wrb); 3627 3628 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3629 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), 3630 wrb, NULL); 3631 3632 req->hdr.domain = domain; 3633 3634 status = be_mcc_notify_wait(adapter); 3635 if (!status) { 3636 struct be_cmd_resp_get_fn_privileges *resp = 3637 embedded_payload(wrb); 3638 3639 *privilege = le32_to_cpu(resp->privilege_mask); 3640 3641 /* In UMC mode FW does not return right privileges. 3642 * Override with correct privilege equivalent to PF. 
3643 */ 3644 if (BEx_chip(adapter) && be_is_mc(adapter) && 3645 be_physfn(adapter)) 3646 *privilege = MAX_PRIVILEGES; 3647 } 3648 3649 err: 3650 spin_unlock_bh(&adapter->mcc_lock); 3651 return status; 3652 } 3653 3654 /* Set privilege(s) for a function */ 3655 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges, 3656 u32 domain) 3657 { 3658 struct be_mcc_wrb *wrb; 3659 struct be_cmd_req_set_fn_privileges *req; 3660 int status; 3661 3662 spin_lock_bh(&adapter->mcc_lock); 3663 3664 wrb = wrb_from_mccq(adapter); 3665 if (!wrb) { 3666 status = -EBUSY; 3667 goto err; 3668 } 3669 3670 req = embedded_payload(wrb); 3671 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3672 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), 3673 wrb, NULL); 3674 req->hdr.domain = domain; 3675 if (lancer_chip(adapter)) 3676 req->privileges_lancer = cpu_to_le32(privileges); 3677 else 3678 req->privileges = cpu_to_le32(privileges); 3679 3680 status = be_mcc_notify_wait(adapter); 3681 err: 3682 spin_unlock_bh(&adapter->mcc_lock); 3683 return status; 3684 } 3685 3686 /* pmac_id_valid: true => pmac_id is supplied and MAC address is requested. 3687 * pmac_id_valid: false => pmac_id or MAC address is requested. 3688 * If pmac_id is returned, pmac_id_valid is returned as true 3689 */ 3690 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac, 3691 bool *pmac_id_valid, u32 *pmac_id, u32 if_handle, 3692 u8 domain) 3693 { 3694 struct be_mcc_wrb *wrb; 3695 struct be_cmd_req_get_mac_list *req; 3696 int status; 3697 int mac_count; 3698 struct be_dma_mem get_mac_list_cmd; 3699 int i; 3700 3701 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); 3702 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); 3703 get_mac_list_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 3704 get_mac_list_cmd.size, 3705 &get_mac_list_cmd.dma, 3706 GFP_ATOMIC); 3707 3708 if (!get_mac_list_cmd.va) { 3709 dev_err(&adapter->pdev->dev, 3710 "Memory allocation failure during GET_MAC_LIST\n"); 3711 return -ENOMEM; 3712 } 3713 3714 spin_lock_bh(&adapter->mcc_lock); 3715 3716 wrb = wrb_from_mccq(adapter); 3717 if (!wrb) { 3718 status = -EBUSY; 3719 goto out; 3720 } 3721 3722 req = get_mac_list_cmd.va; 3723 3724 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3725 OPCODE_COMMON_GET_MAC_LIST, 3726 get_mac_list_cmd.size, wrb, &get_mac_list_cmd); 3727 req->hdr.domain = domain; 3728 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; 3729 if (*pmac_id_valid) { 3730 req->mac_id = cpu_to_le32(*pmac_id); 3731 req->iface_id = cpu_to_le16(if_handle); 3732 req->perm_override = 0; 3733 } else { 3734 req->perm_override = 1; 3735 } 3736 3737 status = be_mcc_notify_wait(adapter); 3738 if (!status) { 3739 struct be_cmd_resp_get_mac_list *resp = 3740 get_mac_list_cmd.va; 3741 3742 if (*pmac_id_valid) { 3743 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr, 3744 ETH_ALEN); 3745 goto out; 3746 } 3747 3748 mac_count = resp->true_mac_count + resp->pseudo_mac_count; 3749 /* Mac list returned could contain one or more active mac_ids 3750 * or one or more true or pseudo permanent mac addresses. 3751 * If an active mac_id is present, return first active mac_id 3752 * found. 
3753 */ 3754 for (i = 0; i < mac_count; i++) { 3755 struct get_list_macaddr *mac_entry; 3756 u16 mac_addr_size; 3757 u32 mac_id; 3758 3759 mac_entry = &resp->macaddr_list[i]; 3760 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); 3761 /* mac_id is a 32 bit value and mac_addr size 3762 * is 6 bytes 3763 */ 3764 if (mac_addr_size == sizeof(u32)) { 3765 *pmac_id_valid = true; 3766 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; 3767 *pmac_id = le32_to_cpu(mac_id); 3768 goto out; 3769 } 3770 } 3771 /* If no active mac_id found, return first mac addr */ 3772 *pmac_id_valid = false; 3773 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, 3774 ETH_ALEN); 3775 } 3776 3777 out: 3778 spin_unlock_bh(&adapter->mcc_lock); 3779 dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size, 3780 get_mac_list_cmd.va, get_mac_list_cmd.dma); 3781 return status; 3782 } 3783 3784 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, 3785 u8 *mac, u32 if_handle, bool active, u32 domain) 3786 { 3787 if (!active) 3788 be_cmd_get_mac_from_list(adapter, mac, &active, &curr_pmac_id, 3789 if_handle, domain); 3790 if (BEx_chip(adapter)) 3791 return be_cmd_mac_addr_query(adapter, mac, false, 3792 if_handle, curr_pmac_id); 3793 else 3794 /* Fetch the MAC address using pmac_id */ 3795 return be_cmd_get_mac_from_list(adapter, mac, &active, 3796 &curr_pmac_id, 3797 if_handle, domain); 3798 } 3799 3800 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac) 3801 { 3802 int status; 3803 bool pmac_valid = false; 3804 3805 eth_zero_addr(mac); 3806 3807 if (BEx_chip(adapter)) { 3808 if (be_physfn(adapter)) 3809 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 3810 0); 3811 else 3812 status = be_cmd_mac_addr_query(adapter, mac, false, 3813 adapter->if_handle, 0); 3814 } else { 3815 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid, 3816 NULL, adapter->if_handle, 0); 3817 } 3818 3819 return status; 3820 } 3821 3822 /* Uses synchronous MCCQ */ 3823 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, 3824 u8 mac_count, u32 domain) 3825 { 3826 struct be_mcc_wrb *wrb; 3827 struct be_cmd_req_set_mac_list *req; 3828 int status; 3829 struct be_dma_mem cmd; 3830 3831 memset(&cmd, 0, sizeof(struct be_dma_mem)); 3832 cmd.size = sizeof(struct be_cmd_req_set_mac_list); 3833 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 3834 GFP_KERNEL); 3835 if (!cmd.va) 3836 return -ENOMEM; 3837 3838 spin_lock_bh(&adapter->mcc_lock); 3839 3840 wrb = wrb_from_mccq(adapter); 3841 if (!wrb) { 3842 status = -EBUSY; 3843 goto err; 3844 } 3845 3846 req = cmd.va; 3847 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3848 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), 3849 wrb, &cmd); 3850 3851 req->hdr.domain = domain; 3852 req->mac_count = mac_count; 3853 if (mac_count) 3854 memcpy(req->mac, mac_array, ETH_ALEN * mac_count); 3855 3856 status = be_mcc_notify_wait(adapter); 3857 3858 err: 3859 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); 3860 spin_unlock_bh(&adapter->mcc_lock); 3861 return status; 3862 } 3863 3864 /* Wrapper to delete any active MACs and provision the new mac. 3865 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the 3866 * current list are active. 
3867 */ 3868 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom) 3869 { 3870 bool active_mac = false; 3871 u8 old_mac[ETH_ALEN]; 3872 u32 pmac_id; 3873 int status; 3874 3875 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac, 3876 &pmac_id, if_id, dom); 3877 3878 if (!status && active_mac) 3879 be_cmd_pmac_del(adapter, if_id, pmac_id, dom); 3880 3881 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom); 3882 } 3883 3884 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, 3885 u32 domain, u16 intf_id, u16 hsw_mode, u8 spoofchk) 3886 { 3887 struct be_mcc_wrb *wrb; 3888 struct be_cmd_req_set_hsw_config *req; 3889 void *ctxt; 3890 int status; 3891 3892 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_HSW_CONFIG, 3893 CMD_SUBSYSTEM_COMMON)) 3894 return -EPERM; 3895 3896 spin_lock_bh(&adapter->mcc_lock); 3897 3898 wrb = wrb_from_mccq(adapter); 3899 if (!wrb) { 3900 status = -EBUSY; 3901 goto err; 3902 } 3903 3904 req = embedded_payload(wrb); 3905 ctxt = &req->context; 3906 3907 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3908 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, 3909 NULL); 3910 3911 req->hdr.domain = domain; 3912 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); 3913 if (pvid) { 3914 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); 3915 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); 3916 } 3917 if (hsw_mode) { 3918 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, 3919 ctxt, adapter->hba_port_num); 3920 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1); 3921 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type, 3922 ctxt, hsw_mode); 3923 } 3924 3925 /* Enable/disable both mac and vlan spoof checking */ 3926 if (!BEx_chip(adapter) && spoofchk) { 3927 AMAP_SET_BITS(struct amap_set_hsw_context, mac_spoofchk, 3928 ctxt, spoofchk); 3929 AMAP_SET_BITS(struct amap_set_hsw_context, vlan_spoofchk, 3930 ctxt, spoofchk); 3931 } 3932 3933 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3934 status = be_mcc_notify_wait(adapter); 3935 3936 err: 3937 spin_unlock_bh(&adapter->mcc_lock); 3938 return status; 3939 } 3940 3941 /* Get Hyper switch config */ 3942 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, 3943 u32 domain, u16 intf_id, u8 *mode, bool *spoofchk) 3944 { 3945 struct be_mcc_wrb *wrb; 3946 struct be_cmd_req_get_hsw_config *req; 3947 void *ctxt; 3948 int status; 3949 u16 vid; 3950 3951 spin_lock_bh(&adapter->mcc_lock); 3952 3953 wrb = wrb_from_mccq(adapter); 3954 if (!wrb) { 3955 status = -EBUSY; 3956 goto err; 3957 } 3958 3959 req = embedded_payload(wrb); 3960 ctxt = &req->context; 3961 3962 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 3963 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, 3964 NULL); 3965 3966 req->hdr.domain = domain; 3967 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3968 ctxt, intf_id); 3969 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); 3970 3971 if (!BEx_chip(adapter) && mode) { 3972 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, 3973 ctxt, adapter->hba_port_num); 3974 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1); 3975 } 3976 be_dws_cpu_to_le(req->context, sizeof(req->context)); 3977 3978 status = be_mcc_notify_wait(adapter); 3979 if (!status) { 3980 struct be_cmd_resp_get_hsw_config *resp = 3981 embedded_payload(wrb); 3982 3983 be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); 3984 vid = AMAP_GET_BITS(struct 
amap_get_hsw_resp_context, 3985 pvid, &resp->context); 3986 if (pvid) 3987 *pvid = le16_to_cpu(vid); 3988 if (mode) 3989 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3990 port_fwd_type, &resp->context); 3991 if (spoofchk) 3992 *spoofchk = 3993 AMAP_GET_BITS(struct amap_get_hsw_resp_context, 3994 spoofchk, &resp->context); 3995 } 3996 3997 err: 3998 spin_unlock_bh(&adapter->mcc_lock); 3999 return status; 4000 } 4001 4002 static bool be_is_wol_excluded(struct be_adapter *adapter) 4003 { 4004 struct pci_dev *pdev = adapter->pdev; 4005 4006 if (be_virtfn(adapter)) 4007 return true; 4008 4009 switch (pdev->subsystem_device) { 4010 case OC_SUBSYS_DEVICE_ID1: 4011 case OC_SUBSYS_DEVICE_ID2: 4012 case OC_SUBSYS_DEVICE_ID3: 4013 case OC_SUBSYS_DEVICE_ID4: 4014 return true; 4015 default: 4016 return false; 4017 } 4018 } 4019 4020 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) 4021 { 4022 struct be_mcc_wrb *wrb; 4023 struct be_cmd_req_acpi_wol_magic_config_v1 *req; 4024 int status = 0; 4025 struct be_dma_mem cmd; 4026 4027 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 4028 CMD_SUBSYSTEM_ETH)) 4029 return -EPERM; 4030 4031 if (be_is_wol_excluded(adapter)) 4032 return status; 4033 4034 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4035 return -1; 4036 4037 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4038 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1); 4039 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4040 GFP_ATOMIC); 4041 if (!cmd.va) { 4042 dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); 4043 status = -ENOMEM; 4044 goto err; 4045 } 4046 4047 wrb = wrb_from_mbox(adapter); 4048 if (!wrb) { 4049 status = -EBUSY; 4050 goto err; 4051 } 4052 4053 req = cmd.va; 4054 4055 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, 4056 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, 4057 sizeof(*req), wrb, &cmd); 4058 4059 req->hdr.version = 1; 4060 req->query_options = BE_GET_WOL_CAP; 4061 4062 status = be_mbox_notify_wait(adapter); 4063 if (!status) { 4064 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; 4065 4066 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *)cmd.va; 4067 4068 adapter->wol_cap = resp->wol_settings; 4069 4070 /* Non-zero macaddr indicates WOL is enabled */ 4071 if (adapter->wol_cap & BE_WOL_CAP && 4072 !is_zero_ether_addr(resp->magic_mac)) 4073 adapter->wol_en = true; 4074 } 4075 err: 4076 mutex_unlock(&adapter->mbox_lock); 4077 if (cmd.va) 4078 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 4079 cmd.dma); 4080 return status; 4081 4082 } 4083 4084 int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level) 4085 { 4086 struct be_dma_mem extfat_cmd; 4087 struct be_fat_conf_params *cfgs; 4088 int status; 4089 int i, j; 4090 4091 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4092 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4093 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 4094 extfat_cmd.size, &extfat_cmd.dma, 4095 GFP_ATOMIC); 4096 if (!extfat_cmd.va) 4097 return -ENOMEM; 4098 4099 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 4100 if (status) 4101 goto err; 4102 4103 cfgs = (struct be_fat_conf_params *) 4104 (extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); 4105 for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { 4106 u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); 4107 4108 for (j = 0; j < num_modes; j++) { 4109 if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) 4110 cfgs->module[i].trace_lvl[j].dbg_lvl = 4111 cpu_to_le32(level); 
4112 } 4113 } 4114 4115 status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); 4116 err: 4117 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, 4118 extfat_cmd.dma); 4119 return status; 4120 } 4121 4122 int be_cmd_get_fw_log_level(struct be_adapter *adapter) 4123 { 4124 struct be_dma_mem extfat_cmd; 4125 struct be_fat_conf_params *cfgs; 4126 int status, j; 4127 int level = 0; 4128 4129 memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); 4130 extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); 4131 extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, 4132 extfat_cmd.size, &extfat_cmd.dma, 4133 GFP_ATOMIC); 4134 4135 if (!extfat_cmd.va) { 4136 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", 4137 __func__); 4138 goto err; 4139 } 4140 4141 status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); 4142 if (!status) { 4143 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + 4144 sizeof(struct be_cmd_resp_hdr)); 4145 4146 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) { 4147 if (cfgs->module[0].trace_lvl[j].mode == MODE_UART) 4148 level = cfgs->module[0].trace_lvl[j].dbg_lvl; 4149 } 4150 } 4151 dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va, 4152 extfat_cmd.dma); 4153 err: 4154 return level; 4155 } 4156 4157 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter, 4158 struct be_dma_mem *cmd) 4159 { 4160 struct be_mcc_wrb *wrb; 4161 struct be_cmd_req_get_ext_fat_caps *req; 4162 int status; 4163 4164 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES, 4165 CMD_SUBSYSTEM_COMMON)) 4166 return -EPERM; 4167 4168 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4169 return -1; 4170 4171 wrb = wrb_from_mbox(adapter); 4172 if (!wrb) { 4173 status = -EBUSY; 4174 goto err; 4175 } 4176 4177 req = cmd->va; 4178 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4179 OPCODE_COMMON_GET_EXT_FAT_CAPABILITIES, 4180 cmd->size, wrb, cmd); 4181 req->parameter_type = cpu_to_le32(1); 4182 4183 status = be_mbox_notify_wait(adapter); 4184 err: 4185 mutex_unlock(&adapter->mbox_lock); 4186 return status; 4187 } 4188 4189 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter, 4190 struct be_dma_mem *cmd, 4191 struct be_fat_conf_params *configs) 4192 { 4193 struct be_mcc_wrb *wrb; 4194 struct be_cmd_req_set_ext_fat_caps *req; 4195 int status; 4196 4197 spin_lock_bh(&adapter->mcc_lock); 4198 4199 wrb = wrb_from_mccq(adapter); 4200 if (!wrb) { 4201 status = -EBUSY; 4202 goto err; 4203 } 4204 4205 req = cmd->va; 4206 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); 4207 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4208 OPCODE_COMMON_SET_EXT_FAT_CAPABILITIES, 4209 cmd->size, wrb, cmd); 4210 4211 status = be_mcc_notify_wait(adapter); 4212 err: 4213 spin_unlock_bh(&adapter->mcc_lock); 4214 return status; 4215 } 4216 4217 int be_cmd_query_port_name(struct be_adapter *adapter) 4218 { 4219 struct be_cmd_req_get_port_name *req; 4220 struct be_mcc_wrb *wrb; 4221 int status; 4222 4223 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4224 return -1; 4225 4226 wrb = wrb_from_mbox(adapter); 4227 req = embedded_payload(wrb); 4228 4229 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4230 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, 4231 NULL); 4232 if (!BEx_chip(adapter)) 4233 req->hdr.version = 1; 4234 4235 status = be_mbox_notify_wait(adapter); 4236 if (!status) { 4237 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb); 4238 4239 
adapter->port_name = resp->port_name[adapter->hba_port_num]; 4240 } else { 4241 adapter->port_name = adapter->hba_port_num + '0'; 4242 } 4243 4244 mutex_unlock(&adapter->mbox_lock); 4245 return status; 4246 } 4247 4248 /* When more than 1 NIC descriptor is present in the descriptor list, 4249 * the caller must specify the pf_num to obtain the NIC descriptor 4250 * corresponding to its PCI function. 4251 * get_vft must be true when the caller wants the VF-template desc of the 4252 * PF-pool. 4253 * The pf_num should be set to PF_NUM_IGNORE when the caller knows 4254 * that only its own NIC descriptor is present in the descriptor list. 4255 */ 4256 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count, 4257 bool get_vft, u8 pf_num) 4258 { 4259 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 4260 struct be_nic_res_desc *nic; 4261 int i; 4262 4263 for (i = 0; i < desc_count; i++) { 4264 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 || 4265 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) { 4266 nic = (struct be_nic_res_desc *)hdr; 4267 4268 if ((pf_num == PF_NUM_IGNORE || 4269 nic->pf_num == pf_num) && 4270 (!get_vft || nic->flags & BIT(VFT_SHIFT))) 4271 return nic; 4272 } 4273 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 4274 hdr = (void *)hdr + hdr->desc_len; 4275 } 4276 return NULL; 4277 } 4278 4279 static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count, 4280 u8 pf_num) 4281 { 4282 return be_get_nic_desc(buf, desc_count, true, pf_num); 4283 } 4284 4285 static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count, 4286 u8 pf_num) 4287 { 4288 return be_get_nic_desc(buf, desc_count, false, pf_num); 4289 } 4290 4291 static struct be_pcie_res_desc *be_get_pcie_desc(u8 *buf, u32 desc_count, 4292 u8 pf_num) 4293 { 4294 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 4295 struct be_pcie_res_desc *pcie; 4296 int i; 4297 4298 for (i = 0; i < desc_count; i++) { 4299 if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || 4300 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) { 4301 pcie = (struct be_pcie_res_desc *)hdr; 4302 if (pcie->pf_num == pf_num) 4303 return pcie; 4304 } 4305 4306 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0; 4307 hdr = (void *)hdr + hdr->desc_len; 4308 } 4309 return NULL; 4310 } 4311 4312 static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count) 4313 { 4314 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 4315 int i; 4316 4317 for (i = 0; i < desc_count; i++) { 4318 if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1) 4319 return (struct be_port_res_desc *)hdr; 4320 4321 hdr->desc_len = hdr->desc_len ?
: RESOURCE_DESC_SIZE_V0; 4322 hdr = (void *)hdr + hdr->desc_len; 4323 } 4324 return NULL; 4325 } 4326 4327 static void be_copy_nic_desc(struct be_resources *res, 4328 struct be_nic_res_desc *desc) 4329 { 4330 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count); 4331 res->max_vlans = le16_to_cpu(desc->vlan_count); 4332 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count); 4333 res->max_tx_qs = le16_to_cpu(desc->txq_count); 4334 res->max_rss_qs = le16_to_cpu(desc->rssq_count); 4335 res->max_rx_qs = le16_to_cpu(desc->rq_count); 4336 res->max_evt_qs = le16_to_cpu(desc->eq_count); 4337 res->max_cq_count = le16_to_cpu(desc->cq_count); 4338 res->max_iface_count = le16_to_cpu(desc->iface_count); 4339 res->max_mcc_count = le16_to_cpu(desc->mcc_count); 4340 /* Clear flags that driver is not interested in */ 4341 res->if_cap_flags = le32_to_cpu(desc->cap_flags) & 4342 BE_IF_CAP_FLAGS_WANT; 4343 } 4344 4345 /* Uses Mbox */ 4346 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res) 4347 { 4348 struct be_mcc_wrb *wrb; 4349 struct be_cmd_req_get_func_config *req; 4350 int status; 4351 struct be_dma_mem cmd; 4352 4353 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4354 return -1; 4355 4356 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4357 cmd.size = sizeof(struct be_cmd_resp_get_func_config); 4358 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4359 GFP_ATOMIC); 4360 if (!cmd.va) { 4361 dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); 4362 status = -ENOMEM; 4363 goto err; 4364 } 4365 4366 wrb = wrb_from_mbox(adapter); 4367 if (!wrb) { 4368 status = -EBUSY; 4369 goto err; 4370 } 4371 4372 req = cmd.va; 4373 4374 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4375 OPCODE_COMMON_GET_FUNC_CONFIG, 4376 cmd.size, wrb, &cmd); 4377 4378 if (skyhawk_chip(adapter)) 4379 req->hdr.version = 1; 4380 4381 status = be_mbox_notify_wait(adapter); 4382 if (!status) { 4383 struct be_cmd_resp_get_func_config *resp = cmd.va; 4384 u32 desc_count = le32_to_cpu(resp->desc_count); 4385 struct be_nic_res_desc *desc; 4386 4387 /* GET_FUNC_CONFIG returns resource descriptors of the 4388 * current function only. So, pf_num should be set to 4389 * PF_NUM_IGNORE. 4390 */ 4391 desc = be_get_func_nic_desc(resp->func_param, desc_count, 4392 PF_NUM_IGNORE); 4393 if (!desc) { 4394 status = -EINVAL; 4395 goto err; 4396 } 4397 4398 /* Store pf_num & vf_num for later use in GET_PROFILE_CONFIG */ 4399 adapter->pf_num = desc->pf_num; 4400 adapter->vf_num = desc->vf_num; 4401 4402 if (res) 4403 be_copy_nic_desc(res, desc); 4404 } 4405 err: 4406 mutex_unlock(&adapter->mbox_lock); 4407 if (cmd.va) 4408 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 4409 cmd.dma); 4410 return status; 4411 } 4412 4413 /* This routine returns a list of all the NIC PF_nums in the adapter */ 4414 static u16 be_get_nic_pf_num_list(u8 *buf, u32 desc_count, u16 *nic_pf_nums) 4415 { 4416 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf; 4417 struct be_pcie_res_desc *pcie = NULL; 4418 int i; 4419 u16 nic_pf_count = 0; 4420 4421 for (i = 0; i < desc_count; i++) { 4422 if (hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 || 4423 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1) { 4424 pcie = (struct be_pcie_res_desc *)hdr; 4425 if (pcie->pf_state && (pcie->pf_type == MISSION_NIC || 4426 pcie->pf_type == MISSION_RDMA)) { 4427 nic_pf_nums[nic_pf_count++] = pcie->pf_num; 4428 } 4429 } 4430 4431 hdr->desc_len = hdr->desc_len ? 
: RESOURCE_DESC_SIZE_V0; 4432 hdr = (void *)hdr + hdr->desc_len; 4433 } 4434 return nic_pf_count; 4435 } 4436 4437 /* Will use MBOX only if MCCQ has not been created */ 4438 int be_cmd_get_profile_config(struct be_adapter *adapter, 4439 struct be_resources *res, 4440 struct be_port_resources *port_res, 4441 u8 profile_type, u8 query, u8 domain) 4442 { 4443 struct be_cmd_resp_get_profile_config *resp; 4444 struct be_cmd_req_get_profile_config *req; 4445 struct be_nic_res_desc *vf_res; 4446 struct be_pcie_res_desc *pcie; 4447 struct be_port_res_desc *port; 4448 struct be_nic_res_desc *nic; 4449 struct be_mcc_wrb wrb = {0}; 4450 struct be_dma_mem cmd; 4451 u16 desc_count; 4452 int status; 4453 4454 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4455 cmd.size = sizeof(struct be_cmd_resp_get_profile_config); 4456 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4457 GFP_ATOMIC); 4458 if (!cmd.va) 4459 return -ENOMEM; 4460 4461 req = cmd.va; 4462 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4463 OPCODE_COMMON_GET_PROFILE_CONFIG, 4464 cmd.size, &wrb, &cmd); 4465 4466 if (!lancer_chip(adapter)) 4467 req->hdr.version = 1; 4468 req->type = profile_type; 4469 req->hdr.domain = domain; 4470 4471 /* When QUERY_MODIFIABLE_FIELDS_TYPE bit is set, cmd returns the 4472 * descriptors with all bits set to "1" for the fields which can be 4473 * modified using SET_PROFILE_CONFIG cmd. 4474 */ 4475 if (query == RESOURCE_MODIFIABLE) 4476 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; 4477 4478 status = be_cmd_notify_wait(adapter, &wrb); 4479 if (status) 4480 goto err; 4481 4482 resp = cmd.va; 4483 desc_count = le16_to_cpu(resp->desc_count); 4484 4485 if (port_res) { 4486 u16 nic_pf_cnt = 0, i; 4487 u16 nic_pf_num_list[MAX_NIC_FUNCS]; 4488 4489 nic_pf_cnt = be_get_nic_pf_num_list(resp->func_param, 4490 desc_count, 4491 nic_pf_num_list); 4492 4493 for (i = 0; i < nic_pf_cnt; i++) { 4494 nic = be_get_func_nic_desc(resp->func_param, desc_count, 4495 nic_pf_num_list[i]); 4496 if (nic->link_param == adapter->port_num) { 4497 port_res->nic_pfs++; 4498 pcie = be_get_pcie_desc(resp->func_param, 4499 desc_count, 4500 nic_pf_num_list[i]); 4501 port_res->max_vfs += le16_to_cpu(pcie->num_vfs); 4502 } 4503 } 4504 goto err; 4505 } 4506 4507 pcie = be_get_pcie_desc(resp->func_param, desc_count, 4508 adapter->pf_num); 4509 if (pcie) 4510 res->max_vfs = le16_to_cpu(pcie->num_vfs); 4511 4512 port = be_get_port_desc(resp->func_param, desc_count); 4513 if (port) 4514 adapter->mc_type = port->mc_type; 4515 4516 nic = be_get_func_nic_desc(resp->func_param, desc_count, 4517 adapter->pf_num); 4518 if (nic) 4519 be_copy_nic_desc(res, nic); 4520 4521 vf_res = be_get_vft_desc(resp->func_param, desc_count, 4522 adapter->pf_num); 4523 if (vf_res) 4524 res->vf_if_cap_flags = vf_res->cap_flags; 4525 err: 4526 if (cmd.va) 4527 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 4528 cmd.dma); 4529 return status; 4530 } 4531 4532 /* Will use MBOX only if MCCQ has not been created */ 4533 static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc, 4534 int size, int count, u8 version, u8 domain) 4535 { 4536 struct be_cmd_req_set_profile_config *req; 4537 struct be_mcc_wrb wrb = {0}; 4538 struct be_dma_mem cmd; 4539 int status; 4540 4541 memset(&cmd, 0, sizeof(struct be_dma_mem)); 4542 cmd.size = sizeof(struct be_cmd_req_set_profile_config); 4543 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, 4544 GFP_ATOMIC); 4545 if (!cmd.va) 4546 return -ENOMEM; 4547 4548 req = cmd.va; 4549 
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4550 OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size, 4551 &wrb, &cmd); 4552 req->hdr.version = version; 4553 req->hdr.domain = domain; 4554 req->desc_count = cpu_to_le32(count); 4555 memcpy(req->desc, desc, size); 4556 4557 status = be_cmd_notify_wait(adapter, &wrb); 4558 4559 if (cmd.va) 4560 dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, 4561 cmd.dma); 4562 return status; 4563 } 4564 4565 /* Mark all fields invalid */ 4566 static void be_reset_nic_desc(struct be_nic_res_desc *nic) 4567 { 4568 memset(nic, 0, sizeof(*nic)); 4569 nic->unicast_mac_count = 0xFFFF; 4570 nic->mcc_count = 0xFFFF; 4571 nic->vlan_count = 0xFFFF; 4572 nic->mcast_mac_count = 0xFFFF; 4573 nic->txq_count = 0xFFFF; 4574 nic->rq_count = 0xFFFF; 4575 nic->rssq_count = 0xFFFF; 4576 nic->lro_count = 0xFFFF; 4577 nic->cq_count = 0xFFFF; 4578 nic->toe_conn_count = 0xFFFF; 4579 nic->eq_count = 0xFFFF; 4580 nic->iface_count = 0xFFFF; 4581 nic->link_param = 0xFF; 4582 nic->channel_id_param = cpu_to_le16(0xF000); 4583 nic->acpi_params = 0xFF; 4584 nic->wol_param = 0x0F; 4585 nic->tunnel_iface_count = 0xFFFF; 4586 nic->direct_tenant_iface_count = 0xFFFF; 4587 nic->bw_min = 0xFFFFFFFF; 4588 nic->bw_max = 0xFFFFFFFF; 4589 } 4590 4591 /* Mark all fields invalid */ 4592 static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie) 4593 { 4594 memset(pcie, 0, sizeof(*pcie)); 4595 pcie->sriov_state = 0xFF; 4596 pcie->pf_state = 0xFF; 4597 pcie->pf_type = 0xFF; 4598 pcie->num_vfs = 0xFFFF; 4599 } 4600 4601 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed, 4602 u8 domain) 4603 { 4604 struct be_nic_res_desc nic_desc; 4605 u32 bw_percent; 4606 u16 version = 0; 4607 4608 if (BE3_chip(adapter)) 4609 return be_cmd_set_qos(adapter, max_rate / 10, domain); 4610 4611 be_reset_nic_desc(&nic_desc); 4612 nic_desc.pf_num = adapter->pf_num; 4613 nic_desc.vf_num = domain; 4614 nic_desc.bw_min = 0; 4615 if (lancer_chip(adapter)) { 4616 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0; 4617 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0; 4618 nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) | 4619 (1 << NOSV_SHIFT); 4620 nic_desc.bw_max = cpu_to_le32(max_rate / 10); 4621 } else { 4622 version = 1; 4623 nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 4624 nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 4625 nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 4626 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100; 4627 nic_desc.bw_max = cpu_to_le32(bw_percent); 4628 } 4629 4630 return be_cmd_set_profile_config(adapter, &nic_desc, 4631 nic_desc.hdr.desc_len, 4632 1, version, domain); 4633 } 4634 4635 int be_cmd_set_sriov_config(struct be_adapter *adapter, 4636 struct be_resources pool_res, u16 num_vfs, 4637 struct be_resources *vft_res) 4638 { 4639 struct { 4640 struct be_pcie_res_desc pcie; 4641 struct be_nic_res_desc nic_vft; 4642 } __packed desc; 4643 4644 /* PF PCIE descriptor */ 4645 be_reset_pcie_desc(&desc.pcie); 4646 desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1; 4647 desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 4648 desc.pcie.flags = BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 4649 desc.pcie.pf_num = adapter->pdev->devfn; 4650 desc.pcie.sriov_state = num_vfs ? 
1 : 0; 4651 desc.pcie.num_vfs = cpu_to_le16(num_vfs); 4652 4653 /* VF NIC Template descriptor */ 4654 be_reset_nic_desc(&desc.nic_vft); 4655 desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1; 4656 desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 4657 desc.nic_vft.flags = vft_res->flags | BIT(VFT_SHIFT) | 4658 BIT(IMM_SHIFT) | BIT(NOSV_SHIFT); 4659 desc.nic_vft.pf_num = adapter->pdev->devfn; 4660 desc.nic_vft.vf_num = 0; 4661 desc.nic_vft.cap_flags = cpu_to_le32(vft_res->vf_if_cap_flags); 4662 desc.nic_vft.rq_count = cpu_to_le16(vft_res->max_rx_qs); 4663 desc.nic_vft.txq_count = cpu_to_le16(vft_res->max_tx_qs); 4664 desc.nic_vft.rssq_count = cpu_to_le16(vft_res->max_rss_qs); 4665 desc.nic_vft.cq_count = cpu_to_le16(vft_res->max_cq_count); 4666 4667 if (vft_res->max_uc_mac) 4668 desc.nic_vft.unicast_mac_count = 4669 cpu_to_le16(vft_res->max_uc_mac); 4670 if (vft_res->max_vlans) 4671 desc.nic_vft.vlan_count = cpu_to_le16(vft_res->max_vlans); 4672 if (vft_res->max_iface_count) 4673 desc.nic_vft.iface_count = 4674 cpu_to_le16(vft_res->max_iface_count); 4675 if (vft_res->max_mcc_count) 4676 desc.nic_vft.mcc_count = cpu_to_le16(vft_res->max_mcc_count); 4677 4678 return be_cmd_set_profile_config(adapter, &desc, 4679 2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0); 4680 } 4681 4682 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op) 4683 { 4684 struct be_mcc_wrb *wrb; 4685 struct be_cmd_req_manage_iface_filters *req; 4686 int status; 4687 4688 if (iface == 0xFFFFFFFF) 4689 return -1; 4690 4691 spin_lock_bh(&adapter->mcc_lock); 4692 4693 wrb = wrb_from_mccq(adapter); 4694 if (!wrb) { 4695 status = -EBUSY; 4696 goto err; 4697 } 4698 req = embedded_payload(wrb); 4699 4700 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4701 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), 4702 wrb, NULL); 4703 req->op = op; 4704 req->target_iface_id = cpu_to_le32(iface); 4705 4706 status = be_mcc_notify_wait(adapter); 4707 err: 4708 spin_unlock_bh(&adapter->mcc_lock); 4709 return status; 4710 } 4711 4712 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port) 4713 { 4714 struct be_port_res_desc port_desc; 4715 4716 memset(&port_desc, 0, sizeof(port_desc)); 4717 port_desc.hdr.desc_type = PORT_RESOURCE_DESC_TYPE_V1; 4718 port_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1; 4719 port_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT); 4720 port_desc.link_num = adapter->hba_port_num; 4721 if (port) { 4722 port_desc.nv_flags = NV_TYPE_VXLAN | (1 << SOCVID_SHIFT) | 4723 (1 << RCVID_SHIFT); 4724 port_desc.nv_port = swab16(port); 4725 } else { 4726 port_desc.nv_flags = NV_TYPE_DISABLED; 4727 port_desc.nv_port = 0; 4728 } 4729 4730 return be_cmd_set_profile_config(adapter, &port_desc, 4731 RESOURCE_DESC_SIZE_V1, 1, 1, 0); 4732 } 4733 4734 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg, 4735 int vf_num) 4736 { 4737 struct be_mcc_wrb *wrb; 4738 struct be_cmd_req_get_iface_list *req; 4739 struct be_cmd_resp_get_iface_list *resp; 4740 int status; 4741 4742 spin_lock_bh(&adapter->mcc_lock); 4743 4744 wrb = wrb_from_mccq(adapter); 4745 if (!wrb) { 4746 status = -EBUSY; 4747 goto err; 4748 } 4749 req = embedded_payload(wrb); 4750 4751 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4752 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp), 4753 wrb, NULL); 4754 req->hdr.domain = vf_num + 1; 4755 4756 status = be_mcc_notify_wait(adapter); 4757 if (!status) { 4758 resp = (struct be_cmd_resp_get_iface_list *)req; 4759 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id); 4760 } 4761 
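/* The GET_IFACE_LIST response is read back from the same embedded WRB payload that carried the request (hence the cast from req above); only the returned interface id is consumed. */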
4762 err: 4763 spin_unlock_bh(&adapter->mcc_lock); 4764 return status; 4765 } 4766 4767 static int lancer_wait_idle(struct be_adapter *adapter) 4768 { 4769 #define SLIPORT_IDLE_TIMEOUT 30 4770 u32 reg_val; 4771 int status = 0, i; 4772 4773 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) { 4774 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET); 4775 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0) 4776 break; 4777 4778 ssleep(1); 4779 } 4780 4781 if (i == SLIPORT_IDLE_TIMEOUT) 4782 status = -1; 4783 4784 return status; 4785 } 4786 4787 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask) 4788 { 4789 int status = 0; 4790 4791 status = lancer_wait_idle(adapter); 4792 if (status) 4793 return status; 4794 4795 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET); 4796 4797 return status; 4798 } 4799 4800 /* Routine to check whether dump image is present or not */ 4801 bool dump_present(struct be_adapter *adapter) 4802 { 4803 u32 sliport_status = 0; 4804 4805 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET); 4806 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK); 4807 } 4808 4809 int lancer_initiate_dump(struct be_adapter *adapter) 4810 { 4811 struct device *dev = &adapter->pdev->dev; 4812 int status; 4813 4814 if (dump_present(adapter)) { 4815 dev_info(dev, "Previous dump not cleared, not forcing dump\n"); 4816 return -EEXIST; 4817 } 4818 4819 /* give firmware reset and diagnostic dump */ 4820 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK | 4821 PHYSDEV_CONTROL_DD_MASK); 4822 if (status < 0) { 4823 dev_err(dev, "FW reset failed\n"); 4824 return status; 4825 } 4826 4827 status = lancer_wait_idle(adapter); 4828 if (status) 4829 return status; 4830 4831 if (!dump_present(adapter)) { 4832 dev_err(dev, "FW dump not generated\n"); 4833 return -EIO; 4834 } 4835 4836 return 0; 4837 } 4838 4839 int lancer_delete_dump(struct be_adapter *adapter) 4840 { 4841 int status; 4842 4843 status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE); 4844 return be_cmd_status(status); 4845 } 4846 4847 /* Uses sync mcc */ 4848 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain) 4849 { 4850 struct be_mcc_wrb *wrb; 4851 struct be_cmd_enable_disable_vf *req; 4852 int status; 4853 4854 if (BEx_chip(adapter)) 4855 return 0; 4856 4857 spin_lock_bh(&adapter->mcc_lock); 4858 4859 wrb = wrb_from_mccq(adapter); 4860 if (!wrb) { 4861 status = -EBUSY; 4862 goto err; 4863 } 4864 4865 req = embedded_payload(wrb); 4866 4867 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4868 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), 4869 wrb, NULL); 4870 4871 req->hdr.domain = domain; 4872 req->enable = 1; 4873 status = be_mcc_notify_wait(adapter); 4874 err: 4875 spin_unlock_bh(&adapter->mcc_lock); 4876 return status; 4877 } 4878 4879 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable) 4880 { 4881 struct be_mcc_wrb *wrb; 4882 struct be_cmd_req_intr_set *req; 4883 int status; 4884 4885 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4886 return -1; 4887 4888 wrb = wrb_from_mbox(adapter); 4889 4890 req = embedded_payload(wrb); 4891 4892 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4893 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), 4894 wrb, NULL); 4895 4896 req->intr_enabled = intr_enable; 4897 4898 status = be_mbox_notify_wait(adapter); 4899 4900 mutex_unlock(&adapter->mbox_lock); 4901 return status; 4902 } 4903 4904 /* Uses MBOX */ 4905 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile_id) 4906 { 4907 struct 
be_cmd_req_get_active_profile *req; 4908 struct be_mcc_wrb *wrb; 4909 int status; 4910 4911 if (mutex_lock_interruptible(&adapter->mbox_lock)) 4912 return -1; 4913 4914 wrb = wrb_from_mbox(adapter); 4915 if (!wrb) { 4916 status = -EBUSY; 4917 goto err; 4918 } 4919 4920 req = embedded_payload(wrb); 4921 4922 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4923 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), 4924 wrb, NULL); 4925 4926 status = be_mbox_notify_wait(adapter); 4927 if (!status) { 4928 struct be_cmd_resp_get_active_profile *resp = 4929 embedded_payload(wrb); 4930 4931 *profile_id = le16_to_cpu(resp->active_profile_id); 4932 } 4933 4934 err: 4935 mutex_unlock(&adapter->mbox_lock); 4936 return status; 4937 } 4938 4939 static int 4940 __be_cmd_set_logical_link_config(struct be_adapter *adapter, 4941 int link_state, int version, u8 domain) 4942 { 4943 struct be_cmd_req_set_ll_link *req; 4944 struct be_mcc_wrb *wrb; 4945 u32 link_config = 0; 4946 int status; 4947 4948 spin_lock_bh(&adapter->mcc_lock); 4949 4950 wrb = wrb_from_mccq(adapter); 4951 if (!wrb) { 4952 status = -EBUSY; 4953 goto err; 4954 } 4955 4956 req = embedded_payload(wrb); 4957 4958 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 4959 OPCODE_COMMON_SET_LOGICAL_LINK_CONFIG, 4960 sizeof(*req), wrb, NULL); 4961 4962 req->hdr.version = version; 4963 req->hdr.domain = domain; 4964 4965 if (link_state == IFLA_VF_LINK_STATE_ENABLE || 4966 link_state == IFLA_VF_LINK_STATE_AUTO) 4967 link_config |= PLINK_ENABLE; 4968 4969 if (link_state == IFLA_VF_LINK_STATE_AUTO) 4970 link_config |= PLINK_TRACK; 4971 4972 req->link_config = cpu_to_le32(link_config); 4973 4974 status = be_mcc_notify_wait(adapter); 4975 err: 4976 spin_unlock_bh(&adapter->mcc_lock); 4977 return status; 4978 } 4979 4980 int be_cmd_set_logical_link_config(struct be_adapter *adapter, 4981 int link_state, u8 domain) 4982 { 4983 int status; 4984 4985 if (BE2_chip(adapter)) 4986 return -EOPNOTSUPP; 4987 4988 status = __be_cmd_set_logical_link_config(adapter, link_state, 4989 2, domain); 4990 4991 /* Version 2 of the command will not be recognized by older FW. 4992 * On such a failure issue version 1 of the command. 
4993 */ 4994 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST) 4995 status = __be_cmd_set_logical_link_config(adapter, link_state, 4996 1, domain); 4997 return status; 4998 } 4999 5000 int be_cmd_set_features(struct be_adapter *adapter) 5001 { 5002 struct be_cmd_resp_set_features *resp; 5003 struct be_cmd_req_set_features *req; 5004 struct be_mcc_wrb *wrb; 5005 int status; 5006 5007 spin_lock_bh(&adapter->mcc_lock); 5008 5009 wrb = wrb_from_mccq(adapter); 5010 if (!wrb) { 5011 status = -EBUSY; 5012 goto err; 5013 } 5014 5015 req = embedded_payload(wrb); 5016 5017 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, 5018 OPCODE_COMMON_SET_FEATURES, 5019 sizeof(*req), wrb, NULL); 5020 5021 req->features = cpu_to_le32(BE_FEATURE_UE_RECOVERY); 5022 req->parameter_len = cpu_to_le32(sizeof(struct be_req_ue_recovery)); 5023 req->parameter.req.uer = cpu_to_le32(BE_UE_RECOVERY_UER_MASK); 5024 5025 status = be_mcc_notify_wait(adapter); 5026 if (status) 5027 goto err; 5028 5029 resp = embedded_payload(wrb); 5030 5031 adapter->error_recovery.ue_to_poll_time = 5032 le16_to_cpu(resp->parameter.resp.ue2rp); 5033 adapter->error_recovery.ue_to_reset_time = 5034 le16_to_cpu(resp->parameter.resp.ue2sr); 5035 adapter->error_recovery.recovery_supported = true; 5036 err: 5037 /* Checking "MCC_STATUS_INVALID_LENGTH" for SKH as FW 5038 * returns this error in older firmware versions 5039 */ 5040 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST || 5041 base_status(status) == MCC_STATUS_INVALID_LENGTH) 5042 dev_info(&adapter->pdev->dev, 5043 "Adapter does not support HW error recovery\n"); 5044 5045 spin_unlock_bh(&adapter->mcc_lock); 5046 return status; 5047 } 5048 5049 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload, 5050 int wrb_payload_size, u16 *cmd_status, u16 *ext_status) 5051 { 5052 struct be_adapter *adapter = netdev_priv(netdev_handle); 5053 struct be_mcc_wrb *wrb; 5054 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)wrb_payload; 5055 struct be_cmd_req_hdr *req; 5056 struct be_cmd_resp_hdr *resp; 5057 int status; 5058 5059 spin_lock_bh(&adapter->mcc_lock); 5060 5061 wrb = wrb_from_mccq(adapter); 5062 if (!wrb) { 5063 status = -EBUSY; 5064 goto err; 5065 } 5066 req = embedded_payload(wrb); 5067 resp = embedded_payload(wrb); 5068 5069 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, 5070 hdr->opcode, wrb_payload_size, wrb, NULL); 5071 memcpy(req, wrb_payload, wrb_payload_size); 5072 be_dws_cpu_to_le(req, wrb_payload_size); 5073 5074 status = be_mcc_notify_wait(adapter); 5075 if (cmd_status) 5076 *cmd_status = (status & 0xffff); 5077 if (ext_status) 5078 *ext_status = 0; 5079 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length); 5080 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length); 5081 err: 5082 spin_unlock_bh(&adapter->mcc_lock); 5083 return status; 5084 } 5085 EXPORT_SYMBOL(be_roce_mcc_cmd); 5086
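/* Illustrative call pattern for be_roce_mcc_cmd() (sketch only; the buffer,
 * its size and the "netdev" variable below are hypothetical and not part of
 * this driver).  The caller supplies a fully formed MCC request, beginning
 * with struct be_cmd_req_hdr, in wrb_payload; on return the same buffer holds
 * the response, so it must be sized for both and fit the embedded WRB payload:
 *
 *	u8 buf[64] = {};
 *	u16 cmd_status, ext_status;
 *	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *)buf;
 *
 *	hdr->subsystem = CMD_SUBSYSTEM_COMMON;
 *	hdr->opcode = OPCODE_COMMON_GET_PORT_NAME;
 *	be_roce_mcc_cmd(netdev, buf, sizeof(buf), &cmd_status, &ext_status);
 */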