/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/netdev_lock.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"
#include "bnxt_vfr.h"
#include "bnxt_devlink.h"
#include "bnxt_ethtool.h"
#include "bnxt_ulp.h"
#include "bnxt_ptp.h"
#include "bnxt_coredump.h"
#include "bnxt_nvm_defs.h"

static void __bnxt_fw_recover(struct bnxt *bp)
{
	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state) ||
	    test_bit(BNXT_STATE_FW_NON_FATAL_COND, &bp->state))
		bnxt_fw_reset(bp);
	else
		bnxt_fw_exception(bp);
}

static int
bnxt_dl_flash_update(struct devlink *dl,
		     struct devlink_flash_update_params *params,
		     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc;

	if (!BNXT_PF(bp)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flash update not supported from a VF");
		return -EPERM;
	}

	devlink_flash_update_status_notify(dl, "Preparing to flash", NULL, 0, 0);
	rc = bnxt_flash_package_from_fw_obj(bp->dev, params->fw, 0, extack);
	if (!rc)
		devlink_flash_update_status_notify(dl, "Flashing done", NULL, 0, 0);
	else
		devlink_flash_update_status_notify(dl, "Flashing failed", NULL, 0, 0);
	return rc;
}

static int bnxt_hwrm_remote_dev_reset_set(struct bnxt *bp, bool remote_reset)
{
	struct hwrm_func_cfg_input *req;
	int rc;

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		return -EOPNOTSUPP;

	rc = bnxt_hwrm_func_cfg_short_req_init(bp, &req);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(0xffff);
	req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_HOT_RESET_IF_SUPPORT);
	if (remote_reset)
		req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS);

	return hwrm_req_send(bp, req);
}

static char *bnxt_health_severity_str(enum bnxt_health_severity severity)
{
	switch (severity) {
	case SEVERITY_NORMAL: return "normal";
	case SEVERITY_WARNING: return "warning";
	case SEVERITY_RECOVERABLE: return "recoverable";
	case SEVERITY_FATAL: return "fatal";
	default: return "unknown";
	}
}

static char *bnxt_health_remedy_str(enum bnxt_health_remedy remedy)
{
	switch (remedy) {
	case REMEDY_DEVLINK_RECOVER: return "devlink recover";
	case REMEDY_POWER_CYCLE_DEVICE: return "device power cycle";
	case REMEDY_POWER_CYCLE_HOST: return "host power cycle";
	case REMEDY_FW_UPDATE: return "update firmware";
	case REMEDY_HW_REPLACE: return "replace hardware";
	default: return "unknown";
	}
}

static int bnxt_fw_diagnose(struct devlink_health_reporter *reporter,
			    struct devlink_fmsg *fmsg,
			    struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);
	struct bnxt_fw_health *h = bp->fw_health;
	u32 fw_status, fw_resets;

	if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) {
		devlink_fmsg_string_pair_put(fmsg, "Status", "recovering");
		return 0;
	}

	if (!h->status_reliable) {
		devlink_fmsg_string_pair_put(fmsg, "Status", "unknown");
		return 0;
	}

	mutex_lock(&h->lock);
	fw_status = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG);
	if (BNXT_FW_IS_BOOTING(fw_status)) {
		devlink_fmsg_string_pair_put(fmsg, "Status", "initializing");
	} else if (h->severity || fw_status != BNXT_FW_STATUS_HEALTHY) {
		if (!h->severity) {
			h->severity = SEVERITY_FATAL;
			h->remedy = REMEDY_POWER_CYCLE_DEVICE;
			h->diagnoses++;
			devlink_health_report(h->fw_reporter,
					      "FW error diagnosed", h);
		}
		devlink_fmsg_string_pair_put(fmsg, "Status", "error");
		devlink_fmsg_u32_pair_put(fmsg, "Syndrome", fw_status);
	} else {
		devlink_fmsg_string_pair_put(fmsg, "Status", "healthy");
	}

	devlink_fmsg_string_pair_put(fmsg, "Severity",
				     bnxt_health_severity_str(h->severity));

	if (h->severity) {
		devlink_fmsg_string_pair_put(fmsg, "Remedy",
					     bnxt_health_remedy_str(h->remedy));
		if (h->remedy == REMEDY_DEVLINK_RECOVER)
			devlink_fmsg_string_pair_put(fmsg, "Impact",
						     "traffic+ntuple_cfg");
	}

	mutex_unlock(&h->lock);
	if (!h->resets_reliable)
		return 0;

	fw_resets = bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
	devlink_fmsg_u32_pair_put(fmsg, "Resets", fw_resets);
	devlink_fmsg_u32_pair_put(fmsg, "Arrests", h->arrests);
	devlink_fmsg_u32_pair_put(fmsg, "Survivals", h->survivals);
	devlink_fmsg_u32_pair_put(fmsg, "Discoveries", h->discoveries);
	devlink_fmsg_u32_pair_put(fmsg, "Fatalities", h->fatalities);
	devlink_fmsg_u32_pair_put(fmsg, "Diagnoses", h->diagnoses);
	return 0;
}

static int bnxt_fw_dump(struct devlink_health_reporter *reporter,
			struct devlink_fmsg *fmsg, void *priv_ctx,
			struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);
	u32 dump_len;
	void *data;
	int rc;

	/* TODO: no firmware dump support in devlink_health_report() context */
	if (priv_ctx)
		return -EOPNOTSUPP;

	dump_len = bnxt_get_coredump_length(bp, BNXT_DUMP_LIVE);
	if (!dump_len)
		return -EIO;

	data = vmalloc(dump_len);
	if (!data)
		return -ENOMEM;

	rc = bnxt_get_coredump(bp, BNXT_DUMP_LIVE, data, &dump_len);
	if (!rc) {
		devlink_fmsg_pair_nest_start(fmsg, "core");
		devlink_fmsg_binary_pair_put(fmsg, "data", data, dump_len);
		devlink_fmsg_u32_pair_put(fmsg, "size", dump_len);
		devlink_fmsg_pair_nest_end(fmsg);
	}

	vfree(data);
	return rc;
}

static int bnxt_fw_recover(struct devlink_health_reporter *reporter,
			   void *priv_ctx,
			   struct netlink_ext_ack *extack)
{
	struct bnxt *bp = devlink_health_reporter_priv(reporter);

	if (bp->fw_health->severity == SEVERITY_FATAL)
		return -ENODEV;

	set_bit(BNXT_STATE_RECOVER, &bp->state);
	__bnxt_fw_recover(bp);

	return -EINPROGRESS;
}

static const struct devlink_health_reporter_ops bnxt_dl_fw_reporter_ops = {
	.name = "fw",
	.diagnose = bnxt_fw_diagnose,
	.dump = bnxt_fw_dump,
	.recover = bnxt_fw_recover,
};

static struct devlink_health_reporter *
__bnxt_dl_reporter_create(struct bnxt *bp,
			  const struct devlink_health_reporter_ops *ops)
{
	struct devlink_health_reporter *reporter;

	reporter = devlink_health_reporter_create(bp->dl, ops, 0, bp);
	if (IS_ERR(reporter)) {
		netdev_warn(bp->dev, "Failed to create %s health reporter, rc = %ld\n",
			    ops->name, PTR_ERR(reporter));
		return NULL;
	}

	return reporter;
}

void bnxt_dl_fw_reporters_create(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;

	if (fw_health && !fw_health->fw_reporter)
		fw_health->fw_reporter =
			__bnxt_dl_reporter_create(bp, &bnxt_dl_fw_reporter_ops);
}

void bnxt_dl_fw_reporters_destroy(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;

	if (fw_health && fw_health->fw_reporter) {
		devlink_health_reporter_destroy(fw_health->fw_reporter);
		fw_health->fw_reporter = NULL;
	}
}

void bnxt_devlink_health_fw_report(struct bnxt *bp)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	int rc;

	if (!fw_health)
		return;

	if (!fw_health->fw_reporter) {
		__bnxt_fw_recover(bp);
		return;
	}

	mutex_lock(&fw_health->lock);
	fw_health->severity = SEVERITY_RECOVERABLE;
	fw_health->remedy = REMEDY_DEVLINK_RECOVER;
	mutex_unlock(&fw_health->lock);
	rc = devlink_health_report(fw_health->fw_reporter, "FW error reported",
				   fw_health);
	if (rc == -ECANCELED)
		__bnxt_fw_recover(bp);
}

void bnxt_dl_health_fw_status_update(struct bnxt *bp, bool healthy)
{
	struct bnxt_fw_health *fw_health = bp->fw_health;
	u8 state;

	mutex_lock(&fw_health->lock);
	if (healthy) {
		fw_health->severity = SEVERITY_NORMAL;
		state = DEVLINK_HEALTH_REPORTER_STATE_HEALTHY;
	} else {
		fw_health->severity = SEVERITY_FATAL;
		fw_health->remedy = REMEDY_POWER_CYCLE_DEVICE;
		state = DEVLINK_HEALTH_REPORTER_STATE_ERROR;
	}
	mutex_unlock(&fw_health->lock);
	devlink_health_reporter_state_update(fw_health->fw_reporter, state);
}

void bnxt_dl_health_fw_recovery_done(struct bnxt *bp)
{
	struct bnxt_dl *dl = devlink_priv(bp->dl);

	devlink_health_reporter_recovery_done(bp->fw_health->fw_reporter);
	bnxt_hwrm_remote_dev_reset_set(bp, dl->remote_reset);
}

static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			    struct netlink_ext_ack *extack);

static void
bnxt_dl_livepatch_report_err(struct bnxt *bp, struct netlink_ext_ack *extack,
			     struct hwrm_fw_livepatch_output *resp)
{
	int err = ((struct hwrm_err_output *)resp)->cmd_err;

	switch (err) {
	case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_OPCODE:
		netdev_err(bp->dev, "Illegal live patch opcode");
		NL_SET_ERR_MSG_MOD(extack, "Invalid opcode");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_NOT_SUPPORTED:
		NL_SET_ERR_MSG_MOD(extack, "Live patch operation not supported");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_NOT_INSTALLED:
		NL_SET_ERR_MSG_MOD(extack, "Live patch not found");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_NOT_PATCHED:
		NL_SET_ERR_MSG_MOD(extack,
				   "Live patch deactivation failed. Firmware not patched.");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_AUTH_FAIL:
		NL_SET_ERR_MSG_MOD(extack, "Live patch not authenticated");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_HEADER:
		NL_SET_ERR_MSG_MOD(extack, "Incompatible live patch");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_INVALID_SIZE:
		NL_SET_ERR_MSG_MOD(extack, "Live patch has invalid size");
		break;
	case FW_LIVEPATCH_CMD_ERR_CODE_ALREADY_PATCHED:
		NL_SET_ERR_MSG_MOD(extack, "Live patch already applied");
		break;
	default:
		netdev_err(bp->dev, "Unexpected live patch error: %d\n", err);
		NL_SET_ERR_MSG_MOD(extack, "Failed to activate live patch");
		break;
	}
}

/* Live patch status in NVM */
#define BNXT_LIVEPATCH_NOT_INSTALLED	0
#define BNXT_LIVEPATCH_INSTALLED	FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL
#define BNXT_LIVEPATCH_REMOVED		FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE
#define BNXT_LIVEPATCH_MASK		(FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL | \
					 FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE)
#define BNXT_LIVEPATCH_ACTIVATED	BNXT_LIVEPATCH_MASK

#define BNXT_LIVEPATCH_STATE(flags)	((flags) & BNXT_LIVEPATCH_MASK)

static int
bnxt_dl_livepatch_activate(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	struct hwrm_fw_livepatch_query_output *query_resp;
	struct hwrm_fw_livepatch_query_input *query_req;
	struct hwrm_fw_livepatch_output *patch_resp;
	struct hwrm_fw_livepatch_input *patch_req;
	u16 flags, live_patch_state;
	bool activated = false;
	u32 installed = 0;
	u8 target;
	int rc;

	if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support live patch");
		return -EOPNOTSUPP;
	}

	rc = hwrm_req_init(bp, query_req, HWRM_FW_LIVEPATCH_QUERY);
	if (rc)
		return rc;
	query_resp = hwrm_req_hold(bp, query_req);

	rc = hwrm_req_init(bp, patch_req, HWRM_FW_LIVEPATCH);
	if (rc) {
		hwrm_req_drop(bp, query_req);
		return rc;
	}
	patch_req->loadtype = FW_LIVEPATCH_REQ_LOADTYPE_NVM_INSTALL;
	patch_resp = hwrm_req_hold(bp, patch_req);

	for (target = 1; target <= FW_LIVEPATCH_REQ_FW_TARGET_LAST; target++) {
		query_req->fw_target = target;
		rc = hwrm_req_send(bp, query_req);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to query packages");
			break;
		}

		flags = le16_to_cpu(query_resp->status_flags);
		live_patch_state = BNXT_LIVEPATCH_STATE(flags);

		if (live_patch_state == BNXT_LIVEPATCH_NOT_INSTALLED)
			continue;

		if (live_patch_state == BNXT_LIVEPATCH_ACTIVATED) {
			activated = true;
			continue;
		}

		if (live_patch_state == BNXT_LIVEPATCH_INSTALLED)
			patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_ACTIVATE;
		else if (live_patch_state == BNXT_LIVEPATCH_REMOVED)
			patch_req->opcode = FW_LIVEPATCH_REQ_OPCODE_DEACTIVATE;

		patch_req->fw_target = target;
		rc = hwrm_req_send(bp, patch_req);
		if (rc) {
			bnxt_dl_livepatch_report_err(bp, extack, patch_resp);
			break;
		}
		installed++;
	}

	if (!rc && !installed) {
		if (activated) {
			NL_SET_ERR_MSG_MOD(extack, "Live patch already activated");
			rc = -EEXIST;
		} else {
			NL_SET_ERR_MSG_MOD(extack, "No live patches found");
			rc = -ENOENT;
		}
	}
	hwrm_req_drop(bp, query_req);
	hwrm_req_drop(bp, patch_req);
	return rc;
}

static int bnxt_dl_reload_down(struct devlink *dl, bool netns_change,
			       enum devlink_reload_action action,
			       enum devlink_reload_limit limit,
			       struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc = 0;

	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
		bnxt_ulp_stop(bp);
		rtnl_lock();
		netdev_lock(bp->dev);
		if (bnxt_sriov_cfg(bp)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "reload is unsupported while VFs are allocated or being configured");
			netdev_unlock(bp->dev);
			rtnl_unlock();
			bnxt_ulp_start(bp, 0);
			return -EOPNOTSUPP;
		}
		if (bp->dev->reg_state == NETREG_UNREGISTERED) {
			netdev_unlock(bp->dev);
			rtnl_unlock();
			bnxt_ulp_start(bp, 0);
			return -ENODEV;
		}
		if (netif_running(bp->dev))
			bnxt_close_nic(bp, true, true);
		bnxt_vf_reps_free(bp);
		rc = bnxt_hwrm_func_drv_unrgtr(bp);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to deregister");
			if (netif_running(bp->dev))
				netif_close(bp->dev);
			netdev_unlock(bp->dev);
			rtnl_unlock();
			break;
		}
		bnxt_cancel_reservations(bp, false);
		bnxt_free_ctx_mem(bp, false);
		break;
	}
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			return bnxt_dl_livepatch_activate(bp, extack);
		if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET) {
			NL_SET_ERR_MSG_MOD(extack, "Device not capable, requires reboot");
			return -EOPNOTSUPP;
		}
		if (!bnxt_hwrm_reset_permitted(bp)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Reset denied by firmware, it may be inhibited by remote driver");
			return -EPERM;
		}
		rtnl_lock();
		netdev_lock(bp->dev);
		if (bp->dev->reg_state == NETREG_UNREGISTERED) {
			netdev_unlock(bp->dev);
			rtnl_unlock();
			return -ENODEV;
		}
		if (netif_running(bp->dev))
			set_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		rc = bnxt_hwrm_firmware_reset(bp->dev,
					      FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP,
					      FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP,
					      FW_RESET_REQ_FLAGS_RESET_GRACEFUL |
					      FW_RESET_REQ_FLAGS_FW_ACTIVATION);
		if (rc) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to activate firmware");
			clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
			netdev_unlock(bp->dev);
			rtnl_unlock();
		}
		break;
	}
	default:
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action action,
			     enum devlink_reload_limit limit, u32 *actions_performed,
			     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc = 0;

	netdev_assert_locked(bp->dev);

	*actions_performed = 0;
	switch (action) {
	case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: {
		bnxt_fw_init_one(bp);
		bnxt_vf_reps_alloc(bp);
		if (netif_running(bp->dev))
			rc = bnxt_open_nic(bp, true, true);
		if (!rc) {
			bnxt_reenable_sriov(bp);
			bnxt_ptp_reapply_pps(bp);
		}
		break;
	}
	case DEVLINK_RELOAD_ACTION_FW_ACTIVATE: {
		unsigned long start = jiffies;
		unsigned long timeout = start + BNXT_DFLT_FW_RST_MAX_DSECS * HZ / 10;

		if (limit == DEVLINK_RELOAD_LIMIT_NO_RESET)
			break;
		if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)
			timeout = start + bp->fw_health->normal_func_wait_dsecs * HZ / 10;
		if (!netif_running(bp->dev))
			NL_SET_ERR_MSG_MOD(extack,
					   "Device is closed, not waiting for reset notice that will never come");
		netdev_unlock(bp->dev);
		rtnl_unlock();
		while (test_bit(BNXT_STATE_FW_ACTIVATE, &bp->state)) {
			if (time_after(jiffies, timeout)) {
				NL_SET_ERR_MSG_MOD(extack, "Activation incomplete");
				rc = -ETIMEDOUT;
				break;
			}
			if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) {
				NL_SET_ERR_MSG_MOD(extack, "Activation aborted");
				rc = -ENODEV;
				break;
			}
			msleep(50);
		}
		rtnl_lock();
		netdev_lock(bp->dev);
		if (!rc)
			*actions_performed |= BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
		clear_bit(BNXT_STATE_FW_ACTIVATE, &bp->state);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}

	if (!rc) {
		bnxt_print_device_info(bp);
		if (netif_running(bp->dev)) {
			mutex_lock(&bp->link_lock);
			bnxt_report_link(bp);
			mutex_unlock(&bp->link_lock);
		}
		*actions_performed |= BIT(action);
	} else if (netif_running(bp->dev)) {
		netif_close(bp->dev);
	}
	netdev_unlock(bp->dev);
	rtnl_unlock();
	if (action == DEVLINK_RELOAD_ACTION_DRIVER_REINIT)
		bnxt_ulp_start(bp, rc);
	return rc;
}

static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack)
{
	bool rc = false;
	u32 datalen;
	u16 index;
	u8 *buf;

	if (bnxt_find_nvram_item(bp->dev, BNX_DIR_TYPE_VPD,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, NULL, &datalen) || !datalen) {
		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd entry error");
		return false;
	}

	buf = kzalloc(datalen, GFP_KERNEL);
	if (!buf) {
		NL_SET_ERR_MSG_MOD(extack, "insufficient memory for nvm test");
		return false;
	}

	if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) {
		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error");
		goto done;
	}

	if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST,
			     BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) {
		NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error");
		goto done;
	}

	rc = true;

done:
	kfree(buf);
	return rc;
}

static bool bnxt_dl_selftest_check(struct devlink *dl, unsigned int id,
				   struct netlink_ext_ack *extack)
{
	return id == DEVLINK_ATTR_SELFTEST_ID_FLASH;
}

static enum devlink_selftest_status bnxt_dl_selftest_run(struct devlink *dl,
							 unsigned int id,
							 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);

	if (id == DEVLINK_ATTR_SELFTEST_ID_FLASH)
		return bnxt_nvm_test(bp, extack) ?
		       DEVLINK_SELFTEST_STATUS_PASS :
		       DEVLINK_SELFTEST_STATUS_FAIL;

	return DEVLINK_SELFTEST_STATUS_SKIP;
}

static const struct devlink_ops bnxt_dl_ops = {
#ifdef CONFIG_BNXT_SRIOV
	.eswitch_mode_set = bnxt_dl_eswitch_mode_set,
	.eswitch_mode_get = bnxt_dl_eswitch_mode_get,
#endif /* CONFIG_BNXT_SRIOV */
	.info_get = bnxt_dl_info_get,
	.flash_update = bnxt_dl_flash_update,
	.reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) |
			  BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE),
	.reload_limits = BIT(DEVLINK_RELOAD_LIMIT_NO_RESET),
	.reload_down = bnxt_dl_reload_down,
	.reload_up = bnxt_dl_reload_up,
	.selftest_check = bnxt_dl_selftest_check,
	.selftest_run = bnxt_dl_selftest_run,
};

static const struct devlink_ops bnxt_vf_dl_ops;

enum bnxt_dl_param_id {
	BNXT_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
};

static const struct bnxt_dl_nvm_param nvm_params[] = {
	{DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV, NVM_OFF_ENABLE_SRIOV,
	 BNXT_NVM_SHARED_CFG, 1, 1},
	{DEVLINK_PARAM_GENERIC_ID_IGNORE_ARI, NVM_OFF_IGNORE_ARI,
	 BNXT_NVM_SHARED_CFG, 1, 1},
	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX,
	 NVM_OFF_MSIX_VEC_PER_PF_MAX, BNXT_NVM_SHARED_CFG, 10, 4},
	{DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
	 NVM_OFF_MSIX_VEC_PER_PF_MIN, BNXT_NVM_SHARED_CFG, 7, 4},
	{DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE, NVM_OFF_SUPPORT_RDMA,
	 BNXT_NVM_FUNC_CFG, 1, 1},
	{BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK, NVM_OFF_DIS_GRE_VER_CHECK,
	 BNXT_NVM_SHARED_CFG, 1, 1},
};

union bnxt_nvm_data {
	u8	val8;
	__le32	val32;
};

static void bnxt_copy_to_nvm_data(union bnxt_nvm_data *dst,
				  union devlink_param_value *src,
				  int nvm_num_bits, int dl_num_bytes)
{
	u32 val32 = 0;

	if (nvm_num_bits == 1) {
		dst->val8 = src->vbool;
		return;
	}
	if (dl_num_bytes == 4)
		val32 = src->vu32;
	else if (dl_num_bytes == 2)
		val32 = (u32)src->vu16;
	else if (dl_num_bytes == 1)
		val32 = (u32)src->vu8;
	dst->val32 = cpu_to_le32(val32);
}

static void bnxt_copy_from_nvm_data(union devlink_param_value *dst,
				    union bnxt_nvm_data *src,
				    int nvm_num_bits, int dl_num_bytes)
{
	u32 val32;

	if (nvm_num_bits == 1) {
		dst->vbool = src->val8;
		return;
	}
	val32 = le32_to_cpu(src->val32);
	if (dl_num_bytes == 4)
		dst->vu32 = val32;
	else if (dl_num_bytes == 2)
		dst->vu16 = (u16)val32;
	else if (dl_num_bytes == 1)
		dst->vu8 = (u8)val32;
}

static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, u32 *nvm_cfg_ver)
{
	struct hwrm_nvm_get_variable_input *req;
	u16 bytes = BNXT_NVM_CFG_VER_BYTES;
	u16 bits = BNXT_NVM_CFG_VER_BITS;
	union devlink_param_value ver;
	union bnxt_nvm_data *data;
	dma_addr_t data_dma_addr;
	int rc, i = 2;
	u16 dim = 1;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
	if (rc)
		return rc;

	data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);
	if (!data) {
		rc = -ENOMEM;
		goto exit;
	}

	/* earlier devices present as an array of raw bytes */
	if (!BNXT_CHIP_P5_PLUS(bp)) {
		dim = 0;
		i = 0;
		bits *= 3;  /* array of 3 version components */
		bytes *= 4; /* copy whole word */
	}

	hwrm_req_hold(bp, req);
	req->dest_data_addr = cpu_to_le64(data_dma_addr);
	req->data_len = cpu_to_le16(bits);
	req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER);
	req->dimensions = cpu_to_le16(dim);

	while (i >= 0) {
		req->index_0 = cpu_to_le16(i--);
		rc = hwrm_req_send_silent(bp, req);
		if (rc)
			goto exit;
		bnxt_copy_from_nvm_data(&ver, data, bits, bytes);

		if (BNXT_CHIP_P5_PLUS(bp)) {
			*nvm_cfg_ver <<= 8;
			*nvm_cfg_ver |= ver.vu8;
		} else {
			*nvm_cfg_ver = ver.vu32;
		}
	}

exit:
	hwrm_req_drop(bp, req);
	return rc;
}

static int bnxt_dl_info_put(struct bnxt *bp, struct devlink_info_req *req,
			    enum bnxt_dl_version_type type, const char *key,
			    char *buf)
{
	if (!strlen(buf))
		return 0;

	if ((bp->flags & BNXT_FLAG_CHIP_P5_PLUS) &&
	    (!strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_NCSI) ||
	     !strcmp(key, DEVLINK_INFO_VERSION_GENERIC_FW_ROCE)))
		return 0;

	switch (type) {
	case BNXT_VERSION_FIXED:
		return devlink_info_version_fixed_put(req, key, buf);
	case BNXT_VERSION_RUNNING:
		return devlink_info_version_running_put(req, key, buf);
	case BNXT_VERSION_STORED:
		return devlink_info_version_stored_put(req, key, buf);
	}
	return 0;
}

#define BNXT_FW_SRT_PATCH	"fw.srt.patch"
#define BNXT_FW_CRT_PATCH	"fw.crt.patch"

static int bnxt_dl_livepatch_info_put(struct bnxt *bp,
				      struct devlink_info_req *req,
				      const char *key)
{
	struct hwrm_fw_livepatch_query_input *query;
	struct hwrm_fw_livepatch_query_output *resp;
	u16 flags;
	int rc;

	if (~bp->fw_cap & BNXT_FW_CAP_LIVEPATCH)
		return 0;

	rc = hwrm_req_init(bp, query, HWRM_FW_LIVEPATCH_QUERY);
	if (rc)
		return rc;

	if (!strcmp(key, BNXT_FW_SRT_PATCH))
		query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_SECURE_FW;
	else if (!strcmp(key, BNXT_FW_CRT_PATCH))
		query->fw_target = FW_LIVEPATCH_QUERY_REQ_FW_TARGET_COMMON_FW;
	else
		goto exit;

	resp = hwrm_req_hold(bp, query);
	rc = hwrm_req_send(bp, query);
	if (rc)
		goto exit;

	flags = le16_to_cpu(resp->status_flags);
	if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_ACTIVE) {
		resp->active_ver[sizeof(resp->active_ver) - 1] = '\0';
		rc = devlink_info_version_running_put(req, key, resp->active_ver);
		if (rc)
			goto exit;
	}

	if (flags & FW_LIVEPATCH_QUERY_RESP_STATUS_FLAGS_INSTALL) {
		resp->install_ver[sizeof(resp->install_ver) - 1] = '\0';
		rc = devlink_info_version_stored_put(req, key, resp->install_ver);
		if (rc)
			goto exit;
	}

exit:
	hwrm_req_drop(bp, query);
	return rc;
}

#define HWRM_FW_VER_STR_LEN	16

static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
			    struct netlink_ext_ack *extack)
{
	struct hwrm_nvm_get_dev_info_output nvm_dev_info;
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_ver_get_output *ver_resp;
	char mgmt_ver[FW_VER_STR_LEN];
	char roce_ver[FW_VER_STR_LEN];
	char ncsi_ver[FW_VER_STR_LEN];
	char buf[32];
	u32 ver = 0;
	int rc;

	if (BNXT_PF(bp) && (bp->flags & BNXT_FLAG_DSN_VALID)) {
		sprintf(buf, "%02X-%02X-%02X-%02X-%02X-%02X-%02X-%02X",
			bp->dsn[7], bp->dsn[6], bp->dsn[5], bp->dsn[4],
			bp->dsn[3], bp->dsn[2], bp->dsn[1], bp->dsn[0]);
		rc = devlink_info_serial_number_put(req, buf);
		if (rc)
			return rc;
	}

	if (strlen(bp->board_serialno)) {
		rc = devlink_info_board_serial_number_put(req, bp->board_serialno);
		if (rc)
			return rc;
	}

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
			      DEVLINK_INFO_VERSION_GENERIC_BOARD_ID,
			      bp->board_partno);
	if (rc)
		return rc;

	sprintf(buf, "%X", bp->chip_num);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
			      DEVLINK_INFO_VERSION_GENERIC_ASIC_ID, buf);
	if (rc)
		return rc;

	ver_resp = &bp->ver_resp;
	sprintf(buf, "%c%d", 'A' + ver_resp->chip_rev, ver_resp->chip_metal);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_FIXED,
			      DEVLINK_INFO_VERSION_GENERIC_ASIC_REV, buf);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
			      bp->nvm_cfg_ver);
	if (rc)
		return rc;

	buf[0] = 0;
	strncat(buf, ver_resp->active_pkg_name, HWRM_FW_VER_STR_LEN);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW, buf);
	if (rc)
		return rc;

	if (BNXT_PF(bp) && !bnxt_hwrm_get_nvm_cfg_ver(bp, &ver)) {
		sprintf(buf, "%d.%d.%d", (ver >> 16) & 0xff, (ver >> 8) & 0xff,
			ver & 0xff);
		rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
				      DEVLINK_INFO_VERSION_GENERIC_FW_PSID,
				      buf);
		if (rc)
			return rc;
	}

	if (ver_resp->flags & VER_GET_RESP_FLAGS_EXT_VER_AVAIL) {
		snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->hwrm_fw_major, ver_resp->hwrm_fw_minor,
			 ver_resp->hwrm_fw_build, ver_resp->hwrm_fw_patch);

		snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->mgmt_fw_major, ver_resp->mgmt_fw_minor,
			 ver_resp->mgmt_fw_build, ver_resp->mgmt_fw_patch);

		snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->roce_fw_major, ver_resp->roce_fw_minor,
			 ver_resp->roce_fw_build, ver_resp->roce_fw_patch);
	} else {
		snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->hwrm_fw_maj_8b, ver_resp->hwrm_fw_min_8b,
			 ver_resp->hwrm_fw_bld_8b, ver_resp->hwrm_fw_rsvd_8b);

		snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->mgmt_fw_maj_8b, ver_resp->mgmt_fw_min_8b,
			 ver_resp->mgmt_fw_bld_8b, ver_resp->mgmt_fw_rsvd_8b);

		snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
			 ver_resp->roce_fw_maj_8b, ver_resp->roce_fw_min_8b,
			 ver_resp->roce_fw_bld_8b, ver_resp->roce_fw_rsvd_8b);
	}
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_MGMT_API,
			      bp->hwrm_ver_supp);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
	if (rc)
		return rc;

	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_RUNNING,
			      DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
	if (rc)
		return rc;

	rc = bnxt_hwrm_nvm_get_dev_info(bp, &nvm_dev_info);
	if (rc ||
	    !(nvm_dev_info.flags & NVM_GET_DEV_INFO_RESP_FLAGS_FW_VER_VALID)) {
		if (!bnxt_get_pkginfo(bp->dev, buf, sizeof(buf)))
			return bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
						DEVLINK_INFO_VERSION_GENERIC_FW,
						buf);
		return 0;
	}

	buf[0] = 0;
	strncat(buf, nvm_dev_info.pkg_name, HWRM_FW_VER_STR_LEN);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW, buf);
	if (rc)
		return rc;

	snprintf(mgmt_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 nvm_dev_info.hwrm_fw_major, nvm_dev_info.hwrm_fw_minor,
		 nvm_dev_info.hwrm_fw_build, nvm_dev_info.hwrm_fw_patch);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW_MGMT, mgmt_ver);
	if (rc)
		return rc;

	snprintf(ncsi_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 nvm_dev_info.mgmt_fw_major, nvm_dev_info.mgmt_fw_minor,
		 nvm_dev_info.mgmt_fw_build, nvm_dev_info.mgmt_fw_patch);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW_NCSI, ncsi_ver);
	if (rc)
		return rc;

	snprintf(roce_ver, FW_VER_STR_LEN, "%d.%d.%d.%d",
		 nvm_dev_info.roce_fw_major, nvm_dev_info.roce_fw_minor,
		 nvm_dev_info.roce_fw_build, nvm_dev_info.roce_fw_patch);
	rc = bnxt_dl_info_put(bp, req, BNXT_VERSION_STORED,
			      DEVLINK_INFO_VERSION_GENERIC_FW_ROCE, roce_ver);
	if (rc)
		return rc;

	if (BNXT_CHIP_P5_PLUS(bp)) {
		rc = bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_SRT_PATCH);
		if (rc)
			return rc;
	}
	return bnxt_dl_livepatch_info_put(bp, req, BNXT_FW_CRT_PATCH);
}

static int __bnxt_hwrm_nvm_req(struct bnxt *bp,
			       const struct bnxt_dl_nvm_param *nvm, void *msg,
			       union devlink_param_value *val)
{
	struct hwrm_nvm_get_variable_input *req = msg;
	struct hwrm_err_output *resp;
	union bnxt_nvm_data *data;
	dma_addr_t data_dma_addr;
	int idx = 0, rc;

	if (nvm->dir_type == BNXT_NVM_PORT_CFG)
		idx = bp->pf.port_id;
	else if (nvm->dir_type == BNXT_NVM_FUNC_CFG)
		idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID;

	data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr);

	if (!data) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	req->dest_data_addr = cpu_to_le64(data_dma_addr);
	req->data_len = cpu_to_le16(nvm->nvm_num_bits);
	req->option_num = cpu_to_le16(nvm->offset);
	req->index_0 = cpu_to_le16(idx);
	if (idx)
		req->dimensions = cpu_to_le16(1);

	resp = hwrm_req_hold(bp, req);
	if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) {
		bnxt_copy_to_nvm_data(data, val, nvm->nvm_num_bits,
				      nvm->dl_num_bytes);
		rc = hwrm_req_send(bp, msg);
	} else {
		rc = hwrm_req_send_silent(bp, msg);
		if (!rc) {
			bnxt_copy_from_nvm_data(val, data,
						nvm->nvm_num_bits,
						nvm->dl_num_bytes);
		} else {
			if (resp->cmd_err ==
			    NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST)
				rc = -EOPNOTSUPP;
		}
	}
	hwrm_req_drop(bp, req);
	if (rc == -EACCES)
		netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n");
	return rc;
}

static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
			     union devlink_param_value *val)
{
	struct hwrm_nvm_get_variable_input *req = msg;
	const struct bnxt_dl_nvm_param *nvm_param;
	int i;

	/* Get/Set NVM CFG parameter is supported only on PFs */
	if (BNXT_VF(bp)) {
		hwrm_req_drop(bp, req);
		return -EPERM;
	}

	for (i = 0; i < ARRAY_SIZE(nvm_params); i++) {
		nvm_param = &nvm_params[i];
		if (nvm_param->id == param_id)
			return __bnxt_hwrm_nvm_req(bp, nvm_param, msg, val);
	}
	return -EOPNOTSUPP;
}

static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id,
				 struct devlink_param_gset_ctx *ctx)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_nvm_get_variable_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
	if (rc)
		return rc;

	rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
	if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
		ctx->val.vbool = !ctx->val.vbool;

	return rc;
}

static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id,
				 struct devlink_param_gset_ctx *ctx,
				 struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_nvm_set_variable_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE);
	if (rc)
		return rc;

	if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK)
		ctx->val.vbool = !ctx->val.vbool;

	return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val);
}

static int bnxt_dl_roce_validate(struct devlink *dl, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	const struct bnxt_dl_nvm_param nvm_roce_cap = {0, NVM_OFF_RDMA_CAPABLE,
						       BNXT_NVM_SHARED_CFG, 1, 1};
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	struct hwrm_nvm_get_variable_input *req;
	union devlink_param_value roce_cap;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE);
	if (rc)
		return rc;

	if (__bnxt_hwrm_nvm_req(bp, &nvm_roce_cap, req, &roce_cap)) {
		NL_SET_ERR_MSG_MOD(extack, "Unable to verify if device is RDMA Capable");
		return -EINVAL;
	}
	if (!roce_cap.vbool) {
		NL_SET_ERR_MSG_MOD(extack, "Device does not support RDMA");
		return -EINVAL;
	}
	return 0;
}

static int bnxt_dl_msix_validate(struct devlink *dl, u32 id,
				 union devlink_param_value val,
				 struct netlink_ext_ack *extack)
{
	int max_val = -1;

	if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MAX)
		max_val = BNXT_MSIX_VEC_MAX;

	if (id == DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN)
		max_val = BNXT_MSIX_VEC_MIN_MAX;

	if (val.vu32 > max_val) {
		NL_SET_ERR_MSG_MOD(extack, "MSIX value is exceeding the range");
		return -EINVAL;
	}

	return 0;
}

static int bnxt_remote_dev_reset_get(struct devlink *dl, u32 id,
				     struct devlink_param_gset_ctx *ctx)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		return -EOPNOTSUPP;

	ctx->val.vbool = bnxt_dl_get_remote_reset(dl);
	return 0;
}

static int bnxt_remote_dev_reset_set(struct devlink *dl, u32 id,
				     struct devlink_param_gset_ctx *ctx,
				     struct netlink_ext_ack *extack)
{
	struct bnxt *bp = bnxt_get_bp_from_dl(dl);
	int rc;

	rc = bnxt_hwrm_remote_dev_reset_set(bp, ctx->val.vbool);
	if (rc)
		return rc;

	bnxt_dl_set_remote_reset(dl, ctx->val.vbool);
	return rc;
}

static const struct devlink_param bnxt_dl_params[] = {
	DEVLINK_PARAM_GENERIC(ENABLE_SRIOV,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
	DEVLINK_PARAM_GENERIC(IGNORE_ARI,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      NULL),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MAX,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_msix_validate),
	DEVLINK_PARAM_GENERIC(MSIX_VEC_PER_PF_MIN,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_msix_validate),
	DEVLINK_PARAM_GENERIC(ENABLE_ROCE,
			      BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			      bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			      bnxt_dl_roce_validate),
	DEVLINK_PARAM_DRIVER(BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK,
			     "gre_ver_check", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_PERMANENT),
			     bnxt_dl_nvm_param_get, bnxt_dl_nvm_param_set,
			     NULL),
	/* keep REMOTE_DEV_RESET last, it is excluded based on caps */
	DEVLINK_PARAM_GENERIC(ENABLE_REMOTE_DEV_RESET,
			      BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			      bnxt_remote_dev_reset_get,
			      bnxt_remote_dev_reset_set, NULL),
};

static int bnxt_dl_params_register(struct bnxt *bp)
{
	int num_params = ARRAY_SIZE(bnxt_dl_params);
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		num_params--;

	rc = devlink_params_register(bp->dl, bnxt_dl_params, num_params);
	if (rc)
		netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
			    rc);
	return rc;
}

static void bnxt_dl_params_unregister(struct bnxt *bp)
{
	int num_params = ARRAY_SIZE(bnxt_dl_params);

	if (bp->hwrm_spec_code < 0x10600)
		return;

	if (~bp->fw_cap & BNXT_FW_CAP_HOT_RESET_IF)
		num_params--;

	devlink_params_unregister(bp->dl, bnxt_dl_params, num_params);
}

int bnxt_dl_register(struct bnxt *bp)
{
	const struct devlink_ops *devlink_ops;
	struct devlink_port_attrs attrs = {};
	struct bnxt_dl *bp_dl;
	struct devlink *dl;
	int rc;

	if (BNXT_PF(bp))
		devlink_ops = &bnxt_dl_ops;
	else
		devlink_ops = &bnxt_vf_dl_ops;

	dl = devlink_alloc(devlink_ops, sizeof(struct bnxt_dl), &bp->pdev->dev);
	if (!dl) {
		netdev_warn(bp->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	bp->dl = dl;
	bp_dl = devlink_priv(dl);
	bp_dl->bp = bp;
	bnxt_dl_set_remote_reset(dl, true);

	/* Add switchdev eswitch mode setting, if SRIOV supported */
	if (pci_find_ext_capability(bp->pdev, PCI_EXT_CAP_ID_SRIOV) &&
	    bp->hwrm_spec_code > 0x10803)
		bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;

	if (!BNXT_PF(bp))
		goto out;

	attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
	attrs.phys.port_number = bp->pf.port_id;
	memcpy(attrs.switch_id.id, bp->dsn, sizeof(bp->dsn));
	attrs.switch_id.id_len = sizeof(bp->dsn);
	devlink_port_attrs_set(&bp->dl_port, &attrs);
	rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
	if (rc) {
		netdev_err(bp->dev, "devlink_port_register failed\n");
		goto err_dl_free;
	}

	rc = bnxt_dl_params_register(bp);
	if (rc)
		goto err_dl_port_unreg;

out:
	devlink_register(dl);
	return 0;

err_dl_port_unreg:
	devlink_port_unregister(&bp->dl_port);
err_dl_free:
	devlink_free(dl);
	return rc;
}

void bnxt_dl_unregister(struct bnxt *bp)
{
	struct devlink *dl = bp->dl;

	devlink_unregister(dl);
	if (BNXT_PF(bp)) {
		bnxt_dl_params_unregister(bp);
		devlink_port_unregister(&bp->dl_port);
	}
	devlink_free(dl);
}