// SPDX-License-Identifier: GPL-2.0-only
/*
 * SolidRun DPU driver for control plane
 *
 * Copyright (C) 2022-2023 SolidRun
 *
 * Author: Alvaro Karsz <alvaro.karsz@solid-run.com>
 *
 */
#include <linux/iopoll.h>

#include "snet_vdpa.h"

/* SNET DPU device ID */
#define SNET_DEVICE_ID 0x1000
/* SNET signature */
#define SNET_SIGNATURE 0xD0D06363
/* Max. config version that we can work with */
#define SNET_CFG_VERSION 0x2
/* Queue align */
#define SNET_QUEUE_ALIGNMENT PAGE_SIZE
/* Kick value to notify that new data is available */
#define SNET_KICK_VAL 0x1
#define SNET_CONFIG_OFF 0x0
/* How long we are willing to wait for a SNET device (usec) */
#define SNET_DETECT_TIMEOUT 5000000
/* How long should we wait for the DPU to read our config (usec) */
#define SNET_READ_CFG_TIMEOUT 3000000
/* Size of configs written to the DPU */
#define SNET_GENERAL_CFG_LEN 36
#define SNET_GENERAL_CFG_VQ_LEN 40

/* Map a vdpa device embedded in struct snet back to its container */
static struct snet *vdpa_to_snet(struct vdpa_device *vdpa)
{
	return container_of(vdpa, struct snet, vdpa);
}

/* Config-change interrupt handler, forwards the event to the callback
 * registered by the vDPA core (if any).
 */
static irqreturn_t snet_cfg_irq_hndlr(int irq, void *data)
{
	struct snet *snet = data;
	/* Call callback if any */
	if (likely(snet->cb.callback))
		return snet->cb.callback(snet->cb.private);

	return IRQ_HANDLED;
}

/* Per-VQ interrupt handler, forwards the event to the VQ's callback (if any) */
static irqreturn_t snet_vq_irq_hndlr(int irq, void *data)
{
	struct snet_vq *vq = data;
	/* Call callback if any */
	if (likely(vq->cb.callback))
		return vq->cb.callback(vq->cb.private);

	return IRQ_HANDLED;
}

/* Free the config IRQ and all VQ IRQs, marking them as freed (-1).
 * The IRQ lines may have been requested on the PF or the VF device,
 * depending on the SNET_CFG_FLAG_IRQ_PF flag.
 */
static void snet_free_irqs(struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	struct pci_dev *pdev;
	u32 i;

	/* Which device allocated the IRQs? */
	if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF))
		pdev = snet->pdev->physfn;
	else
		pdev = snet->pdev;

	/* Free config's IRQ */
	if (snet->cfg_irq != -1) {
		devm_free_irq(&pdev->dev, snet->cfg_irq, snet);
		snet->cfg_irq = -1;
	}
	/* Free VQ IRQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (snet->vqs[i] && snet->vqs[i]->irq != -1) {
			devm_free_irq(&pdev->dev, snet->vqs[i]->irq, snet->vqs[i]);
			snet->vqs[i]->irq = -1;
		}
	}

	/* IRQ vectors are freed when the pci remove callback is called */
}

/* vdpa_config_ops.set_vq_address: remember the VQ's ring addresses,
 * they are written to the DPU later by snet_write_conf().
 */
static int snet_set_vq_address(struct vdpa_device *vdev, u16 idx, u64 desc_area,
			       u64 driver_area, u64 device_area)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* save received parameters in vqueue struct */
	snet->vqs[idx]->desc_area = desc_area;
	snet->vqs[idx]->driver_area = driver_area;
	snet->vqs[idx]->device_area = device_area;

	return 0;
}

/* vdpa_config_ops.set_vq_num: remember the VQ size for snet_write_conf() */
static void snet_set_vq_num(struct vdpa_device *vdev, u16 idx, u32 num)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* save num in vqueue */
	snet->vqs[idx]->num = num;
}

/* vdpa_config_ops.kick_vq: notify the DPU that new data is available on VQ idx */
static void snet_kick_vq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);
	/* not ready - ignore */
	if (unlikely(!snet->vqs[idx]->ready))
		return;

	iowrite32(SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
}

/* vdpa_config_ops.kick_vq_with_data: like kick_vq, but the upper 16 bits
 * of @data carry extra information for the DPU; the lower 16 bits are
 * the VQ index.
 */
static void snet_kick_vq_with_data(struct vdpa_device *vdev, u32 data)
{
	struct snet *snet = vdpa_to_snet(vdev);
	u16 idx = data & 0xFFFF;

	/* not ready - ignore */
	if (unlikely(!snet->vqs[idx]->ready))
		return;

	iowrite32((data & 0xFFFF0000) | SNET_KICK_VAL, snet->vqs[idx]->kick_ptr);
}

/* vdpa_config_ops.set_vq_cb: register the per-VQ interrupt callback */
static void snet_set_vq_cb(struct vdpa_device *vdev, u16 idx, struct vdpa_callback *cb)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->cb.callback = cb->callback;
	snet->vqs[idx]->cb.private = cb->private;
}

/* vdpa_config_ops.set_vq_ready */
static void snet_set_vq_ready(struct vdpa_device *vdev, u16 idx, bool ready)
{
	struct snet *snet = vdpa_to_snet(vdev);

	snet->vqs[idx]->ready = ready;
}

/* vdpa_config_ops.get_vq_ready */
static bool snet_get_vq_ready(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->ready;
}

/* Return true if @state matches the initial VQ state the DPU starts from:
 * wrap counters of 1 and indexes of 0 for packed rings, avail index 0 for
 * split rings.
 */
static bool snet_vq_state_is_initial(struct snet *snet, const struct vdpa_vq_state *state)
{
	if (SNET_HAS_FEATURE(snet, VIRTIO_F_RING_PACKED)) {
		const struct vdpa_vq_state_packed *p = &state->packed;

		if (p->last_avail_counter == 1 && p->last_used_counter == 1 &&
		    p->last_avail_idx == 0 && p->last_used_idx == 0)
			return true;
	} else {
		const struct vdpa_vq_state_split *s = &state->split;

		if (s->avail_index == 0)
			return true;
	}

	return false;
}

/* vdpa_config_ops.set_vq_state: save the state, it is written to the DPU
 * by snet_write_conf() (config version 2+ only).
 */
static int snet_set_vq_state(struct vdpa_device *vdev, u16 idx, const struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	/* We can set any state for config version 2+ */
	if (SNET_CFG_VER(snet, 2)) {
		memcpy(&snet->vqs[idx]->vq_state, state, sizeof(*state));
		return 0;
	}

	/* Older config - we can't set the VQ state.
	 * Return 0 only if this is the initial state we use in the DPU.
	 */
	if (snet_vq_state_is_initial(snet, state))
		return 0;

	return -EOPNOTSUPP;
}

/* vdpa_config_ops.get_vq_state: read the current VQ state from the DPU */
static int snet_get_vq_state(struct vdpa_device *vdev, u16 idx, struct vdpa_vq_state *state)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_read_vq_state(snet, idx, state);
}

/* vdpa_config_ops.get_vq_irq: -1 if no IRQ is currently requested */
static int snet_get_vq_irq(struct vdpa_device *vdev, u16 idx)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet->vqs[idx]->irq;
}

/* vdpa_config_ops.get_vq_align */
static u32 snet_get_vq_align(struct vdpa_device *vdev)
{
	return (u32)SNET_QUEUE_ALIGNMENT;
}

/* Reset the device to a clean, unconfigured state: destroy it on the DPU
 * if it was started, clear all VQ/software state and free the IRQs.
 * Always returns 0 - an incomplete DPU-side reset is only warned about,
 * the driver-side state is reset regardless.
 */
static int snet_reset_dev(struct snet *snet)
{
	struct pci_dev *pdev = snet->pdev;
	int ret = 0;
	u32 i;

	/* If status is 0, nothing to do */
	if (!snet->status)
		return 0;

	/* If DPU started, destroy it */
	if (snet->status & VIRTIO_CONFIG_S_DRIVER_OK)
		ret = snet_destroy_dev(snet);

	/* Clear VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		if (!snet->vqs[i])
			continue;
		snet->vqs[i]->cb.callback = NULL;
		snet->vqs[i]->cb.private = NULL;
		snet->vqs[i]->desc_area = 0;
		snet->vqs[i]->device_area = 0;
		snet->vqs[i]->driver_area = 0;
		snet->vqs[i]->ready = false;
	}

	/* Clear config callback */
	snet->cb.callback = NULL;
	snet->cb.private = NULL;
	/* Free IRQs */
	snet_free_irqs(snet);
	/* Reset status */
	snet->status = 0;
	snet->dpu_ready = false;

	if (ret)
		SNET_WARN(pdev, "Incomplete reset to SNET[%u] device, err: %d\n", snet->sid, ret);
	else
		SNET_DBG(pdev, "Reset SNET[%u] device\n", snet->sid);

	return 0;
}

/* vdpa_config_ops.reset */
static int snet_reset(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return snet_reset_dev(snet);
}

/* vdpa_config_ops.get_config_size */
static size_t snet_get_config_size(struct vdpa_device *vdev)
{
	struct snet *snet = vdpa_to_snet(vdev);

	return (size_t)snet->cfg->cfg_size;
}

static u64
snet_get_features(struct vdpa_device *vdev) 263 { 264 struct snet *snet = vdpa_to_snet(vdev); 265 266 return snet->cfg->features; 267 } 268 269 static int snet_set_drv_features(struct vdpa_device *vdev, u64 features) 270 { 271 struct snet *snet = vdpa_to_snet(vdev); 272 273 snet->negotiated_features = snet->cfg->features & features; 274 return 0; 275 } 276 277 static u64 snet_get_drv_features(struct vdpa_device *vdev) 278 { 279 struct snet *snet = vdpa_to_snet(vdev); 280 281 return snet->negotiated_features; 282 } 283 284 static u16 snet_get_vq_num_max(struct vdpa_device *vdev) 285 { 286 struct snet *snet = vdpa_to_snet(vdev); 287 288 return (u16)snet->cfg->vq_size; 289 } 290 291 static void snet_set_config_cb(struct vdpa_device *vdev, struct vdpa_callback *cb) 292 { 293 struct snet *snet = vdpa_to_snet(vdev); 294 295 snet->cb.callback = cb->callback; 296 snet->cb.private = cb->private; 297 } 298 299 static u32 snet_get_device_id(struct vdpa_device *vdev) 300 { 301 struct snet *snet = vdpa_to_snet(vdev); 302 303 return snet->cfg->virtio_id; 304 } 305 306 static u32 snet_get_vendor_id(struct vdpa_device *vdev) 307 { 308 return (u32)PCI_VENDOR_ID_SOLIDRUN; 309 } 310 311 static u8 snet_get_status(struct vdpa_device *vdev) 312 { 313 struct snet *snet = vdpa_to_snet(vdev); 314 315 return snet->status; 316 } 317 318 static int snet_write_conf(struct snet *snet) 319 { 320 u32 off, i, tmp; 321 int ret; 322 323 /* No need to write the config twice */ 324 if (snet->dpu_ready) 325 return true; 326 327 /* Snet data : 328 * 329 * General data: SNET_GENERAL_CFG_LEN bytes long 330 * 0 0x4 0x8 0xC 0x10 0x14 0x1C 0x24 331 * | MAGIC NUMBER | CFG VER | SNET SID | NUMBER OF QUEUES | IRQ IDX | FEATURES | RSVD | 332 * 333 * For every VQ: SNET_GENERAL_CFG_VQ_LEN bytes long 334 * 0 0x4 0x8 335 * | VQ SID AND QUEUE SIZE | IRQ Index | 336 * | DESC AREA | 337 * | DEVICE AREA | 338 * | DRIVER AREA | 339 * | VQ STATE (CFG 2+) | RSVD | 340 * 341 * Magic number should be written last, this is 
the DPU indication that the data is ready 342 */ 343 344 /* Init offset */ 345 off = snet->psnet->cfg.host_cfg_off; 346 347 /* Ignore magic number for now */ 348 off += 4; 349 snet_write32(snet, off, snet->psnet->negotiated_cfg_ver); 350 off += 4; 351 snet_write32(snet, off, snet->sid); 352 off += 4; 353 snet_write32(snet, off, snet->cfg->vq_num); 354 off += 4; 355 snet_write32(snet, off, snet->cfg_irq_idx); 356 off += 4; 357 snet_write64(snet, off, snet->negotiated_features); 358 off += 8; 359 /* Ignore reserved */ 360 off += 8; 361 /* Write VQs */ 362 for (i = 0 ; i < snet->cfg->vq_num ; i++) { 363 tmp = (i << 16) | (snet->vqs[i]->num & 0xFFFF); 364 snet_write32(snet, off, tmp); 365 off += 4; 366 snet_write32(snet, off, snet->vqs[i]->irq_idx); 367 off += 4; 368 snet_write64(snet, off, snet->vqs[i]->desc_area); 369 off += 8; 370 snet_write64(snet, off, snet->vqs[i]->device_area); 371 off += 8; 372 snet_write64(snet, off, snet->vqs[i]->driver_area); 373 off += 8; 374 /* Write VQ state if config version is 2+ */ 375 if (SNET_CFG_VER(snet, 2)) 376 snet_write32(snet, off, *(u32 *)&snet->vqs[i]->vq_state); 377 off += 4; 378 379 /* Ignore reserved */ 380 off += 4; 381 } 382 383 /* Write magic number - data is ready */ 384 snet_write32(snet, snet->psnet->cfg.host_cfg_off, SNET_SIGNATURE); 385 386 /* The DPU will ACK the config by clearing the signature */ 387 ret = readx_poll_timeout(ioread32, snet->bar + snet->psnet->cfg.host_cfg_off, 388 tmp, !tmp, 10, SNET_READ_CFG_TIMEOUT); 389 if (ret) { 390 SNET_ERR(snet->pdev, "Timeout waiting for the DPU to read the config\n"); 391 return false; 392 } 393 394 /* set DPU flag */ 395 snet->dpu_ready = true; 396 397 return true; 398 } 399 400 static int snet_request_irqs(struct pci_dev *pdev, struct snet *snet) 401 { 402 int ret, i, irq; 403 404 /* Request config IRQ */ 405 irq = pci_irq_vector(pdev, snet->cfg_irq_idx); 406 ret = devm_request_irq(&pdev->dev, irq, snet_cfg_irq_hndlr, 0, 407 snet->cfg_irq_name, snet); 408 if (ret) { 
409 SNET_ERR(pdev, "Failed to request IRQ\n"); 410 return ret; 411 } 412 snet->cfg_irq = irq; 413 414 /* Request IRQ for every VQ */ 415 for (i = 0; i < snet->cfg->vq_num; i++) { 416 irq = pci_irq_vector(pdev, snet->vqs[i]->irq_idx); 417 ret = devm_request_irq(&pdev->dev, irq, snet_vq_irq_hndlr, 0, 418 snet->vqs[i]->irq_name, snet->vqs[i]); 419 if (ret) { 420 SNET_ERR(pdev, "Failed to request IRQ\n"); 421 return ret; 422 } 423 snet->vqs[i]->irq = irq; 424 } 425 return 0; 426 } 427 428 static void snet_set_status(struct vdpa_device *vdev, u8 status) 429 { 430 struct snet *snet = vdpa_to_snet(vdev); 431 struct psnet *psnet = snet->psnet; 432 struct pci_dev *pdev = snet->pdev; 433 int ret; 434 bool pf_irqs; 435 436 if (status == snet->status) 437 return; 438 439 if ((status & VIRTIO_CONFIG_S_DRIVER_OK) && 440 !(snet->status & VIRTIO_CONFIG_S_DRIVER_OK)) { 441 /* Request IRQs */ 442 pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF); 443 ret = snet_request_irqs(pf_irqs ? pdev->physfn : pdev, snet); 444 if (ret) 445 goto set_err; 446 447 /* Write config to the DPU */ 448 if (snet_write_conf(snet)) { 449 SNET_INFO(pdev, "Create SNET[%u] device\n", snet->sid); 450 } else { 451 snet_free_irqs(snet); 452 goto set_err; 453 } 454 } 455 456 /* Save the new status */ 457 snet->status = status; 458 return; 459 460 set_err: 461 snet->status |= VIRTIO_CONFIG_S_FAILED; 462 } 463 464 static void snet_get_config(struct vdpa_device *vdev, unsigned int offset, 465 void *buf, unsigned int len) 466 { 467 struct snet *snet = vdpa_to_snet(vdev); 468 void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset; 469 u8 *buf_ptr = buf; 470 u32 i; 471 472 /* check for offset error */ 473 if (offset + len > snet->cfg->cfg_size) 474 return; 475 476 /* Write into buffer */ 477 for (i = 0; i < len; i++) 478 *buf_ptr++ = ioread8(cfg_ptr + i); 479 } 480 481 static void snet_set_config(struct vdpa_device *vdev, unsigned int offset, 482 const void *buf, unsigned int len) 483 { 484 struct snet *snet = 
vdpa_to_snet(vdev); 485 void __iomem *cfg_ptr = snet->cfg->virtio_cfg + offset; 486 const u8 *buf_ptr = buf; 487 u32 i; 488 489 /* check for offset error */ 490 if (offset + len > snet->cfg->cfg_size) 491 return; 492 493 /* Write into PCI BAR */ 494 for (i = 0; i < len; i++) 495 iowrite8(*buf_ptr++, cfg_ptr + i); 496 } 497 498 static int snet_suspend(struct vdpa_device *vdev) 499 { 500 struct snet *snet = vdpa_to_snet(vdev); 501 int ret; 502 503 ret = snet_suspend_dev(snet); 504 if (ret) 505 SNET_ERR(snet->pdev, "SNET[%u] suspend failed, err: %d\n", snet->sid, ret); 506 else 507 SNET_DBG(snet->pdev, "Suspend SNET[%u] device\n", snet->sid); 508 509 return ret; 510 } 511 512 static int snet_resume(struct vdpa_device *vdev) 513 { 514 struct snet *snet = vdpa_to_snet(vdev); 515 int ret; 516 517 ret = snet_resume_dev(snet); 518 if (ret) 519 SNET_ERR(snet->pdev, "SNET[%u] resume failed, err: %d\n", snet->sid, ret); 520 else 521 SNET_DBG(snet->pdev, "Resume SNET[%u] device\n", snet->sid); 522 523 return ret; 524 } 525 526 static const struct vdpa_config_ops snet_config_ops = { 527 .set_vq_address = snet_set_vq_address, 528 .set_vq_num = snet_set_vq_num, 529 .kick_vq = snet_kick_vq, 530 .kick_vq_with_data = snet_kick_vq_with_data, 531 .set_vq_cb = snet_set_vq_cb, 532 .set_vq_ready = snet_set_vq_ready, 533 .get_vq_ready = snet_get_vq_ready, 534 .set_vq_state = snet_set_vq_state, 535 .get_vq_state = snet_get_vq_state, 536 .get_vq_irq = snet_get_vq_irq, 537 .get_vq_align = snet_get_vq_align, 538 .reset = snet_reset, 539 .get_config_size = snet_get_config_size, 540 .get_device_features = snet_get_features, 541 .set_driver_features = snet_set_drv_features, 542 .get_driver_features = snet_get_drv_features, 543 .get_vq_num_min = snet_get_vq_num_max, 544 .get_vq_num_max = snet_get_vq_num_max, 545 .set_config_cb = snet_set_config_cb, 546 .get_device_id = snet_get_device_id, 547 .get_vendor_id = snet_get_vendor_id, 548 .get_status = snet_get_status, 549 .set_status = 
snet_set_status, 550 .get_config = snet_get_config, 551 .set_config = snet_set_config, 552 .suspend = snet_suspend, 553 .resume = snet_resume, 554 }; 555 556 static int psnet_open_pf_bar(struct pci_dev *pdev, struct psnet *psnet) 557 { 558 char *name; 559 unsigned short i; 560 bool bars_found = false; 561 562 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "psnet[%s]-bars", pci_name(pdev)); 563 if (!name) 564 return -ENOMEM; 565 566 /* We don't know which BAR will be used to communicate.. 567 * We will map every bar with len > 0. 568 * 569 * Later, we will discover the BAR and unmap all other BARs. 570 */ 571 for (i = 0; i < PCI_STD_NUM_BARS; i++) { 572 void __iomem *io; 573 574 if (pci_resource_len(pdev, i) == 0) 575 continue; 576 577 io = pcim_iomap_region(pdev, i, name); 578 if (IS_ERR(io)) { 579 SNET_ERR(pdev, "Failed to request and map PCI BARs\n"); 580 return PTR_ERR(io); 581 } 582 583 psnet->bars[i] = io; 584 bars_found = true; 585 } 586 587 /* No BAR can be used.. */ 588 if (!bars_found) { 589 SNET_ERR(pdev, "Failed to find a PCI BAR\n"); 590 return -ENODEV; 591 } 592 593 return 0; 594 } 595 596 static int snet_open_vf_bar(struct pci_dev *pdev, struct snet *snet) 597 { 598 char *name; 599 void __iomem *io; 600 601 name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "snet[%s]-bars", pci_name(pdev)); 602 if (!name) 603 return -ENOMEM; 604 605 /* Request and map BAR */ 606 io = pcim_iomap_region(pdev, snet->psnet->cfg.vf_bar, name); 607 if (IS_ERR(io)) { 608 SNET_ERR(pdev, "Failed to request and map PCI BAR for a VF\n"); 609 return PTR_ERR(io); 610 } 611 612 snet->bar = io; 613 614 return 0; 615 } 616 617 static void snet_free_cfg(struct snet_cfg *cfg) 618 { 619 u32 i; 620 621 if (!cfg->devs) 622 return; 623 624 /* Free devices */ 625 for (i = 0; i < cfg->devices_num; i++) { 626 if (!cfg->devs[i]) 627 break; 628 629 kfree(cfg->devs[i]); 630 } 631 /* Free pointers to devices */ 632 kfree(cfg->devs); 633 } 634 635 /* Detect which BAR is used for communication with the 
 * device. */
static int psnet_detect_bar(struct psnet *psnet, u32 off)
{
	unsigned long exit_time;
	int i;

	exit_time = jiffies + usecs_to_jiffies(SNET_DETECT_TIMEOUT);

	/* SNET DPU will write SNET's signature when the config is ready. */
	while (time_before(jiffies, exit_time)) {
		for (i = 0; i < PCI_STD_NUM_BARS; i++) {
			/* Is this BAR mapped? */
			if (!psnet->bars[i])
				continue;

			if (ioread32(psnet->bars[i] + off) == SNET_SIGNATURE)
				return i;
		}
		usleep_range(1000, 10000);
	}

	/* Timed out - no BAR carries the signature */
	return -ENODEV;
}

/* Unmap every mapped BAR except the one detected for communication */
static void psnet_unmap_unused_bars(struct pci_dev *pdev, struct psnet *psnet)
{
	unsigned short i;

	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
		if (psnet->bars[i] && i != psnet->barno)
			pcim_iounmap_region(pdev, i);
	}
}

/* Read SNET config from PCI BAR.
 * Detects the communication BAR, then reads the global config (offsets,
 * flags, number of VFs) followed by one config block per device.
 * On success the per-device configs are allocated and must be released
 * with snet_free_cfg().
 */
static int psnet_read_cfg(struct pci_dev *pdev, struct psnet *psnet)
{
	struct snet_cfg *cfg = &psnet->cfg;
	u32 i, off;
	int barno;

	/* Move to where the config starts */
	off = SNET_CONFIG_OFF;

	/* Find BAR used for communication */
	barno = psnet_detect_bar(psnet, off);
	if (barno < 0) {
		SNET_ERR(pdev, "SNET config is not ready.\n");
		return barno;
	}

	/* Save used BAR number and unmap all other BARs */
	psnet->barno = barno;
	SNET_DBG(pdev, "Using BAR number %d\n", barno);

	psnet_unmap_unused_bars(pdev, psnet);

	/* load config from BAR */
	cfg->key = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_size = psnet_read32(psnet, off);
	off += 4;
	cfg->cfg_ver = psnet_read32(psnet, off);
	off += 4;
	/* The negotiated config version is the lower one between this driver's config
	 * and the DPU's.
	 */
	psnet->negotiated_cfg_ver = min_t(u32, cfg->cfg_ver, SNET_CFG_VERSION);
	SNET_DBG(pdev, "SNET config version %u\n", psnet->negotiated_cfg_ver);

	cfg->vf_num = psnet_read32(psnet, off);
	off += 4;
	cfg->vf_bar = psnet_read32(psnet, off);
	off += 4;
	cfg->host_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->max_size_host_cfg = psnet_read32(psnet, off);
	off += 4;
	cfg->virtio_cfg_off = psnet_read32(psnet, off);
	off += 4;
	cfg->kick_off = psnet_read32(psnet, off);
	off += 4;
	cfg->hwmon_off = psnet_read32(psnet, off);
	off += 4;
	cfg->ctrl_off = psnet_read32(psnet, off);
	off += 4;
	cfg->flags = psnet_read32(psnet, off);
	off += 4;
	/* Ignore Reserved */
	off += sizeof(cfg->rsvd);

	cfg->devices_num = psnet_read32(psnet, off);
	off += 4;
	/* Allocate memory to hold pointer to the devices */
	cfg->devs = kcalloc(cfg->devices_num, sizeof(void *), GFP_KERNEL);
	if (!cfg->devs)
		return -ENOMEM;

	/* Load device configuration from BAR */
	for (i = 0; i < cfg->devices_num; i++) {
		cfg->devs[i] = kzalloc(sizeof(*cfg->devs[i]), GFP_KERNEL);
		if (!cfg->devs[i]) {
			snet_free_cfg(cfg);
			return -ENOMEM;
		}
		/* Read device config */
		cfg->devs[i]->virtio_id = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_num = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vq_size = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->vfid = psnet_read32(psnet, off);
		off += 4;
		cfg->devs[i]->features = psnet_read64(psnet, off);
		off += 8;
		/* Ignore Reserved */
		off += sizeof(cfg->devs[i]->rsvd);

		cfg->devs[i]->cfg_size = psnet_read32(psnet, off);
		off += 4;

		/* Is the config written to the DPU going to be too big? */
		if (SNET_GENERAL_CFG_LEN + SNET_GENERAL_CFG_VQ_LEN * cfg->devs[i]->vq_num >
		    cfg->max_size_host_cfg) {
			SNET_ERR(pdev, "Failed to read SNET config, the config is too big..\n");
			snet_free_cfg(cfg);
			return -EINVAL;
		}
	}
	return 0;
}

/* Allocate all MSI-X vectors for the PF: one per VQ of every device,
 * plus one per device for config changes.
 */
static int psnet_alloc_irq_vector(struct pci_dev *pdev, struct psnet *psnet)
{
	int ret = 0;
	u32 i, irq_num = 0;

	/* Let's count how many IRQs we need, 1 for every VQ + 1 for config change */
	for (i = 0; i < psnet->cfg.devices_num; i++)
		irq_num += psnet->cfg.devs[i]->vq_num + 1;

	/* min == max, so any other return value is an error */
	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret != irq_num) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}
	SNET_DBG(pdev, "Allocated %u IRQ vectors from physical function\n", irq_num);

	return 0;
}

/* Allocate the MSI-X vectors for a single VF: one per VQ plus one for
 * config change events.
 */
static int snet_alloc_irq_vector(struct pci_dev *pdev, struct snet_dev_cfg *snet_cfg)
{
	int ret = 0;
	u32 irq_num;

	/* We want 1 IRQ for every VQ + 1 for config change events */
	irq_num = snet_cfg->vq_num + 1;

	ret = pci_alloc_irq_vectors(pdev, irq_num, irq_num, PCI_IRQ_MSIX);
	if (ret <= 0) {
		SNET_ERR(pdev, "Failed to allocate IRQ vectors\n");
		return ret;
	}

	return 0;
}

/* Free the VQ structs and the pointer array allocated by snet_build_vqs().
 * A NULL entry marks the end of the populated part.
 */
static void snet_free_vqs(struct snet *snet)
{
	u32 i;

	if (!snet->vqs)
		return;

	for (i = 0 ; i < snet->cfg->vq_num ; i++) {
		if (!snet->vqs[i])
			break;

		kfree(snet->vqs[i]);
	}
	kfree(snet->vqs);
}

/* Allocate and initialize the VQ structs: serial IDs, kick addresses
 * in the BAR, and cleared IRQ numbers.
 */
static int snet_build_vqs(struct snet *snet)
{
	u32 i;
	/* Allocate the VQ pointers array */
	snet->vqs = kcalloc(snet->cfg->vq_num, sizeof(void *), GFP_KERNEL);
	if (!snet->vqs)
		return -ENOMEM;

	/* Allocate the VQs */
	for (i = 0; i < snet->cfg->vq_num; i++) {
		snet->vqs[i] = kzalloc(sizeof(*snet->vqs[i]), GFP_KERNEL);
		if (!snet->vqs[i]) {
			snet_free_vqs(snet);
			return -ENOMEM;
		}
		/* Reset IRQ num */
		snet->vqs[i]->irq = -1;
		/* VQ serial ID */
		snet->vqs[i]->sid = i;
		/* Kick address - every VQ gets 4B */
		snet->vqs[i]->kick_ptr = snet->bar + snet->psnet->cfg.kick_off +
					 snet->vqs[i]->sid * 4;
		/* Clear kick address for this VQ */
		iowrite32(0, snet->vqs[i]->kick_ptr);
	}
	return 0;
}

/* Hand out the next free MSI-X vector index (shared counter on the PF) */
static int psnet_get_next_irq_num(struct psnet *psnet)
{
	int irq;

	spin_lock(&psnet->lock);
	irq = psnet->next_irq++;
	spin_unlock(&psnet->lock);

	return irq;
}

/* Reserve a vector index and build an IRQ name for the config IRQ and
 * for every VQ.  The indexes stay fixed for the device's lifetime, even
 * though the IRQs themselves may be requested and freed multiple times.
 */
static void snet_reserve_irq_idx(struct pci_dev *pdev, struct snet *snet)
{
	struct psnet *psnet = snet->psnet;
	int i;

	/* one IRQ for every VQ, and one for config changes */
	snet->cfg_irq_idx = psnet_get_next_irq_num(psnet);
	snprintf(snet->cfg_irq_name, SNET_NAME_SIZE, "snet[%s]-cfg[%d]",
		 pci_name(pdev), snet->cfg_irq_idx);

	for (i = 0; i < snet->cfg->vq_num; i++) {
		/* Get next free IRQ ID */
		snet->vqs[i]->irq_idx = psnet_get_next_irq_num(psnet);
		/* Write IRQ name */
		snprintf(snet->vqs[i]->irq_name, SNET_NAME_SIZE, "snet[%s]-vq[%d]",
			 pci_name(pdev), snet->vqs[i]->irq_idx);
	}
}

/* Find a device config based on virtual function id */
static struct snet_dev_cfg *snet_find_dev_cfg(struct snet_cfg *cfg, u32 vfid)
{
	u32 i;

	for (i = 0; i < cfg->devices_num; i++) {
		if (cfg->devs[i]->vfid == vfid)
			return cfg->devs[i];
	}
	/* Oops.. no config found..
*/ 888 return NULL; 889 } 890 891 /* Probe function for a physical PCI function */ 892 static int snet_vdpa_probe_pf(struct pci_dev *pdev) 893 { 894 struct psnet *psnet; 895 int ret = 0; 896 bool pf_irqs = false; 897 898 ret = pcim_enable_device(pdev); 899 if (ret) { 900 SNET_ERR(pdev, "Failed to enable PCI device\n"); 901 return ret; 902 } 903 904 /* Allocate a PCI physical function device */ 905 psnet = kzalloc(sizeof(*psnet), GFP_KERNEL); 906 if (!psnet) 907 return -ENOMEM; 908 909 /* Init PSNET spinlock */ 910 spin_lock_init(&psnet->lock); 911 912 pci_set_master(pdev); 913 pci_set_drvdata(pdev, psnet); 914 915 /* Open SNET MAIN BAR */ 916 ret = psnet_open_pf_bar(pdev, psnet); 917 if (ret) 918 goto free_psnet; 919 920 /* Try to read SNET's config from PCI BAR */ 921 ret = psnet_read_cfg(pdev, psnet); 922 if (ret) 923 goto free_psnet; 924 925 /* If SNET_CFG_FLAG_IRQ_PF flag is set, we should use 926 * PF MSI-X vectors 927 */ 928 pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF); 929 930 if (pf_irqs) { 931 ret = psnet_alloc_irq_vector(pdev, psnet); 932 if (ret) 933 goto free_cfg; 934 } 935 936 SNET_DBG(pdev, "Enable %u virtual functions\n", psnet->cfg.vf_num); 937 ret = pci_enable_sriov(pdev, psnet->cfg.vf_num); 938 if (ret) { 939 SNET_ERR(pdev, "Failed to enable SR-IOV\n"); 940 goto free_irq; 941 } 942 943 /* Create HW monitor device */ 944 if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_HWMON)) { 945 #if IS_ENABLED(CONFIG_HWMON) 946 psnet_create_hwmon(pdev); 947 #else 948 SNET_WARN(pdev, "Can't start HWMON, CONFIG_HWMON is not enabled\n"); 949 #endif 950 } 951 952 return 0; 953 954 free_irq: 955 if (pf_irqs) 956 pci_free_irq_vectors(pdev); 957 free_cfg: 958 snet_free_cfg(&psnet->cfg); 959 free_psnet: 960 kfree(psnet); 961 return ret; 962 } 963 964 /* Probe function for a virtual PCI function */ 965 static int snet_vdpa_probe_vf(struct pci_dev *pdev) 966 { 967 struct pci_dev *pdev_pf = pdev->physfn; 968 struct psnet *psnet = pci_get_drvdata(pdev_pf); 969 struct 
snet_dev_cfg *dev_cfg; 970 struct snet *snet; 971 u32 vfid; 972 int ret; 973 bool pf_irqs = false; 974 975 /* Get virtual function id. 976 * (the DPU counts the VFs from 1) 977 */ 978 ret = pci_iov_vf_id(pdev); 979 if (ret < 0) { 980 SNET_ERR(pdev, "Failed to find a VF id\n"); 981 return ret; 982 } 983 vfid = ret + 1; 984 985 /* Find the snet_dev_cfg based on vfid */ 986 dev_cfg = snet_find_dev_cfg(&psnet->cfg, vfid); 987 if (!dev_cfg) { 988 SNET_WARN(pdev, "Failed to find a VF config..\n"); 989 return -ENODEV; 990 } 991 992 /* Which PCI device should allocate the IRQs? 993 * If the SNET_CFG_FLAG_IRQ_PF flag set, the PF device allocates the IRQs 994 */ 995 pf_irqs = PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF); 996 997 ret = pcim_enable_device(pdev); 998 if (ret) { 999 SNET_ERR(pdev, "Failed to enable PCI VF device\n"); 1000 return ret; 1001 } 1002 1003 /* Request for MSI-X IRQs */ 1004 if (!pf_irqs) { 1005 ret = snet_alloc_irq_vector(pdev, dev_cfg); 1006 if (ret) 1007 return ret; 1008 } 1009 1010 /* Allocate vdpa device */ 1011 snet = vdpa_alloc_device(struct snet, vdpa, &pdev->dev, &snet_config_ops, 1, 1, NULL, 1012 false); 1013 if (!snet) { 1014 SNET_ERR(pdev, "Failed to allocate a vdpa device\n"); 1015 ret = -ENOMEM; 1016 goto free_irqs; 1017 } 1018 1019 /* Init control mutex and spinlock */ 1020 mutex_init(&snet->ctrl_lock); 1021 spin_lock_init(&snet->ctrl_spinlock); 1022 1023 /* Save pci device pointer */ 1024 snet->pdev = pdev; 1025 snet->psnet = psnet; 1026 snet->cfg = dev_cfg; 1027 snet->dpu_ready = false; 1028 snet->sid = vfid; 1029 /* Reset IRQ value */ 1030 snet->cfg_irq = -1; 1031 1032 ret = snet_open_vf_bar(pdev, snet); 1033 if (ret) 1034 goto put_device; 1035 1036 /* Create a VirtIO config pointer */ 1037 snet->cfg->virtio_cfg = snet->bar + snet->psnet->cfg.virtio_cfg_off; 1038 1039 /* Clear control registers */ 1040 snet_ctrl_clear(snet); 1041 1042 pci_set_master(pdev); 1043 pci_set_drvdata(pdev, snet); 1044 1045 ret = snet_build_vqs(snet); 1046 if 
(ret) 1047 goto put_device; 1048 1049 /* Reserve IRQ indexes, 1050 * The IRQs may be requested and freed multiple times, 1051 * but the indexes won't change. 1052 */ 1053 snet_reserve_irq_idx(pf_irqs ? pdev_pf : pdev, snet); 1054 1055 /*set DMA device*/ 1056 snet->vdpa.dma_dev = &pdev->dev; 1057 1058 /* Register VDPA device */ 1059 ret = vdpa_register_device(&snet->vdpa, snet->cfg->vq_num); 1060 if (ret) { 1061 SNET_ERR(pdev, "Failed to register vdpa device\n"); 1062 goto free_vqs; 1063 } 1064 1065 return 0; 1066 1067 free_vqs: 1068 snet_free_vqs(snet); 1069 put_device: 1070 put_device(&snet->vdpa.dev); 1071 free_irqs: 1072 if (!pf_irqs) 1073 pci_free_irq_vectors(pdev); 1074 return ret; 1075 } 1076 1077 static int snet_vdpa_probe(struct pci_dev *pdev, const struct pci_device_id *id) 1078 { 1079 if (pdev->is_virtfn) 1080 return snet_vdpa_probe_vf(pdev); 1081 else 1082 return snet_vdpa_probe_pf(pdev); 1083 } 1084 1085 static void snet_vdpa_remove_pf(struct pci_dev *pdev) 1086 { 1087 struct psnet *psnet = pci_get_drvdata(pdev); 1088 1089 pci_disable_sriov(pdev); 1090 /* If IRQs are allocated from the PF, we should free the IRQs */ 1091 if (PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF)) 1092 pci_free_irq_vectors(pdev); 1093 1094 snet_free_cfg(&psnet->cfg); 1095 kfree(psnet); 1096 } 1097 1098 static void snet_vdpa_remove_vf(struct pci_dev *pdev) 1099 { 1100 struct snet *snet = pci_get_drvdata(pdev); 1101 struct psnet *psnet = snet->psnet; 1102 1103 vdpa_unregister_device(&snet->vdpa); 1104 snet_free_vqs(snet); 1105 /* If IRQs are allocated from the VF, we should free the IRQs */ 1106 if (!PSNET_FLAG_ON(psnet, SNET_CFG_FLAG_IRQ_PF)) 1107 pci_free_irq_vectors(pdev); 1108 } 1109 1110 static void snet_vdpa_remove(struct pci_dev *pdev) 1111 { 1112 if (pdev->is_virtfn) 1113 snet_vdpa_remove_vf(pdev); 1114 else 1115 snet_vdpa_remove_pf(pdev); 1116 } 1117 1118 static struct pci_device_id snet_driver_pci_ids[] = { 1119 { PCI_DEVICE_SUB(PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID, 1120 
PCI_VENDOR_ID_SOLIDRUN, SNET_DEVICE_ID) }, 1121 { 0 }, 1122 }; 1123 1124 MODULE_DEVICE_TABLE(pci, snet_driver_pci_ids); 1125 1126 static struct pci_driver snet_vdpa_driver = { 1127 .name = "snet-vdpa-driver", 1128 .id_table = snet_driver_pci_ids, 1129 .probe = snet_vdpa_probe, 1130 .remove = snet_vdpa_remove, 1131 }; 1132 1133 module_pci_driver(snet_vdpa_driver); 1134 1135 MODULE_AUTHOR("Alvaro Karsz <alvaro.karsz@solid-run.com>"); 1136 MODULE_DESCRIPTION("SolidRun vDPA driver"); 1137 MODULE_LICENSE("GPL v2"); 1138