1 // SPDX-License-Identifier: GPL-2.0 2 /* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ 3 #include <linux/init.h> 4 #include <linux/kernel.h> 5 #include <linux/module.h> 6 #include <linux/pci.h> 7 #include <linux/device.h> 8 #include <linux/sched/task.h> 9 #include <linux/io-64-nonatomic-lo-hi.h> 10 #include <linux/cdev.h> 11 #include <linux/fs.h> 12 #include <linux/poll.h> 13 #include <linux/iommu.h> 14 #include <linux/highmem.h> 15 #include <uapi/linux/idxd.h> 16 #include <linux/xarray.h> 17 #include "registers.h" 18 #include "idxd.h" 19 20 struct idxd_cdev_context { 21 const char *name; 22 dev_t devt; 23 struct ida minor_ida; 24 }; 25 26 /* 27 * Since user file names are global in DSA devices, define their ida's as 28 * global to avoid conflict file names. 29 */ 30 static DEFINE_IDA(file_ida); 31 32 /* 33 * ictx is an array based off of accelerator types. enum idxd_type 34 * is used as index 35 */ 36 static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = { 37 { .name = "dsa" }, 38 { .name = "iax" } 39 }; 40 41 struct idxd_user_context { 42 struct idxd_wq *wq; 43 struct task_struct *task; 44 unsigned int pasid; 45 struct mm_struct *mm; 46 unsigned int flags; 47 struct iommu_sva *sva; 48 struct idxd_dev idxd_dev; 49 u64 counters[COUNTER_MAX]; 50 int id; 51 pid_t pid; 52 }; 53 54 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid); 55 static void idxd_xa_pasid_remove(struct idxd_user_context *ctx); 56 57 static inline struct idxd_user_context *dev_to_uctx(struct device *dev) 58 { 59 struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev); 60 61 return container_of(idxd_dev, struct idxd_user_context, idxd_dev); 62 } 63 64 static ssize_t cr_faults_show(struct device *dev, struct device_attribute *attr, char *buf) 65 { 66 struct idxd_user_context *ctx = dev_to_uctx(dev); 67 68 return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULTS]); 69 } 70 static DEVICE_ATTR_RO(cr_faults); 71 72 static ssize_t cr_fault_failures_show(struct device *dev, 73 struct device_attribute *attr, char *buf) 74 { 75 struct idxd_user_context *ctx = dev_to_uctx(dev); 76 77 return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULT_FAILS]); 78 } 79 static DEVICE_ATTR_RO(cr_fault_failures); 80 81 static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf) 82 { 83 struct idxd_user_context *ctx = dev_to_uctx(dev); 84 85 return sysfs_emit(buf, "%u\n", ctx->pid); 86 } 87 static DEVICE_ATTR_RO(pid); 88 89 static struct attribute *cdev_file_attributes[] = { 90 &dev_attr_cr_faults.attr, 91 &dev_attr_cr_fault_failures.attr, 92 &dev_attr_pid.attr, 93 NULL 94 }; 95 96 static umode_t cdev_file_attr_visible(struct kobject *kobj, struct attribute *a, int n) 97 { 98 struct device *dev = container_of(kobj, typeof(*dev), kobj); 99 struct idxd_user_context *ctx = dev_to_uctx(dev); 100 struct idxd_wq *wq = ctx->wq; 101 102 if (!wq_pasid_enabled(wq)) 103 return 0; 104 105 return a->mode; 106 } 107 108 static const struct attribute_group cdev_file_attribute_group = { 109 .attrs = cdev_file_attributes, 110 .is_visible = cdev_file_attr_visible, 111 }; 112 113 static const struct attribute_group *cdev_file_attribute_groups[] = { 114 &cdev_file_attribute_group, 115 NULL 116 }; 117 118 static void idxd_file_dev_release(struct device *dev) 119 { 120 struct idxd_user_context *ctx = dev_to_uctx(dev); 121 struct idxd_wq *wq = ctx->wq; 122 struct idxd_device *idxd = wq->idxd; 123 int rc; 124 125 ida_free(&file_ida, ctx->id); 126 127 /* Wait for in-flight operations to complete. */ 128 if (wq_shared(wq)) { 129 idxd_device_drain_pasid(idxd, ctx->pasid); 130 } else { 131 if (device_user_pasid_enabled(idxd)) { 132 /* The wq disable in the disable pasid function will drain the wq */ 133 rc = idxd_wq_disable_pasid(wq); 134 if (rc < 0) 135 dev_err(dev, "wq disable pasid failed.\n"); 136 } else { 137 idxd_wq_drain(wq); 138 } 139 } 140 141 if (ctx->sva) { 142 idxd_cdev_evl_drain_pasid(wq, ctx->pasid); 143 iommu_sva_unbind_device(ctx->sva); 144 idxd_xa_pasid_remove(ctx); 145 } 146 kfree(ctx); 147 mutex_lock(&wq->wq_lock); 148 idxd_wq_put(wq); 149 mutex_unlock(&wq->wq_lock); 150 } 151 152 static const struct device_type idxd_cdev_file_type = { 153 .name = "idxd_file", 154 .release = idxd_file_dev_release, 155 .groups = cdev_file_attribute_groups, 156 }; 157 158 static void idxd_cdev_dev_release(struct device *dev) 159 { 160 struct idxd_cdev *idxd_cdev = dev_to_cdev(dev); 161 struct idxd_cdev_context *cdev_ctx; 162 struct idxd_wq *wq = idxd_cdev->wq; 163 164 cdev_ctx = &ictx[wq->idxd->data->type]; 165 ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor); 166 kfree(idxd_cdev); 167 } 168 169 static const struct device_type idxd_cdev_device_type = { 170 .name = "idxd_cdev", 171 .release = idxd_cdev_dev_release, 172 }; 173 174 static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode) 175 { 176 struct cdev *cdev = inode->i_cdev; 177 178 return container_of(cdev, struct idxd_cdev, cdev); 179 } 180 181 static inline struct idxd_wq *inode_wq(struct inode *inode) 182 { 183 struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode); 184 185 return idxd_cdev->wq; 186 } 187 188 static void idxd_xa_pasid_remove(struct idxd_user_context *ctx) 189 { 190 struct idxd_wq *wq = ctx->wq; 191 void *ptr; 192 193 mutex_lock(&wq->uc_lock); 194 ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL); 195 if (ptr != (void *)ctx) 196 dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n", 197 ctx->pasid); 198 mutex_unlock(&wq->uc_lock); 199 } 200 201 void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index) 202 { 203 struct idxd_user_context *ctx; 204 205 if (index >= COUNTER_MAX) 206 return; 207 208 mutex_lock(&wq->uc_lock); 209 ctx = xa_load(&wq->upasid_xa, pasid); 210 if (!ctx) { 211 mutex_unlock(&wq->uc_lock); 212 return; 213 } 214 ctx->counters[index]++; 215 mutex_unlock(&wq->uc_lock); 216 } 217 218 static int idxd_cdev_open(struct inode *inode, struct file *filp) 219 { 220 struct idxd_user_context *ctx; 221 struct idxd_device *idxd; 222 struct idxd_wq *wq; 223 struct device *dev, *fdev; 224 int rc = 0; 225 struct iommu_sva *sva = NULL; 226 unsigned int pasid; 227 struct idxd_cdev *idxd_cdev; 228 229 wq = inode_wq(inode); 230 idxd = wq->idxd; 231 dev = &idxd->pdev->dev; 232 233 dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq)); 234 235 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 236 if (!ctx) 237 return -ENOMEM; 238 239 mutex_lock(&wq->wq_lock); 240 241 if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) { 242 rc = -EBUSY; 243 goto failed; 244 } 245 246 ctx->wq = wq; 247 filp->private_data = ctx; 248 ctx->pid = current->pid; 249 250 if (device_user_pasid_enabled(idxd)) { 251 sva = iommu_sva_bind_device(dev, current->mm); 252 if (IS_ERR(sva)) { 253 rc = PTR_ERR(sva); 254 dev_err(dev, "pasid allocation failed: %d\n", rc); 255 goto failed; 256 } 257 258 pasid = iommu_sva_get_pasid(sva); 259 if (pasid == IOMMU_PASID_INVALID) { 260 rc = -EINVAL; 261 goto failed_get_pasid; 262 } 263 264 ctx->sva = sva; 265 ctx->pasid = pasid; 266 ctx->mm = current->mm; 267 268 mutex_lock(&wq->uc_lock); 269 rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL); 270 mutex_unlock(&wq->uc_lock); 271 if (rc < 0) 272 dev_warn(dev, "PASID entry already exist in xarray.\n"); 273 274 if (wq_dedicated(wq)) { 275 rc = idxd_wq_set_pasid(wq, pasid); 276 if (rc < 0) { 277 dev_err(dev, "wq set pasid failed: %d\n", rc); 278 goto failed_set_pasid; 279 } 280 } 281 } 282 283 idxd_cdev = wq->idxd_cdev; 284 ctx->id = ida_alloc(&file_ida, GFP_KERNEL); 285 if (ctx->id < 0) { 286 dev_warn(dev, "ida alloc failure\n"); 287 goto failed_ida; 288 } 289 ctx->idxd_dev.type = IDXD_DEV_CDEV_FILE; 290 fdev = user_ctx_dev(ctx); 291 device_initialize(fdev); 292 fdev->parent = cdev_dev(idxd_cdev); 293 fdev->bus = &dsa_bus_type; 294 fdev->type = &idxd_cdev_file_type; 295 296 rc = dev_set_name(fdev, "file%d", ctx->id); 297 if (rc < 0) { 298 dev_warn(dev, "set name failure\n"); 299 goto failed_dev_name; 300 } 301 302 rc = device_add(fdev); 303 if (rc < 0) { 304 dev_warn(dev, "file device add failure\n"); 305 goto failed_dev_add; 306 } 307 308 idxd_wq_get(wq); 309 mutex_unlock(&wq->wq_lock); 310 return 0; 311 312 failed_dev_add: 313 failed_dev_name: 314 put_device(fdev); 315 failed_ida: 316 failed_set_pasid: 317 if (device_user_pasid_enabled(idxd)) 318 idxd_xa_pasid_remove(ctx); 319 failed_get_pasid: 320 if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva)) 321 iommu_sva_unbind_device(sva); 322 failed: 323 mutex_unlock(&wq->wq_lock); 324 kfree(ctx); 325 return rc; 326 } 327 328 static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid) 329 { 330 struct idxd_device *idxd = wq->idxd; 331 struct idxd_evl *evl = idxd->evl; 332 union evl_status_reg status; 333 u16 h, t, size; 334 int ent_size = evl_ent_size(idxd); 335 struct __evl_entry *entry_head; 336 337 if (!evl) 338 return; 339 340 mutex_lock(&evl->lock); 341 status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET); 342 t = status.tail; 343 h = status.head; 344 size = evl->size; 345 346 while (h != t) { 347 entry_head = (struct __evl_entry *)(evl->log + (h * ent_size)); 348 if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id) 349 set_bit(h, evl->bmap); 350 h = (h + 1) % size; 351 } 352 if (wq->wq) 353 drain_workqueue(wq->wq); 354 355 mutex_unlock(&evl->lock); 356 } 357 358 static int idxd_cdev_release(struct inode *node, struct file *filep) 359 { 360 struct idxd_user_context *ctx = filep->private_data; 361 struct idxd_wq *wq = ctx->wq; 362 struct idxd_device *idxd = wq->idxd; 363 struct device *dev = &idxd->pdev->dev; 364 365 dev_dbg(dev, "%s called\n", __func__); 366 filep->private_data = NULL; 367 368 device_unregister(user_ctx_dev(ctx)); 369 370 return 0; 371 } 372 373 static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, 374 const char *func) 375 { 376 struct device *dev = &wq->idxd->pdev->dev; 377 378 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { 379 dev_info_ratelimited(dev, 380 "%s: %s: mapping too large: %lu\n", 381 current->comm, func, 382 vma->vm_end - vma->vm_start); 383 return -EINVAL; 384 } 385 386 return 0; 387 } 388 389 static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) 390 { 391 struct idxd_user_context *ctx = filp->private_data; 392 struct idxd_wq *wq = ctx->wq; 393 struct idxd_device *idxd = wq->idxd; 394 struct pci_dev *pdev = idxd->pdev; 395 phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR); 396 unsigned long pfn; 397 int rc; 398 399 dev_dbg(&pdev->dev, "%s called\n", __func__); 400 401 /* 402 * Due to an erratum in some of the devices supported by the driver, 403 * direct user submission to the device can be unsafe. 404 * (See the INTEL-SA-01084 security advisory) 405 * 406 * For the devices that exhibit this behavior, require that the user 407 * has CAP_SYS_RAWIO capabilities. 408 */ 409 if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO)) 410 return -EPERM; 411 412 if (current->mm != ctx->mm) 413 return -EPERM; 414 415 rc = check_vma(wq, vma, __func__); 416 if (rc < 0) 417 return rc; 418 419 vm_flags_set(vma, VM_DONTCOPY); 420 pfn = (base + idxd_get_wq_portal_full_offset(wq->id, 421 IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT; 422 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 423 vma->vm_private_data = ctx; 424 425 return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, 426 vma->vm_page_prot); 427 } 428 429 static int idxd_submit_user_descriptor(struct idxd_user_context *ctx, 430 struct dsa_hw_desc __user *udesc) 431 { 432 struct idxd_wq *wq = ctx->wq; 433 struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev; 434 const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40; 435 void __iomem *portal = idxd_wq_portal_addr(wq); 436 struct dsa_hw_desc descriptor __aligned(64); 437 int rc; 438 439 rc = copy_from_user(&descriptor, udesc, sizeof(descriptor)); 440 if (rc) 441 return -EFAULT; 442 443 /* 444 * DSA devices are capable of indirect ("batch") command submission. 445 * On devices where direct user submissions are not safe, we cannot 446 * allow this since there is no good way for us to verify these 447 * indirect commands. Narrow the restriction of operations with the 448 * BATCH opcode to only DSA version 1 devices. 449 */ 450 if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH && 451 wq->idxd->hw.version == DEVICE_VERSION_1 && 452 !wq->idxd->user_submission_safe) 453 return -EINVAL; 454 /* 455 * As per the programming specification, the completion address must be 456 * aligned to 32 or 64 bytes. If this is violated the hardware 457 * engine can get very confused (security issue). 458 */ 459 if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align)) 460 return -EINVAL; 461 462 if (wq_dedicated(wq)) 463 iosubmit_cmds512(portal, &descriptor, 1); 464 else { 465 descriptor.priv = 0; 466 descriptor.pasid = ctx->pasid; 467 rc = idxd_enqcmds(wq, portal, &descriptor); 468 if (rc < 0) 469 return rc; 470 } 471 472 return 0; 473 } 474 475 static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len, 476 loff_t *unused) 477 { 478 struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf; 479 struct idxd_user_context *ctx = filp->private_data; 480 ssize_t written = 0; 481 int i; 482 483 if (current->mm != ctx->mm) 484 return -EPERM; 485 486 for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) { 487 int rc = idxd_submit_user_descriptor(ctx, udesc + i); 488 489 if (rc) 490 return written ? written : rc; 491 492 written += sizeof(struct dsa_hw_desc); 493 } 494 495 return written; 496 } 497 498 static __poll_t idxd_cdev_poll(struct file *filp, 499 struct poll_table_struct *wait) 500 { 501 struct idxd_user_context *ctx = filp->private_data; 502 struct idxd_wq *wq = ctx->wq; 503 struct idxd_device *idxd = wq->idxd; 504 __poll_t out = 0; 505 506 if (current->mm != ctx->mm) 507 return POLLNVAL; 508 509 poll_wait(filp, &wq->err_queue, wait); 510 spin_lock(&idxd->dev_lock); 511 if (idxd->sw_err.valid) 512 out = EPOLLIN | EPOLLRDNORM; 513 spin_unlock(&idxd->dev_lock); 514 515 return out; 516 } 517 518 static const struct file_operations idxd_cdev_fops = { 519 .owner = THIS_MODULE, 520 .open = idxd_cdev_open, 521 .release = idxd_cdev_release, 522 .mmap = idxd_cdev_mmap, 523 .write = idxd_cdev_write, 524 .poll = idxd_cdev_poll, 525 }; 526 527 int idxd_cdev_get_major(struct idxd_device *idxd) 528 { 529 return MAJOR(ictx[idxd->data->type].devt); 530 } 531 532 int idxd_wq_add_cdev(struct idxd_wq *wq) 533 { 534 struct idxd_device *idxd = wq->idxd; 535 struct idxd_cdev *idxd_cdev; 536 struct cdev *cdev; 537 struct device *dev; 538 struct idxd_cdev_context *cdev_ctx; 539 int rc, minor; 540 541 idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL); 542 if (!idxd_cdev) 543 return -ENOMEM; 544 545 idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV; 546 idxd_cdev->wq = wq; 547 cdev = &idxd_cdev->cdev; 548 dev = cdev_dev(idxd_cdev); 549 cdev_ctx = &ictx[wq->idxd->data->type]; 550 minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL); 551 if (minor < 0) { 552 kfree(idxd_cdev); 553 return minor; 554 } 555 idxd_cdev->minor = minor; 556 557 device_initialize(dev); 558 dev->parent = wq_confdev(wq); 559 dev->bus = &dsa_bus_type; 560 dev->type = &idxd_cdev_device_type; 561 dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); 562 563 rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id); 564 if (rc < 0) 565 goto err; 566 567 wq->idxd_cdev = idxd_cdev; 568 cdev_init(cdev, &idxd_cdev_fops); 569 rc = cdev_device_add(cdev, dev); 570 if (rc) { 571 dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); 572 goto err; 573 } 574 575 return 0; 576 577 err: 578 put_device(dev); 579 wq->idxd_cdev = NULL; 580 return rc; 581 } 582 583 void idxd_wq_del_cdev(struct idxd_wq *wq) 584 { 585 struct idxd_cdev *idxd_cdev; 586 587 idxd_cdev = wq->idxd_cdev; 588 wq->idxd_cdev = NULL; 589 cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev)); 590 put_device(cdev_dev(idxd_cdev)); 591 } 592 593 static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) 594 { 595 struct device *dev = &idxd_dev->conf_dev; 596 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); 597 struct idxd_device *idxd = wq->idxd; 598 int rc; 599 600 if (idxd->state != IDXD_DEV_ENABLED) 601 return -ENXIO; 602 603 mutex_lock(&wq->wq_lock); 604 605 if (!idxd_wq_driver_name_match(wq, dev)) { 606 idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME; 607 rc = -ENODEV; 608 goto wq_err; 609 } 610 611 /* 612 * User type WQ is enabled only when SVA is enabled for two reasons: 613 * - If no IOMMU or IOMMU Passthrough without SVA, userspace 614 * can directly access physical address through the WQ. 615 * - The IDXD cdev driver does not provide any ways to pin 616 * user pages and translate the address from user VA to IOVA or 617 * PA without IOMMU SVA. Therefore the application has no way 618 * to instruct the device to perform DMA function. This makes 619 * the cdev not usable for normal application usage. 620 */ 621 if (!device_user_pasid_enabled(idxd)) { 622 idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU; 623 dev_dbg(&idxd->pdev->dev, 624 "User type WQ cannot be enabled without SVA.\n"); 625 626 rc = -EOPNOTSUPP; 627 goto wq_err; 628 } 629 630 wq->wq = create_workqueue(dev_name(wq_confdev(wq))); 631 if (!wq->wq) { 632 rc = -ENOMEM; 633 goto wq_err; 634 } 635 636 wq->type = IDXD_WQT_USER; 637 rc = idxd_drv_enable_wq(wq); 638 if (rc < 0) 639 goto err; 640 641 rc = idxd_wq_add_cdev(wq); 642 if (rc < 0) { 643 idxd->cmd_status = IDXD_SCMD_CDEV_ERR; 644 goto err_cdev; 645 } 646 647 idxd->cmd_status = 0; 648 mutex_unlock(&wq->wq_lock); 649 return 0; 650 651 err_cdev: 652 idxd_drv_disable_wq(wq); 653 err: 654 destroy_workqueue(wq->wq); 655 wq->type = IDXD_WQT_NONE; 656 wq_err: 657 mutex_unlock(&wq->wq_lock); 658 return rc; 659 } 660 661 static void idxd_user_drv_remove(struct idxd_dev *idxd_dev) 662 { 663 struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev); 664 665 mutex_lock(&wq->wq_lock); 666 idxd_wq_del_cdev(wq); 667 idxd_drv_disable_wq(wq); 668 wq->type = IDXD_WQT_NONE; 669 destroy_workqueue(wq->wq); 670 wq->wq = NULL; 671 mutex_unlock(&wq->wq_lock); 672 } 673 674 static enum idxd_dev_type dev_types[] = { 675 IDXD_DEV_WQ, 676 IDXD_DEV_NONE, 677 }; 678 679 struct idxd_device_driver idxd_user_drv = { 680 .probe = idxd_user_drv_probe, 681 .remove = idxd_user_drv_remove, 682 .name = "user", 683 .type = dev_types, 684 }; 685 EXPORT_SYMBOL_GPL(idxd_user_drv); 686 687 int idxd_cdev_register(void) 688 { 689 int rc, i; 690 691 for (i = 0; i < IDXD_TYPE_MAX; i++) { 692 ida_init(&ictx[i].minor_ida); 693 rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, 694 ictx[i].name); 695 if (rc) 696 goto err_free_chrdev_region; 697 } 698 699 return 0; 700 701 err_free_chrdev_region: 702 for (i--; i >= 0; i--) 703 unregister_chrdev_region(ictx[i].devt, MINORMASK); 704 705 return rc; 706 } 707 708 void idxd_cdev_remove(void) 709 { 710 int i; 711 712 for (i = 0; i < IDXD_TYPE_MAX; i++) { 713 unregister_chrdev_region(ictx[i].devt, MINORMASK); 714 ida_destroy(&ictx[i].minor_ida); 715 } 716 } 717 718 /** 719 * idxd_copy_cr - copy completion record to user address space found by wq and 720 * PASID 721 * @wq: work queue 722 * @pasid: PASID 723 * @addr: user fault address to write 724 * @cr: completion record 725 * @len: number of bytes to copy 726 * 727 * This is called by a work that handles completion record fault. 728 * 729 * Return: number of bytes copied. 730 */ 731 int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr, 732 void *cr, int len) 733 { 734 struct device *dev = &wq->idxd->pdev->dev; 735 int left = len, status_size = 1; 736 struct idxd_user_context *ctx; 737 struct mm_struct *mm; 738 739 mutex_lock(&wq->uc_lock); 740 741 ctx = xa_load(&wq->upasid_xa, pasid); 742 if (!ctx) { 743 dev_warn(dev, "No user context\n"); 744 goto out; 745 } 746 747 mm = ctx->mm; 748 /* 749 * The completion record fault handling work is running in kernel 750 * thread context. It temporarily switches to the mm to copy cr 751 * to addr in the mm. 752 */ 753 kthread_use_mm(mm); 754 left = copy_to_user((void __user *)addr + status_size, cr + status_size, 755 len - status_size); 756 /* 757 * Copy status only after the rest of completion record is copied 758 * successfully so that the user gets the complete completion record 759 * when a non-zero status is polled. 760 */ 761 if (!left) { 762 u8 status; 763 764 /* 765 * Ensure that the completion record's status field is written 766 * after the rest of the completion record has been written. 767 * This ensures that the user receives the correct completion 768 * record information once polling for a non-zero status. 769 */ 770 wmb(); 771 status = *(u8 *)cr; 772 if (put_user(status, (u8 __user *)addr)) 773 left += status_size; 774 } else { 775 left += status_size; 776 } 777 kthread_unuse_mm(mm); 778 779 out: 780 mutex_unlock(&wq->uc_lock); 781 782 return len - left; 783 } 784