// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights reserved. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/sched/task.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/iommu.h>
#include <linux/highmem.h>
#include <uapi/linux/idxd.h>
#include <linux/xarray.h>
#include "registers.h"
#include "idxd.h"

struct idxd_cdev_context {
	const char *name;
	dev_t devt;
	struct ida minor_ida;
};

/*
 * User file names are global across DSA devices, so define their IDA as
 * global to avoid conflicting file names.
 */
static DEFINE_IDA(file_ida);

/*
 * ictx is an array of per-accelerator-type contexts; enum idxd_type is
 * used as the index.
 */
static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = {
	{ .name = "dsa" },
	{ .name = "iax" }
};

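/*
 * Per-open-file user context, allocated in idxd_cdev_open() and freed by
 * idxd_file_dev_release() once the backing "file" device goes away.
 */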
struct idxd_user_context {
	struct idxd_wq *wq;
	struct task_struct *task;
	unsigned int pasid;
	struct mm_struct *mm;
	unsigned int flags;
	struct iommu_sva *sva;
	struct idxd_dev idxd_dev;
	u64 counters[COUNTER_MAX];
	int id;
	pid_t pid;
};

static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid);
static void idxd_xa_pasid_remove(struct idxd_user_context *ctx);

static inline struct idxd_user_context *dev_to_uctx(struct device *dev)
{
	struct idxd_dev *idxd_dev = confdev_to_idxd_dev(dev);

	return container_of(idxd_dev, struct idxd_user_context, idxd_dev);
}

static ssize_t cr_faults_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_user_context *ctx = dev_to_uctx(dev);

	return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULTS]);
}
static DEVICE_ATTR_RO(cr_faults);

static ssize_t cr_fault_failures_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct idxd_user_context *ctx = dev_to_uctx(dev);

	return sysfs_emit(buf, "%llu\n", ctx->counters[COUNTER_FAULT_FAILS]);
}
static DEVICE_ATTR_RO(cr_fault_failures);

static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct idxd_user_context *ctx = dev_to_uctx(dev);

	return sysfs_emit(buf, "%u\n", ctx->pid);
}
static DEVICE_ATTR_RO(pid);

static struct attribute *cdev_file_attributes[] = {
	&dev_attr_cr_faults.attr,
	&dev_attr_cr_fault_failures.attr,
	&dev_attr_pid.attr,
	NULL
};

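/* Expose the per-file attributes only when the wq operates with a PASID. */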
static umode_t cdev_file_attr_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct idxd_user_context *ctx = dev_to_uctx(dev);
	struct idxd_wq *wq = ctx->wq;

	if (!wq_pasid_enabled(wq))
		return 0;

	return a->mode;
}

static const struct attribute_group cdev_file_attribute_group = {
	.attrs = cdev_file_attributes,
	.is_visible = cdev_file_attr_visible,
};

static const struct attribute_group *cdev_file_attribute_groups[] = {
	&cdev_file_attribute_group,
	NULL
};

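/*
 * Release callback for the per-open "file" device: drain any in-flight
 * work for this context, tear down the SVA binding, and drop the wq
 * reference taken at open.
 */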
static void idxd_file_dev_release(struct device *dev)
{
	struct idxd_user_context *ctx = dev_to_uctx(dev);
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	int rc;

	ida_free(&file_ida, ctx->id);

	/* Wait for in-flight operations to complete. */
	if (wq_shared(wq)) {
		idxd_device_drain_pasid(idxd, ctx->pasid);
	} else {
		if (device_user_pasid_enabled(idxd)) {
			/* The wq disable in the disable pasid function will drain the wq */
			rc = idxd_wq_disable_pasid(wq);
			if (rc < 0)
				dev_err(dev, "wq disable pasid failed.\n");
		} else {
			idxd_wq_drain(wq);
		}
	}

	if (ctx->sva) {
		idxd_cdev_evl_drain_pasid(wq, ctx->pasid);
		iommu_sva_unbind_device(ctx->sva);
		idxd_xa_pasid_remove(ctx);
	}
	kfree(ctx);
	mutex_lock(&wq->wq_lock);
	idxd_wq_put(wq);
	mutex_unlock(&wq->wq_lock);
}

static const struct device_type idxd_cdev_file_type = {
	.name = "idxd_file",
	.release = idxd_file_dev_release,
	.groups = cdev_file_attribute_groups,
};

static void idxd_cdev_dev_release(struct device *dev)
{
	struct idxd_cdev *idxd_cdev = dev_to_cdev(dev);
	struct idxd_cdev_context *cdev_ctx;
	struct idxd_wq *wq = idxd_cdev->wq;

	cdev_ctx = &ictx[wq->idxd->data->type];
	ida_free(&cdev_ctx->minor_ida, idxd_cdev->minor);
	kfree(idxd_cdev);
}

static const struct device_type idxd_cdev_device_type = {
	.name = "idxd_cdev",
	.release = idxd_cdev_dev_release,
};

static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode)
{
	struct cdev *cdev = inode->i_cdev;

	return container_of(cdev, struct idxd_cdev, cdev);
}

static inline struct idxd_wq *inode_wq(struct inode *inode)
{
	struct idxd_cdev *idxd_cdev = inode_idxd_cdev(inode);

	return idxd_cdev->wq;
}

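/* Drop the PASID -> user context mapping, warning if the entry was not ours. */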
static void idxd_xa_pasid_remove(struct idxd_user_context *ctx)
{
	struct idxd_wq *wq = ctx->wq;
	void *ptr;

	mutex_lock(&wq->uc_lock);
	ptr = xa_cmpxchg(&wq->upasid_xa, ctx->pasid, ctx, NULL, GFP_KERNEL);
	if (ptr != (void *)ctx)
		dev_warn(&wq->idxd->pdev->dev, "xarray cmpxchg failed for pasid %u\n",
			 ctx->pasid);
	mutex_unlock(&wq->uc_lock);
}

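/**
 * idxd_user_counter_increment - increment a per-user-context event counter
 * @wq: work queue
 * @pasid: PASID identifying the user context
 * @index: which counter to increment (COUNTER_*)
 */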
void idxd_user_counter_increment(struct idxd_wq *wq, u32 pasid, int index)
{
	struct idxd_user_context *ctx;

	if (index >= COUNTER_MAX)
		return;

	mutex_lock(&wq->uc_lock);
	ctx = xa_load(&wq->upasid_xa, pasid);
	if (!ctx) {
		mutex_unlock(&wq->uc_lock);
		return;
	}
	ctx->counters[index]++;
	mutex_unlock(&wq->uc_lock);
}

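/*
 * Open a user portal: allocate a context, bind the caller's mm for SVA
 * where enabled, and register a child "file" device so per-open state is
 * visible in sysfs.
 */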
static int idxd_cdev_open(struct inode *inode, struct file *filp)
{
	struct idxd_user_context *ctx;
	struct idxd_device *idxd;
	struct idxd_wq *wq;
	struct device *dev, *fdev;
	int rc = 0;
	struct iommu_sva *sva = NULL;
	unsigned int pasid;
	struct idxd_cdev *idxd_cdev;

	wq = inode_wq(inode);
	idxd = wq->idxd;
	dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called: %d\n", __func__, idxd_wq_refcount(wq));

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	mutex_lock(&wq->wq_lock);

	if (idxd_wq_refcount(wq) > 0 && wq_dedicated(wq)) {
		rc = -EBUSY;
		goto failed;
	}

	ctx->wq = wq;
	filp->private_data = ctx;
	ctx->pid = current->pid;

	if (device_user_pasid_enabled(idxd)) {
		sva = iommu_sva_bind_device(dev, current->mm);
		if (IS_ERR(sva)) {
			rc = PTR_ERR(sva);
			dev_err(dev, "pasid allocation failed: %d\n", rc);
			goto failed;
		}

		pasid = iommu_sva_get_pasid(sva);
		if (pasid == IOMMU_PASID_INVALID) {
			rc = -EINVAL;
			goto failed_get_pasid;
		}

		ctx->sva = sva;
		ctx->pasid = pasid;
		ctx->mm = current->mm;

		mutex_lock(&wq->uc_lock);
		rc = xa_insert(&wq->upasid_xa, pasid, ctx, GFP_KERNEL);
		mutex_unlock(&wq->uc_lock);
		if (rc < 0)
			dev_warn(dev, "PASID entry already exists in xarray.\n");

		if (wq_dedicated(wq)) {
			rc = idxd_wq_set_pasid(wq, pasid);
			if (rc < 0) {
				dev_err(dev, "wq set pasid failed: %d\n", rc);
				goto failed_set_pasid;
			}
		}
	}

	idxd_cdev = wq->idxd_cdev;
	ctx->id = ida_alloc(&file_ida, GFP_KERNEL);
	if (ctx->id < 0) {
		rc = ctx->id;
		dev_warn(dev, "ida alloc failure\n");
		goto failed_ida;
	}
	ctx->idxd_dev.type = IDXD_DEV_CDEV_FILE;
	fdev = user_ctx_dev(ctx);
	device_initialize(fdev);
	fdev->parent = cdev_dev(idxd_cdev);
	fdev->bus = &dsa_bus_type;
	fdev->type = &idxd_cdev_file_type;

	rc = dev_set_name(fdev, "file%d", ctx->id);
	if (rc < 0) {
		dev_warn(dev, "set name failure\n");
		goto failed_dev_name;
	}

	rc = device_add(fdev);
	if (rc < 0) {
		dev_warn(dev, "file device add failure\n");
		goto failed_dev_add;
	}

	idxd_wq_get(wq);
	mutex_unlock(&wq->wq_lock);
	return 0;

failed_dev_add:
failed_dev_name:
	put_device(fdev);
failed_ida:
failed_set_pasid:
	if (device_user_pasid_enabled(idxd))
		idxd_xa_pasid_remove(ctx);
failed_get_pasid:
	if (device_user_pasid_enabled(idxd) && !IS_ERR_OR_NULL(sva))
		iommu_sva_unbind_device(sva);
failed:
	mutex_unlock(&wq->wq_lock);
	kfree(ctx);
	return rc;
}

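/*
 * Mark any pending event log entries that reference this wq/PASID pair so
 * the event log handler knows to skip them, then flush the wq's fault
 * processing workqueue.
 */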
static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_evl *evl = idxd->evl;
	union evl_status_reg status;
	u16 h, t, size;
	int ent_size = evl_ent_size(idxd);
	struct __evl_entry *entry_head;

	if (!evl)
		return;

	mutex_lock(&evl->lock);
	status.bits = ioread64(idxd->reg_base + IDXD_EVLSTATUS_OFFSET);
	t = status.tail;
	h = status.head;
	size = evl->size;

	while (h != t) {
		entry_head = (struct __evl_entry *)(evl->log + (h * ent_size));
		if (entry_head->pasid == pasid && entry_head->wq_idx == wq->id)
			set_bit(h, evl->bmap);
		h = (h + 1) % size;
	}
	drain_workqueue(wq->wq);
	mutex_unlock(&evl->lock);
}

static int idxd_cdev_release(struct inode *node, struct file *filep)
{
	struct idxd_user_context *ctx = filep->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct device *dev = &idxd->pdev->dev;

	dev_dbg(dev, "%s called\n", __func__);
	filep->private_data = NULL;

	device_unregister(user_ctx_dev(ctx));

	return 0;
}

static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma,
		     const char *func)
{
	struct device *dev = &wq->idxd->pdev->dev;

	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info_ratelimited(dev,
				     "%s: %s: mapping too large: %lu\n",
				     current->comm, func,
				     vma->vm_end - vma->vm_start);
		return -EINVAL;
	}

	return 0;
}

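/* Map a single page of the wq's limited portal into the caller's address space. */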
static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	struct pci_dev *pdev = idxd->pdev;
	phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR);
	unsigned long pfn;
	int rc;

	dev_dbg(&pdev->dev, "%s called\n", __func__);

	/*
	 * Due to an erratum in some of the devices supported by the driver,
	 * direct user submission to the device can be unsafe.
	 * (See the INTEL-SA-01084 security advisory)
	 *
	 * For the devices that exhibit this behavior, require that the user
	 * has CAP_SYS_RAWIO capabilities.
	 */
	if (!idxd->user_submission_safe && !capable(CAP_SYS_RAWIO))
		return -EPERM;

	if (current->mm != ctx->mm)
		return -EPERM;

	rc = check_vma(wq, vma, __func__);
	if (rc < 0)
		return rc;

	vm_flags_set(vma, VM_DONTCOPY);
	pfn = (base + idxd_get_wq_portal_full_offset(wq->id,
				IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_private_data = ctx;

	return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE,
			vma->vm_page_prot);
}

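/*
 * Copy one descriptor from userspace, sanity-check it, and submit it to the
 * wq portal: iosubmit_cmds512() (MOVDIR64B) for dedicated wqs, ENQCMDS via
 * idxd_enqcmds() for shared ones.
 */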
static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
				       struct dsa_hw_desc __user *udesc)
{
	struct idxd_wq *wq = ctx->wq;
	struct idxd_dev *idxd_dev = &wq->idxd->idxd_dev;
	const uint64_t comp_addr_align = is_dsa_dev(idxd_dev) ? 0x20 : 0x40;
	void __iomem *portal = idxd_wq_portal_addr(wq);
	struct dsa_hw_desc descriptor __aligned(64);
	int rc;

	rc = copy_from_user(&descriptor, udesc, sizeof(descriptor));
	if (rc)
		return -EFAULT;

	/*
	 * DSA devices are capable of indirect ("batch") command submission.
	 * On devices where direct user submissions are not safe, we cannot
	 * allow this since there is no good way for us to verify these
	 * indirect commands.
	 */
	if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
	    !wq->idxd->user_submission_safe)
		return -EINVAL;
	/*
	 * Per the programming specification, the completion address must be
	 * 32-byte aligned on DSA and 64-byte aligned on IAX. If this is
	 * violated, the hardware engine can misbehave (a security issue).
	 */
	if (!IS_ALIGNED(descriptor.completion_addr, comp_addr_align))
		return -EINVAL;

	if (wq_dedicated(wq))
		iosubmit_cmds512(portal, &descriptor, 1);
	else {
		descriptor.priv = 0;
		descriptor.pasid = ctx->pasid;
		rc = idxd_enqcmds(wq, portal, &descriptor);
		if (rc < 0)
			return rc;
	}

	return 0;
}

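/*
 * write() treats the buffer as an array of descriptors, submitting them one
 * by one and returning the bytes consumed, or the first error if nothing
 * was submitted.
 */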
static ssize_t idxd_cdev_write(struct file *filp, const char __user *buf, size_t len,
			       loff_t *unused)
{
	struct dsa_hw_desc __user *udesc = (struct dsa_hw_desc __user *)buf;
	struct idxd_user_context *ctx = filp->private_data;
	ssize_t written = 0;
	int i;

	if (current->mm != ctx->mm)
		return -EPERM;

	for (i = 0; i < len/sizeof(struct dsa_hw_desc); i++) {
		int rc = idxd_submit_user_descriptor(ctx, udesc + i);

		if (rc)
			return written ? written : rc;

		written += sizeof(struct dsa_hw_desc);
	}

	return written;
}

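/* Report EPOLLIN once the device records a software error; waiters sleep on wq->err_queue. */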
static __poll_t idxd_cdev_poll(struct file *filp,
			       struct poll_table_struct *wait)
{
	struct idxd_user_context *ctx = filp->private_data;
	struct idxd_wq *wq = ctx->wq;
	struct idxd_device *idxd = wq->idxd;
	__poll_t out = 0;

	if (current->mm != ctx->mm)
		return POLLNVAL;

	poll_wait(filp, &wq->err_queue, wait);
	spin_lock(&idxd->dev_lock);
	if (idxd->sw_err.valid)
		out = EPOLLIN | EPOLLRDNORM;
	spin_unlock(&idxd->dev_lock);

	return out;
}

static const struct file_operations idxd_cdev_fops = {
	.owner = THIS_MODULE,
	.open = idxd_cdev_open,
	.release = idxd_cdev_release,
	.mmap = idxd_cdev_mmap,
	.write = idxd_cdev_write,
	.poll = idxd_cdev_poll,
};

int idxd_cdev_get_major(struct idxd_device *idxd)
{
	return MAJOR(ictx[idxd->data->type].devt);
}

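/*
 * Create the char device node ("<prefix>/wqX.Y") and its companion struct
 * device for a user-type wq.
 */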
int idxd_wq_add_cdev(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	struct idxd_cdev *idxd_cdev;
	struct cdev *cdev;
	struct device *dev;
	struct idxd_cdev_context *cdev_ctx;
	int rc, minor;

	idxd_cdev = kzalloc(sizeof(*idxd_cdev), GFP_KERNEL);
	if (!idxd_cdev)
		return -ENOMEM;

	idxd_cdev->idxd_dev.type = IDXD_DEV_CDEV;
	idxd_cdev->wq = wq;
	cdev = &idxd_cdev->cdev;
	dev = cdev_dev(idxd_cdev);
	cdev_ctx = &ictx[wq->idxd->data->type];
	minor = ida_alloc_max(&cdev_ctx->minor_ida, MINORMASK, GFP_KERNEL);
	if (minor < 0) {
		kfree(idxd_cdev);
		return minor;
	}
	idxd_cdev->minor = minor;

	device_initialize(dev);
	dev->parent = wq_confdev(wq);
	dev->bus = &dsa_bus_type;
	dev->type = &idxd_cdev_device_type;
	dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor);

	rc = dev_set_name(dev, "%s/wq%u.%u", idxd->data->name_prefix, idxd->id, wq->id);
	if (rc < 0)
		goto err;

	wq->idxd_cdev = idxd_cdev;
	cdev_init(cdev, &idxd_cdev_fops);
	rc = cdev_device_add(cdev, dev);
	if (rc) {
		dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc);
		goto err;
	}

	return 0;

err:
	put_device(dev);
	wq->idxd_cdev = NULL;
	return rc;
}

void idxd_wq_del_cdev(struct idxd_wq *wq)
{
	struct idxd_cdev *idxd_cdev;

	idxd_cdev = wq->idxd_cdev;
	wq->idxd_cdev = NULL;
	cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
	put_device(cdev_dev(idxd_cdev));
}

static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
{
	struct device *dev = &idxd_dev->conf_dev;
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);
	struct idxd_device *idxd = wq->idxd;
	int rc;

	if (idxd->state != IDXD_DEV_ENABLED)
		return -ENXIO;

	mutex_lock(&wq->wq_lock);

	if (!idxd_wq_driver_name_match(wq, dev)) {
		idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
		rc = -ENODEV;
		goto wq_err;
	}

	/*
	 * A user-type WQ is enabled only when SVA is enabled, for two reasons:
	 *   - Without an IOMMU, or with the IOMMU in passthrough mode and no
	 *     SVA, userspace could directly access physical addresses through
	 *     the WQ.
	 *   - The idxd cdev driver provides no way to pin user pages or
	 *     translate addresses from user VA to IOVA or PA without IOMMU
	 *     SVA, so an application would have no way to instruct the device
	 *     to perform DMA. That would make the cdev unusable for normal
	 *     applications.
	 */
	if (!device_user_pasid_enabled(idxd)) {
		idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU;
		dev_dbg(&idxd->pdev->dev,
			"User type WQ cannot be enabled without SVA.\n");

		rc = -EOPNOTSUPP;
		goto wq_err;
	}

	wq->wq = create_workqueue(dev_name(wq_confdev(wq)));
	if (!wq->wq) {
		rc = -ENOMEM;
		goto wq_err;
	}

	wq->type = IDXD_WQT_USER;
	rc = idxd_drv_enable_wq(wq);
	if (rc < 0)
		goto err;

	rc = idxd_wq_add_cdev(wq);
	if (rc < 0) {
		idxd->cmd_status = IDXD_SCMD_CDEV_ERR;
		goto err_cdev;
	}

	idxd->cmd_status = 0;
	mutex_unlock(&wq->wq_lock);
	return 0;

err_cdev:
	idxd_drv_disable_wq(wq);
err:
	destroy_workqueue(wq->wq);
	wq->type = IDXD_WQT_NONE;
wq_err:
	mutex_unlock(&wq->wq_lock);
	return rc;
}

static void idxd_user_drv_remove(struct idxd_dev *idxd_dev)
{
	struct idxd_wq *wq = idxd_dev_to_wq(idxd_dev);

	mutex_lock(&wq->wq_lock);
	idxd_wq_del_cdev(wq);
	idxd_drv_disable_wq(wq);
	wq->type = IDXD_WQT_NONE;
	destroy_workqueue(wq->wq);
	wq->wq = NULL;
	mutex_unlock(&wq->wq_lock);
}

static enum idxd_dev_type dev_types[] = {
	IDXD_DEV_WQ,
	IDXD_DEV_NONE,
};

struct idxd_device_driver idxd_user_drv = {
	.probe = idxd_user_drv_probe,
	.remove = idxd_user_drv_remove,
	.name = "user",
	.type = dev_types,
};
EXPORT_SYMBOL_GPL(idxd_user_drv);

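/* Initialize each accelerator type's minor IDA and reserve its chrdev region. */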
int idxd_cdev_register(void)
{
	int rc, i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		ida_init(&ictx[i].minor_ida);
		rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK,
					 ictx[i].name);
		if (rc)
			goto err_free_chrdev_region;
	}

	return 0;

err_free_chrdev_region:
	for (i--; i >= 0; i--)
		unregister_chrdev_region(ictx[i].devt, MINORMASK);

	return rc;
}

void idxd_cdev_remove(void)
{
	int i;

	for (i = 0; i < IDXD_TYPE_MAX; i++) {
		unregister_chrdev_region(ictx[i].devt, MINORMASK);
		ida_destroy(&ictx[i].minor_ida);
	}
}

/**
 * idxd_copy_cr - copy completion record to user address space found by wq
 *		  and PASID
 * @wq: work queue
 * @pasid: PASID
 * @addr: user fault address to write
 * @cr: completion record
 * @len: number of bytes to copy
 *
 * This is called by a work that handles completion record fault.
 *
 * Return: number of bytes copied.
 */
int idxd_copy_cr(struct idxd_wq *wq, ioasid_t pasid, unsigned long addr,
		 void *cr, int len)
{
	struct device *dev = &wq->idxd->pdev->dev;
	int left = len, status_size = 1;
	struct idxd_user_context *ctx;
	struct mm_struct *mm;

	mutex_lock(&wq->uc_lock);

	ctx = xa_load(&wq->upasid_xa, pasid);
	if (!ctx) {
		dev_warn(dev, "No user context\n");
		goto out;
	}

	mm = ctx->mm;
	/*
	 * The completion record fault handling work runs in kernel thread
	 * context. Temporarily switch to the faulting mm to copy the
	 * completion record to the user address within it.
	 */
	kthread_use_mm(mm);
	left = copy_to_user((void __user *)addr + status_size, cr + status_size,
			    len - status_size);
	/*
	 * Copy the status byte only after the rest of the completion record
	 * has been copied successfully, so that the user sees a complete
	 * record when polling for a non-zero status.
	 */
	if (!left) {
		u8 status;

		/*
		 * Ensure that the completion record's status field is written
		 * after the rest of the completion record has been written.
		 * This ensures that the user receives the correct completion
		 * record information when polling for a non-zero status.
		 */
		wmb();
		status = *(u8 *)cr;
		if (put_user(status, (u8 __user *)addr))
			left += status_size;
	} else {
		left += status_size;
	}
	kthread_unuse_mm(mm);

out:
	mutex_unlock(&wq->uc_lock);

	return len - left;
}