Lines Matching +full:send +full:-migration (VAS user space API driver, vas-api.c)

1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * VAS user space API for its accelerators (Only NX-GZIP is supported now)
7 #define pr_fmt(fmt) "vas-api: " fmt
20 #include <uapi/asm/vas-api.h>
24 * For NX-GZIP
26 * fd = open("/dev/crypto/nx-gzip", O_RDWR);
33 * where "vas_copy" and "vas_paste" are defined in copy-paste.h.
40 * Wrapper object for the nx-gzip device - there is just one instance of
73 * pid will not be re-used - needed only for multithread in get_vas_user_win_ref()
76 task_ref->pid = get_task_pid(current, PIDTYPE_PID); in get_vas_user_win_ref()
80 task_ref->mm = get_task_mm(current); in get_vas_user_win_ref()
81 if (!task_ref->mm) { in get_vas_user_win_ref()
82 put_pid(task_ref->pid); in get_vas_user_win_ref()
84 current->pid); in get_vas_user_win_ref()
85 return -EPERM; in get_vas_user_win_ref()
88 mmgrab(task_ref->mm); in get_vas_user_win_ref()
89 mmput(task_ref->mm); in get_vas_user_win_ref()
97 task_ref->tgid = find_get_pid(task_tgid_vnr(current)); in get_vas_user_win_ref()
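
get_vas_user_win_ref() pins three things so a window can outlive the thread that opened it: a pid reference (get_task_pid), the mm_struct (get_task_mm, then mmgrab() plus mmput() so only the mm_count reference is kept), and a tgid reference (find_get_pid). Below is a sketch of the matching release, using a hypothetical helper name, since the file's actual teardown path is not part of this listing.

#include <linux/pid.h>
#include <linux/sched/mm.h>
#include <asm/vas.h>		/* struct vas_user_win_ref */

/* Hypothetical counterpart to get_vas_user_win_ref(); each put below
 * pairs with one of the gets shown in the fragments above. */
static void put_vas_user_win_ref_sketch(struct vas_user_win_ref *task_ref)
{
	put_pid(task_ref->pid);		/* pairs with get_task_pid() */
	put_pid(task_ref->tgid);	/* pairs with find_get_pid() */
	if (task_ref->mm)
		mmdrop(task_ref->mm);	/* pairs with mmgrab()       */
}
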
112 pid = task_ref->pid; in ref_get_pid_and_task()
115 pid = task_ref->tgid; in ref_get_pid_and_task()
126 if (tsk->flags & PF_EXITING) { in ref_get_pid_and_task()
146 * invalid csb_addr, send a signal to the process.
159 * NX user space windows can not be opened for task->mm=NULL in vas_update_csb()
162 if (WARN_ON_ONCE(!task_ref->mm)) in vas_update_csb()
165 csb_addr = (void __user *)be64_to_cpu(crb->csb_addr); in vas_update_csb()
178 csb.address = crb->stamp.nx.fault_storage_addr; in vas_update_csb()
182 * Process closes send window after all pending NX requests are in vas_update_csb()
183 * completed. In multi-thread applications, a child thread can in vas_update_csb()
189 * invalid, send SEGV signal to pid saved in window. If the in vas_update_csb()
190 * child thread is not running, send the signal to tgid. in vas_update_csb()
201 kthread_use_mm(task_ref->mm); in vas_update_csb()
213 kthread_unuse_mm(task_ref->mm); in vas_update_csb()
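
Read together, these fragments show vas_update_csb() building a fault CSB, writing it to the user's csb_addr while temporarily adopting the task's mm (kthread_use_mm()/kthread_unuse_mm(), presumably because the update runs from kernel-thread context), and escalating to a signal when that copy fails. A condensed sketch of the escalation is below; the helper name and exact siginfo fields are assumptions based on the "send SEGV signal" comments above.

#include <linux/errno.h>
#include <linux/pid.h>
#include <linux/printk.h>
#include <linux/sched/signal.h>

/* Sketch only: deliver SIGSEGV to the pid (or tgid) picked by
 * ref_get_pid_and_task() when the CSB cannot be copied back. */
static void vas_signal_csb_fault(struct pid *pid, void __user *csb_addr)
{
	struct kernel_siginfo info;

	clear_siginfo(&info);
	info.si_signo = SIGSEGV;
	info.si_errno = EFAULT;
	info.si_code = SEGV_MAPERR;
	info.si_addr = csb_addr;

	if (kill_pid_info(SIGSEGV, &info, pid))
		pr_devel("SIGSEGV delivery to pid %d failed\n", pid_vnr(pid));
}
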
248 dde = &crb->source; in vas_dump_crb()
250 be64_to_cpu(dde->address), be32_to_cpu(dde->length), in vas_dump_crb()
251 dde->count, dde->index, dde->flags); in vas_dump_crb()
253 dde = &crb->target; in vas_dump_crb()
255 be64_to_cpu(dde->address), be32_to_cpu(dde->length), in vas_dump_crb()
256 dde->count, dde->index, dde->flags); in vas_dump_crb()
258 nx = &crb->stamp.nx; in vas_dump_crb()
260 be32_to_cpu(nx->pswid), in vas_dump_crb()
261 be64_to_cpu(crb->stamp.nx.fault_storage_addr), in vas_dump_crb()
262 nx->flags, nx->fault_status); in vas_dump_crb()
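
vas_dump_crb() is a debug helper that prints the source/target data-descriptor entries and the NX fault stamp of a coprocessor request block. For orientation, the layouts it walks come from arch/powerpc/include/asm/icswx.h; the rendering below is from memory, so treat field widths and ordering as approximate.

#include <linux/types.h>

/* Approximate rendering of the structures the dump walks (see
 * asm/icswx.h for the authoritative definitions). NX fills these in
 * big-endian, hence the be32/be64 conversions in vas_dump_crb(). */
struct data_descriptor_entry {
	__be16	flags;
	__u8	count;
	__u8	index;
	__be32	length;
	__be64	address;
} __packed;

struct nx_fault_stamp {
	__be64	fault_storage_addr;
	__be16	reserved;
	__u8	flags;
	__u8	fault_status;
	__be32	pswid;
} __packed;
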
271 return -ENOMEM; in coproc_open()
273 cp_inst->coproc = container_of(inode->i_cdev, struct coproc_dev, in coproc_open()
275 fp->private_data = cp_inst; in coproc_open()
288 cp_inst = fp->private_data; in coproc_ioc_tx_win_open()
293 if (cp_inst->txwin) in coproc_ioc_tx_win_open()
294 return -EEXIST; in coproc_ioc_tx_win_open()
299 return -EFAULT; in coproc_ioc_tx_win_open()
304 return -EINVAL; in coproc_ioc_tx_win_open()
307 if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->open_win) { in coproc_ioc_tx_win_open()
309 return -EACCES; in coproc_ioc_tx_win_open()
312 txwin = cp_inst->coproc->vops->open_win(uattr.vas_id, uattr.flags, in coproc_ioc_tx_win_open()
313 cp_inst->coproc->cop_type); in coproc_ioc_tx_win_open()
320 mutex_init(&txwin->task_ref.mmap_mutex); in coproc_ioc_tx_win_open()
321 cp_inst->txwin = txwin; in coproc_ioc_tx_win_open()
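
coproc_ioc_tx_win_open() copies the attribute block from user space, rejects an unsupported uattr.version with -EINVAL, and hands uattr.vas_id and uattr.flags to the platform's open_win() callback. The attribute it validates is the uapi structure from asm/vas-api.h, roughly as below; only version, vas_id, and flags are visible in this listing, so the reserved padding and ioctl encoding should be treated as approximate.

#include <linux/ioctl.h>
#include <linux/types.h>

/* Approximate copy of the uapi definition in uapi/asm/vas-api.h */
struct vas_tx_win_open_attr {
	__u32	version;	/* checked by coproc_ioc_tx_win_open()   */
	__s16	vas_id;		/* specific VAS instance, -1 for default */
	__u16	reserved1;
	__u64	flags;		/* reserved for future use               */
	__u64	reserved2[6];
};

#define VAS_MAGIC	'v'
#define VAS_TX_WIN_OPEN	_IOW(VAS_MAGIC, 0x20, struct vas_tx_win_open_attr)
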
328 struct coproc_instance *cp_inst = fp->private_data; in coproc_release()
331 if (cp_inst->txwin) { in coproc_release()
332 if (cp_inst->coproc->vops && in coproc_release()
333 cp_inst->coproc->vops->close_win) { in coproc_release()
334 rc = cp_inst->coproc->vops->close_win(cp_inst->txwin); in coproc_release()
338 cp_inst->txwin = NULL; in coproc_release()
342 fp->private_data = NULL; in coproc_release()
361 struct pt_regs *regs = current->thread.regs; in do_fail_paste()
365 return -EINVAL; in do_fail_paste()
368 return -EINVAL; in do_fail_paste()
375 if (get_user(instword, (u32 __user *)(regs->nip))) in do_fail_paste()
376 return -EAGAIN; in do_fail_paste()
382 return -ENOENT; in do_fail_paste()
384 regs->ccr &= ~0xe0000000; /* Clear CR0[0-2] to fail paste */ in do_fail_paste()
397 struct vm_area_struct *vma = vmf->vma; in vas_mmap_fault()
398 struct file *fp = vma->vm_file; in vas_mmap_fault()
399 struct coproc_instance *cp_inst = fp->private_data; in vas_mmap_fault()
408 if (!cp_inst || !cp_inst->txwin) { in vas_mmap_fault()
413 txwin = cp_inst->txwin; in vas_mmap_fault()
416 * migration, invalidate the existing mapping for the current in vas_mmap_fault()
417 * paste addresses and set windows in-active (zap_vma_pages in in vas_mmap_fault()
419 * New mapping will be done later after migration or new credits in vas_mmap_fault()
423 if (txwin->task_ref.vma != vmf->vma) { in vas_mmap_fault()
428 mutex_lock(&txwin->task_ref.mmap_mutex); in vas_mmap_fault()
435 if (txwin->status == VAS_WIN_ACTIVE) { in vas_mmap_fault()
436 paste_addr = cp_inst->coproc->vops->paste_addr(txwin); in vas_mmap_fault()
438 fault = vmf_insert_pfn(vma, vma->vm_start, in vas_mmap_fault()
440 mutex_unlock(&txwin->task_ref.mmap_mutex); in vas_mmap_fault()
444 mutex_unlock(&txwin->task_ref.mmap_mutex); in vas_mmap_fault()
448 * It can happen during migration or lost credits. in vas_mmap_fault()
455 * for migration) or should fallback to SW compression or in vas_mmap_fault()
458 * failures are coming during migration or core removal: in vas_mmap_fault()
461 if (!ret || (ret == -EAGAIN)) in vas_mmap_fault()
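
The comments above spell out the contract for user space: when the window has been deactivated (core removal or LPAR migration), the paste fails, and the application should either retry until the window comes back or fall back to software compression. A user-space sketch of that policy follows; nx_copy_paste_crb() and sw_deflate() are hypothetical helpers standing in for a selftest-style copy/paste wrapper and a plain zlib path, and the retry count and sleep interval are arbitrary.

#include <stddef.h>
#include <unistd.h>

#define NX_PASTE_RETRIES	1000

/* Hypothetical helpers: nx_copy_paste_crb() wraps vas_copy()/vas_paste()
 * and returns 0 when CR0 reports the paste was accepted; sw_deflate() is
 * a software (zlib) fallback. */
int nx_copy_paste_crb(void *paste_addr, void *crb);
int sw_deflate(const void *in, size_t in_len, void *out, size_t *out_len);

static int submit_or_fallback(void *paste_addr, void *crb,
			      const void *in, size_t in_len,
			      void *out, size_t *out_len)
{
	int i;

	for (i = 0; i < NX_PASTE_RETRIES; i++) {
		if (nx_copy_paste_crb(paste_addr, crb) == 0)
			return 0;	/* accepted by the NX engine */
		usleep(10);		/* window may return after migration */
	}

	/* Credits apparently lost for good: fall back to software */
	return sw_deflate(in, in_len, out, out_len);
}
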
469 * struct which is used to unmap during migration if the window is
477 struct file *fp = vma->vm_file; in vas_mmap_close()
478 struct coproc_instance *cp_inst = fp->private_data; in vas_mmap_close()
482 if (!cp_inst || !cp_inst->txwin) { in vas_mmap_close()
487 txwin = cp_inst->txwin; in vas_mmap_close()
492 if (WARN_ON(txwin->task_ref.vma != vma)) { in vas_mmap_close()
497 mutex_lock(&txwin->task_ref.mmap_mutex); in vas_mmap_close()
498 txwin->task_ref.vma = NULL; in vas_mmap_close()
499 mutex_unlock(&txwin->task_ref.mmap_mutex); in vas_mmap_close()
509 struct coproc_instance *cp_inst = fp->private_data; in coproc_mmap()
516 txwin = cp_inst->txwin; in coproc_mmap()
518 if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { in coproc_mmap()
520 (vma->vm_end - vma->vm_start), PAGE_SIZE); in coproc_mmap()
521 return -EINVAL; in coproc_mmap()
524 /* Ensure instance has an open send window */ in coproc_mmap()
526 pr_err("No send window open?\n"); in coproc_mmap()
527 return -EINVAL; in coproc_mmap()
530 if (!cp_inst->coproc->vops || !cp_inst->coproc->vops->paste_addr) { in coproc_mmap()
532 return -EACCES; in coproc_mmap()
540 * -EACCES and expects the user space reissue mmap() when it in coproc_mmap()
546 mutex_lock(&txwin->task_ref.mmap_mutex); in coproc_mmap()
547 if (txwin->status != VAS_WIN_ACTIVE) { in coproc_mmap()
549 rc = -EACCES; in coproc_mmap()
553 paste_addr = cp_inst->coproc->vops->paste_addr(txwin); in coproc_mmap()
556 rc = -EINVAL; in coproc_mmap()
564 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); in coproc_mmap()
566 prot = __pgprot(pgprot_val(vma->vm_page_prot) | _PAGE_DIRTY); in coproc_mmap()
568 rc = remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff, in coproc_mmap()
569 vma->vm_end - vma->vm_start, prot); in coproc_mmap()
572 vma->vm_start, rc); in coproc_mmap()
574 txwin->task_ref.vma = vma; in coproc_mmap()
575 vma->vm_ops = &vas_vm_ops; in coproc_mmap()
578 mutex_unlock(&txwin->task_ref.mmap_mutex); in coproc_mmap()
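
coproc_mmap() maps the paste address only while the window is active; otherwise it returns -EACCES, and as the comment notes, user space is expected to reissue mmap() once the window is usable again (new credits or migration completed). A user-space sketch of that retry is below, with the sleep interval and the single-page mapping length as assumptions.

#include <errno.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* Retry mmap() of the paste address while the driver reports EACCES
 * (window temporarily inactive); any other failure is treated as fatal. */
static void *map_paste_addr(int fd)
{
	void *addr;

	for (;;) {
		addr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
		if (addr != MAP_FAILED)
			return addr;
		if (errno != EACCES)
			return NULL;
		usleep(1000);	/* wait for credits / migration to finish */
	}
}
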
588 return -EINVAL; in coproc_ioctl()
600 * Supporting only nx-gzip coprocessor type now, but this API code
607 int rc = -EINVAL; in vas_register_coproc_api()
625 coproc_device.class->devnode = coproc_devnode; in vas_register_coproc_api()
643 pr_err("Unable to create coproc-%d %d\n", MINOR(devno), rc); in vas_register_coproc_api()