Lines matching refs:vops — every reference to the struct xe_vma_ops container ("vops") in the Xe driver's VM bind/rebind code. Only the matching source lines are shown, with their original line numbers; the short sketches after each group are hedged reconstructions of the surrounding context.

580 static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)  in xe_vma_ops_alloc()  argument
585 if (!vops->pt_update_ops[i].num_ops) in xe_vma_ops_alloc()
588 vops->pt_update_ops[i].ops = in xe_vma_ops_alloc()
589 kmalloc_objs(*vops->pt_update_ops[i].ops, in xe_vma_ops_alloc()
590 vops->pt_update_ops[i].num_ops, in xe_vma_ops_alloc()
592 if (!vops->pt_update_ops[i].ops) in xe_vma_ops_alloc()
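From the hits above, xe_vma_ops_alloc() (line 580) sizes one ops array per tile from the counts accumulated in pt_update_ops[i].num_ops. A hedged reconstruction, assuming the XE_MAX_TILES_PER_DEVICE loop bound, the GFP flags, and the -ENOBUFS/-ENOMEM split for the array_of_binds case; only the vops lines are confirmed by the listing:

static int xe_vma_ops_alloc(struct xe_vma_ops *vops, bool array_of_binds)
{
        int i;

        for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i) {
                /* Tiles that accumulated no PT updates get no array. */
                if (!vops->pt_update_ops[i].num_ops)
                        continue;

                vops->pt_update_ops[i].ops =
                        kmalloc_objs(*vops->pt_update_ops[i].ops,
                                     vops->pt_update_ops[i].num_ops,
                                     GFP_KERNEL); /* flags assumed */
                if (!vops->pt_update_ops[i].ops)
                        /* Assumption: array binds report -ENOBUFS so the
                         * caller can tell which bind hit the failure. */
                        return array_of_binds ? -ENOBUFS : -ENOMEM;
        }

        return 0;
}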
610 static void xe_vma_svm_prefetch_ops_fini(struct xe_vma_ops *vops) in xe_vma_svm_prefetch_ops_fini() argument
614 if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH)) in xe_vma_svm_prefetch_ops_fini()
617 list_for_each_entry(op, &vops->list, link) in xe_vma_svm_prefetch_ops_fini()
621 static void xe_vma_ops_fini(struct xe_vma_ops *vops) in xe_vma_ops_fini() argument
625 xe_vma_svm_prefetch_ops_fini(vops); in xe_vma_ops_fini()
628 kfree(vops->pt_update_ops[i].ops); in xe_vma_ops_fini()
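Teardown is the mirror image: xe_vma_svm_prefetch_ops_fini() bails out early unless the batch carries XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH (line 614), and xe_vma_ops_fini() then frees each tile's ops array. A sketch, assuming the same tile loop bound; kfree(NULL) is a no-op, so unallocated tiles need no check:

static void xe_vma_ops_fini(struct xe_vma_ops *vops)
{
        int i;

        /* Drop per-op SVM prefetch state, if any was attached. */
        xe_vma_svm_prefetch_ops_fini(vops);

        for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
                kfree(vops->pt_update_ops[i].ops);
}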
631 static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops, u8 tile_mask, int inc_val) in xe_vma_ops_incr_pt_update_ops() argument
640 vops->pt_update_ops[i].num_ops += inc_val; in xe_vma_ops_incr_pt_update_ops()
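xe_vma_ops_incr_pt_update_ops() is the counting half of a two-pass scheme: callers bump per-tile counts while building the op list, and xe_vma_ops_alloc() later sizes the arrays from those counts. A sketch, assuming tile_mask is a bitmask indexed like pt_update_ops[]:

static void xe_vma_ops_incr_pt_update_ops(struct xe_vma_ops *vops,
                                          u8 tile_mask, int inc_val)
{
        int i;

        if (!inc_val)
                return;

        /* Only tiles selected by the mask accumulate work. */
        for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
                if (BIT(i) & tile_mask)
                        vops->pt_update_ops[i].num_ops += inc_val;
}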
665 static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma, in xe_vm_ops_add_rebind() argument
675 list_add_tail(&op->link, &vops->list); in xe_vm_ops_add_rebind()
676 xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1); in xe_vm_ops_add_rebind()
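xe_vm_ops_add_rebind() shows the per-op pattern every add helper below repeats: allocate an op, queue it on vops->list, account one PT update per tile in the mask. A minimal sketch; encoding the rebind as a GPUVA map op for the same VMA is an assumption, since those lines never reference vops and so are absent from the listing:

static int xe_vm_ops_add_rebind(struct xe_vma_ops *vops, struct xe_vma *vma,
                                u8 tile_mask)
{
        struct xe_vma_op *op;

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op)
                return -ENOMEM;

        /* Assumption: a rebind is a map op pointing back at the VMA. */
        op->tile_mask = tile_mask;
        op->base.op = DRM_GPUVA_OP_MAP;
        op->map.vma = vma;

        list_add_tail(&op->link, &vops->list);
        xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1);

        return 0;
}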
682 struct xe_vma_ops *vops);
683 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
691 struct xe_vma_ops vops; in xe_vm_rebind() local
700 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_rebind()
702 vops.pt_update_ops[i].wait_vm_bookkeep = true; in xe_vm_rebind()
713 err = xe_vm_ops_add_rebind(&vops, vma, in xe_vm_rebind()
719 err = xe_vma_ops_alloc(&vops, false); in xe_vm_rebind()
723 fence = ops_execute(vm, &vops); in xe_vm_rebind()
733 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vm_rebind()
737 xe_vma_ops_fini(&vops); in xe_vm_rebind()
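Taken together, xe_vm_rebind() is the canonical vops life cycle: init, add ops, alloc, execute, then free the individually allocated ops and the per-tile arrays (the forward declarations at lines 682-683 exist only because ops_execute() and xe_vma_ops_init() are defined further down the file). A hedged outline of the flow at lines 691-737; the rebind-list iteration, tile mask, and fence handling are assumptions:

        struct xe_vma_ops vops;
        struct xe_vma_op *op, *next_op;
        struct dma_fence *fence;
        int err = 0, i;

        xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
        for (i = 0; i < XE_MAX_TILES_PER_DEVICE; ++i)
                vops.pt_update_ops[i].wait_vm_bookkeep = true;

        /* Assumption: one rebind op per VMA on the VM's rebind list. */
        list_for_each_entry(vma, &vm->rebind_list, combined_links.rebind) {
                err = xe_vm_ops_add_rebind(&vops, vma, vma->tile_present);
                if (err)
                        goto free_ops;
        }

        err = xe_vma_ops_alloc(&vops, false);
        if (err)
                goto free_ops;

        fence = ops_execute(vm, &vops);
        if (IS_ERR(fence))
                err = PTR_ERR(fence);
        else
                dma_fence_put(fence);

free_ops:
        /* The list owns the ops; drop them before the arrays. */
        list_for_each_entry_safe(op, next_op, &vops.list, link) {
                list_del(&op->link);
                kfree(op);
        }
        xe_vma_ops_fini(&vops);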
745 struct xe_vma_ops vops; in xe_vma_rebind() local
755 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vma_rebind()
756 vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT; in xe_vma_rebind()
758 vops.pt_update_ops[id].wait_vm_bookkeep = true; in xe_vma_rebind()
759 vops.pt_update_ops[tile->id].q = in xe_vma_rebind()
763 err = xe_vm_ops_add_rebind(&vops, vma, tile_mask); in xe_vma_rebind()
767 err = xe_vma_ops_alloc(&vops, false); in xe_vma_rebind()
773 fence = ops_execute(vm, &vops); in xe_vma_rebind()
776 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vma_rebind()
780 xe_vma_ops_fini(&vops); in xe_vma_rebind()
799 xe_vm_ops_add_range_rebind(struct xe_vma_ops *vops, in xe_vm_ops_add_range_rebind() argument
811 list_add_tail(&op->link, &vops->list); in xe_vm_ops_add_range_rebind()
812 xe_vma_ops_incr_pt_update_ops(vops, tile_mask, 1); in xe_vm_ops_add_range_rebind()
835 struct xe_vma_ops vops; in xe_vm_range_rebind() local
846 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_rebind()
847 vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT; in xe_vm_range_rebind()
849 vops.pt_update_ops[id].wait_vm_bookkeep = true; in xe_vm_range_rebind()
850 vops.pt_update_ops[tile->id].q = in xe_vm_range_rebind()
854 err = xe_vm_ops_add_range_rebind(&vops, vma, range, tile_mask); in xe_vm_range_rebind()
858 err = xe_vma_ops_alloc(&vops, false); in xe_vm_range_rebind()
864 fence = ops_execute(vm, &vops); in xe_vm_range_rebind()
867 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vm_range_rebind()
871 xe_vma_ops_fini(&vops); in xe_vm_range_rebind()
887 xe_vm_ops_add_range_unbind(struct xe_vma_ops *vops, in xe_vm_ops_add_range_unbind() argument
897 list_add_tail(&op->link, &vops->list); in xe_vm_ops_add_range_unbind()
898 xe_vma_ops_incr_pt_update_ops(vops, range->tile_present, 1); in xe_vm_ops_add_range_unbind()
917 struct xe_vma_ops vops; in xe_vm_range_unbind() local
930 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_range_unbind()
932 vops.pt_update_ops[id].wait_vm_bookkeep = true; in xe_vm_range_unbind()
933 vops.pt_update_ops[tile->id].q = in xe_vm_range_unbind()
937 err = xe_vm_ops_add_range_unbind(&vops, range); in xe_vm_range_unbind()
941 err = xe_vma_ops_alloc(&vops, false); in xe_vm_range_unbind()
947 fence = ops_execute(vm, &vops); in xe_vm_range_unbind()
950 list_for_each_entry_safe(op, next_op, &vops.list, link) { in xe_vm_range_unbind()
954 xe_vma_ops_fini(&vops); in xe_vm_range_unbind()
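xe_vma_rebind(), xe_vm_range_rebind() and xe_vm_range_unbind() repeat that life cycle with small twists: the two rebind variants additionally raise XE_VMA_OPS_FLAG_SKIP_TLB_WAIT (lines 756, 847); all three pin each tile's queue up front (the '=' at lines 759/850/933 is split from its right-hand side, which doesn't mention vops and so isn't listed); and the unbind path derives its tile mask from range->tile_present (line 898) instead of taking one from the caller. A sketch of the shared per-tile setup, assuming the queue comes from the tile's migration engine:

        xe_vma_ops_init(&vops, vm, NULL, NULL, 0);
        vops.flags |= XE_VMA_OPS_FLAG_SKIP_TLB_WAIT; /* rebind paths only */
        for_each_tile(tile, vm->xe, id) {
                vops.pt_update_ops[id].wait_vm_bookkeep = true;
                /* Assumption: bind against the tile's migration queue. */
                vops.pt_update_ops[tile->id].q =
                        xe_tile_migrate_exec_queue(tile);
        }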
2250 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_create() argument
2275 vops->flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP; in vm_bind_ioctl_ops_create()
2404 vops->flags |= XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH; in vm_bind_ioctl_ops_create()
2617 struct xe_vma_ops *vops) in vm_bind_ioctl_ops_parse() argument
2636 list_add_tail(&op->link, &vops->list); in vm_bind_ioctl_ops_parse()
2663 xe_vma_ops_incr_pt_update_ops(vops, in vm_bind_ioctl_ops_parse()
2683 if (vops->flags & XE_VMA_OPS_FLAG_MADVISE) in vm_bind_ioctl_ops_parse()
2756 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, num_remap_ops); in vm_bind_ioctl_ops_parse()
2765 !(vops->flags & XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP)) in vm_bind_ioctl_ops_parse()
2769 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1); in vm_bind_ioctl_ops_parse()
2781 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, in vm_bind_ioctl_ops_parse()
2784 xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1); in vm_bind_ioctl_ops_parse()
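On the ioctl side, vm_bind_ioctl_ops_create() marks batch-wide capabilities (ALLOW_SVM_UNMAP at line 2275, HAS_SVM_PREFETCH at line 2404), and vm_bind_ioctl_ops_parse() turns each GPUVA op into list membership plus per-tile accounting. A schematic of that accounting with the op-type dispatch condensed; which count belongs to which case, and the num_prefetch_ranges name, are assumptions:

        list_add_tail(&op->link, &vops->list);

        switch (op->base.op) {
        case DRM_GPUVA_OP_MAP:
        case DRM_GPUVA_OP_UNMAP:
                xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask, 1);
                break;
        case DRM_GPUVA_OP_REMAP:
                /* Surviving prev/next pieces add extra updates. */
                xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
                                              num_remap_ops);
                break;
        case DRM_GPUVA_OP_PREFETCH:
                /* Hypothetical: one update per resolved SVM range. */
                xe_vma_ops_incr_pt_update_ops(vops, op->tile_mask,
                                              num_prefetch_ranges);
                break;
        default:
                break;
        }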
2979 struct xe_vma_ops *vops, struct xe_vma_op *op) in op_lock_and_prep() argument
2989 res_evict = !(vops->flags & XE_VMA_OPS_ARRAY_OF_BINDS); in op_lock_and_prep()
3051 static int vm_bind_ioctl_ops_prefetch_ranges(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_bind_ioctl_ops_prefetch_ranges() argument
3056 if (!(vops->flags & XE_VMA_OPS_FLAG_HAS_SVM_PREFETCH)) in vm_bind_ioctl_ops_prefetch_ranges()
3059 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_prefetch_ranges()
3072 struct xe_vma_ops *vops) in vm_bind_ioctl_ops_lock_and_prep() argument
3081 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_lock_and_prep()
3082 err = op_lock_and_prep(exec, vm, vops, op); in vm_bind_ioctl_ops_lock_and_prep()
3088 if (vops->inject_error && in vm_bind_ioctl_ops_lock_and_prep()
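Before execution, every op is locked and prepared under one drm_exec transaction; op_lock_and_prep() also decides whether an op may evict resident memory, which line 2989 disables for array-of-binds batches. A sketch of the driving loop, assuming the VM's common dma-resv is reserved via drm_gpuvm_prepare_vm() and simplifying the debug-only inject_error hook at line 3088:

static int vm_bind_ioctl_ops_lock_and_prep(struct drm_exec *exec,
                                           struct xe_vm *vm,
                                           struct xe_vma_ops *vops)
{
        struct xe_vma_op *op;
        int err;

        /* Assumption: reserve the VM-wide reservation object first. */
        err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, 1);
        if (err)
                return err;

        list_for_each_entry(op, &vops->list, link) {
                err = op_lock_and_prep(exec, vm, vops, op);
                if (err)
                        return err;
        }

        /* Simplified: debug builds can force a failure at this point. */
        if (vops->inject_error)
                return -ENOSPC;

        return 0;
}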
3122 static void trace_xe_vm_ops_execute(struct xe_vma_ops *vops) in trace_xe_vm_ops_execute() argument
3126 list_for_each_entry(op, &vops->list, link) in trace_xe_vm_ops_execute()
3130 static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops) in vm_ops_setup_tile_args() argument
3132 struct xe_exec_queue *q = vops->q; in vm_ops_setup_tile_args()
3138 if (vops->pt_update_ops[id].num_ops) in vm_ops_setup_tile_args()
3141 if (vops->pt_update_ops[id].q) in vm_ops_setup_tile_args()
3145 vops->pt_update_ops[id].q = q; in vm_ops_setup_tile_args()
3149 vops->pt_update_ops[id].q = vm->q[id]; in vm_ops_setup_tile_args()
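vm_ops_setup_tile_args() resolves which exec queue services each tile and counts how many tiles have work: a queue already pinned on the tile wins, then the caller's vops->q, then the VM's default per-tile queue. A reconstruction; the for_each_tile iterator, the return value, and the omission of any multi-GT queue advancing are assumptions:

static int vm_ops_setup_tile_args(struct xe_vm *vm, struct xe_vma_ops *vops)
{
        struct xe_exec_queue *q = vops->q;
        struct xe_tile *tile;
        int number_tiles = 0;
        u8 id;

        for_each_tile(tile, vm->xe, id) {
                if (vops->pt_update_ops[id].num_ops)
                        ++number_tiles;

                /* A queue pinned earlier (e.g. by a rebind path) wins. */
                if (vops->pt_update_ops[id].q)
                        continue;

                if (q)
                        vops->pt_update_ops[id].q = q;
                else
                        vops->pt_update_ops[id].q = vm->q[id];
        }

        return number_tiles;
}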
3157 struct xe_vma_ops *vops) in ops_execute() argument
3166 number_tiles = vm_ops_setup_tile_args(vm, vops); in ops_execute()
3173 if (!(vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT)) in ops_execute()
3191 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3194 err = xe_pt_update_ops_prepare(tile, vops); in ops_execute()
3201 trace_xe_vm_ops_execute(vops); in ops_execute()
3204 struct xe_exec_queue *q = vops->pt_update_ops[tile->id].q; in ops_execute()
3207 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3210 fence = xe_pt_update_ops_run(tile, vops); in ops_execute()
3216 if (vops->flags & XE_VMA_OPS_FLAG_SKIP_TLB_WAIT) in ops_execute()
3232 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3235 xe_pt_update_ops_fini(tile, vops); in ops_execute()
3242 if (!vops->pt_update_ops[id].num_ops) in ops_execute()
3245 xe_pt_update_ops_abort(tile, vops); in ops_execute()
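ops_execute() drives a per-tile state machine: prepare on every active tile, run to produce fences, fini on success, abort everything on failure. An outline of the phases; the XE_VMA_OPS_FLAG_SKIP_TLB_WAIT gating at lines 3173/3216 and the composite-fence bookkeeping are elided, and returning the last tile's fence plus the -ENODATA empty-batch return are simplifying assumptions:

        struct xe_tile *tile;
        struct dma_fence *fence = NULL;
        int number_tiles, err;
        u8 id;

        number_tiles = vm_ops_setup_tile_args(vm, vops);
        if (number_tiles == 0)
                return ERR_PTR(-ENODATA);

        for_each_tile(tile, vm->xe, id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                err = xe_pt_update_ops_prepare(tile, vops);
                if (err)
                        goto err_abort;
        }

        trace_xe_vm_ops_execute(vops);

        for_each_tile(tile, vm->xe, id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                fence = xe_pt_update_ops_run(tile, vops);
                if (IS_ERR(fence)) {
                        err = PTR_ERR(fence);
                        goto err_abort;
                }
        }

        for_each_tile(tile, vm->xe, id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                xe_pt_update_ops_fini(tile, vops);
        }

        return fence;

err_abort:
        for_each_tile(tile, vm->xe, id) {
                if (!vops->pt_update_ops[id].num_ops)
                        continue;
                xe_pt_update_ops_abort(tile, vops);
        }
        return ERR_PTR(err);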
3288 static void vm_bind_ioctl_ops_fini(struct xe_vm *vm, struct xe_vma_ops *vops, in vm_bind_ioctl_ops_fini() argument
3295 ufence = find_ufence_get(vops->syncs, vops->num_syncs); in vm_bind_ioctl_ops_fini()
3296 list_for_each_entry(op, &vops->list, link) { in vm_bind_ioctl_ops_fini()
3309 for (i = 0; i < vops->num_syncs; i++) in vm_bind_ioctl_ops_fini()
3310 xe_sync_entry_signal(vops->syncs + i, fence); in vm_bind_ioctl_ops_fini()
3315 struct xe_vma_ops *vops) in vm_bind_ioctl_ops_execute() argument
3329 err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops); in vm_bind_ioctl_ops_execute()
3336 fence = ops_execute(vm, vops); in vm_bind_ioctl_ops_execute()
3340 vm_bind_ioctl_ops_fini(vm, vops, NULL); in vm_bind_ioctl_ops_execute()
3344 vm_bind_ioctl_ops_fini(vm, vops, fence); in vm_bind_ioctl_ops_execute()
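vm_bind_ioctl_ops_execute() wires lock/prep and execution together, and vm_bind_ioctl_ops_fini() settles any user fence (lines 3295-3296) and signals every sync entry against the final fence (lines 3309-3310); note fini runs even on failure, with a NULL fence (line 3340). A condensed sketch; the drm_exec retry loop is an assumption:

        struct drm_exec exec;
        struct dma_fence *fence;
        int err;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
        drm_exec_until_all_locked(&exec) {
                err = vm_bind_ioctl_ops_lock_and_prep(&exec, vm, vops);
                drm_exec_retry_on_contention(&exec);
                if (err)
                        goto unlock;
        }

        fence = ops_execute(vm, vops);
        if (IS_ERR(fence)) {
                err = PTR_ERR(fence);
                /* Clean up syncs/ufences even though nothing ran. */
                vm_bind_ioctl_ops_fini(vm, vops, NULL);
                goto unlock;
        }

        vm_bind_ioctl_ops_fini(vm, vops, fence);

unlock:
        drm_exec_fini(&exec);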
3530 static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm, in xe_vma_ops_init() argument
3534 memset(vops, 0, sizeof(*vops)); in xe_vma_ops_init()
3535 INIT_LIST_HEAD(&vops->list); in xe_vma_ops_init()
3536 vops->vm = vm; in xe_vma_ops_init()
3537 vops->q = q; in xe_vma_ops_init()
3538 vops->syncs = syncs; in xe_vma_ops_init()
3539 vops->num_syncs = num_syncs; in xe_vma_ops_init()
3540 vops->flags = 0; in xe_vma_ops_init()
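xe_vma_ops_init() appears almost in full at lines 3530-3540; only the signature's continuation is missing, because those lines don't mention vops. Completed below, with the remaining parameter types as assumptions (the memset already zeroes flags, so the explicit reset is belt-and-braces):

static void xe_vma_ops_init(struct xe_vma_ops *vops, struct xe_vm *vm,
                            struct xe_exec_queue *q,
                            struct xe_sync_entry *syncs, u32 num_syncs)
{
        memset(vops, 0, sizeof(*vops));
        INIT_LIST_HEAD(&vops->list);
        vops->vm = vm;
        vops->q = q;
        vops->syncs = syncs;
        vops->num_syncs = num_syncs;
        vops->flags = 0;
}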
3624 struct xe_vma_ops vops; in xe_vm_bind_ioctl() local
3758 xe_vma_ops_init(&vops, vm, q, syncs, num_syncs); in xe_vm_bind_ioctl()
3760 vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS; in xe_vm_bind_ioctl()
3770 ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i], obj_offset, in xe_vm_bind_ioctl()
3779 err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops); in xe_vm_bind_ioctl()
3785 vops.inject_error = true; in xe_vm_bind_ioctl()
3794 if (list_empty(&vops.list)) { in xe_vm_bind_ioctl()
3799 err = xe_vma_ops_alloc(&vops, args->num_binds > 1); in xe_vm_bind_ioctl()
3803 err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops); in xe_vm_bind_ioctl()
3807 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_ioctl()
3816 xe_vma_ops_fini(&vops); in xe_vm_bind_ioctl()
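xe_vm_bind_ioctl() strings the whole pipeline together; the num_binds > 1 decision feeds both the XE_VMA_OPS_ARRAY_OF_BINDS flag (line 3760) and the allocator's array_of_binds argument (line 3799). An end-to-end outline; the create() argument list past obj_offset, the -ENODATA empty-list return, and the unwind label are assumptions:

        xe_vma_ops_init(&vops, vm, q, syncs, num_syncs);
        if (args->num_binds > 1)
                vops.flags |= XE_VMA_OPS_ARRAY_OF_BINDS;

        for (i = 0; i < args->num_binds; ++i) {
                ops[i] = vm_bind_ioctl_ops_create(vm, &vops, bos[i],
                                                  obj_offset, addr, range,
                                                  op, flags, prefetch_region,
                                                  pat_index);
                if (IS_ERR(ops[i])) {
                        err = PTR_ERR(ops[i]);
                        goto unwind_ops;
                }

                err = vm_bind_ioctl_ops_parse(vm, ops[i], &vops);
                if (err)
                        goto unwind_ops;
        }

        if (list_empty(&vops.list)) {
                err = -ENODATA;
                goto unwind_ops;
        }

        err = xe_vma_ops_alloc(&vops, args->num_binds > 1);
        if (err)
                goto unwind_ops;

        err = vm_bind_ioctl_ops_prefetch_ranges(vm, &vops);
        if (err)
                goto unwind_ops;

        fence = vm_bind_ioctl_ops_execute(vm, &vops);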
3865 struct xe_vma_ops vops; in xe_vm_bind_kernel_bo() local
3877 xe_vma_ops_init(&vops, vm, q, NULL, 0); in xe_vm_bind_kernel_bo()
3879 ops = vm_bind_ioctl_ops_create(vm, &vops, bo, 0, addr, xe_bo_size(bo), in xe_vm_bind_kernel_bo()
3887 err = vm_bind_ioctl_ops_parse(vm, ops, &vops); in xe_vm_bind_kernel_bo()
3891 xe_assert(vm->xe, !list_empty(&vops.list)); in xe_vm_bind_kernel_bo()
3893 err = xe_vma_ops_alloc(&vops, false); in xe_vm_bind_kernel_bo()
3897 fence = vm_bind_ioctl_ops_execute(vm, &vops); in xe_vm_bind_kernel_bo()
3905 xe_vma_ops_fini(&vops); in xe_vm_bind_kernel_bo()
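xe_vm_bind_kernel_bo() is the in-kernel twin of that path: the same create/parse/alloc/execute sequence, but with no user syncs (NULL, 0 at line 3877) and an assertion instead of an error return when the parsed list is empty (line 3891), since a kernel-initiated bind of a whole BO must always yield at least one op.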
4373 struct xe_vma_ops vops; in xe_vm_alloc_vma() local
4443 xe_vma_ops_init(&vops, vm, NULL, NULL, 0); in xe_vm_alloc_vma()
4446 vops.flags |= XE_VMA_OPS_FLAG_MADVISE; in xe_vm_alloc_vma()
4448 vops.flags |= XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP; in xe_vm_alloc_vma()
4450 err = vm_bind_ioctl_ops_parse(vm, ops, &vops); in xe_vm_alloc_vma()
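Finally, xe_vm_alloc_vma() reuses the parse machinery for madvise-driven VMA splits: XE_VMA_OPS_FLAG_MADVISE (line 4446) selects the parse behaviour gated at line 2683, and XE_VMA_OPS_FLAG_ALLOW_SVM_UNMAP (line 4448) lifts the SVM unmap restriction checked at line 2765.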