Searched refs:vm_ops (Results 1 – 25 of 175) sorted by relevance


/linux/fs/coda/
file.c
37 struct vm_operations_struct vm_ops; member
126 container_of(vma->vm_ops, struct coda_vm_ops, vm_ops); in coda_vm_open()
138 container_of(vma->vm_ops, struct coda_vm_ops, vm_ops); in coda_vm_close()
144 vma->vm_ops = cvm_ops->host_vm_ops; in coda_vm_close()
212 cvm_ops->host_vm_ops = vma->vm_ops; in coda_file_mmap()
213 if (vma->vm_ops) in coda_file_mmap()
214 cvm_ops->vm_ops = *vma->vm_ops; in coda_file_mmap()
216 cvm_ops->vm_ops.open = coda_vm_open; in coda_file_mmap()
217 cvm_ops->vm_ops.close = coda_vm_close; in coda_file_mmap()
221 vma->vm_ops = &cvm_ops->vm_ops; in coda_file_mmap()
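
The coda hits are a compact example of the vm_ops wrapping pattern: at mmap time the host filesystem's ops are saved, a private copy with overridden open/close is installed on the VMA, and container_of() recovers the wrapper from the embedded copy inside each callback. A minimal sketch of the idea, using illustrative names rather than the actual coda code, and assuming the wrapper is zero-initialised (e.g. via kzalloc):

struct wrap_vm_ops {
	struct vm_operations_struct vm_ops;             /* copy handed to the VMA */
	const struct vm_operations_struct *host_vm_ops; /* original host ops */
};

static void wrap_vm_open(struct vm_area_struct *vma)
{
	struct wrap_vm_ops *w =
		container_of(vma->vm_ops, struct wrap_vm_ops, vm_ops);

	if (w->host_vm_ops && w->host_vm_ops->open)
		w->host_vm_ops->open(vma);      /* delegate to the host */
}

static void wrap_install(struct vm_area_struct *vma, struct wrap_vm_ops *w)
{
	w->host_vm_ops = vma->vm_ops;           /* save the original ops */
	if (vma->vm_ops)
		w->vm_ops = *vma->vm_ops;       /* start from a full copy */
	w->vm_ops.open = wrap_vm_open;          /* then override selectively */
	vma->vm_ops = &w->vm_ops;
}
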
/linux/tools/testing/vma/
vma.c
465 const struct vm_operations_struct vm_ops = { in test_merge_new() local
511 vma_a->vm_ops = &vm_ops; /* This should have no impact. */ in test_merge_new()
548 vma_d->vm_ops = &vm_ops; /* This should have no impact. */ in test_merge_new()
566 vma_d->vm_ops = NULL; /* This would otherwise degrade the merge. */ in test_merge_new()
721 const struct vm_operations_struct vm_ops = { in test_vma_merge_with_close() local
796 vma_next->vm_ops = &vm_ops; in test_vma_merge_with_close()
821 vma->vm_ops = &vm_ops; in test_vma_merge_with_close()
849 vma->vm_ops = &vm_ops; in test_vma_merge_with_close()
878 vma->vm_ops = &vm_ops; in test_vma_merge_with_close()
904 vma_next->vm_ops = &vm_ops; in test_vma_merge_with_close()
[all …]
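
These selftests pin down how vm_ops influences merging: installing an ops table without a ->close hook leaves merge decisions unchanged (the two "no impact" cases), while test_vma_merge_with_close probes how a ->close hook restricts which VMAs may be merged away; the rule itself is sketched under mm/vma.c below.
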
vma_internal.h
299 const struct vm_operations_struct *vm_ops; member
370 const struct vm_operations_struct *vm_ops; member
610 vma->vm_ops = &vma_dummy_vm_ops; in vma_init()
714 return !vma->vm_ops; in vma_is_anonymous()
1072 vma->vm_ops = NULL; in vma_set_anonymous()
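
Two conventions from these hits are worth spelling out: a freshly initialised VMA points at an empty dummy ops table (vma_init), and anonymous memory is identified purely by the absence of vm_ops. Restated directly from the lines above:

/* An anonymous VMA is simply one with no operations table. */
static inline bool vma_is_anonymous(struct vm_area_struct *vma)
{
	return !vma->vm_ops;
}

static inline void vma_set_anonymous(struct vm_area_struct *vma)
{
	vma->vm_ops = NULL;
}
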
/linux/fs/kernfs/
file.c
375 if (!of->vm_ops) in kernfs_vma_open()
381 if (of->vm_ops->open) in kernfs_vma_open()
382 of->vm_ops->open(vma); in kernfs_vma_open()
393 if (!of->vm_ops) in kernfs_vma_fault()
400 if (of->vm_ops->fault) in kernfs_vma_fault()
401 ret = of->vm_ops->fault(vmf); in kernfs_vma_fault()
413 if (!of->vm_ops) in kernfs_vma_page_mkwrite()
420 if (of->vm_ops->page_mkwrite) in kernfs_vma_page_mkwrite()
421 ret = of->vm_ops->page_mkwrite(vmf); in kernfs_vma_page_mkwrite()
436 if (!of->vm_ops) in kernfs_vma_access()
[all …]
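
The kernfs wrappers all follow the same guard-then-delegate shape: bail out if the saved ops are gone (the backing implementation may have been released), then forward only when the specific callback exists. A condensed sketch of that shape; the real code also takes and drops an active reference on the kernfs node, which is omitted here:

static vm_fault_t kernfs_style_fault(struct kernfs_open_file *of,
				     struct vm_fault *vmf)
{
	if (!of->vm_ops)                /* backing ops already gone */
		return VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)          /* forward only if implemented */
		return of->vm_ops->fault(vmf);
	return VM_FAULT_SIGBUS;         /* sketch's choice when absent */
}
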
/linux/arch/um/kernel/
tlb.c
18 struct vm_ops { struct
53 struct vm_ops *ops) in update_pte_range()
94 struct vm_ops *ops) in update_pmd_range()
117 struct vm_ops *ops) in update_pud_range()
140 struct vm_ops *ops) in update_p4d_range()
164 struct vm_ops ops; in um_tlb_sync()
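
Note that struct vm_ops in arch/um/kernel/tlb.c is an unrelated local type that merely shares the name: it bundles the map/unmap callbacks threaded through the update_*_range() page-table walk, not a struct vm_operations_struct.
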
/linux/ipc/
shm.c
89 const struct vm_operations_struct *vm_ops; member
311 if (sfd->vm_ops->open) in shm_open()
312 sfd->vm_ops->open(vma); in shm_open()
404 if (sfd->vm_ops->close) in shm_close()
405 sfd->vm_ops->close(vma); in shm_close()
545 return sfd->vm_ops->fault(vmf); in shm_fault()
553 if (sfd->vm_ops->may_split) in shm_may_split()
554 return sfd->vm_ops->may_split(vma, addr); in shm_may_split()
564 if (sfd->vm_ops->pagesize) in shm_pagesize()
565 return sfd->vm_ops->pagesize(vma); in shm_pagesize()
[all …]
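
ipc/shm.c shows the same delegation idea as kernfs: the backing file's ops are cached in shm_file_data at mmap time, mandatory hooks are forwarded unconditionally, and optional hooks fall back to a sane default. The pagesize forwarding at lines 564-565, for instance, degrades to the normal page size, roughly:

static unsigned long shm_style_pagesize(struct shm_file_data *sfd,
					struct vm_area_struct *vma)
{
	if (sfd->vm_ops->pagesize)              /* e.g. hugetlb backing */
		return sfd->vm_ops->pagesize(vma);
	return PAGE_SIZE;                       /* default granularity */
}
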
/linux/drivers/gpu/drm/xe/
xe_mmio_gem.c
42 static const struct vm_operations_struct vm_ops = { variable
51 .vm_ops = &vm_ops,
/linux/mm/
vma.c
23 const struct vm_operations_struct *vm_ops; member
509 if (vma->vm_ops && vma->vm_ops->may_split) { in __split_vma()
510 err = vma->vm_ops->may_split(vma, addr); in __split_vma()
542 if (new->vm_ops && new->vm_ops->open) in __split_vma()
543 new->vm_ops->open(new); in __split_vma()
775 return !vma->vm_ops || !vma->vm_ops->close; in can_merge_remove_vma()
1878 if (new_vma->vm_ops && new_vma->vm_ops->open) in copy_vma()
1879 new_vma->vm_ops->open(new_vma); in copy_vma()
2001 static bool vm_ops_needs_writenotify(const struct vm_operations_struct *vm_ops) in vm_ops_needs_writenotify() argument
2003 return vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite); in vm_ops_needs_writenotify()
[all …]
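
Two core rules are visible in these hits. First, a VMA may only be merged out of existence when its ops lack a ->close hook, because the merge logic cannot account for a driver's close-time side effects; this is the check at line 775 and what the vma.c selftests above probe:

static bool can_merge_remove_vma(struct vm_area_struct *vma)
{
	return !vma->vm_ops || !vma->vm_ops->close;
}

Second, vm_ops_needs_writenotify() (lines 2001-2003) treats any ops table providing page_mkwrite or pfn_mkwrite as requiring write notification, so shared mappings start write-protected and the first write traps into the hook.
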
vma_init.c
45 dest->vm_ops = src->vm_ops; in vm_area_init_from()
secretmem.c
132 desc->vm_ops = &secretmem_vm_ops; in secretmem_mmap_prepare()
139 return vma->vm_ops == &secretmem_vm_ops; in vma_is_secretmem()
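
vma_is_secretmem() also demonstrates a common identification idiom: since each subsystem installs its own static ops table, comparing vma->vm_ops against that table's address is a cheap ownership test (SGX does the same in encl.h below):

static bool vma_is_secretmem(struct vm_area_struct *vma)
{
	return vma->vm_ops == &secretmem_vm_ops;
}
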
memory.c
608 vma->vm_ops ? vma->vm_ops->fault : NULL, in print_bad_page_map()
690 if (vma->vm_ops && vma->vm_ops->find_normal_page) in __vm_normal_page()
691 return vma->vm_ops->find_normal_page(vma, addr); in __vm_normal_page()
2254 return vma->vm_ops && vma->vm_ops->pfn_mkwrite && in vm_mixed_zeropage_allowed()
3489 ret = vmf->vma->vm_ops->page_mkwrite(vmf); in do_page_mkwrite()
3517 bool page_mkwrite = vma->vm_ops && vma->vm_ops->page_mkwrite; in fault_dirty_shared_page()
3602 if (vma->vm_ops->map_pages || !(vmf->flags & FAULT_FLAG_VMA_LOCK)) in vmf_can_call_fault()
3854 if (vma->vm_ops && vma->vm_ops->pfn_mkwrite) { in wp_pfn_shared()
3863 ret = vma->vm_ops->pfn_mkwrite(vmf); in wp_pfn_shared()
3880 if (vma->vm_ops && vma->vm_ops->page_mkwrite) { in wp_page_shared()
[all …]
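
The memory.c hits are the consumer side of the write-notify machinery: wp_pfn_shared() and wp_page_shared() invoke pfn_mkwrite/page_mkwrite on the first write to a write-protected shared mapping. The provider side is just an ops table implementing one of those hooks; a minimal illustrative sketch with hypothetical names:

static vm_fault_t my_page_mkwrite(struct vm_fault *vmf)
{
	/* e.g. start dirty tracking before the PTE becomes writable;
	 * returning 0 lets the fault path lock the page itself, while
	 * real filesystems often lock it and return VM_FAULT_LOCKED. */
	return 0;
}

static const struct vm_operations_struct notify_vm_ops = {
	.page_mkwrite = my_page_mkwrite,
};
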
mremap.c
1046 if (vma->vm_ops && vma->vm_ops->may_split) { in prep_move_vma()
1048 err = vma->vm_ops->may_split(vma, old_addr); in prep_move_vma()
1050 err = vma->vm_ops->may_split(vma, old_addr + old_len); in prep_move_vma()
1211 else if (vma->vm_ops && vma->vm_ops->mremap) in copy_vma_and_data()
1212 err = vma->vm_ops->mremap(new_vma); in copy_vma_and_data()
internal.h
177 vma->vm_ops = &vma_dummy_vm_ops; in mmap_file()
189 if (vma->vm_ops && vma->vm_ops->close) { in vma_close()
190 vma->vm_ops->close(vma); in vma_close()
196 vma->vm_ops = &vma_dummy_vm_ops; in vma_close()
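
mmap_file() and vma_close() both lean on vma_dummy_vm_ops: a failed or already-closed mapping has its vm_ops pointed at an empty table, which keeps later teardown paths from re-invoking ->close on the same region. The close side, condensed from lines 189-196:

static void vma_close(struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close) {
		vma->vm_ops->close(vma);
		/* an empty ops table turns a second close into a no-op */
		vma->vm_ops = &vma_dummy_vm_ops;
	}
}
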
/linux/drivers/gpu/drm/xen/
xen_drm_front_gem.c
66 vma->vm_ops = gem_obj->funcs->vm_ops; in xen_drm_front_gem_object_mmap()
111 .vm_ops = &xen_drm_drv_vm_ops,
/linux/drivers/gpu/drm/mediatek/
mtk_gem.c
20 static const struct vm_operations_struct vm_ops = { variable
31 .vm_ops = &vm_ops,
/linux/arch/hexagon/kernel/
Makefile
14 obj-y += vm_entry.o vm_events.o vm_switch.o vm_ops.o vm_init_segtable.o
/linux/drivers/accel/amdxdna/
amdxdna_gem.c
291 vma->vm_ops->close(vma); in amdxdna_insert_pages()
299 vma->vm_ops = NULL; in amdxdna_insert_pages()
312 vma->vm_ops->close(vma); in amdxdna_insert_pages()
360 vma->vm_ops = &drm_gem_shmem_vm_ops; in amdxdna_gem_dmabuf_mmap()
378 vma->vm_ops->close(vma); in amdxdna_gem_dmabuf_mmap()
495 .vm_ops = &drm_gem_shmem_vm_ops,
/linux/include/linux/
agp_backend.h
56 const struct vm_operations_struct *vm_ops; member
/linux/drivers/gpu/drm/vmwgfx/
vmwgfx_gem.c
121 vma->vm_ops = NULL; in vmw_gem_mmap()
154 .vm_ops = &vmw_vm_ops,
/linux/drivers/gpu/drm/i915/gem/
i915_gem_mman.c
1027 vma->vm_ops = obj->ops->mmap_ops; in i915_gem_object_mmap()
1038 vma->vm_ops = &vm_ops_cpu; in i915_gem_object_mmap()
1046 vma->vm_ops = &vm_ops_cpu; in i915_gem_object_mmap()
1052 vma->vm_ops = &vm_ops_cpu; in i915_gem_object_mmap()
1058 vma->vm_ops = &vm_ops_gtt; in i915_gem_object_mmap()
/linux/drivers/pci/
mmap.c
47 vma->vm_ops = &pci_phys_vm_ops; in pci_mmap_resource_range()
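
Most of the driver hits (xe, xen, mediatek, amdxdna, vmwgfx, i915, PCI, and the videobuf2 and TTM entries below) reduce to the same move: the mmap handler validates and maps the region, then installs a static ops table so later faults and lifetime events route back into the driver. A generic sketch with hypothetical names:

static vm_fault_t my_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;         /* placeholder fault handler */
}

static const struct vm_operations_struct my_vm_ops = {
	.fault = my_fault,
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &my_vm_ops;       /* route faults to the driver */
	return 0;
}
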
/linux/drivers/gpu/drm/msm/
msm_gem_vma.c
120 struct list_head vm_ops; member
470 list_add_tail(&op->node, &arg->job->vm_ops); in vm_op_enqueue()
692 while (!list_empty(&job->vm_ops)) { in msm_vma_job_run()
694 list_first_entry(&job->vm_ops, struct msm_vm_op, node); in msm_vma_job_run()
755 while (!list_empty(&job->vm_ops)) { in msm_vma_job_free()
757 list_first_entry(&job->vm_ops, struct msm_vm_op, node); in msm_vma_job_free()
955 INIT_LIST_HEAD(&job->vm_ops); in vm_bind_job_create()
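
As with arch/um above, the vm_ops member in msm_gem_vma.c is only a name collision: it is a list_head queueing deferred VM_BIND operations on a job, drained in msm_vma_job_run() and msm_vma_job_free(), not a struct vm_operations_struct.
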
/linux/arch/x86/kernel/cpu/sgx/
encl.h
95 if (!result || result->vm_ops != &sgx_vm_ops) in sgx_encl_find()
/linux/drivers/media/common/videobuf2/
videobuf2-vmalloc.c
195 vma->vm_ops = &vb2_common_vm_ops; in vb2_vmalloc_mmap()
197 vma->vm_ops->open(vma); in vb2_vmalloc_mmap()
/linux/drivers/gpu/drm/ttm/
ttm_bo_vm.c
496 if (!vma->vm_ops) in ttm_bo_mmap_obj()
497 vma->vm_ops = &ttm_bo_vm_ops; in ttm_bo_mmap_obj()
