/linux/include/linux/

huge_mm.h:
    10  vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
    14  void huge_pmd_set_accessed(struct vm_fault *vmf);
    20  void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
    22  static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)  in huge_pud_set_accessed()
    27  vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
    40  vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
    42  vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
    44  vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
    46  vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
   475  vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
  [all ...]
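These are the huge-page entry points: a driver's ->huge_fault() either installs a PMD/PUD-sized mapping via one of the vmf_insert_*() helpers or returns VM_FAULT_FALLBACK so the core retries with base pages. A minimal sketch, assuming the write-flag parameter that the truncated vmf_insert_pfn_pmd() line above elides, with mydrv_pfn_for() as a hypothetical lookup:

#include <linux/mm.h>
#include <linux/huge_mm.h>
#include <linux/pgtable.h>

/* Hypothetical: a real driver would resolve its own backing pfn. */
static unsigned long mydrv_pfn_for(struct vm_area_struct *vma,
				   unsigned long addr);

static vm_fault_t mydrv_huge_fault(struct vm_fault *vmf, unsigned int order)
{
	unsigned long pfn;

	if (order != PMD_ORDER)
		return VM_FAULT_FALLBACK;	/* core retries with base pages */

	pfn = mydrv_pfn_for(vmf->vma, vmf->address & PMD_MASK);
	return vmf_insert_pfn_pmd(vmf, pfn, vmf->flags & FAULT_FLAG_WRITE);
}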
mm.h:
   526  * vm_fault is filled by the pagefault handler and passed to the vma's
   535  struct vm_fault {
   536-572  members of struct vm_fault: an anonymous struct (536), flags (543),
            pmd (545), pud (547), an anonymous union (550), cow_page (557),
            page (558), pte (564), ptl (568), prealloc_pte (572)
   603  vm_fault_t (*fault)(struct vm_fault *vmf);
   604  vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
   605  vm_fault_t (*map_pages)(struct vm_fault *vmf,
   611  vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
   614  vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
   675  * These must be here rather than mmap_lock.h as dependent on vm_fault type,
   679  static inline void release_fault_lock(struct vm_fault *vmf)  in release_fault_lock()
   687  static inline void assert_fault_locked(struct vm_fault *vmf)  in assert_fault_locked()
  3588  vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)  in vm_fault_to_errno()
  [all ...]
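struct vm_fault is the contract between the core fault handler and the VMA's vm_operations_struct: the core fills it in, the callbacks above consume it. A minimal ->fault() sketch for a hypothetical driver (all mydrv_* names are illustrative); setting vmf->page and returning 0 tells the core to map that page at the faulting address:

#include <linux/mm.h>

struct mydrv {			/* hypothetical driver state */
	struct page **pages;
	unsigned long nr_pages;
};

static vm_fault_t mydrv_fault(struct vm_fault *vmf)
{
	struct mydrv *drv = vmf->vma->vm_private_data;

	if (vmf->pgoff >= drv->nr_pages)
		return VM_FAULT_SIGBUS;

	/* Hand the backing page to the core; it maps it at vmf->address. */
	vmf->page = drv->pages[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}

static const struct vm_operations_struct mydrv_vm_ops = {
	.fault = mydrv_fault,
};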
mempolicy.h:
   171  int mpol_misplaced(struct folio *folio, struct vm_fault *vmf,
   290  struct vm_fault *vmf,  in mpol_misplaced()
userfaultfd_k.h:
    81  extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
   297  static inline vm_fault_t handle_userfault(struct vm_fault *vmf,  in handle_userfault()
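handle_userfault() is how a fault path hands an unresolved fault to a userspace monitor. A simplified sketch of the usual call pattern, assuming a missing-page fault; real callers drop the PTE map/lock first, and handle_userfault() may block:

#include <linux/userfaultfd_k.h>

static vm_fault_t maybe_defer_to_userspace(struct vm_fault *vmf)
{
	/* A monitor registered for missing pages gets the fault instead. */
	if (userfaultfd_missing(vmf->vma))
		return handle_userfault(vmf, VM_UFFD_MISSING);

	return 0;	/* no monitor; the kernel fills the page itself */
}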
dax.h:
   259  vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
   262  vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
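dax_iomap_fault() is the common backend that filesystem fault handlers wrap. A sketch of such a wrapper; the trailing parameters (pfn out-pointer, error pointer, iomap ops) are assumed from common usage and the dax.c hits below, and myfs_iomap_ops is hypothetical:

#include <linux/dax.h>
#include <linux/iomap.h>

static const struct iomap_ops myfs_iomap_ops;	/* hypothetical */

static vm_fault_t myfs_dax_fault(struct vm_fault *vmf)
{
	unsigned long pfn;

	/* order 0 = base-page fault; ->huge_fault passes its order through */
	return dax_iomap_fault(vmf, 0, &pfn, NULL, &myfs_iomap_ops);
}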
/linux/include/trace/events/

fs_dax.h:
    11  TP_PROTO(struct inode *inode, struct vm_fault *vmf,
    56  TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
    64  TP_PROTO(struct inode *inode, struct vm_fault *vmf,
    98  TP_PROTO(struct inode *inode, struct vm_fault *vmf, \
   106  TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
   140  TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
/linux/drivers/gpu/drm/ttm/

ttm_bo_vm.c:
    44  struct vm_fault *vmf)  in ttm_bo_vm_fault_idle()
   119  struct vm_fault *vmf)  in ttm_bo_vm_reserve()
   167  * @vmf: The struct vm_fault given as argument to the fault callback
   183  vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,  in ttm_bo_vm_fault_reserved()
   292  vm_fault_t ttm_bo_vm_dummy_page(struct vm_fault *vmf, pgprot_t prot)  in ttm_bo_vm_dummy_page()
   322  vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf)  in ttm_bo_vm_fault()
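Because ttm_bo_vm_fault() and friends are exported, a TTM-based driver's vm_ops usually amounts to wiring them up. A sketch of that wiring; the helper set and header path are assumed from common usage:

#include <drm/ttm/ttm_bo.h>

static const struct vm_operations_struct mydrm_ttm_vm_ops = {
	.fault = ttm_bo_vm_fault,
	.open = ttm_bo_vm_open,
	.close = ttm_bo_vm_close,
	.access = ttm_bo_vm_access,
};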
/linux/mm/

memory.c:
    96  static vm_fault_t do_fault(struct vm_fault *vmf);
    97  static vm_fault_t do_anonymous_page(struct vm_fault *vmf);
    98  static bool vmf_pte_changed(struct vm_fault *vmf);
   104  static __always_inline bool vmf_orig_pte_uffd_wp(struct vm_fault *vmf)  in vmf_orig_pte_uffd_wp()
  2639  vm_fault_t vmf_insert_page_mkwrite(struct vm_fault *vmf, struct page *page,  in vmf_insert_page_mkwrite()
  3211  static inline int pte_unmap_same(struct vm_fault *vmf)  in pte_unmap_same()
  3233  struct vm_fault *vmf)  in __wp_page_copy_user()
  3350  static vm_fault_t do_page_mkwrite(struct vm_fault *vmf, struct folio *folio)  in do_page_mkwrite()
  3383  static vm_fault_t fault_dirty_shared_page(struct vm_fault *vmf)  in fault_dirty_shared_page()
  3436  static inline void wp_page_reuse(struct vm_fault *vmf ...  in wp_page_reuse()
  [all ...]
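Several of these helpers (pte_unmap_same(), vmf_pte_changed()) exist to revalidate the PTE after the lock was dropped: a concurrent fault may have changed it before anything can be installed. A simplified sketch of that recheck pattern using the vmf fields involved:

#include <linux/mm.h>

static bool mydrv_pte_unchanged(struct vm_fault *vmf)
{
	bool same;

	/* vmf->ptl guards the page table that vmf->pte points into. */
	spin_lock(vmf->ptl);
	same = pte_same(ptep_get(vmf->pte), vmf->orig_pte);
	spin_unlock(vmf->ptl);

	return same;
}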
swap.h:
    79  struct vm_fault *vmf);
   160  struct vm_fault *vmf)  in swapin_readahead()
/linux/arch/arc/include/asm/

pgtable-bits-arcv2.h:
   103  struct vm_fault;
   104  void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,
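The line above truncates the hook's parameter list; for reference, a sketch of the full signature, which architectures with no per-PTE cache maintenance implement as an empty inline (nr being the number of consecutive PTEs just installed):

#include <linux/mm_types.h>
#include <linux/pgtable.h>

struct vm_fault;

static inline void update_mmu_cache_range(struct vm_fault *vmf,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *ptep, unsigned int nr)
{
	/* No-op on architectures without per-PTE cache maintenance. */
}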
/linux/fs/ocfs2/

mmap.c:
    31  static vm_fault_t ocfs2_fault(struct vm_fault *vmf)  in ocfs2_fault()
   113  static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)  in ocfs2_page_mkwrite()
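ocfs2_page_mkwrite() follows the canonical ->page_mkwrite() shape. A simplified sketch of that shape, not ocfs2's actual logic: lock the folio, check it was not truncated away, dirty it, and return VM_FAULT_LOCKED so the core leaves the folio locked:

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	struct inode *inode = file_inode(vmf->vma->vm_file);

	folio_lock(folio);
	if (folio->mapping != inode->i_mapping) {
		/* Raced with truncate; tell the core to retry the fault. */
		folio_unlock(folio);
		return VM_FAULT_NOPAGE;
	}
	folio_mark_dirty(folio);

	/* VM_FAULT_LOCKED: the core expects the folio still locked. */
	return VM_FAULT_LOCKED;
}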
/linux/fs/bcachefs/

fs-io-pagecache.h:
   167  vm_fault_t bch2_page_fault(struct vm_fault *);
   168  vm_fault_t bch2_page_mkwrite(struct vm_fault *);
/linux/arch/hexagon/mm/

Makefile:
     6  obj-y := init.o uaccess.o vm_fault.o cache.o
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/fb/

nv50.c:
   121  static const struct nvkm_enum vm_fault[] = {
   174  re = nvkm_enum_find(vm_fault, st1);  in nv50_fb_intr()
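nvkm_enum tables map raw fault-status bits to printable names. A sketch of the table shape and lookup; the entry values and names here are illustrative, not the real nv50 codes, and the header paths are the driver-local ones:

#include <core/enum.h>
#include <core/subdev.h>

static const struct nvkm_enum vm_fault[] = {
	{ 0x00000000, "PT_NOT_PRESENT" },	/* values illustrative */
	{ 0x00000001, "PT_TOO_SHORT" },
	{}	/* sentinel: lookup stops at .name == NULL */
};

static void report_vm_fault(struct nvkm_subdev *subdev, u32 st1)
{
	const struct nvkm_enum *en = nvkm_enum_find(vm_fault, st1);

	nvkm_error(subdev, "vm fault: %s\n", en ? en->name : "unknown");
}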
/linux/drivers/gpu/drm/amd/amdgpu/

gmc_v12_0.c:
   159  adev->gmc.vm_fault.num_types = 1;  in gmc_v12_0_set_irq_funcs()
   160  adev->gmc.vm_fault.funcs = &gmc_v12_0_irq_funcs;  in gmc_v12_0_set_irq_funcs()
   631  return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);  in gmc_v12_0_late_init()
   762  &adev->gmc.vm_fault);  in gmc_v12_0_sw_init()
   769  &adev->gmc.vm_fault);  in gmc_v12_0_sw_init()
   932  amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);  in gmc_v12_0_hw_fini()
gmc_v11_0.c:
   166  adev->gmc.vm_fault.num_types = 1;  in gmc_v11_0_set_irq_funcs()
   167  adev->gmc.vm_fault.funcs = &gmc_v11_0_irq_funcs;  in gmc_v11_0_set_irq_funcs()
   636  return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);  in gmc_v11_0_late_init()
   786  &adev->gmc.vm_fault);  in gmc_v11_0_sw_init()
   793  &adev->gmc.vm_fault);  in gmc_v11_0_sw_init()
   964  amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);  in gmc_v11_0_hw_fini()
gmc_v10_0.c:
   197  adev->gmc.vm_fault.num_types = 1;  in gmc_v10_0_set_irq_funcs()
   198  adev->gmc.vm_fault.funcs = &gmc_v10_0_irq_funcs;  in gmc_v10_0_set_irq_funcs()
   663  return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);  in gmc_v10_0_late_init()
   842  &adev->gmc.vm_fault);  in gmc_v10_0_sw_init()
   849  &adev->gmc.vm_fault);  in gmc_v10_0_sw_init()
  1043  amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);  in gmc_v10_0_hw_fini()
gmc_v6_0.c:
   790  return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);  in gmc_v6_0_late_init()
   828  r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);  in gmc_v6_0_sw_init()
   832  r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);  in gmc_v6_0_sw_init()
   931  amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);  in gmc_v6_0_hw_fini()
  1160  adev->gmc.vm_fault.num_types = 1;  in gmc_v6_0_set_irq_funcs()
  1161  adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;  in gmc_v6_0_set_irq_funcs()
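All four GMC generations above repeat one lifecycle: register the fault interrupt source in sw_init, enable it in late_init, disable it in hw_fini. A condensed sketch of that pattern (source IDs 146/147 taken from the gmc_v6_0.c lines above; the mygmc_* function names are illustrative):

#include "amdgpu.h"
#include "amdgpu_irq.h"

static int mygmc_sw_init(struct amdgpu_device *adev)
{
	int r;

	/* Two hardware source IDs feed the same amdgpu_irq_src. */
	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146,
			      &adev->gmc.vm_fault);
	if (r)
		return r;

	return amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147,
				 &adev->gmc.vm_fault);
}

static int mygmc_late_init(struct amdgpu_device *adev)
{
	/* Unmask the interrupt only once the handler is ready. */
	return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
}

static void mygmc_hw_fini(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
}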
/linux/fs/

dax.c:
  1003  static int copy_cow_page_dax(struct vm_fault *vmf, const struct iomap_iter *iter)  in copy_cow_page_dax()
  1042  static void *dax_insert_entry(struct xa_state *xas, struct vm_fault *vmf,  in dax_insert_entry()
  1358  static vm_fault_t dax_load_hole(struct xa_state *xas, struct vm_fault *vmf,  in dax_load_hole()
  1374  static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,  in dax_pmd_load_hole()
  1425  static vm_fault_t dax_pmd_load_hole(struct xa_state *xas, struct vm_fault *vmf,  in dax_pmd_load_hole()
  1790  static vm_fault_t dax_fault_cow_page(struct vm_fault *vmf,  in dax_fault_cow_page()
  1829  static vm_fault_t dax_fault_iter(struct vm_fault *vmf,  in dax_fault_iter()
  1886  static vm_fault_t dax_iomap_pte_fault(struct vm_fault *vmf, unsigned long *pfnp,  in dax_iomap_pte_fault()
  1965  static bool dax_fault_check_fallback(struct vm_fault *vmf, struct xa_state *xas,  in dax_fault_check_fallback()
  1998  static vm_fault_t dax_iomap_pmd_fault(struct vm_fault *vmf ...  in dax_iomap_pmd_fault()
  [all ...]
/linux/tools/testing/vma/

vma_internal.h:
   403  struct vm_fault {};
   422  vm_fault_t (*fault)(struct vm_fault *vmf);
   423  vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
   424  vm_fault_t (*map_pages)(struct vm_fault *vmf,
   430  vm_fault_t (*page_mkwrite)(struct vm_fault *vmf);
   433  vm_fault_t (*pfn_mkwrite)(struct vm_fault *vmf);
/linux/fs/xfs/

xfs_file.c:
  1730  struct vm_fault *vmf,  in xfs_dax_fault_locked()
  1752  struct vm_fault *vmf,  in xfs_dax_read_fault()
  1779  struct vm_fault *vmf,  in __xfs_write_fault()
  1818  struct vm_fault *vmf,  in xfs_write_fault_zoned()
  1844  struct vm_fault *vmf,  in xfs_write_fault()
  1854  struct vm_fault *vmf)  in xfs_is_write_fault()
  1862  struct vm_fault *vmf)  in xfs_filemap_fault()
  1879  struct vm_fault *vmf,  in xfs_filemap_huge_fault()
  1893  struct vm_fault *vmf)  in xfs_filemap_page_mkwrite()
  1905  struct vm_fault *vmf)  in xfs_filemap_pfn_mkwrite()
  [all ...]
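These handlers are wired into a vm_operations_struct when the file is mmapped. A sketch mirroring xfs_file_vm_ops as commonly defined; the exact member set varies by kernel version:

#include <linux/mm.h>

static const struct vm_operations_struct xfs_like_vm_ops = {
	.fault		= xfs_filemap_fault,
	.huge_fault	= xfs_filemap_huge_fault,
	.map_pages	= filemap_map_pages,	/* generic readahead mapping */
	.page_mkwrite	= xfs_filemap_page_mkwrite,
	.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
};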
/linux/arch/hexagon/include/asm/

cacheflush.h:
    61  static inline void update_mmu_cache_range(struct vm_fault *vmf,  in update_mmu_cache_range()
/linux/arch/x86/entry/vdso/

vma.c:
    53  struct vm_area_struct *vma, struct vm_fault *vmf)  in vdso_fault()
    92  struct vm_area_struct *vma, struct vm_fault *vmf)  in vvar_vclock_fault()
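These two handlers take an extra vm_area_struct argument because they are vm_special_mapping fault hooks, not vm_operations_struct ones. A sketch of the wiring, abbreviated from the usual arch/x86 arrangement:

#include <linux/mm_types.h>

static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf);

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
};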
/linux/arch/csky/abiv1/

cacheflush.c:
    44  void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,  in update_mmu_cache_range()
/linux/arch/csky/abiv2/

cacheflush.c:
    10  void update_mmu_cache_range(struct vm_fault *vmf, struct vm_area_struct *vma,  in update_mmu_cache_range()