/linux/Documentation/translations/zh_CN/core-api/
    cachetlb.rst | 81 | 5) ``void update_mmu_cache(struct vm_area_struct *vma,
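The documentation hit above is the canonical description of this hook: core mm code calls update_mmu_cache() after a PTE has been installed or made more permissive, giving the architecture a chance to preload the TLB or maintain virtually indexed caches. A minimal sketch of the shape the hook takes, assuming only the signature quoted above (the empty body is the common case for architectures whose MMU walks page tables in hardware):

    /*
     * Sketch of the hook's canonical form, not any one port's copy.
     * Architectures with hardware page-table walkers typically leave
     * the body empty; software-managed TLBs preload the entry here.
     */
    static inline void update_mmu_cache(struct vm_area_struct *vma,
                                        unsigned long address, pte_t *ptep)
    {
    }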
|
/linux/arch/hexagon/include/asm/
    cacheflush.h | 68 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/arch/microblaze/include/asm/
    tlbflush.h | 37 | #define update_mmu_cache(vma, addr, pte) \  [macro]
|
/linux/arch/sh/mm/
    tlbex_32.c | 80 | update_mmu_cache(NULL, address, pte);  [in handle_tlbmiss()]
|
/linux/arch/sh/include/asm/
    pgtable.h | 113 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/arch/m68k/include/asm/
    pgtable_mm.h | 145 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/arch/arc/include/asm/
    pgtable-bits-arcv2.h | 107 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/arch/powerpc/include/asm/
    pgtable.h | 49 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/arch/riscv/include/asm/
    pgtable.h | 518 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
    pgtable.h | 529 | update_mmu_cache(vma, address, ptep);  [in update_mmu_cache_pmd()]
    pgtable.h | 955 | update_mmu_cache(vma, address, ptep);  [in update_mmu_cache_pud()]
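The riscv hits at lines 529 and 955 show the huge-page variants reusing the base hook. A sketch of that delegation pattern follows; the quoted calls confirm only that update_mmu_cache_pmd() and update_mmu_cache_pud() forward to update_mmu_cache(), so the pointer cast here is an assumption:

    /* Sketch only: the real riscv code may differ in detail. */
    static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
                                            unsigned long address, pmd_t *pmdp)
    {
            /* Treat the PMD slot as a PTE and reuse the base hook. */
            update_mmu_cache(vma, address, (pte_t *)pmdp);
    }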
|
/linux/arch/csky/include/asm/
    pgtable.h | 263 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/arch/nios2/include/asm/
    pgtable.h | 285 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
    pgtable.h | 298 | * update_mmu_cache will unconditionally execute, handling both  [in ptep_set_access_flags()]
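The nios2 hit at line 298 (and the matching mips comment further down) sits inside ptep_set_access_flags(): the port always reports the PTE as changed, so the caller's subsequent update_mmu_cache() runs unconditionally and also covers the spurious-fault case. A hedged sketch of that pattern; the body is illustrative, not copied from either port:

    static inline int ptep_set_access_flags(struct vm_area_struct *vma,
                                            unsigned long address, pte_t *ptep,
                                            pte_t entry, int dirty)
    {
            set_pte(ptep, entry);
            /*
             * Always report a change: update_mmu_cache() then executes
             * unconditionally, handling both the changed-PTE case and
             * the spurious fault where nothing actually changed.
             */
            return true;
    }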
|
/linux/arch/alpha/include/asm/
    pgtable.h | 294 | extern inline void update_mmu_cache(struct vm_area_struct * vma,  [function]
|
/linux/arch/xtensa/include/asm/
    pgtable.h | 402 | #define update_mmu_cache(vma, address, ptep) \  [macro]
|
/linux/arch/sparc/include/asm/
    pgtable_32.h | 314 | #define update_mmu_cache(vma, address, ptep) do { } while (0)  [macro]
|
    pgtable_64.h | 983 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/mm/
    userfaultfd.c | 229 | update_mmu_cache(dst_vma, dst_addr, dst_pte);  [in mfill_atomic_install_pte()]
    userfaultfd.c | 371 | update_mmu_cache(dst_vma, dst_addr, dst_pte);  [in mfill_atomic_pte_zeropage()]
    userfaultfd.c | 453 | update_mmu_cache(dst_vma, dst_addr, dst_pte);  [in mfill_atomic_pte_poison()]
|
    migrate_device.c | 690 | update_mmu_cache(vma, addr, ptep);  [in migrate_vma_insert_page()]
|
    hugetlb.c | 5500 | update_mmu_cache(vma, address, ptep);  [in set_huge_ptep_writable()]
    hugetlb.c | 6812 | update_mmu_cache(vma, vmf.address, vmf.pte);  [in hugetlb_fault()]
    hugetlb.c | 6908 | update_mmu_cache(dst_vma, dst_addr, dst_pte);  [in hugetlb_mfill_atomic_pte()]
    hugetlb.c | 7065 | update_mmu_cache(dst_vma, dst_addr, dst_pte);  [in hugetlb_mfill_atomic_pte()]
|
    memory.c | 766 | update_mmu_cache(vma, address, ptep);  [in restore_exclusive_pte()]
    memory.c | 2167 | update_mmu_cache(vma, addr, pte);  [in insert_page_into_pte_locked()]
    memory.c | 2471 | update_mmu_cache(vma, addr, pte);  [in insert_pfn()]
    memory.c | 2485 | update_mmu_cache(vma, addr, pte); /* XXX: why not for insert_page? */  [in insert_pfn()]
    memory.c | 5998 | * There is also a hook called "update_mmu_cache()" that architectures
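The memory.c hit at line 5998 is the comment stating the caller-side contract: core mm installs or upgrades a PTE, then invokes the hook so the architecture can react. A sketch of the sequence the mm/ call sites above follow; the function name here is illustrative, and the real sites additionally hold the PTE lock:

    /* Illustrative caller pattern, not a copy of any mm/ function. */
    static void example_install_pte(struct mm_struct *mm,
                                    struct vm_area_struct *vma,
                                    unsigned long addr, pte_t *ptep, pte_t pte)
    {
            set_pte_at(mm, addr, ptep, pte);        /* publish the new PTE */
            update_mmu_cache(vma, addr, ptep);      /* let the arch preload TLB/caches */
    }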
|
    madvise.c | 1231 | update_mmu_cache(walk->vma, addr, pte);  [in guard_remove_pte_entry()]
|
    migrate.c | 423 | update_mmu_cache(vma, pvmw.address, pvmw.pte);  [in remove_migration_pte()]
|
    gup.c | 789 | update_mmu_cache(vma, address, pte);  [in follow_pfn_pte()]
|
/linux/arch/loongarch/include/asm/
    pgtable.h | 446 | #define update_mmu_cache(vma, addr, ptep) \  [macro]
|
/linux/arch/mips/include/asm/
    pgtable.h | 501 | * update_mmu_cache will unconditionally execute, handling both
    pgtable.h | 588 | #define update_mmu_cache(vma, address, ptep) \  [macro]
|
/linux/arch/x86/include/asm/
    pgtable.h | 1495 | static inline void update_mmu_cache(struct vm_area_struct *vma,  [function]
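Contrast the two no-op idioms visible in this listing: sparc32 stubs the hook out with a do { } while (0) macro, while x86 defines an inline function, which keeps the arguments type-checked even when the body does nothing. Shown side by side as a sketch (a real port defines exactly one of these; the empty x86 body is an assumption based on the signature quoted above):

    /* Macro form: arguments are discarded without type checking. */
    #define update_mmu_cache(vma, addr, ptep)       do { } while (0)

    /* Inline form: an empty body, but the arguments stay type-checked. */
    static inline void update_mmu_cache(struct vm_area_struct *vma,
                                        unsigned long addr, pte_t *ptep)
    {
    }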
|