/linux/arch/x86/mm/ |
H A D | init_64.c | 155 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; in sync_global_pgds_l5() 198 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; in sync_global_pgds_l4() 534 spin_lock(&init_mm.page_table_lock); in phys_pmd_init() 539 spin_unlock(&init_mm.page_table_lock); in phys_pmd_init() 565 spin_lock(&init_mm.page_table_lock); in phys_pmd_init() 569 spin_unlock(&init_mm.page_table_lock); in phys_pmd_init() 577 spin_lock(&init_mm.page_table_lock); in phys_pmd_init() 579 spin_unlock(&init_mm.page_table_lock); in phys_pmd_init() 651 spin_lock(&init_mm.page_table_lock); in phys_pud_init() 655 spin_unlock(&init_mm.page_table_lock); in phys_pud_init() [all...] |
H A D | fault.c | 279 pgt_lock = &pgd_page_get_mm(page)->page_table_lock; in arch_sync_kernel_mappings()
|
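The init_64.c hunks above serialize direct-map population on init_mm.page_table_lock, while sync_global_pgds_l4()/l5() and arch_sync_kernel_mappings() look the lock up from the pgd page via pgd_page_get_mm(). A minimal sketch of the first pattern; the helper name and arguments are hypothetical, only the locking mirrors phys_pmd_init():

    #include <linux/mm.h>
    #include <linux/spinlock.h>

    /* Publish one kernel PMD entry under init_mm.page_table_lock. */
    static void example_set_kernel_pmd(pmd_t *pmd, unsigned long pfn, pgprot_t prot)
    {
            spin_lock(&init_mm.page_table_lock);
            set_pmd(pmd, pfn_pmd(pfn, prot));
            spin_unlock(&init_mm.page_table_lock);
    }
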
/linux/Documentation/translations/zh_CN/mm/ |
H A D | split_page_table_lock.rst | 14 Originally, the mm->page_table_lock spinlock protected all page tables of the mm_struct. But this approach 38 the split page table lock for PTE tables is enabled. If the split lock is disabled, all tables are guarded by mm->page_table_lock 52 takes the pmd split lock for PMD_SIZE pages, otherwise mm->page_table_lock;
|
/linux/arch/powerpc/mm/ |
H A D | pgtable-frag.c | 40 spin_lock(&mm->page_table_lock); in get_pte_from_cache() 51 spin_unlock(&mm->page_table_lock); in get_pte_from_cache() 81 spin_lock(&mm->page_table_lock); in __alloc_for_ptecache() 91 spin_unlock(&mm->page_table_lock); in __alloc_for_ptecache()
|
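get_pte_from_cache() and __alloc_for_ptecache() above hand out sub-page PTE fragments: the per-mm cursor is read and advanced under mm->page_table_lock, while the backing page is allocated with the lock dropped. A rough sketch of that shape; the cache struct and helper are illustrative, not the powerpc ones:

    #include <linux/mm_types.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Hypothetical per-mm fragment cursor; powerpc keeps this in mm->context. */
    struct example_frag_cache {
            char *cur;
            char *end;
    };

    static void *example_get_frag(struct mm_struct *mm, struct example_frag_cache *c,
                                  size_t frag_size)
    {
            void *frag = NULL;

            spin_lock(&mm->page_table_lock);
            if (c->cur && c->cur + frag_size <= c->end) {
                    frag = c->cur;
                    c->cur += frag_size;    /* hand out the next fragment */
            }
            spin_unlock(&mm->page_table_lock);
            return frag;    /* NULL: caller allocates a fresh page outside the lock */
    }
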
/linux/mm/ |
H A D | init-mm.c | 39 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
|
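init-mm.c (and, further down, tboot.c and efi.c) instantiates an mm_struct statically, so the lock is initialized at build time with __SPIN_LOCK_UNLOCKED rather than spin_lock_init(). Trimmed illustration with a hypothetical mm; the real initializers set many more fields:

    #include <linux/mm_types.h>
    #include <linux/spinlock.h>

    static struct mm_struct example_mm = {
            .page_table_lock = __SPIN_LOCK_UNLOCKED(example_mm.page_table_lock),
    };
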
H A D | hugetlb_vmemmap.c | 74 spin_lock(&init_mm.page_table_lock); in vmemmap_split_pmd() 92 spin_unlock(&init_mm.page_table_lock); in vmemmap_split_pmd() 108 spin_lock(&init_mm.page_table_lock); in vmemmap_pmd_entry() 131 spin_unlock(&init_mm.page_table_lock); in vmemmap_pmd_entry()
|
H A D | debug_vm_pgtable.c | 1333 spin_lock(&(args.mm->page_table_lock)); in debug_vm_pgtable() 1338 spin_unlock(&(args.mm->page_table_lock)); in debug_vm_pgtable()
|
H A D | memory.c | 468 spin_lock(&init_mm.page_table_lock); in __pte_alloc_kernel() 474 spin_unlock(&init_mm.page_table_lock); in __pte_alloc_kernel() 6407 spin_lock(&mm->page_table_lock); in __p4d_alloc() 6414 spin_unlock(&mm->page_table_lock); in __p4d_alloc() 6430 spin_lock(&mm->page_table_lock); in __pud_alloc() 6437 spin_unlock(&mm->page_table_lock); in __pud_alloc()
|
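__p4d_alloc() and __pud_alloc() in memory.c allocate the new table page without any lock held (the allocation may sleep), then take mm->page_table_lock and re-check the parent entry before installing it, discarding the table if another thread raced ahead. A sketch of that shape; accounting and paravirt details are omitted and the helper name is hypothetical:

    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <asm/pgalloc.h>

    static int example_pud_alloc(struct mm_struct *mm, p4d_t *p4d, unsigned long address)
    {
            pud_t *new = pud_alloc_one(mm, address);        /* may sleep; lock not held */

            if (!new)
                    return -ENOMEM;

            spin_lock(&mm->page_table_lock);
            if (p4d_present(*p4d))          /* lost the race: entry already populated */
                    pud_free(mm, new);
            else
                    p4d_populate(mm, p4d, new);
            spin_unlock(&mm->page_table_lock);
            return 0;
    }
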
/linux/Documentation/mm/ |
H A D | split_page_table_lock.rst | 5 Originally, mm->page_table_lock spinlock protected all page tables of the 12 tables. Access to higher level tables protected by mm->page_table_lock. 42 If split lock is disabled, all tables are guarded by mm->page_table_lock. 56 takes pmd split lock for PMD_SIZE page, mm->page_table_lock
|
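The interface described in split_page_table_lock.rst hides which lock backs a given table: pte_offset_map_lock() maps the PTE and takes either the per-page split lock or mm->page_table_lock, and pte_unmap_unlock() undoes both. A minimal reader built on it; the helper is hypothetical and returning __pte(0) on failure is only a placeholder:

    #include <linux/mm.h>

    static pte_t example_read_pte(struct mm_struct *mm, pmd_t *pmd, unsigned long addr)
    {
            spinlock_t *ptl;
            pte_t *pte, val;

            pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
            if (!pte)
                    return __pte(0);        /* table vanished under us */
            val = ptep_get(pte);
            pte_unmap_unlock(pte, ptl);
            return val;
    }
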
H A D | process_addrs.rst | 261 this VMA. Initially set by mmap read, page_table_lock. 263 by the :c:macro:`!page_table_lock`. This When non-:c:macro:`NULL` and 426 mm->page_table_lock or pte_lock 455 ->page_table_lock or pte_lock (various, mainly in memory.c) 473 ->page_table_lock or pte_lock (anon_vma_prepare and various) 475 ->page_table_lock or pte_lock 511 :c:member:`!mm->page_table_lock` lock when modified. 641 PGD, P4D or PUD, the :c:member:`!mm->page_table_lock` must be held. This is 647 references the :c:member:`!mm->page_table_lock`. 649 Allocating a PTE will either use the :c:member:`!mm->page_table_lock` o [all...] |
/linux/mm/kasan/ |
H A D | shadow.c | 316 spin_lock(&init_mm.page_table_lock); in kasan_populate_vmalloc_pte() 321 spin_unlock(&init_mm.page_table_lock); in kasan_populate_vmalloc_pte() 468 spin_lock(&init_mm.page_table_lock); in kasan_depopulate_vmalloc_pte() 474 spin_unlock(&init_mm.page_table_lock); in kasan_depopulate_vmalloc_pte()
|
/linux/arch/powerpc/mm/book3s64/ |
H A D | pgtable.c | 392 spin_lock(&mm->page_table_lock); in get_pmd_from_cache() 403 spin_unlock(&mm->page_table_lock); in get_pmd_from_cache() 433 spin_lock(&mm->page_table_lock); in __alloc_for_pmdcache() 443 spin_unlock(&mm->page_table_lock); in __alloc_for_pmdcache()
|
H A D | radix_pgtable.c | 902 spin_lock(&init_mm.page_table_lock); in remove_pagetable() 928 spin_unlock(&init_mm.page_table_lock); in remove_pagetable()
|
/linux/drivers/iommu/ |
H A D | omap-iommu-debug.c | 187 spin_lock(&obj->page_table_lock); in dump_ioptable() 210 spin_unlock(&obj->page_table_lock); in dump_ioptable()
|
H A D | omap-iommu.c | 497 spin_unlock(&obj->page_table_lock); in iopte_alloc() 499 spin_lock(&obj->page_table_lock); in iopte_alloc() 653 spin_lock(&obj->page_table_lock); in iopgtable_store_entry_core() 655 spin_unlock(&obj->page_table_lock); in iopgtable_store_entry_core() 761 spin_lock(&obj->page_table_lock); in iopgtable_clear_entry() 766 spin_unlock(&obj->page_table_lock); in iopgtable_clear_entry() 776 spin_lock(&obj->page_table_lock); in iopgtable_clear_entry_all() 798 spin_unlock(&obj->page_table_lock); in iopgtable_clear_entry_all() 1192 spin_lock_init(&obj->page_table_lock); in omap_iommu_probe()
|
H A D | omap-iommu.h | 69 spinlock_t page_table_lock; /* protect iopgd */ member
|
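omap-iommu declares its own page_table_lock (the struct field above) to protect the iopgd first-level table; it is initialized with spin_lock_init() in omap_iommu_probe(), and iopte_alloc() drops it across the sleeping allocation before re-checking the slot. Stripped-down illustration with hypothetical names:

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct example_iommu {
            u32 *iopgd;                     /* first-level table */
            spinlock_t page_table_lock;     /* protects iopgd and second-level tables */
    };

    /* Caller holds obj->page_table_lock on entry and exit. */
    static u32 *example_iopte_alloc(struct example_iommu *obj, u32 *iopgd_entry, gfp_t gfp)
    {
            u32 *iopte;

            spin_unlock(&obj->page_table_lock);
            iopte = kzalloc(PAGE_SIZE, gfp);        /* may sleep, so the lock is dropped */
            spin_lock(&obj->page_table_lock);

            if (iopte && *iopgd_entry) {            /* raced: a table was installed meanwhile */
                    kfree(iopte);
                    iopte = NULL;                   /* caller re-reads the entry under the lock */
            }
            return iopte;
    }
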
/linux/arch/sparc/mm/ |
H A D | tlb.c | 277 assert_spin_locked(&mm->page_table_lock); in pgtable_trans_huge_deposit() 292 assert_spin_locked(&mm->page_table_lock); in pgtable_trans_huge_withdraw()
|
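pgtable_trans_huge_deposit() and pgtable_trans_huge_withdraw() on sparc take no lock themselves; they assert_spin_locked() that the caller already holds mm->page_table_lock before touching the per-mm stash of deposited tables. Illustrative shape with a hypothetical list-based stash:

    #include <linux/list.h>
    #include <linux/mm_types.h>
    #include <linux/spinlock.h>

    static void example_deposit(struct mm_struct *mm, struct list_head *stash,
                                struct list_head *entry)
    {
            assert_spin_locked(&mm->page_table_lock);       /* caller must already hold it */
            list_add(entry, stash);
    }
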
H A D | srmmu.c | 351 spin_lock(&mm->page_table_lock); in pte_alloc_one() 357 spin_unlock(&mm->page_table_lock); in pte_alloc_one() 367 spin_lock(&mm->page_table_lock); in pte_free() 370 spin_unlock(&mm->page_table_lock); in pte_free()
|
/linux/arch/parisc/include/asm/ |
H A D | mmu_context.h | 55 /* put physical address of page_table_lock in cr28 (tr4) in switch_mm_irqs_off() 57 spinlock_t *pgd_lock = &next->page_table_lock; in switch_mm_irqs_off()
|
/linux/arch/x86/xen/ |
H A D | mmu_pv.c | 713 spin_lock_nest_lock(ptl, &mm->page_table_lock); in xen_pte_lock() 827 spin_lock(&init_mm.page_table_lock); in xen_mm_pin_all() 838 spin_unlock(&init_mm.page_table_lock); in xen_mm_pin_all() 938 spin_lock(&init_mm.page_table_lock); in xen_mm_unpin_all() 950 spin_unlock(&init_mm.page_table_lock); in xen_mm_unpin_all() 955 spin_lock(&mm->page_table_lock); in xen_enter_mmap() 957 spin_unlock(&mm->page_table_lock); in xen_enter_mmap() 1040 spin_lock(&mm->page_table_lock); in xen_exit_mmap() 1046 spin_unlock(&mm->page_table_lock); in xen_exit_mmap()
|
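When Xen pins a page table it walks every PTE page while already holding mm->page_table_lock; xen_pte_lock() therefore takes the per-page split lock with spin_lock_nest_lock(), which tells lockdep that the outer lock serializes the nested acquisitions. Sketch of that shape, only meaningful when split PTE locks are configured; the config symbol below assumes a recent kernel:

    #include <linux/mm.h>
    #include <linux/spinlock.h>

    static spinlock_t *example_pte_nest_lock(struct page *page, struct mm_struct *mm)
    {
            spinlock_t *ptl = NULL;

    #if defined(CONFIG_SPLIT_PTE_PTLOCKS)
            ptl = ptlock_ptr(page_ptdesc(page));            /* this PTE page's split lock */
            spin_lock_nest_lock(ptl, &mm->page_table_lock); /* nested under the mm-wide lock */
    #endif
            return ptl;
    }
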
/linux/arch/x86/kernel/ |
H A D | tboot.c | 104 .page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
|
/linux/drivers/firmware/efi/ |
H A D | efi.c | 74 .page_table_lock = __SPIN_LOCK_UNLOCKED(efi_mm.page_table_lock),
|
/linux/include/linux/ |
H A D | mm.h | 2961 * We use mm->page_table_lock to guard all pagetable pages of the mm. 2965 return &mm->page_table_lock; in pte_lockptr() 2969 return &mm->page_table_lock; in ptep_lockptr() 3090 return &mm->page_table_lock; in pmd_lockptr() 3120 * which need to be converted from page_table_lock. 3124 return &mm->page_table_lock; in pud_lockptr()
|
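The mm.h helpers above are the single switch point: with split locks disabled, pte_lockptr(), ptep_lockptr(), pmd_lockptr() and pud_lockptr() all collapse to &mm->page_table_lock, so code written against pmd_lock() works in either configuration. Minimal caller; clearing a PMD like this, with no flushing, is illustrative only:

    #include <linux/mm.h>

    static void example_clear_pmd(struct mm_struct *mm, pmd_t *pmd)
    {
            spinlock_t *ptl = pmd_lock(mm, pmd);    /* split PMD lock or mm->page_table_lock */

            pmd_clear(pmd);
            spin_unlock(ptl);
    }
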
H A D | mm_types.h | 862 * page_table_lock */ 863 struct anon_vma *anon_vma; /* Serialized by page_table_lock */ 1025 spinlock_t page_table_lock; /* Protects page tables and some member 1139 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
|
H A D | hugetlb.h | 1246 return &mm->page_table_lock;
|