/linux/arch/x86/include/asm/

mmu_context.h
   55  static inline void init_new_context_ldt(struct mm_struct *mm)
   57          mm->context.ldt = NULL;
   58          init_rwsem(&mm->context.ldt_usr_sem);
   60  int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
   61  void destroy_context_ldt(struct mm_struct *mm);
   62  void ldt_arch_exit_mmap(struct mm_struct *mm);
   64  static inline void init_new_context_ldt(struct mm_struct *mm) { }
   66                                         struct mm_struct *mm)
   70  static inline void destroy_context_ldt(struct mm_struct *mm) { }
   71  static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { }
  [all …]

/linux/include/linux/

mmap_lock.h
   26  void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write);
   27  void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write,
   29  void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write);
   31  static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
   35          __mmap_lock_do_trace_start_locking(mm, write);
   38  static inline void __mmap_lock_trace_acquire_returned(struct mm_struct *mm,
   42          __mmap_lock_do_trace_acquire_returned(mm, write, success);
   45  static inline void __mmap_lock_trace_released(struct mm_struct *mm, bool write)
   48          __mmap_lock_do_trace_released(mm, write);
   53  static inline void __mmap_lock_trace_start_locking(struct mm_struct *mm,
  [all …]
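
The header above splits each hook into a trivial __mmap_lock_trace_*() inline wrapper and an out-of-line __mmap_lock_do_trace_*() function, which keeps tracing cost off the lock fast path: the wrapper only tests whether tracing is live before calling out. Below is a minimal standalone sketch of that guard pattern; a plain bool stands in for the tracepoint static key, and every name is invented for illustration.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the tracepoint's static key. */
static bool mmap_lock_tracing_on;

/* Out-of-line slow path, reached only when tracing is enabled. */
static void do_trace_start_locking(const void *mm, bool write)
{
        printf("mmap_lock: start locking mm=%p write=%d\n", mm, write);
}

/* Inline fast path: a single predictable branch when tracing is off. */
static inline void trace_start_locking(const void *mm, bool write)
{
        if (mmap_lock_tracing_on)
                do_trace_start_locking(mm, write);
}

int main(void)
{
        int fake_mm;

        trace_start_locking(&fake_mm, true);    /* tracing off: no output */
        mmap_lock_tracing_on = true;
        trace_start_locking(&fake_mm, true);    /* tracing on: one line */
        return 0;
}
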
mmu_notifier.h
   89                               struct mm_struct *mm);
  101                               struct mm_struct *mm,
  111                               struct mm_struct *mm,
  122                               struct mm_struct *mm,
  199                               struct mm_struct *mm,
  213          struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
  231          struct mm_struct *mm;
  251          struct mm_struct *mm;
  263          struct mm_struct *mm;
  271  static inline int mm_has_notifiers(struct mm_struct *mm)
  [all …]
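
In mmu_notifier.h, each subscriber supplies an ops table (alloc_notifier, release, the invalidation hooks) and is linked into a per-mm subscription list that the mm walks on teardown. Here is a standalone sketch of that subscribe-and-dispatch shape; it assumes a singly linked list where the kernel uses an hlist plus an interval tree, and every name is illustrative.

#include <stdio.h>

struct mm;
struct subscription;

/* Callback table the subscriber provides. */
struct subscription_ops {
        void (*release)(struct subscription *sub, struct mm *mm);
};

struct subscription {
        const struct subscription_ops *ops;
        struct subscription *next;      /* hlist in the kernel */
        struct mm *mm;
};

struct mm {
        struct subscription *subscribers;
};

static void mm_register(struct mm *mm, struct subscription *sub)
{
        sub->mm = mm;
        sub->next = mm->subscribers;
        mm->subscribers = sub;
}

/* Walk the list and fire each release hook, as a release pass would. */
static void mm_release_all(struct mm *mm)
{
        for (struct subscription *s = mm->subscribers; s; s = s->next)
                s->ops->release(s, mm);
}

static void my_release(struct subscription *sub, struct mm *mm)
{
        printf("release: sub=%p mm=%p\n", (void *)sub, (void *)mm);
}

int main(void)
{
        static const struct subscription_ops ops = { .release = my_release };
        struct mm mm = { 0 };
        struct subscription sub = { .ops = &ops };

        mm_register(&mm, &sub);
        mm_release_all(&mm);
        return 0;
}
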
ksm.h
   20  vm_flags_t ksm_vma_flags(struct mm_struct *mm, const struct file *file,
   22  int ksm_enable_merge_any(struct mm_struct *mm);
   23  int ksm_disable_merge_any(struct mm_struct *mm);
   24  int ksm_disable(struct mm_struct *mm);
   26  int __ksm_enter(struct mm_struct *mm);
   27  void __ksm_exit(struct mm_struct *mm);
   37  static inline void ksm_map_zero_page(struct mm_struct *mm)
   40          atomic_long_inc(&mm->ksm_zero_pages);
   43  static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
   47          atomic_long_dec(&mm->ksm_zero_pages);
  [all …]
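
ksm_map_zero_page() and ksm_might_unmap_zero_page() above are pure accounting: a per-mm atomic counter records how many KSM-placed zero pages are currently mapped, incremented on map and decremented on unmap. The same balance in a standalone C11 model (field and function names are stand-ins):

#include <stdatomic.h>
#include <stdio.h>

struct mm {
        _Atomic long ksm_zero_pages;    /* models mm->ksm_zero_pages */
};

static void map_zero_page(struct mm *mm)
{
        atomic_fetch_add(&mm->ksm_zero_pages, 1);
}

static void unmap_zero_page(struct mm *mm)
{
        atomic_fetch_sub(&mm->ksm_zero_pages, 1);
}

int main(void)
{
        struct mm mm = { 0 };

        map_zero_page(&mm);
        map_zero_page(&mm);
        unmap_zero_page(&mm);
        /* balance = pages still mapped; prints 1 */
        printf("zero pages mapped: %ld\n", atomic_load(&mm.ksm_zero_pages));
        return 0;
}
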
page_table_check.h
   17  void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
   19  void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
   21  void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
   23  void __page_table_check_ptes_set(struct mm_struct *mm, unsigned long addr,
   25  void __page_table_check_pmds_set(struct mm_struct *mm, unsigned long addr,
   27  void __page_table_check_puds_set(struct mm_struct *mm, unsigned long addr,
   29  void __page_table_check_pte_clear_range(struct mm_struct *mm,
   49  static inline void page_table_check_pte_clear(struct mm_struct *mm,
   55          __page_table_check_pte_clear(mm, addr, pte);
   58  static inline void page_table_check_pmd_clear(struct mm_struct *mm,
  [all …]
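
Loosely, the page_table_check hooks enforce that a physical page is never both mapped writable and reachable through any second userspace mapping. The userspace model below encodes that invariant under this simplified reading; the kernel's real bookkeeping distinguishes anon from file pages and lives in per-page counters, so treat this as an assumption-laden sketch:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_PFNS 8

static int ro_maps[NR_PFNS];    /* read-only mapcount per page */
static int rw_maps[NR_PFNS];    /* writable mapcount per page */

/* Set-side check: many RO mappings may coexist, but a writable
 * mapping must be the page's only mapping. */
static void check_pte_set(int pfn, bool writable)
{
        if (writable)
                assert(ro_maps[pfn] == 0 && rw_maps[pfn] == 0);
        else
                assert(rw_maps[pfn] == 0);
        writable ? rw_maps[pfn]++ : ro_maps[pfn]++;
}

/* Clear-side check: counts must never go negative. */
static void check_pte_clear(int pfn, bool writable)
{
        writable ? rw_maps[pfn]-- : ro_maps[pfn]--;
        assert(ro_maps[pfn] >= 0 && rw_maps[pfn] >= 0);
}

int main(void)
{
        check_pte_set(3, false);
        check_pte_set(3, false);        /* shared read-only: allowed */
        check_pte_clear(3, false);
        check_pte_clear(3, false);
        check_pte_set(3, true);         /* now exclusive writable: allowed */
        puts("invariant held");
        return 0;
}
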
/linux/drivers/gpu/drm/

drm_buddy.c
   27  static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
   50  static void drm_block_free(struct drm_buddy *mm,
   93  static void rbtree_insert(struct drm_buddy *mm,
   98                        &mm->free_trees[tree][drm_buddy_block_order(block)],
  102  static void rbtree_remove(struct drm_buddy *mm,
  110          root = &mm->free_trees[tree][order];
  126  static void mark_allocated(struct drm_buddy *mm,
  132          rbtree_remove(mm, block);
  135  static void mark_free(struct drm_buddy *mm,
  144          rbtree_insert(mm, block, tree);
  [all …]
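
The free-tree bookkeeping above relies on the classic buddy pairing rule: a block of size chunk_size << order at a given offset has its buddy at the same offset with that size bit flipped, so merge candidates are found with one XOR. A self-contained demonstration of the rule (this is the textbook identity, not drm_buddy's code):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Buddy of the block at `offset` with the given order: flip the
 * bit corresponding to the block's size. */
static uint64_t buddy_offset(uint64_t offset, unsigned int order,
                             uint64_t chunk_size)
{
        return offset ^ (chunk_size << order);
}

int main(void)
{
        /* 4 KiB chunks: the order-0 block at 0x1000 pairs with 0x0000 */
        assert(buddy_offset(0x1000, 0, 0x1000) == 0x0000);
        /* order-1 block (8 KiB) at 0x2000 pairs with 0x0000 */
        assert(buddy_offset(0x2000, 1, 0x1000) == 0x0000);
        /* pairing is symmetric: the buddy's buddy is the block itself */
        assert(buddy_offset(buddy_offset(0x3000, 0, 0x1000), 0, 0x1000) == 0x3000);
        puts("buddy pairing holds");
        return 0;
}
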
drm_mm.c
  119  static void show_leaks(struct drm_mm *mm)
  128          list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
  147  static void show_leaks(struct drm_mm *mm) { }
  158  __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
  160          return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
  161                                                 start, last) ?: (struct drm_mm_node *)&mm->head_node;
  168          struct drm_mm *mm = hole_node->mm;
  191          link = &mm->interval_tree.rb_root.rb_node;
  209          rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
  269          struct drm_mm *mm = node->mm;
  [all …]

/linux/arch/powerpc/include/asm/

mmu_context.h
   18  extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
   20  extern void destroy_context(struct mm_struct *mm);
   24  extern bool mm_iommu_preregistered(struct mm_struct *mm);
   25  extern long mm_iommu_new(struct mm_struct *mm,
   28  extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
   31  extern long mm_iommu_put(struct mm_struct *mm,
   33  extern void mm_iommu_init(struct mm_struct *mm);
   34  extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
   36  extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
   40  extern bool mm_iommu_is_devmem(struct mm_struct *mm, unsigned long hpa,
  [all …]

/linux/arch/s390/mm/

pgtable.c
   39  static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
   46          asce = READ_ONCE(mm->context.gmap_asce);
   50          asce = asce ? : mm->context.asce;
   59  static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
   66          asce = READ_ONCE(mm->context.gmap_asce);
   70          asce = asce ? : mm->context.asce;
   79  static inline pte_t ptep_flush_direct(struct mm_struct *mm,
   88          atomic_inc(&mm->context.flush_count);
   90              cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
   91                  ptep_ipte_local(mm, addr, ptep, nodat);
  [all …]
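
ptep_flush_direct() picks between a CPU-local and a broadcast invalidation by checking whether this CPU is the only one with the mm attached (the cpumask_equal() test at line 90). The same decision with the cpumask reduced to a single word, purely illustrative:

#include <stdbool.h>
#include <stdio.h>

/* One bit per CPU that currently has the mm attached. The flush may
 * stay local only when this CPU's bit is the sole bit set. */
static bool flush_can_stay_local(unsigned long mm_cpumask, int this_cpu)
{
        return mm_cpumask == (1UL << this_cpu);
}

int main(void)
{
        /* only CPU 2 uses the mm: a local IPTE is enough */
        printf("local ok: %d\n", flush_can_stay_local(1UL << 2, 2));
        /* CPUs 2 and 5 use it: the invalidation must broadcast */
        printf("local ok: %d\n",
               flush_can_stay_local((1UL << 2) | (1UL << 5), 2));
        return 0;
}
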
/linux/mm/

mmu_notifier.c
  191                  interval_sub->mm->notifier_subscriptions;
  264                           struct mm_struct *mm)
  269                  .mm = mm,
  302                           struct mm_struct *mm)
  321                  subscription->ops->release(subscription, mm);
  350  void __mmu_notifier_release(struct mm_struct *mm)
  353                  mm->notifier_subscriptions;
  356          mn_itree_release(subscriptions, mm);
  359          mn_hlist_release(subscriptions, mm);
  367  int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
  [all …]

debug.c
  175  void dump_mm(const struct mm_struct *mm)
  202                  mm, mm->task_size,
  203                  mm->mmap_base, mm->mmap_legacy_base,
  204                  mm->pgd, atomic_read(&mm->mm_users),
  205                  atomic_read(&mm->mm_count),
  206                  mm_pgtables_bytes(mm),
  207                  mm->map_count,
  208                  mm->hiwater_rss, mm->hiwater_vm, mm->total_vm, mm->locked_vm,
  209                  (u64)atomic64_read(&mm->pinned_vm),
  210                  mm->data_vm, mm->exec_vm, mm->stack_vm,
  [all …]

mmap.c
  111          return mlock_future_ok(current->mm,
  112                                 current->mm->def_flags & VM_LOCKED, len)
  119          struct mm_struct *mm = current->mm;
  126          if (mmap_write_lock_killable(mm))
  129          origbrk = mm->brk;
  131          min_brk = mm->start_brk;
  139                  min_brk = mm->end_data;
  150          if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
  151                                mm->end_data, mm->start_data))
  155          oldbrk = PAGE_ALIGN(mm->brk);
  [all …]
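
The brk handler above is mostly bounds checking: the requested break must stay above a minimum (start_brk, or end_data when layout rules allow) and the combined data-plus-heap size must respect RLIMIT_DATA. The standalone model below mirrors those two checks; field names follow the snippet, but the logic is a simplified sketch, not the syscall's exact rules:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct mm {
        uintptr_t start_brk, brk;
        uintptr_t start_data, end_data;
        unsigned long rlimit_data;      /* models RLIMIT_DATA */
};

static bool brk_ok(const struct mm *mm, uintptr_t newbrk)
{
        /* the break may never drop below its floor */
        if (newbrk < mm->start_brk)
                return false;
        /* data segment plus heap must fit in the rlimit budget */
        if ((newbrk - mm->start_brk) + (mm->end_data - mm->start_data) >
            mm->rlimit_data)
                return false;
        return true;
}

int main(void)
{
        struct mm mm = {
                .start_brk = 0x100000, .brk = 0x100000,
                .start_data = 0x80000, .end_data = 0x90000,
                .rlimit_data = 1UL << 20,
        };
        uintptr_t req = PAGE_ALIGN(0x123456);

        printf("grow to %#lx: %s\n", (unsigned long)req,
               brk_ok(&mm, req) ? "ok" : "rejected");
        return 0;
}
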
/linux/arch/s390/include/asm/

pgalloc.h
   35  int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
   37  static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr,
   42          if (addr + len > mm->context.asce_limit &&
   44                  rc = crst_table_upgrade(mm, addr + len);
   51  static inline p4d_t *p4d_alloc_one_noprof(struct mm_struct *mm, unsigned long address)
   53          unsigned long *table = crst_table_alloc_noprof(mm);
   64  static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
   66          if (mm_p4d_folded(mm))
   70          crst_table_free(mm, (unsigned long *) p4d);
   73  static inline pud_t *pud_alloc_one_noprof(struct mm_struct *mm, unsigned long address)
  [all …]

/linux/arch/m68k/include/asm/

mmu_context.h
   28  static inline void get_mmu_context(struct mm_struct *mm)
   32          if (mm->context != NO_CONTEXT)
   45          mm->context = ctx;
   46          context_mm[ctx] = mm;
   52  #define init_new_context(tsk, mm)       (((mm)->context = NO_CONTEXT), 0)
   58  static inline void destroy_context(struct mm_struct *mm)
   60          if (mm->context != NO_CONTEXT) {
   61                  clear_bit(mm->context, context_map);
   62                  mm->context = NO_CONTEXT;
   75          get_mmu_context(tsk->mm);
  [all …]
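
The m68k scheme above hands out hardware context numbers from a bitmap, with NO_CONTEXT marking an mm that has not been assigned one and destroy_context() returning the bit. A standalone model of that lifecycle follows; unlike the kernel, which steals an in-use context when the map is full, this sketch simply reports failure:

#include <stdio.h>

#define NO_CONTEXT      (-1)
#define NR_CONTEXTS     64

static unsigned long long context_map;  /* one bit per hardware context */

static int get_context(int *ctx)
{
        if (*ctx != NO_CONTEXT)         /* already assigned: keep it */
                return *ctx;
        for (int i = 0; i < NR_CONTEXTS; i++) {
                if (!(context_map & (1ULL << i))) {
                        context_map |= 1ULL << i;
                        return *ctx = i;
                }
        }
        return NO_CONTEXT;              /* map full */
}

static void destroy_context(int *ctx)
{
        if (*ctx != NO_CONTEXT) {
                context_map &= ~(1ULL << *ctx); /* clear_bit() analogue */
                *ctx = NO_CONTEXT;
        }
}

int main(void)
{
        int a = NO_CONTEXT, b = NO_CONTEXT;
        int ca = get_context(&a);
        int cb = get_context(&b);

        printf("a=%d b=%d\n", ca, cb);          /* a=0 b=1 */
        destroy_context(&a);
        printf("after free, a=%d\n", get_context(&a));  /* 0 again */
        return 0;
}
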
/linux/arch/powerpc/mm/book3s64/

mmu_context.c
   95  static int hash__init_new_context(struct mm_struct *mm)
   99          mm->context.hash_context = kmalloc_obj(struct hash_mm_context);
  100          if (!mm->context.hash_context)
  117          if (mm->context.id == 0) {
  118                  memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context));
  119                  slice_init_new_context_exec(mm);
  122                  …memcpy(mm->context.hash_context, current->mm->context.hash_context, sizeof(struct hash_mm_context)…
  125                  if (current->mm->context.hash_context->spt) {
  126                          mm->context.hash_context->spt = kmalloc_obj(struct subpage_prot_table);
  127                          if (!mm->context.hash_context->spt) {
  [all …]

slice.c
   86  static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
   91          if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
   93          vma = find_vma(mm, addr);
   97  static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
   99          return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
  103  static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
  114          return !slice_area_is_free(mm, start, end - start);
  117  static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
  127                  if (!slice_low_has_vma(mm, i))
  134                  if (!slice_high_has_vma(mm, i))
  [all …]

/linux/tools/testing/vma/tests/

merge.c
   72  static struct vm_area_struct *try_merge_new_vma(struct mm_struct *mm,
   92          return alloc_and_link_vma(mm, start, end, pgoff, vm_flags);
   99          struct mm_struct mm = {};
  100          struct vm_area_struct *vma_left = alloc_vma(&mm, 0, 0x1000, 0, vm_flags);
  101          struct vm_area_struct *vma_right = alloc_vma(&mm, 0x2000, 0x3000, 2, vm_flags);
  102          VMA_ITERATOR(vmi, &mm, 0x1000);
  104                  .mm = &mm,
  112          ASSERT_FALSE(attach_vma(&mm, vma_left));
  113          ASSERT_FALSE(attach_vma(&mm, vma_right));
  124          mtree_destroy(&mm.mm_mt);
  [all …]

/linux/Documentation/core-api/

mm-api.rst
   14  .. kernel-doc:: mm/gup.c
   40  .. kernel-doc:: mm/slub.c
   43  .. kernel-doc:: mm/slab_common.c
   46  .. kernel-doc:: mm/util.c
   52  .. kernel-doc:: mm/vmalloc.c
   61  .. kernel-doc:: mm/filemap.c
   67  .. kernel-doc:: mm/readahead.c
   70  .. kernel-doc:: mm/readahead.c
   76  .. kernel-doc:: mm/page-writeback.c
   82  .. kernel-doc:: mm/truncate.c
  [all …]

/linux/include/asm-generic/

pgalloc.h
   19  static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm)
   25          if (!pagetable_pte_ctor(mm, ptdesc)) {
   43  static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm)
   45          return __pte_alloc_one_kernel_noprof(mm);
   55  static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
   72  static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_t gfp)
   79          if (!pagetable_pte_ctor(mm, ptdesc)) {
   97  static inline pgtable_t pte_alloc_one_noprof(struct mm_struct *mm)
   99          return __pte_alloc_one_noprof(mm, GFP_PGTABLE_USER);
  114  static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
  [all …]
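
The __pte_alloc_one*() helpers follow an allocate-construct-or-undo shape: get the page-table page, run pagetable_pte_ctor(), and back the allocation out if the constructor fails. The same shape in plain C, with heap allocations standing in for ptdescs and all names illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct ptdesc {
        void *table;
        bool constructed;
};

/* Stand-in constructor; in the kernel this sets up the split
 * page-table lock and accounting, and it can fail. */
static bool pte_ctor(struct ptdesc *pt)
{
        pt->constructed = true;
        return true;
}

static struct ptdesc *pte_alloc_one(void)
{
        struct ptdesc *pt = calloc(1, sizeof(*pt));

        if (!pt)
                return NULL;
        pt->table = calloc(512, sizeof(unsigned long));
        if (!pt->table || !pte_ctor(pt)) {
                /* constructor or table allocation failed: undo */
                free(pt->table);
                free(pt);
                return NULL;
        }
        return pt;
}

int main(void)
{
        struct ptdesc *pt = pte_alloc_one();

        printf("alloc %s\n", pt ? "succeeded" : "failed");
        if (pt) {
                free(pt->table);
                free(pt);
        }
        return 0;
}
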
/linux/arch/sparc/include/asm/

mmu_context_64.h
   24  void get_new_mmu_context(struct mm_struct *mm);
   27  int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
   29  void destroy_context(struct mm_struct *mm);
   37  static inline void tsb_context_switch_ctx(struct mm_struct *mm,
   40          __tsb_context_switch(__pa(mm->pgd),
   41                               &mm->context.tsb_block[MM_TSB_BASE],
   43                               (mm->context.tsb_block[MM_TSB_HUGE].tsb ?
   44                                &mm->context.tsb_block[MM_TSB_HUGE] :
   49                               , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
   55  void tsb_grow(struct mm_struct *mm,
  [all …]

/linux/arch/x86/kernel/

ldt.c
   42  void load_mm_ldt(struct mm_struct *mm)
   47          ldt = READ_ONCE(mm->context.ldt);
  138          struct mm_struct *mm = __mm;
  140          if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm)
  143          load_mm_ldt(mm);
  189  static void do_sanity_check(struct mm_struct *mm,
  193          if (mm->context.ldt) {
  234  static void map_ldt_struct_to_user(struct mm_struct *mm)
  236          pgd_t *k_pgd = pgd_offset(mm, LDT_BASE_ADDR);
  243          if (boot_cpu_has(X86_FEATURE_PTI) && !mm->context.ldt)
  [all …]

/linux/arch/arm/mm/

pgd.c
   20  #define _pgd_alloc(mm)          kmalloc_objs(pgd_t, PTRS_PER_PGD, GFP_KERNEL | __GFP_ZERO)
   21  #define _pgd_free(mm, pgd)      kfree(pgd)
   23  #define _pgd_alloc(mm)          __pgd_alloc(mm, 2)
   24  #define _pgd_free(mm, pgd)      __pgd_free(mm, pgd)
   30  pgd_t *pgd_alloc(struct mm_struct *mm)
   38          new_pgd = _pgd_alloc(mm);
   55          new_p4d = p4d_alloc(mm, new_pgd + pgd_index(MODULES_VADDR),
   60          new_pud = pud_alloc(mm, new_p4d, MODULES_VADDR);
   64          new_pmd = pmd_alloc(mm, new_pud, 0);
   89          new_p4d = p4d_alloc(mm, new_pgd, 0);
  [all …]

/linux/arch/arm/include/asm/

mmu_context.h
   24  void __check_vmalloc_seq(struct mm_struct *mm);
   27  static inline void check_vmalloc_seq(struct mm_struct *mm)
   30              unlikely(atomic_read(&mm->context.vmalloc_seq) !=
   32                  __check_vmalloc_seq(mm);
   38  void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
   42  init_new_context(struct task_struct *tsk, struct mm_struct *mm)
   44          atomic64_set(&mm->context.id, 0);
   49  void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
   52  static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
   62  static inline void check_and_switch_context(struct mm_struct *mm,
  [all …]
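
check_vmalloc_seq() is a lazy-synchronization idiom: a global generation counter advances whenever the shared vmalloc part of the page tables changes, and an mm whose cached counter is stale copies the updated entries on next use. A standalone model of the idiom, with plain ints in place of the atomics and invented names:

#include <stdio.h>

static int init_mm_vmalloc_seq;         /* global generation counter */

struct mm {
        int vmalloc_seq;                /* this mm's cached generation */
};

static void sync_vmalloc(struct mm *mm)
{
        printf("copying updated vmalloc entries into mm\n");
        mm->vmalloc_seq = init_mm_vmalloc_seq;
}

static void check_vmalloc_seq(struct mm *mm)
{
        if (mm->vmalloc_seq != init_mm_vmalloc_seq)     /* unlikely() */
                sync_vmalloc(mm);
}

int main(void)
{
        struct mm mm = { 0 };

        check_vmalloc_seq(&mm);         /* up to date: no copy */
        init_mm_vmalloc_seq++;          /* a vmalloc mapping changed */
        check_vmalloc_seq(&mm);         /* stale: one copy happens */
        check_vmalloc_seq(&mm);         /* synced again: no copy */
        return 0;
}
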
/linux/arch/sparc/mm/

tsb.c
  121          struct mm_struct *mm = tb->mm;
  124          spin_lock_irqsave(&mm->context.lock, flags);
  127          base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
  128          nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
  140          else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
  141                  base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
  142                  nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
  149          spin_unlock_irqrestore(&mm->context.lock, flags);
  152  void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr,
  157          spin_lock_irqsave(&mm->context.lock, flags);
  [all …]

tlb.c
   28          struct mm_struct *mm = tb->mm;
   35          if (CTX_VALID(mm->context)) {
   37                          global_flush_tlb_page(mm, tb->vaddrs[0]);
   40                          smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
   43                          __flush_tlb_pending(CTX_HWBITS(tb->mm->context),
   78  static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
   90          if (unlikely(nr != 0 && mm != tb->mm)) {
   96                  flush_tsb_user_page(mm, vaddr, hugepage_shift);
   97                  global_flush_tlb_page(mm, vaddr);
  102          tb->mm = mm;
  [all …]
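
tlb_batch_add_one() above queues addresses per mm and drains the batch early when the mm changes (the nr != 0 && mm != tb->mm test at line 90) or when it fills. The batching skeleton in standalone form, printing where the kernel would issue the flush; names are illustrative:

#include <stdio.h>

#define TLB_BATCH_NR 8

struct tlb_batch {
        const void *mm;                 /* owner of the queued addresses */
        unsigned long vaddrs[TLB_BATCH_NR];
        unsigned int nr;
};

static void flush_pending(struct tlb_batch *tb)
{
        if (!tb->nr)
                return;
        printf("flushing %u address(es) for mm=%p\n", tb->nr, tb->mm);
        tb->nr = 0;
}

static void batch_add(struct tlb_batch *tb, const void *mm,
                      unsigned long vaddr)
{
        if (tb->nr && tb->mm != mm)     /* mm switched: drain first */
                flush_pending(tb);
        tb->mm = mm;
        tb->vaddrs[tb->nr++] = vaddr;
        if (tb->nr == TLB_BATCH_NR)     /* batch full: drain */
                flush_pending(tb);
}

int main(void)
{
        struct tlb_batch tb = { 0 };
        int mm_a, mm_b;

        for (unsigned long a = 0; a < 10 * 0x1000; a += 0x1000)
                batch_add(&tb, &mm_a, a);       /* one flush of 8 entries */
        batch_add(&tb, &mm_b, 0x2000);          /* mm change drains 2 more */
        flush_pending(&tb);                     /* final drain: 1 entry */
        return 0;
}
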