
Searched refs:mm (Results 1 – 25 of 1290) sorted by relevance


/linux/arch/x86/include/asm/
mmu_context.h 55 static inline void init_new_context_ldt(struct mm_struct *mm) in init_new_context_ldt() argument
57 mm->context.ldt = NULL; in init_new_context_ldt()
58 init_rwsem(&mm->context.ldt_usr_sem); in init_new_context_ldt()
60 int ldt_dup_context(struct mm_struct *oldmm, struct mm_struct *mm);
61 void destroy_context_ldt(struct mm_struct *mm);
62 void ldt_arch_exit_mmap(struct mm_struct *mm);
64 static inline void init_new_context_ldt(struct mm_struct *mm) { } in init_new_context_ldt() argument
66 struct mm_struct *mm) in ldt_dup_context() argument
70 static inline void destroy_context_ldt(struct mm_struct *mm) { } in destroy_context_ldt() argument
71 static inline void ldt_arch_exit_mmap(struct mm_struct *mm) { } in ldt_arch_exit_mmap() argument
78 load_mm_ldt(struct mm_struct * mm) load_mm_ldt() argument
89 mm_lam_cr3_mask(struct mm_struct * mm) mm_lam_cr3_mask() argument
100 dup_lam(struct mm_struct * oldmm,struct mm_struct * mm) dup_lam() argument
107 mm_untag_mask(struct mm_struct * mm) mm_untag_mask() argument
112 mm_reset_untag_mask(struct mm_struct * mm) mm_reset_untag_mask() argument
118 arch_pgtable_dma_compat(struct mm_struct * mm) arch_pgtable_dma_compat() argument
125 mm_lam_cr3_mask(struct mm_struct * mm) mm_lam_cr3_mask() argument
130 dup_lam(struct mm_struct * oldmm,struct mm_struct * mm) dup_lam() argument
134 mm_reset_untag_mask(struct mm_struct * mm) mm_reset_untag_mask() argument
153 init_new_context(struct task_struct * tsk,struct mm_struct * mm) init_new_context() argument
177 destroy_context(struct mm_struct * mm) destroy_context() argument
197 deactivate_mm(tsk,mm) global() argument
202 deactivate_mm(tsk,mm) global() argument
211 arch_dup_pkeys(struct mm_struct * oldmm,struct mm_struct * mm) arch_dup_pkeys() argument
223 arch_dup_mmap(struct mm_struct * oldmm,struct mm_struct * mm) arch_dup_mmap() argument
231 arch_exit_mmap(struct mm_struct * mm) arch_exit_mmap() argument
238 is_64bit_mm(struct mm_struct * mm) is_64bit_mm() argument
244 is_64bit_mm(struct mm_struct * mm) is_64bit_mm() argument
250 is_notrack_mm(struct mm_struct * mm) is_notrack_mm() argument
255 set_notrack_mm(struct mm_struct * mm) set_notrack_mm() argument
[all...]
/linux/drivers/gpu/drm/
drm_buddy.c 17 static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm, in drm_block_alloc() argument
38 static void drm_block_free(struct drm_buddy *mm, in drm_block_free() argument
44 static void list_insert_sorted(struct drm_buddy *mm, in list_insert_sorted() argument
50 head = &mm->free_list[drm_buddy_block_order(block)]; in list_insert_sorted()
81 static void mark_free(struct drm_buddy *mm, in mark_free() argument
87 list_insert_sorted(mm, block); in mark_free()
123 static unsigned int __drm_buddy_free(struct drm_buddy *mm, in __drm_buddy_free() argument
153 mm->clear_avail -= drm_buddy_block_size(mm, buddy); in __drm_buddy_free()
155 drm_block_free(mm, bloc in __drm_buddy_free()
167 __force_merge(struct drm_buddy * mm,u64 start,u64 end,unsigned int min_order) __force_merge() argument
237 drm_buddy_init(struct drm_buddy * mm,u64 size,u64 chunk_size) drm_buddy_init() argument
328 drm_buddy_fini(struct drm_buddy * mm) drm_buddy_fini() argument
357 split_block(struct drm_buddy * mm,struct drm_buddy_block * block) split_block() argument
417 drm_buddy_reset_clear(struct drm_buddy * mm,bool is_clear) drm_buddy_reset_clear() argument
457 drm_buddy_free_block(struct drm_buddy * mm,struct drm_buddy_block * block) drm_buddy_free_block() argument
469 __drm_buddy_free_list(struct drm_buddy * mm,struct list_head * objects,bool mark_clear,bool mark_dirty) __drm_buddy_free_list() argument
489 drm_buddy_free_list_internal(struct drm_buddy * mm,struct list_head * objects) drm_buddy_free_list_internal() argument
507 drm_buddy_free_list(struct drm_buddy * mm,struct list_head * objects,unsigned int flags) drm_buddy_free_list() argument
525 __alloc_range_bias(struct drm_buddy * mm,u64 start,u64 end,unsigned int order,unsigned long flags,bool fallback) __alloc_range_bias() argument
617 __drm_buddy_alloc_range_bias(struct drm_buddy * mm,u64 start,u64 end,unsigned int order,unsigned long flags) __drm_buddy_alloc_range_bias() argument
635 get_maxblock(struct drm_buddy * mm,unsigned int order,unsigned long flags) get_maxblock() argument
670 alloc_from_freelist(struct drm_buddy * mm,unsigned int order,unsigned long flags) alloc_from_freelist() argument
734 __alloc_range(struct drm_buddy * mm,struct list_head * dfs,u64 start,u64 size,struct list_head * blocks,u64 * total_allocated_on_err) __alloc_range() argument
829 __drm_buddy_alloc_range(struct drm_buddy * mm,u64 start,u64 size,u64 * total_allocated_on_err,struct list_head * blocks) __drm_buddy_alloc_range() argument
845 __alloc_contig_try_harder(struct drm_buddy * mm,u64 size,u64 min_block_size,struct list_head * blocks) __alloc_contig_try_harder() argument
918 drm_buddy_block_trim(struct drm_buddy * mm,u64 * start,u64 new_size,struct list_head * blocks) drm_buddy_block_trim() argument
992 __drm_buddy_alloc_blocks(struct drm_buddy * mm,u64 start,u64 end,unsigned int order,unsigned long flags) __drm_buddy_alloc_blocks() argument
1026 drm_buddy_alloc_blocks(struct drm_buddy * mm,u64 start,u64 end,u64 size,u64 min_block_size,struct list_head * blocks,unsigned long flags) drm_buddy_alloc_blocks() argument
1179 drm_buddy_block_print(struct drm_buddy * mm,struct drm_buddy_block * block,struct drm_printer * p) drm_buddy_block_print() argument
1196 drm_buddy_print(struct drm_buddy * mm,struct drm_printer * p) drm_buddy_print() argument
[all...]
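
The drm_buddy.c hits above trace the allocator's public lifecycle: drm_buddy_init(), drm_buddy_alloc_blocks(), drm_buddy_free_list() and drm_buddy_fini(). A minimal usage sketch, loosely modelled on the KUnit tests listed further below; the pool size, chunk size and flags are illustrative assumptions, not values taken from the source:

#include <linux/list.h>
#include <linux/sizes.h>
#include <drm/drm_buddy.h>

static int buddy_demo(void)
{
	struct drm_buddy mm;
	LIST_HEAD(blocks);
	int err;

	/* Manage a 1 MiB pool with a 4 KiB minimum chunk size. */
	err = drm_buddy_init(&mm, SZ_1M, SZ_4K);
	if (err)
		return err;

	/* Allocate 64 KiB anywhere in [0, 1 MiB), minimum block size 4 KiB. */
	err = drm_buddy_alloc_blocks(&mm, 0, SZ_1M, SZ_64K, SZ_4K,
				     &blocks, 0);
	if (!err)
		drm_buddy_free_list(&mm, &blocks, 0);

	drm_buddy_fini(&mm);
	return err;
}
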
/linux/arch/powerpc/include/asm/
mmu_context.h 7 #include <linux/mm.h>
18 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
20 extern void destroy_context(struct mm_struct *mm);
24 extern bool mm_iommu_preregistered(struct mm_struct *mm);
25 extern long mm_iommu_new(struct mm_struct *mm,
28 extern long mm_iommu_newdev(struct mm_struct *mm, unsigned long ua,
31 extern long mm_iommu_put(struct mm_struct *mm,
33 extern void mm_iommu_init(struct mm_struct *mm);
34 extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup(struct mm_struct *mm,
36 extern struct mm_iommu_table_group_mem_t *mm_iommu_get(struct mm_struct *mm,
45 mm_iommu_is_devmem(struct mm_struct * mm,unsigned long hpa,unsigned int pageshift,unsigned long * size) mm_iommu_is_devmem() argument
50 mm_iommu_init(struct mm_struct * mm) mm_iommu_init() argument
72 alloc_extended_context(struct mm_struct * mm,unsigned long ea) alloc_extended_context() argument
88 need_extra_context(struct mm_struct * mm,unsigned long ea) need_extra_context() argument
105 alloc_extended_context(struct mm_struct * mm,unsigned long ea) alloc_extended_context() argument
113 need_extra_context(struct mm_struct * mm,unsigned long ea) need_extra_context() argument
120 inc_mm_active_cpus(struct mm_struct * mm) inc_mm_active_cpus() argument
125 dec_mm_active_cpus(struct mm_struct * mm) dec_mm_active_cpus() argument
131 mm_context_add_copro(struct mm_struct * mm) mm_context_add_copro() argument
142 mm_context_remove_copro(struct mm_struct * mm) mm_context_remove_copro() argument
187 mm_context_add_vas_window(struct mm_struct * mm) mm_context_add_vas_window() argument
193 mm_context_remove_vas_window(struct mm_struct * mm) mm_context_remove_vas_window() argument
204 inc_mm_active_cpus(struct mm_struct * mm) inc_mm_active_cpus() argument
205 dec_mm_active_cpus(struct mm_struct * mm) dec_mm_active_cpus() argument
206 mm_context_add_copro(struct mm_struct * mm) mm_context_add_copro() argument
207 mm_context_remove_copro(struct mm_struct * mm) mm_context_remove_copro() argument
250 enter_lazy_tlb(struct mm_struct * mm,struct task_struct * tsk) enter_lazy_tlb() argument
272 pkey_mm_init(mm) global() argument
273 arch_dup_pkeys(oldmm,mm) global() argument
283 arch_dup_mmap(struct mm_struct * oldmm,struct mm_struct * mm) arch_dup_mmap() argument
[all...]
/linux/include/linux/
mmu_notifier.h 40 * that the mm refcount is zero and the range is no longer accessible.
66 * Called either by mmu_notifier_unregister or when the mm is
69 * methods (the ones invoked outside the mm context) and it
74 * tsk->mm == mm exits.
81 * last thread of this mm quits, you've also to be sure that
89 struct mm_struct *mm);
101 struct mm_struct *mm,
111 struct mm_struct *mm,
122 struct mm_struct *mm,
231 struct mm_struct *mm; global() member
251 struct mm_struct *mm; global() member
263 struct mm_struct *mm; global() member
271 mm_has_notifiers(struct mm_struct * mm) mm_has_notifiers() argument
279 mmu_notifier_get(const struct mmu_notifier_ops * ops,struct mm_struct * mm) mmu_notifier_get() argument
399 mmu_notifier_release(struct mm_struct * mm) mmu_notifier_release() argument
405 mmu_notifier_clear_flush_young(struct mm_struct * mm,unsigned long start,unsigned long end) mmu_notifier_clear_flush_young() argument
414 mmu_notifier_clear_young(struct mm_struct * mm,unsigned long start,unsigned long end) mmu_notifier_clear_young() argument
423 mmu_notifier_test_young(struct mm_struct * mm,unsigned long address) mmu_notifier_test_young() argument
475 mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct * mm,unsigned long start,unsigned long end) mmu_notifier_arch_invalidate_secondary_tlbs() argument
482 mmu_notifier_subscriptions_init(struct mm_struct * mm) mmu_notifier_subscriptions_init() argument
487 mmu_notifier_subscriptions_destroy(struct mm_struct * mm) mmu_notifier_subscriptions_destroy() argument
497 mmu_notifier_range_init(struct mmu_notifier_range * range,enum mmu_notifier_event event,unsigned flags,struct mm_struct * mm,unsigned long start,unsigned long end) mmu_notifier_range_init() argument
511 mmu_notifier_range_init_owner(struct mmu_notifier_range * range,enum mmu_notifier_event event,unsigned int flags,struct mm_struct * mm,unsigned long start,unsigned long end,void * owner) mmu_notifier_range_init_owner() argument
581 mmu_notifier_range_init(range,event,flags,mm,start,end) global() argument
583 mmu_notifier_range_init_owner(range,event,flags,mm,start,end,owner) global() argument
593 mm_has_notifiers(struct mm_struct * mm) mm_has_notifiers() argument
598 mmu_notifier_release(struct mm_struct * mm) mmu_notifier_release() argument
602 mmu_notifier_clear_flush_young(struct mm_struct * mm,unsigned long start,unsigned long end) mmu_notifier_clear_flush_young() argument
609 mmu_notifier_clear_young(struct mm_struct * mm,unsigned long start,unsigned long end) mmu_notifier_clear_young() argument
616 mmu_notifier_test_young(struct mm_struct * mm,unsigned long address) mmu_notifier_test_young() argument
638 mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct * mm,unsigned long start,unsigned long end) mmu_notifier_arch_invalidate_secondary_tlbs() argument
643 mmu_notifier_subscriptions_init(struct mm_struct * mm) mmu_notifier_subscriptions_init() argument
647 mmu_notifier_subscriptions_destroy(struct mm_struct * mm) mmu_notifier_subscriptions_destroy() argument
[all...]
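
The mmu_notifier.h comments above describe when ->release fires and what the other callbacks may do. A hedged sketch of the registration pattern those declarations imply; my_mirror, my_release and my_range_start are invented names, and the locking/refcount requirements are only summarized in comments:

#include <linux/mmu_notifier.h>

struct my_mirror {
	struct mmu_notifier mn;		/* embedded subscription */
	/* ... driver state shadowing the address space ... */
};

static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
	/* Last chance to tear down secondary mappings before the mm dies. */
}

static int my_range_start(struct mmu_notifier *mn,
			  const struct mmu_notifier_range *range)
{
	/* Invalidate shadow PTEs covering [range->start, range->end). */
	return 0;
}

static const struct mmu_notifier_ops my_ops = {
	.release		= my_release,
	.invalidate_range_start	= my_range_start,
};

static int my_mirror_attach(struct my_mirror *m, struct mm_struct *mm)
{
	m->mn.ops = &my_ops;
	/* Caller is expected to hold a reference on mm->mm_users. */
	return mmu_notifier_register(&m->mn, mm);
}
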
page_table_check.h 17 void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
18 void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
19 void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
20 void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
22 void __page_table_check_pmds_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd,
24 void __page_table_check_puds_set(struct mm_struct *mm, pud_t *pudp, pud_t pud,
26 void __page_table_check_pte_clear_range(struct mm_struct *mm,
46 static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte) in page_table_check_pte_clear() argument
51 __page_table_check_pte_clear(mm, pte); in page_table_check_pte_clear()
54 static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_ argument
62 page_table_check_pud_clear(struct mm_struct * mm,pud_t pud) page_table_check_pud_clear() argument
70 page_table_check_ptes_set(struct mm_struct * mm,pte_t * ptep,pte_t pte,unsigned int nr) page_table_check_ptes_set() argument
79 page_table_check_pmds_set(struct mm_struct * mm,pmd_t * pmdp,pmd_t pmd,unsigned int nr) page_table_check_pmds_set() argument
88 page_table_check_puds_set(struct mm_struct * mm,pud_t * pudp,pud_t pud,unsigned int nr) page_table_check_puds_set() argument
97 page_table_check_pte_clear_range(struct mm_struct * mm,unsigned long addr,pmd_t pmd) page_table_check_pte_clear_range() argument
117 page_table_check_pte_clear(struct mm_struct * mm,pte_t pte) page_table_check_pte_clear() argument
121 page_table_check_pmd_clear(struct mm_struct * mm,pmd_t pmd) page_table_check_pmd_clear() argument
125 page_table_check_pud_clear(struct mm_struct * mm,pud_t pud) page_table_check_pud_clear() argument
129 page_table_check_ptes_set(struct mm_struct * mm,pte_t * ptep,pte_t pte,unsigned int nr) page_table_check_ptes_set() argument
134 page_table_check_pmds_set(struct mm_struct * mm,pmd_t * pmdp,pmd_t pmd,unsigned int nr) page_table_check_pmds_set() argument
139 page_table_check_puds_set(struct mm_struct * mm,pud_t * pudp,pud_t pud,unsigned int nr) page_table_check_puds_set() argument
144 page_table_check_pte_clear_range(struct mm_struct * mm,unsigned long addr,pmd_t pmd) page_table_check_pte_clear_range() argument
152 page_table_check_pmd_set(mm,pmdp,pmd) global() argument
153 page_table_check_pud_set(mm,pudp,pud) global() argument
[all...]
ksm.h 12 #include <linux/mm.h>
20 vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
22 int ksm_enable_merge_any(struct mm_struct *mm);
23 int ksm_disable_merge_any(struct mm_struct *mm);
24 int ksm_disable(struct mm_struct *mm);
26 int __ksm_enter(struct mm_struct *mm);
27 void __ksm_exit(struct mm_struct *mm);
37 static inline void ksm_map_zero_page(struct mm_struct *mm) in ksm_map_zero_page() argument
40 atomic_long_inc(&mm->ksm_zero_pages); in ksm_map_zero_page()
43 static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_ argument
51 mm_ksm_zero_pages(struct mm_struct * mm) mm_ksm_zero_pages() argument
56 ksm_fork(struct mm_struct * mm,struct mm_struct * oldmm) ksm_fork() argument
63 ksm_execve(struct mm_struct * mm) ksm_execve() argument
71 ksm_exit(struct mm_struct * mm) ksm_exit() argument
100 ksm_vma_flags(const struct mm_struct * mm,const struct file * file,vm_flags_t vm_flags) ksm_vma_flags() argument
106 ksm_disable(struct mm_struct * mm) ksm_disable() argument
111 ksm_fork(struct mm_struct * mm,struct mm_struct * oldmm) ksm_fork() argument
115 ksm_execve(struct mm_struct * mm) ksm_execve() argument
120 ksm_exit(struct mm_struct * mm) ksm_exit() argument
124 ksm_might_unmap_zero_page(struct mm_struct * mm,pte_t pte) ksm_might_unmap_zero_page() argument
[all...]
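
ksm_enable_merge_any() and ksm_disable() above are the kernel-side entry points behind the per-process KSM prctl. For orientation only, a userspace sketch that exercises that path on kernels which support it; the prctl numbers are taken from the uapi headers and CAP_SYS_RESOURCE may be required:

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_MEMORY_MERGE
#define PR_SET_MEMORY_MERGE 67
#endif
#ifndef PR_GET_MEMORY_MERGE
#define PR_GET_MEMORY_MERGE 68
#endif

int main(void)
{
	/* Opt the whole process into KSM (reaches ksm_enable_merge_any()). */
	if (prctl(PR_SET_MEMORY_MERGE, 1, 0, 0, 0))
		perror("PR_SET_MEMORY_MERGE");
	printf("merge-any: %d\n", prctl(PR_GET_MEMORY_MERGE, 0, 0, 0, 0));
	return 0;
}
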
/linux/drivers/gpu/drm/tests/
drm_buddy_test.c 31 struct drm_buddy mm; in drm_test_buddy_alloc_range_bias() local
41 KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, ps), in drm_test_buddy_alloc_range_bias()
65 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
74 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
83 drm_buddy_alloc_blocks(&mm, bias_start + ps, in drm_test_buddy_alloc_range_bias()
92 drm_buddy_alloc_blocks(&mm, bias_start + ps, in drm_test_buddy_alloc_range_bias()
102 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
108 drm_buddy_free_list(&mm, &tmp, 0); in drm_test_buddy_alloc_range_bias()
112 drm_buddy_alloc_blocks(&mm, bias_start, in drm_test_buddy_alloc_range_bias()
118 drm_buddy_free_list(&mm, in drm_test_buddy_alloc_range_bias()
268 struct drm_buddy mm; drm_test_buddy_alloc_clear() local
418 struct drm_buddy mm; drm_test_buddy_alloc_contiguous() local
506 struct drm_buddy mm; drm_test_buddy_alloc_pathological() local
586 struct drm_buddy mm; drm_test_buddy_alloc_pessimistic() local
681 struct drm_buddy mm; drm_test_buddy_alloc_optimistic() local
727 struct drm_buddy mm; drm_test_buddy_alloc_limit() local
[all...]
drm_mm_test.c 38 static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm) in assert_no_holes() argument
45 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) in assert_no_holes()
53 drm_mm_for_each_node(hole, mm) { in assert_no_holes()
63 static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end) in assert_one_hole() argument
74 drm_mm_for_each_hole(hole, mm, hole_start, hole_end) { in assert_one_hole()
78 "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n", in assert_one_hole()
103 static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm, in assert_node() argument
108 if (!drm_mm_node_allocated(node) || node->mm != mm) { in assert_node()
138 struct drm_mm mm; in drm_test_mm_init() local
192 struct drm_mm mm; drm_test_mm_debug() local
217 expect_insert(struct kunit * test,struct drm_mm * mm,struct drm_mm_node * node,u64 size,u64 alignment,unsigned long color,const struct insert_mode * mode) expect_insert() argument
243 struct drm_mm mm; drm_test_mm_align_pot() local
290 struct drm_mm mm; drm_test_mm_once() local
[all...]
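
The drm_mm_test.c helpers above (assert_no_holes(), assert_one_hole(), expect_insert()) all poke at the drm_mm range manager. A bare-bones sketch of the API they exercise; the managed range and allocation size are arbitrary:

#include <linux/sizes.h>
#include <drm/drm_mm.h>

static int range_demo(void)
{
	struct drm_mm mm;
	struct drm_mm_node node = {};	/* must start out cleared */
	int err;

	drm_mm_init(&mm, 0, SZ_4M);	/* manage offsets [0, 4 MiB) */

	err = drm_mm_insert_node(&mm, &node, SZ_64K);
	if (!err) {
		/* node.start now holds the offset picked by the manager. */
		drm_mm_remove_node(&node);
	}

	drm_mm_takedown(&mm);		/* warns if nodes are still inserted */
	return err;
}
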
/linux/arch/s390/include/asm/
pgalloc.h 18 #include <linux/mm.h>
26 struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm);
35 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
37 static inline unsigned long check_asce_limit(struct mm_struct *mm, unsigned long addr, in check_asce_limit() argument
42 if (addr + len > mm->context.asce_limit && in check_asce_limit()
44 rc = crst_table_upgrade(mm, addr + len); in check_asce_limit()
51 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address) in p4d_alloc_one() argument
53 unsigned long *table = crst_table_alloc(mm); in p4d_alloc_one()
63 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) in p4d_free() argument
65 if (mm_p4d_folded(mm)) in p4d_free()
72 pud_alloc_one(struct mm_struct * mm,unsigned long address) pud_alloc_one() argument
84 pud_free(struct mm_struct * mm,pud_t * pud) pud_free() argument
93 pmd_alloc_one(struct mm_struct * mm,unsigned long vmaddr) pmd_alloc_one() argument
107 pmd_free(struct mm_struct * mm,pmd_t * pmd) pmd_free() argument
115 pgd_populate(struct mm_struct * mm,pgd_t * pgd,p4d_t * p4d) pgd_populate() argument
120 p4d_populate(struct mm_struct * mm,p4d_t * p4d,pud_t * pud) p4d_populate() argument
125 pud_populate(struct mm_struct * mm,pud_t * pud,pmd_t * pmd) pud_populate() argument
130 pgd_alloc(struct mm_struct * mm) pgd_alloc() argument
141 pgd_free(struct mm_struct * mm,pgd_t * pgd) pgd_free() argument
147 pmd_populate(struct mm_struct * mm,pmd_t * pmd,pgtable_t pte) pmd_populate() argument
153 pmd_populate_kernel(mm,pmd,pte) global() argument
158 pte_alloc_one_kernel(mm) global() argument
159 pte_alloc_one(mm) global() argument
161 pte_free_kernel(mm,pte) global() argument
162 pte_free(mm,pte) global() argument
[all...]
/linux/arch/m68k/include/asm/
mmu_context.h 28 static inline void get_mmu_context(struct mm_struct *mm) in get_mmu_context() argument
32 if (mm->context != NO_CONTEXT) in get_mmu_context()
45 mm->context = ctx; in get_mmu_context()
46 context_mm[ctx] = mm; in get_mmu_context()
52 #define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0) argument
58 static inline void destroy_context(struct mm_struct *mm) in destroy_context() argument
60 if (mm->context != NO_CONTEXT) { in destroy_context()
61 clear_bit(mm->context, context_map); in destroy_context()
62 mm in destroy_context()
85 activate_mm(struct mm_struct * active_mm,struct mm_struct * mm) activate_mm() argument
96 struct mm_struct *mm; load_ksp_mmu() local
179 init_new_context(struct task_struct * tsk,struct mm_struct * mm) init_new_context() argument
187 get_mmu_context(struct mm_struct * mm) get_mmu_context() argument
195 destroy_context(struct mm_struct * mm) destroy_context() argument
201 activate_context(struct mm_struct * mm) activate_context() argument
228 init_new_context(struct task_struct * tsk,struct mm_struct * mm) init_new_context() argument
234 switch_mm_0230(struct mm_struct * mm) switch_mm_0230() argument
267 switch_mm_0460(struct mm_struct * mm) switch_mm_0460() argument
[all...]
/linux/mm/
mmu_notifier.c 3 * linux/mm/mmu_notifier.c
13 #include <linux/mm.h>
19 #include <linux/sched/mm.h>
35 * mm->notifier_subscriptions inside the mm_take_all_locks() protected
40 /* all mmu notifiers registered in this mm are queued in this list */
56 * this mm, such that PTES cannot be read into SPTEs (shadow PTEs) while any
59 * Note that the core mm creates nested invalidate_range_start()/end() regions
62 * progress on the mm side.
69 * - mm->active_invalidate_ranges != 0
75 * - mm
264 mn_itree_release(struct mmu_notifier_subscriptions * subscriptions,struct mm_struct * mm) mn_itree_release() argument
302 mn_hlist_release(struct mmu_notifier_subscriptions * subscriptions,struct mm_struct * mm) mn_hlist_release() argument
350 __mmu_notifier_release(struct mm_struct * mm) __mmu_notifier_release() argument
367 __mmu_notifier_clear_flush_young(struct mm_struct * mm,unsigned long start,unsigned long end) __mmu_notifier_clear_flush_young() argument
387 __mmu_notifier_clear_young(struct mm_struct * mm,unsigned long start,unsigned long end) __mmu_notifier_clear_young() argument
407 __mmu_notifier_test_young(struct mm_struct * mm,unsigned long address) __mmu_notifier_test_young() argument
573 __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct * mm,unsigned long start,unsigned long end) __mmu_notifier_arch_invalidate_secondary_tlbs() argument
597 __mmu_notifier_register(struct mmu_notifier * subscription,struct mm_struct * mm) __mmu_notifier_register() argument
700 mmu_notifier_register(struct mmu_notifier * subscription,struct mm_struct * mm) mmu_notifier_register() argument
712 find_get_mmu_notifier(struct mm_struct * mm,const struct mmu_notifier_ops * ops) find_get_mmu_notifier() argument
752 mmu_notifier_get_locked(const struct mmu_notifier_ops * ops,struct mm_struct * mm) mmu_notifier_get_locked() argument
780 __mmu_notifier_subscriptions_destroy(struct mm_struct * mm) __mmu_notifier_subscriptions_destroy() argument
798 mmu_notifier_unregister(struct mmu_notifier * subscription,struct mm_struct * mm) mmu_notifier_unregister() argument
843 struct mm_struct *mm = subscription->mm; mmu_notifier_free_rcu() local
874 struct mm_struct *mm = subscription->mm; mmu_notifier_put() local
891 __mmu_interval_notifier_insert(struct mmu_interval_notifier * interval_sub,struct mm_struct * mm,struct mmu_notifier_subscriptions * subscriptions,unsigned long start,unsigned long length,const struct mmu_interval_notifier_ops * ops) __mmu_interval_notifier_insert() argument
973 mmu_interval_notifier_insert(struct mmu_interval_notifier * interval_sub,struct mm_struct * mm,unsigned long start,unsigned long length,const struct mmu_interval_notifier_ops * ops) mmu_interval_notifier_insert() argument
995 mmu_interval_notifier_insert_locked(struct mmu_interval_notifier * interval_sub,struct mm_struct * mm,unsigned long start,unsigned long length,const struct mmu_interval_notifier_ops * ops) mmu_interval_notifier_insert_locked() argument
1040 struct mm_struct *mm = interval_sub->mm; mmu_interval_notifier_remove() local
[all...]
debug.c 3 * mm/debug.c
5 * mm/ specific debug routines.
10 #include <linux/mm.h>
157 pr_emerg("vma %px start %px end %px mm %px\n" in dump_vma()
175 void dump_mm(const struct mm_struct *mm) in dump_mm() argument
177 pr_emerg("mm %px task_size %lu\n" in dump_mm()
202 mm, mm->task_size, in dump_mm()
203 mm->mmap_base, mm in dump_mm()
[all...]
mmap_lock.c 5 #include <linux/mm.h>
26 void __mmap_lock_do_trace_start_locking(struct mm_struct *mm, bool write) in __mmap_lock_do_trace_start_locking() argument
28 trace_mmap_lock_start_locking(mm, write); in __mmap_lock_do_trace_start_locking()
32 void __mmap_lock_do_trace_acquire_returned(struct mm_struct *mm, bool write, in __mmap_lock_do_trace_acquire_returned() argument
35 trace_mmap_lock_acquire_returned(mm, write, success); in __mmap_lock_do_trace_acquire_returned()
39 void __mmap_lock_do_trace_released(struct mm_struct *mm, bool write) in __mmap_lock_do_trace_released() argument
41 trace_mmap_lock_released(mm, write); in __mmap_lock_do_trace_released()
135 struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, in lock_vma_under_rcu() argument
138 MA_STATE(mas, &mm->mm_mt, address, address); in lock_vma_under_rcu()
147 vma = vma_start_read(mm, vm in lock_vma_under_rcu()
181 lock_next_vma_under_mmap_lock(struct mm_struct * mm,struct vma_iterator * vmi,unsigned long from_addr) lock_next_vma_under_mmap_lock() argument
206 lock_next_vma(struct mm_struct * mm,struct vma_iterator * vmi,unsigned long from_addr) lock_next_vma() argument
275 get_mmap_lock_carefully(struct mm_struct * mm,struct pt_regs * regs) get_mmap_lock_carefully() argument
289 mmap_upgrade_trylock(struct mm_struct * mm) mmap_upgrade_trylock() argument
302 upgrade_mmap_lock_carefully(struct mm_struct * mm,struct pt_regs * regs) upgrade_mmap_lock_carefully() argument
332 lock_mm_and_find_vma(struct mm_struct * mm,unsigned long addr,struct pt_regs * regs) lock_mm_and_find_vma() argument
394 lock_mm_and_find_vma(struct mm_struct * mm,unsigned long addr,struct pt_regs * regs) lock_mm_and_find_vma() argument
[all...]
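
lock_vma_under_rcu() and lock_mm_and_find_vma() above are fast paths around the plain mmap_lock-protected lookup. For contrast, a sketch of that classic pattern; addr_is_mapped() is a made-up helper:

#include <linux/mm.h>
#include <linux/mmap_lock.h>

static bool addr_is_mapped(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;
	bool mapped;

	mmap_read_lock(mm);		/* take mm->mmap_lock for reading */
	vma = find_vma(mm, addr);	/* first VMA with vm_end > addr */
	mapped = vma && vma->vm_start <= addr;
	mmap_read_unlock(mm);

	return mapped;
}
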
/linux/arch/powerpc/mm/book3s64/
mmu_context.c 13 #include <linux/mm.h>
95 static int hash__init_new_context(struct mm_struct *mm) in hash__init_new_context() argument
99 mm->context.hash_context = kmalloc(sizeof(struct hash_mm_context), in hash__init_new_context()
101 if (!mm->context.hash_context) in hash__init_new_context()
111 * initialize context slice details for newly allocated mm's (which will in hash__init_new_context()
118 if (mm->context.id == 0) { in hash__init_new_context()
119 memset(mm->context.hash_context, 0, sizeof(struct hash_mm_context)); in hash__init_new_context()
120 slice_init_new_context_exec(mm); in hash__init_new_context()
122 /* This is fork. Copy hash_context details from current->mm */ in hash__init_new_context()
123 memcpy(mm in hash__init_new_context()
157 hash__init_new_context(struct mm_struct * mm) hash__init_new_context() argument
164 radix__init_new_context(struct mm_struct * mm) radix__init_new_context() argument
195 init_new_context(struct task_struct * tsk,struct mm_struct * mm) init_new_context() argument
261 destroy_pagetable_cache(struct mm_struct * mm) destroy_pagetable_cache() argument
275 destroy_context(struct mm_struct * mm) destroy_context() argument
301 arch_exit_mmap(struct mm_struct * mm) arch_exit_mmap() argument
[all...]
slice.c 15 #include <linux/mm.h>
21 #include <linux/sched/mm.h>
86 static int slice_area_is_free(struct mm_struct *mm, unsigned long addr, in slice_area_is_free() argument
91 if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr) in slice_area_is_free()
93 vma = find_vma(mm, addr); in slice_area_is_free()
97 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) in slice_low_has_vma() argument
99 return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, in slice_low_has_vma()
103 static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice) in slice_high_has_vma() argument
114 return !slice_area_is_free(mm, start, end - start); in slice_high_has_vma()
117 static void slice_mask_for_free(struct mm_struct *mm, struc argument
138 slice_check_range_fits(struct mm_struct * mm,const struct slice_mask * available,unsigned long start,unsigned long len) slice_check_range_fits() argument
173 struct mm_struct *mm = parm; slice_flush_segments() local
187 slice_convert(struct mm_struct * mm,const struct slice_mask * mask,int psize) slice_convert() argument
280 slice_find_area_bottomup(struct mm_struct * mm,unsigned long addr,unsigned long len,const struct slice_mask * available,int psize,unsigned long high_limit) slice_find_area_bottomup() argument
322 slice_find_area_topdown(struct mm_struct * mm,unsigned long addr,unsigned long len,const struct slice_mask * available,int psize,unsigned long high_limit) slice_find_area_topdown() argument
380 slice_find_area(struct mm_struct * mm,unsigned long len,const struct slice_mask * mask,int psize,int topdown,unsigned long high_limit) slice_find_area() argument
436 struct mm_struct *mm = current->mm; slice_get_unmapped_area() local
692 get_slice_psize(struct mm_struct * mm,unsigned long addr) get_slice_psize() argument
711 slice_init_new_context_exec(struct mm_struct * mm) slice_init_new_context_exec() argument
747 struct mm_struct *mm = current->mm; slice_setup_new_exec() local
757 slice_set_range_psize(struct mm_struct * mm,unsigned long start,unsigned long len,unsigned int psize) slice_set_range_psize() argument
788 slice_is_hugepage_only_range(struct mm_struct * mm,unsigned long addr,unsigned long len) slice_is_hugepage_only_range() argument
[all...]
/linux/Documentation/core-api/
mm-api.rst 14 .. kernel-doc:: mm/gup.c
40 .. kernel-doc:: mm/slub.c
43 .. kernel-doc:: mm/slab_common.c
46 .. kernel-doc:: mm/util.c
52 .. kernel-doc:: mm/vmalloc.c
61 .. kernel-doc:: mm/filemap.c
67 .. kernel-doc:: mm/readahead.c
70 .. kernel-doc:: mm/readahead.c
76 .. kernel-doc:: mm/page-writeback.c
82 .. kernel-doc:: mm/truncat
[all...]
/linux/arch/arm/include/asm/
mmu_context.h 24 void __check_vmalloc_seq(struct mm_struct *mm);
27 static inline void check_vmalloc_seq(struct mm_struct *mm) in check_vmalloc_seq() argument
30 unlikely(atomic_read(&mm->context.vmalloc_seq) != in check_vmalloc_seq()
32 __check_vmalloc_seq(mm); in check_vmalloc_seq()
38 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
42 init_new_context(struct task_struct *tsk, struct mm_struct *mm) in init_new_context() argument
44 atomic64_set(&mm->context.id, 0); in init_new_context()
49 void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
52 static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm, in a15_erratum_get_cpumask() argument
62 static inline void check_and_switch_context(struct mm_struct *mm, in check_and_switch_context() argument
85 struct mm_struct *mm = current->mm; finish_arch_post_lock_switch() local
142 enter_lazy_tlb(struct mm_struct * mm,struct task_struct * tsk) enter_lazy_tlb() argument
[all...]
/linux/tools/testing/vma/
vma.c 13 #include "../../../mm/vma.h"
31 #include "../../../mm/vma_init.c"
32 #include "../../../mm/vma_exec.c"
33 #include "../../../mm/vma.c"
64 static struct vm_area_struct *alloc_vma(struct mm_struct *mm, in alloc_vma() argument
70 struct vm_area_struct *ret = vm_area_alloc(mm); in alloc_vma()
85 static int attach_vma(struct mm_struct *mm, struct vm_area_struct *vma) in attach_vma() argument
89 res = vma_link(mm, vma); in attach_vma()
102 static struct vm_area_struct *alloc_and_link_vma(struct mm_struct *mm, in alloc_and_link_vma() argument
108 struct vm_area_struct *vma = alloc_vma(mm, star in alloc_and_link_vma()
211 try_merge_new_vma(struct mm_struct * mm,struct vma_merge_struct * vmg,unsigned long start,unsigned long end,pgoff_t pgoff,vm_flags_t vm_flags,bool * was_merged) try_merge_new_vma() argument
249 cleanup_mm(struct mm_struct * mm,struct vma_iterator * vmi) cleanup_mm() argument
305 struct mm_struct mm = {}; test_simple_merge() local
339 struct mm_struct mm = {}; test_simple_modify() local
398 struct mm_struct mm = {}; test_simple_expand() local
426 struct mm_struct mm = {}; test_simple_shrink() local
447 struct mm_struct mm = {}; test_merge_new() local
643 struct mm_struct mm = {}; test_vma_merge_special_flags() local
715 struct mm_struct mm = {}; test_vma_merge_with_close() local
924 struct mm_struct mm = {}; test_vma_merge_new_with_close() local
979 struct mm_struct mm = {}; test_merge_existing() local
1209 struct mm_struct mm = {}; test_anon_vma_non_mergeable() local
1296 struct mm_struct mm = {}; test_dup_anon_vma() local
1456 struct mm_struct mm = {}; test_vmi_prealloc_fail() local
1522 struct mm_struct mm = {}; test_merge_extend() local
1552 struct mm_struct mm = {}; test_copy_vma() local
1585 struct mm_struct mm = {}; test_expand_only_mode() local
1624 struct mm_struct mm = {}; test_mmap_region_basic() local
[all...]
/linux/arch/s390/mm/
pgtable.c 13 #include <linux/mm.h>
39 static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr, in ptep_ipte_local() argument
46 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_local()
50 asce = asce ? : mm->context.asce; in ptep_ipte_local()
59 static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr, in ptep_ipte_global() argument
66 asce = READ_ONCE(mm->context.gmap_asce); in ptep_ipte_global()
70 asce = asce ? : mm->context.asce; in ptep_ipte_global()
79 static inline pte_t ptep_flush_direct(struct mm_struct *mm, in ptep_flush_direct() argument
88 atomic_inc(&mm->context.flush_count); in ptep_flush_direct()
90 cpumask_equal(mm_cpumask(mm), cpumask_o in ptep_flush_direct()
98 ptep_flush_lazy(struct mm_struct * mm,unsigned long addr,pte_t * ptep,int nodat) ptep_flush_lazy() argument
157 pgste_update_all(pte_t pte,pgste_t pgste,struct mm_struct * mm) pgste_update_all() argument
178 pgste_set_key(pte_t * ptep,pgste_t pgste,pte_t entry,struct mm_struct * mm) pgste_set_key() argument
222 pgste_pte_notify(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pgste_t pgste) pgste_pte_notify() argument
238 ptep_xchg_start(struct mm_struct * mm,unsigned long addr,pte_t * ptep) ptep_xchg_start() argument
250 ptep_xchg_commit(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pgste_t pgste,pte_t old,pte_t new) ptep_xchg_commit() argument
271 ptep_xchg_direct(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t new) ptep_xchg_direct() argument
292 ptep_reset_dat_prot(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t new) ptep_reset_dat_prot() argument
313 ptep_xchg_lazy(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t new) ptep_xchg_lazy() argument
336 struct mm_struct *mm = vma->vm_mm; ptep_modify_prot_start() local
353 struct mm_struct *mm = vma->vm_mm; ptep_modify_prot_commit() local
366 pmdp_idte_local(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp) pmdp_idte_local() argument
378 pmdp_idte_global(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp) pmdp_idte_global() argument
397 pmdp_flush_direct(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp) pmdp_flush_direct() argument
415 pmdp_flush_lazy(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp) pmdp_flush_lazy() argument
438 pmd_lookup(struct mm_struct * mm,unsigned long addr,pmd_t ** pmdp) pmd_lookup() argument
471 pmdp_xchg_direct(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp,pmd_t new) pmdp_xchg_direct() argument
484 pmdp_xchg_lazy(struct mm_struct * mm,unsigned long addr,pmd_t * pmdp,pmd_t new) pmdp_xchg_lazy() argument
497 pudp_idte_local(struct mm_struct * mm,unsigned long addr,pud_t * pudp) pudp_idte_local() argument
507 pudp_idte_global(struct mm_struct * mm,unsigned long addr,pud_t * pudp) pudp_idte_global() argument
523 pudp_flush_direct(struct mm_struct * mm,unsigned long addr,pud_t * pudp) pudp_flush_direct() argument
541 pudp_xchg_direct(struct mm_struct * mm,unsigned long addr,pud_t * pudp,pud_t new) pudp_xchg_direct() argument
555 pgtable_trans_huge_deposit(struct mm_struct * mm,pmd_t * pmdp,pgtable_t pgtable) pgtable_trans_huge_deposit() argument
570 pgtable_trans_huge_withdraw(struct mm_struct * mm,pmd_t * pmdp) pgtable_trans_huge_withdraw() argument
596 ptep_set_pte_at(struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t entry) ptep_set_pte_at() argument
611 ptep_set_notify(struct mm_struct * mm,unsigned long addr,pte_t * ptep) ptep_set_notify() argument
633 ptep_force_prot(struct mm_struct * mm,unsigned long addr,pte_t * ptep,int prot,unsigned long bit) ptep_force_prot() argument
668 ptep_shadow_pte(struct mm_struct * mm,unsigned long saddr,pte_t * sptep,pte_t * tptep,pte_t pte) ptep_shadow_pte() argument
695 ptep_unshadow_pte(struct mm_struct * mm,unsigned long saddr,pte_t * ptep) ptep_unshadow_pte() argument
709 ptep_zap_swap_entry(struct mm_struct * mm,swp_entry_t entry) ptep_zap_swap_entry() argument
721 ptep_zap_unused(struct mm_struct * mm,unsigned long addr,pte_t * ptep,int reset) ptep_zap_unused() argument
745 ptep_zap_key(struct mm_struct * mm,unsigned long addr,pte_t * ptep) ptep_zap_key() argument
765 ptep_test_and_clear_uc(struct mm_struct * mm,unsigned long addr,pte_t * ptep) ptep_test_and_clear_uc() argument
792 set_guest_storage_key(struct mm_struct * mm,unsigned long addr,unsigned char key,bool nq) set_guest_storage_key() argument
872 cond_set_guest_storage_key(struct mm_struct * mm,unsigned long addr,unsigned char key,unsigned char * oldkey,bool nq,bool mr,bool mc) cond_set_guest_storage_key() argument
903 reset_guest_reference_bit(struct mm_struct * mm,unsigned long addr) reset_guest_reference_bit() argument
965 get_guest_storage_key(struct mm_struct * mm,unsigned long addr,unsigned char * key) get_guest_storage_key() argument
1032 pgste_perform_essa(struct mm_struct * mm,unsigned long hva,int orc,unsigned long * oldpte,unsigned long * oldpgste) pgste_perform_essa() argument
1138 set_pgste_bits(struct mm_struct * mm,unsigned long hva,unsigned long bits,unsigned long value) set_pgste_bits() argument
1171 get_pgste(struct mm_struct * mm,unsigned long hva,unsigned long * pgstep) get_pgste() argument
[all...]
/linux/arch/arm/mm/
pgd.c 3 * linux/arch/arm/mm/pgd.c
7 #include <linux/mm.h>
17 #include "mm.h"
20 #define _pgd_alloc(mm) kmalloc_array(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL | __GFP_ZERO) argument
21 #define _pgd_free(mm, pgd) kfree(pgd) argument
23 #define _pgd_alloc(mm) __pgd_alloc(mm, 2) argument
24 #define _pgd_free(mm, pgd) __pgd_free(mm, pgd) argument
30 pgd_t *pgd_alloc(struct mm_struct *mm) in pgd_alloc() argument
140 pgd_free(struct mm_struct * mm,pgd_t * pgd_base) pgd_free() argument
[all...]
/linux/include/asm-generic/
pgalloc.h 12 * @mm: the mm_struct of the current context
19 static inline pte_t *__pte_alloc_one_kernel_noprof(struct mm_struct *mm) in __pte_alloc_one_kernel_noprof() argument
26 if (!pagetable_pte_ctor(mm, ptdesc)) { in __pte_alloc_one_kernel_noprof()
38 * @mm: the mm_struct of the current context
42 static inline pte_t *pte_alloc_one_kernel_noprof(struct mm_struct *mm) in pte_alloc_one_kernel_noprof() argument
44 return __pte_alloc_one_kernel_noprof(mm); in pte_alloc_one_kernel_noprof()
51 * @mm: the mm_struct of the current context
54 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) in pte_free_kernel() argument
61 * @mm: the mm_struct of the current context
71 static inline pgtable_t __pte_alloc_one_noprof(struct mm_struct *mm, gfp_ argument
96 pte_alloc_one_noprof(struct mm_struct * mm) pte_alloc_one_noprof() argument
113 pte_free(struct mm_struct * mm,struct page * pte_page) pte_free() argument
135 pmd_alloc_one_noprof(struct mm_struct * mm,unsigned long addr) pmd_alloc_one_noprof() argument
155 pmd_free(struct mm_struct * mm,pmd_t * pmd) pmd_free() argument
168 __pud_alloc_one_noprof(struct mm_struct * mm,unsigned long addr) __pud_alloc_one_noprof() argument
196 pud_alloc_one_noprof(struct mm_struct * mm,unsigned long addr) pud_alloc_one_noprof() argument
203 __pud_free(struct mm_struct * mm,pud_t * pud) __pud_free() argument
212 pud_free(struct mm_struct * mm,pud_t * pud) pud_free() argument
222 __p4d_alloc_one_noprof(struct mm_struct * mm,unsigned long addr) __p4d_alloc_one_noprof() argument
241 p4d_alloc_one_noprof(struct mm_struct * mm,unsigned long addr) p4d_alloc_one_noprof() argument
248 __p4d_free(struct mm_struct * mm,p4d_t * p4d) __p4d_free() argument
257 p4d_free(struct mm_struct * mm,p4d_t * p4d) p4d_free() argument
266 __pgd_alloc_noprof(struct mm_struct * mm,unsigned int order) __pgd_alloc_noprof() argument
284 __pgd_free(struct mm_struct * mm,pgd_t * pgd) __pgd_free() argument
293 pgd_free(struct mm_struct * mm,pgd_t * pgd) pgd_free() argument
[all...]
/linux/include/linux/sched/
mm.h 20 * @mm: The &struct mm_struct to pin.
22 * Make sure that @mm will not get freed even after the owning task
27 * This is a preferred way to pin @mm for a longer/unbounded amount
32 * See also <Documentation/mm/active_mm.rst> for an in-depth explanation
35 static inline void mmgrab(struct mm_struct *mm) in mmgrab() argument
37 atomic_inc(&mm->mm_count); in mmgrab()
45 extern void __mmdrop(struct mm_struct *mm);
47 static inline void mmdrop(struct mm_struct *mm) in mmdrop() argument
54 if (unlikely(atomic_dec_and_test(&mm->mm_count))) in mmdrop()
55 __mmdrop(mm); in mmdrop()
65 struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); __mmdrop_delayed() local
74 mmdrop_sched(struct mm_struct * mm) mmdrop_sched() argument
81 mmdrop_sched(struct mm_struct * mm) mmdrop_sched() argument
88 mmgrab_lazy_tlb(struct mm_struct * mm) mmgrab_lazy_tlb() argument
94 mmdrop_lazy_tlb(struct mm_struct * mm) mmdrop_lazy_tlb() argument
107 mmdrop_lazy_tlb_sched(struct mm_struct * mm) mmdrop_lazy_tlb_sched() argument
131 mmget(struct mm_struct * mm) mmget() argument
136 mmget_not_zero(struct mm_struct * mm) mmget_not_zero() argument
166 mm_update_next_owner(struct mm_struct * mm) mm_update_next_owner() argument
213 arch_pick_mmap_layout(struct mm_struct * mm,struct rlimit * rlim_stack) arch_pick_mmap_layout() argument
532 membarrier_mm_sync_core_before_usermode(struct mm_struct * mm) membarrier_mm_sync_core_before_usermode() argument
561 membarrier_exec_mmap(struct mm_struct * mm) membarrier_exec_mmap() argument
564 membarrier_mm_sync_core_before_usermode(struct mm_struct * mm) membarrier_mm_sync_core_before_usermode() argument
[all...]
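
The sched/mm.h comments above distinguish pinning the mm_struct itself (mmgrab()/mmdrop(), mm_count) from pinning the usable address space (mmget()/mmget_not_zero()/mmput(), mm_users). A sketch of how the two are typically paired; my_worker and its functions are hypothetical:

#include <linux/sched/mm.h>

struct my_worker {
	struct mm_struct *mm;
};

static void worker_bind(struct my_worker *w, struct mm_struct *mm)
{
	mmgrab(mm);			/* mm_struct itself cannot be freed now */
	w->mm = mm;
}

static void worker_touch(struct my_worker *w)
{
	/* The page tables may already be gone; pin mm_users only if live. */
	if (mmget_not_zero(w->mm)) {
		/* ... safe to operate on the address space here ... */
		mmput(w->mm);
	}
}

static void worker_unbind(struct my_worker *w)
{
	mmdrop(w->mm);			/* pairs with mmgrab() */
	w->mm = NULL;
}
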
/linux/arch/x86/kernel/
ldt.c 19 #include <linux/mm.h>
42 void load_mm_ldt(struct mm_struct *mm) in load_mm_ldt() argument
47 ldt = READ_ONCE(mm->context.ldt); in load_mm_ldt()
50 * Any change to mm->context.ldt is followed by an IPI to all in load_mm_ldt()
51 * CPUs with the mm active. The LDT will not be freed until in load_mm_ldt()
93 * Load the LDT if either the old or new mm had an LDT. in switch_ldt()
95 * An mm will never go from having an LDT to not having an LDT. Two in switch_ldt()
138 struct mm_struct *mm = __mm; in flush_ldt() local
140 if (this_cpu_read(cpu_tlbstate.loaded_mm) != mm) in flush_ldt()
143 load_mm_ldt(mm); in flush_ldt()
189 do_sanity_check(struct mm_struct * mm,bool had_kernel_mapping,bool had_user_mapping) do_sanity_check() argument
234 map_ldt_struct_to_user(struct mm_struct * mm) map_ldt_struct_to_user() argument
247 sanity_check_ldt_mapping(struct mm_struct * mm) sanity_check_ldt_mapping() argument
264 map_ldt_struct_to_user(struct mm_struct * mm) map_ldt_struct_to_user() argument
272 sanity_check_ldt_mapping(struct mm_struct * mm) sanity_check_ldt_mapping() argument
288 map_ldt_struct(struct mm_struct * mm,struct ldt_struct * ldt,int slot) map_ldt_struct() argument
349 unmap_ldt_struct(struct mm_struct * mm,struct ldt_struct * ldt) unmap_ldt_struct() argument
383 map_ldt_struct(struct mm_struct * mm,struct ldt_struct * ldt,int slot) map_ldt_struct() argument
388 unmap_ldt_struct(struct mm_struct * mm,struct ldt_struct * ldt) unmap_ldt_struct() argument
393 free_ldt_pgtables(struct mm_struct * mm) free_ldt_pgtables() argument
421 install_ldt(struct mm_struct * mm,struct ldt_struct * ldt) install_ldt() argument
451 ldt_dup_context(struct mm_struct * old_mm,struct mm_struct * mm) ldt_dup_context() argument
491 destroy_context_ldt(struct mm_struct * mm) destroy_context_ldt() argument
497 ldt_arch_exit_mmap(struct mm_struct * mm) ldt_arch_exit_mmap() argument
504 struct mm_struct *mm = current->mm; read_ldt() local
580 struct mm_struct *mm = current->mm; write_ldt() local
[all...]
/linux/arch/sparc/mm/
tsb.c 2 /* arch/sparc64/mm/tsb.c
121 struct mm_struct *mm = tb->mm; in flush_tsb_user() local
124 spin_lock_irqsave(&mm->context.lock, flags); in flush_tsb_user()
127 base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; in flush_tsb_user()
128 nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; in flush_tsb_user()
140 else if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { in flush_tsb_user()
141 base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; in flush_tsb_user()
142 nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; in flush_tsb_user()
149 spin_unlock_irqrestore(&mm in flush_tsb_user()
152 flush_tsb_user_page(struct mm_struct * mm,unsigned long vaddr,unsigned int hugepage_shift) flush_tsb_user_page() argument
194 setup_tsb_params(struct mm_struct * mm,unsigned long tsb_idx,unsigned long tsb_bytes) setup_tsb_params() argument
396 tsb_grow(struct mm_struct * mm,unsigned long tsb_index,unsigned long rss) tsb_grow() argument
536 init_new_context(struct task_struct * tsk,struct mm_struct * mm) init_new_context() argument
602 destroy_context(struct mm_struct * mm) destroy_context() argument
[all...]
/linux/arch/arm64/include/asm/
mmu_context.h 56 void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
58 static inline void cpu_switch_mm(pgd_t *pgd, struct mm_struct *mm) in cpu_switch_mm() argument
61 cpu_do_switch_mm(virt_to_phys(pgd),mm); in cpu_switch_mm()
89 * Remove the idmap from TTBR0_EL1 and install the pgd of the active mm.
96 * If current is a not a user task, the mm covers the TTBR1_EL1 page tables,
102 struct mm_struct *mm = current->active_mm; in cpu_uninstall_idmap() local
108 if (mm != &init_mm && !system_uses_ttbr0_pan()) in cpu_uninstall_idmap()
109 cpu_switch_mm(mm->pgd, mm); in cpu_uninstall_idmap()
166 * freeing the ASID from the context of the dying mm (
173 init_new_context(tsk,mm) global() argument
175 init_new_context(struct task_struct * tsk,struct mm_struct * mm) init_new_context() argument
187 arch_dup_pkeys(struct mm_struct * oldmm,struct mm_struct * mm) arch_dup_pkeys() argument
193 arch_dup_mmap(struct mm_struct * oldmm,struct mm_struct * mm) arch_dup_mmap() argument
200 arch_exit_mmap(struct mm_struct * mm) arch_exit_mmap() argument
204 arch_unmap(struct mm_struct * mm,unsigned long start,unsigned long end) arch_unmap() argument
211 update_saved_ttbr0(struct task_struct * tsk,struct mm_struct * mm) update_saved_ttbr0() argument
227 update_saved_ttbr0(struct task_struct * tsk,struct mm_struct * mm) update_saved_ttbr0() argument
234 enter_lazy_tlb(struct mm_struct * mm,struct task_struct * tsk) enter_lazy_tlb() argument
301 mm_untag_mask(struct mm_struct * mm) mm_untag_mask() argument
325 deactivate_mm(struct task_struct * tsk,struct mm_struct * mm) deactivate_mm() argument
[all...]
