/*
 *  arch/arm/include/asm/mmu_context.h
 *
 *  Copyright (C) 1996 Russell King.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Changelog:
 *   27-06-1996	RMK	Created
 */
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H

#include <linux/compiler.h>
#include <linux/sched.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/proc-fns.h>

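/*
 * "kvm" here is short for "kernel VM", not virtualisation: the
 * kvm_seq counter in init_mm is bumped whenever kernel vmalloc/ioremap
 * mappings change, and __check_kvm_seq() re-syncs those kernel
 * mappings into this mm's page tables (see arch/arm/mm/ioremap.c).
 */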
void __check_kvm_seq(struct mm_struct *mm);

#ifdef CONFIG_CPU_HAS_ASID

/*
 * On ARMv6, we have the following structure in the Context ID:
 *
 * 31                         7          0
 * +-------------------------+-----------+
 * |      process ID         |   ASID    |
 * +-------------------------+-----------+
 * |              context ID             |
 * +-------------------------------------+
 *
 * The ASID is used to tag entries in the CPU caches and TLBs.
 * The context ID is used by debuggers and trace logic, and
 * should be unique within all running processes.
 */
#define ASID_BITS		8
#define ASID_MASK		((~0) << ASID_BITS)
#define ASID_FIRST_VERSION	(1 << ASID_BITS)
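
/*
 * With ASID_BITS == 8 the above works out to:
 *
 *   ASID_MASK          == 0xffffff00  (version field, bits 31..8)
 *   ASID_FIRST_VERSION == 0x00000100  (version 1, ASID 0)
 *
 * so mm->context.id & ~ASID_MASK is the hardware ASID, and the
 * remaining bits count roll-overs of the 256-entry ASID space.
 */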

extern unsigned int cpu_last_asid;
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct mm_struct *, current_mm);
#endif

void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);

static inline void check_context(struct mm_struct *mm)
{
	/*
	 * This code is executed with interrupts enabled. Therefore,
	 * mm->context.id cannot be updated to the latest ASID version
	 * on a different CPU (and the condition below not triggered)
	 * without first getting an IPI to reset the context. The
	 * alternative is to take a read_lock on mm->context.id_lock
	 * (after changing its type to rwlock_t).
	 */
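	/*
	 * The check below compares only the version bits: XOR-ing the
	 * context ID with the last allocated ASID and shifting out the
	 * low ASID_BITS leaves a non-zero value exactly when the
	 * version fields (bits 31..8) differ. For example, with
	 * id == 0x00000203 and cpu_last_asid == 0x00000305:
	 *
	 *   (0x00000203 ^ 0x00000305) >> 8 == 0x106 >> 8 == 0x1 != 0
	 *
	 * so a fresh ASID must be allocated for this mm.
	 */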
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);

	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
}

#define init_new_context(tsk,mm)	(__init_new_context(tsk,mm),0)

#else

static inline void check_context(struct mm_struct *mm)
{
#ifdef CONFIG_MMU
	if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
		__check_kvm_seq(mm);
#endif
}

#define init_new_context(tsk,mm)	0

#endif

#define destroy_context(mm)		do { } while(0)

/*
 * This is called when "tsk" is about to enter lazy TLB mode.
 *
 * mm:  describes the currently active mm context
 * tsk: task which is entering lazy tlb
 *
 * tsk->mm will be NULL
 */
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
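	/*
	 * Nothing to do on ARM: the kernel thread simply keeps running
	 * on the previous mm's page tables, and switch_mm() does the
	 * real work when a user task is scheduled back in.
	 */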
}

/*
 * This is the actual mm switch as far as the scheduler
 * is concerned.  No registers are touched.  We avoid
 * calling the CPU specific function when the mm hasn't
 * actually changed.
 */
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
	  struct task_struct *tsk)
{
#ifdef CONFIG_MMU
	unsigned int cpu = smp_processor_id();

#ifdef CONFIG_SMP
	/* check for possible thread migration */
	if (!cpumask_empty(mm_cpumask(next)) &&
	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
		__flush_icache_all();
#endif
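	/*
	 * cpumask_test_and_set_cpu() returns the old value of this
	 * CPU's bit: if it was already set and prev == next, this CPU
	 * is already running on this mm's page tables and the switch
	 * can be skipped entirely.
	 */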
	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
#ifdef CONFIG_SMP
		struct mm_struct **crt_mm = &per_cpu(current_mm, cpu);
		*crt_mm = next;
#endif
		check_context(next);
		cpu_switch_mm(next->pgd, next);
		if (cache_is_vivt())
			cpumask_clear_cpu(cpu, mm_cpumask(prev));
	}
#endif
}

#define deactivate_mm(tsk,mm)	do { } while (0)
#define activate_mm(prev,next)	switch_mm(prev, next, NULL)

/*
 * We are inserting a "fake" vma for the user-accessible vector page so
 * gdb and friends can get to it through ptrace and /proc/<pid>/mem.
 * But we also want to remove it before the generic code gets to see it
 * during process exit, otherwise unmapping it would cause total havoc.
 * (a macro is used here as remove_vma() is static to mm/mmap.c)
 */
#define arch_exit_mmap(mm) \
do { \
	struct vm_area_struct *high_vma = find_vma(mm, 0xffff0000); \
	if (high_vma) { \
		BUG_ON(high_vma->vm_next);  /* it should be last */ \
		if (high_vma->vm_prev) \
			high_vma->vm_prev->vm_next = NULL; \
		else \
			mm->mmap = NULL; \
		rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
		mm->mmap_cache = NULL; \
		mm->map_count--; \
		remove_vma(high_vma); \
	} \
} while (0)
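
/*
 * The macro above open-codes the vma unlink that the generic exit path
 * would otherwise trip over: 0xffff0000 is the high-vectors base where
 * the vector page vma sits, and it is detached from the mm->mmap list,
 * erased from the rb-tree, and dropped from map_count before
 * remove_vma() frees it.
 */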

static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
}

#endif