#ifndef _ASM_SCORE_MMU_CONTEXT_H
#define _ASM_SCORE_MMU_CONTEXT_H

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <asm-generic/mm_hooks.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/scoreregs.h>

/*
 * For the fast TLB miss handlers, we keep a per-CPU pointer to the
 * current pgd in pgd_current.  The process ID (ASID) is also stuffed
 * into the context register.
 */
extern unsigned long asid_cache;
extern unsigned long pgd_current;

#define TLBMISS_HANDLER_SETUP_PGD(pgd)	(pgd_current = (unsigned long)(pgd))

#define TLBMISS_HANDLER_SETUP()				\
do {							\
	write_c0_context(0);				\
	TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);	\
} while (0)
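
/*
 * Illustrative sketch (assumed call site, not part of this header):
 * early boot code would invoke TLBMISS_HANDLER_SETUP() once, before any
 * user mm exists, leaving the refill handler with ASID 0 and the kernel
 * page tables in pgd_current:
 *
 *	void __init tlb_init(void)
 *	{
 *		local_flush_tlb_all();
 *		TLBMISS_HANDLER_SETUP();
 *	}
 */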

/*
 * The upper bits, unused by the hardware, are treated as a software
 * ASID extension (the version number).
 */
#define ASID_VERSION_MASK	0xfffff000
#define ASID_FIRST_VERSION	0x1000

/* PEVN    --------- VPN ----------   --ASID--- -NA- */
/* binary: 0000 0000 0000 0000 0000 0000 0001 0000 */
/* binary: 0000 0000 0000 0000 0000 1111 1111 0000 */
#define ASID_INC	0x10
#define ASID_MASK	0xff0
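
/*
 * Worked example (value assumed): for a context of 0x3450,
 *
 *	0x3450 & ASID_VERSION_MASK == 0x3000	(software version)
 *	0x3450 & ASID_MASK         == 0x450	(hardware ASID 0x45)
 *
 * The hardware only matches the ASID field; the version bits exist so
 * that contexts allocated before the last TLB flush can be detected in
 * switch_mm() below.
 */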

static inline void enter_lazy_tlb(struct mm_struct *mm,
				struct task_struct *tsk)
{}

static inline void
get_new_mmu_context(struct mm_struct *mm)
{
	unsigned long asid = asid_cache + ASID_INC;

	if (!(asid & ASID_MASK)) {
		local_flush_tlb_all();		/* start new asid cycle */
		if (!asid)			/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	mm->context = asid;
	asid_cache = asid;
}
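
/*
 * Worked example of the rollover (values assumed): with asid_cache at
 * 0x1ff0, the increment yields 0x2000, whose ASID field is zero: every
 * hardware ASID of the current generation has been handed out, so the
 * whole TLB is flushed and 0x2000 begins the next version.  Only if the
 * counter itself wraps to 0 is it reseeded with ASID_FIRST_VERSION,
 * which keeps 0 free to mean "no context yet" (see init_new_context()).
 */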

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = 0;
	return 0;
}

static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			struct task_struct *tsk)
{
	unsigned long flags;

	local_irq_save(flags);
	if ((next->context ^ asid_cache) & ASID_VERSION_MASK)
		get_new_mmu_context(next);

	pevn_set(next->context);
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
	local_irq_restore(flags);
}
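
/*
 * Sketch of the version check above (values assumed): the XOR compares
 * only the version bits, e.g.
 *
 *	next->context == 0x1050, asid_cache == 0x3010
 *	(0x1050 ^ 0x3010) & ASID_VERSION_MASK == 0x2000	(non-zero)
 *
 * so next's ASID predates the last TLB flush and a fresh one is
 * allocated; a matching version reuses next->context unchanged.
 */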

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.
 */
static inline void destroy_context(struct mm_struct *mm)
{}

static inline void
deactivate_mm(struct task_struct *task, struct mm_struct *mm)
{}

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

	local_irq_save(flags);
	get_new_mmu_context(next);
	pevn_set(next->context);
	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
	local_irq_restore(flags);
}
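
/*
 * Unlike switch_mm(), activate_mm() unconditionally takes a fresh ASID:
 * it runs when current->mm has just been replaced (the exec path), so
 * next holds no live translations worth keeping.  Rough sketch of the
 * assumed generic caller:
 *
 *	task_lock(tsk);
 *	tsk->mm = new_mm;
 *	activate_mm(old_mm, new_mm);
 *	task_unlock(tsk);
 */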

#endif /* _ASM_SCORE_MMU_CONTEXT_H */