/*
 * The SH64 TLB miss.
 *
 * Original code from fault.c
 * Copyright (C) 2000, 2001  Paolo Alberelli
 *
 * Fast PTE->TLB refill path
 * Copyright (C) 2003 Richard.Curnow@superh.com
 *
 * IMPORTANT NOTES:
 * The do_fast_page_fault function is called from a context in entry.S
 * where very few registers have been saved.  In particular, the code in
 * this file must be compiled not to use ANY caller-save registers that
 * are not part of the restricted save set.  Also, it means that code in
 * this file must not make calls to functions elsewhere in the kernel, or
 * else the excepting context will see corruption in its caller-save
 * registers.  Plus, the entry.S save area is non-reentrant, so this code
 * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
 * on any exception.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/tlb.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <cpu/registers.h>
43 
/* Callable from fault.c, so not static */
inline void __do_tlb_refill(unsigned long address,
                            unsigned long long is_text_not_data, pte_t *pte)
{
	unsigned long long ptel;
	unsigned long long pteh;
	struct tlb_info *tlbp;
	unsigned long long next;

	/* Get PTEL first */
	ptel = pte_val(*pte);

	/*
	 * Set PTEH register
	 */
	pteh = neff_sign_extend(address & MMU_VPN_MASK);

	/* Set the ASID. */
	pteh |= get_asid() << PTEH_ASID_SHIFT;
	pteh |= PTEH_VALID;

	/* Set PTEL register, set_pte has performed the sign extension */
	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */

	tlbp = is_text_not_data ? &(cpu_data->itlb) : &(cpu_data->dtlb);
	next = tlbp->next;
	__flush_tlb_slot(next);
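	/* Write PTEL (config offset 1) before PTEH (config offset 0): PTEH
	 * carries the valid bit, so the slot only goes live once it has been
	 * fully programmed. */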
	asm volatile ("putcfg %0,1,%2\n\t"
		      "putcfg %0,0,%1\n"
		      : : "r" (next), "r" (pteh), "r" (ptel) );

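	/* Advance the round-robin replacement pointer, wrapping back to the
	 * first slot once we step past the last one. */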
	next += TLB_STEP;
	if (next > tlbp->last)
		next = tlbp->first;
	tlbp->next = next;
}

static int handle_vmalloc_fault(struct mm_struct *mm,
				unsigned long long protection_flags,
				unsigned long long textaccess,
				unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

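	/* vmalloc and IO mappings live in the kernel's reference page tables
	 * (rooted at swapper_pg_dir), so walk those rather than the faulting
	 * mm's tables. */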
	dir = pgd_offset_k(address);

	pud = pud_offset(dir, address);
	if (pud_none_or_clear_bad(pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}

static int handle_tlbmiss(struct mm_struct *mm,
			  unsigned long long protection_flags,
			  unsigned long long textaccess,
			  unsigned long address)
{
	pgd_t *dir;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t entry;

	/* NB. The PGD currently only contains a single entry - there is no
	   page table tree stored for the top half of the address space, since
	   virtual pages in that region should never be mapped in user mode.
	   (In kernel mode, the only things in that region are the 512Mb super
	   page (locked in) and the vmalloc (modules) + I/O device pages, which
	   are handled by handle_vmalloc_fault, so no PGD for the upper half is
	   required by kernel mode either.)

	   See how mm->pgd is allocated and initialised in pgd_alloc to see why
	   the following test is necessary.  - RPC */
	if (address >= (unsigned long) TASK_SIZE)
		/* upper half - never has page table entries. */
		return 0;

	dir = pgd_offset(mm, address);
	if (pgd_none(*dir) || !pgd_present(*dir))
		return 0;

	pud = pud_offset(dir, address);
	if (pud_none(*pud) || !pud_present(*pud))
		return 0;

	pmd = pmd_offset(pud, address);
	if (pmd_none(*pmd) || !pmd_present(*pmd))
		return 0;

	pte = pte_offset_kernel(pmd, address);
	entry = *pte;

	if (pte_none(entry) || !pte_present(entry))
		return 0;

	/*
	 * If the page doesn't have sufficient protection bits set to
	 * service the kind of fault being handled, there's not much
	 * point doing the TLB refill.  Punt the fault to the general
	 * handler.
	 */
	if ((pte_val(entry) & protection_flags) != protection_flags)
		return 0;

	__do_tlb_refill(address, textaccess, pte);

	return 1;
}

/*
 * Put all this information into one structure so that everything is just
 * arithmetic relative to a single base address.  This reduces the number
 * of movi/shori pairs needed just to load addresses of static data.
 */
struct expevt_lookup {
	unsigned short protection_flags[8];
	unsigned char  is_text_access[8];
	unsigned char  is_write_access[8];
};

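/* Protection bits as they appear in PTEL: user-accessible, writable,
   executable, readable. */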
#define PRU (1<<9)
#define PRW (1<<8)
#define PRX (1<<7)
#define PRR (1<<6)

#define DIRTY (_PAGE_DIRTY | _PAGE_ACCESSED)
#define YOUNG (_PAGE_ACCESSED)

/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
   the fault happened in user mode or privileged mode. */
static struct expevt_lookup expevt_lookup_table = {
	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
};

/*
 * This routine handles page faults that can be serviced just by refilling a
 * TLB entry from an existing page table entry.  (This case represents a very
 * large majority of page faults.)  Return 1 if the fault was successfully
 * handled.  Return 0 if the fault could not be handled.  (This leads into the
 * general fault handling in fault.c which deals with mapping file-backed
 * pages, stack growth, segmentation faults, swapping, etc.)
 */
asmlinkage int do_fast_page_fault(unsigned long long ssr_md,
				  unsigned long long expevt,
				  unsigned long address)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long long textaccess;
	unsigned long long protection_flags;
	unsigned long long index;
	unsigned long long expevt4;

	/* The next few lines implement a way of hashing EXPEVT into a
	 * small array index which can be used to look up parameters
	 * specific to the type of TLBMISS being handled.
	 *
	 * Note:
	 *	ITLBMISS has EXPEVT==0xa40
	 *	RTLBMISS has EXPEVT==0x040
	 *	WTLBMISS has EXPEVT==0x060
	 */
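	/* Worked through: 0xa4 ^ (0xa4 >> 5) = 0xa1 -> index 1 (text fetch),
	 * 0x04 ^ (0x04 >> 5) = 0x04 -> index 4 (read), and
	 * 0x06 ^ (0x06 >> 5) = 0x06 -> index 6 (write), so each miss type
	 * selects a distinct slot in expevt_lookup_table.
	 */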
	expevt4 = (expevt >> 4);
	/* TODO: XOR ssr_md into this expression too, then we can check
	 * that PRU is set when it needs to be. */
	index = expevt4 ^ (expevt4 >> 5);
	index &= 7;
	protection_flags = expevt_lookup_table.protection_flags[index];
	textaccess       = expevt_lookup_table.is_text_access[index];

	/* SIM
	 * Note this is now called with interrupts still disabled.
	 * This is to cope with being called for a missing I/O port
	 * address with interrupts disabled. This should be fixed as
	 * soon as we have a better 'fast path' miss handler.
	 *
	 * Also take care how you try to debug this stuff.
	 * For example, writing debug data to a port which you
	 * have just faulted on is not going to work.
	 */

	tsk = current;
	mm = tsk->mm;

	if ((address >= VMALLOC_START && address < VMALLOC_END) ||
	    (address >= IOBASE_VADDR  && address < IOBASE_END)) {
		if (ssr_md) {
			/*
			 * Process contexts can never have this address
			 * range mapped.
			 */
			if (handle_vmalloc_fault(mm, protection_flags,
						 textaccess, address))
				return 1;
		}
	} else if (!in_interrupt() && mm) {
		if (handle_tlbmiss(mm, protection_flags, textaccess, address))
			return 1;
	}

	return 0;
}