/*
 * r2300.c: R2000 and R3000 specific mmu/cache code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 *
 * with a lot of changes to make this thing work for R3000s
 * Tx39XX R4k style caches added. HK
 * Copyright (C) 1998, 1999, 2000 Harald Koerfgen
 * Copyright (C) 1998 Gleb Raiko & Vladimir Roganov
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>

#include <asm/cacheops.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/system.h>
#include <asm/isadep.h>
#include <asm/io.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>

/* For R3000 cores with R4000 style caches */
static unsigned long icache_size, dcache_size;		/* Size in bytes */

#include <asm/r4kcache.h>

extern int r3k_have_wired_reg;	/* in r3k-tlb.c */

/* This sequence is required to ensure icache is disabled immediately */
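/*
 * A taken branch appears to be what forces the core to abandon any
 * instructions already fetched into its streaming buffer, so that the
 * caller's Config write clearing ICE is honoured before the cache
 * operations that follow.
 */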
#define TX39_STOP_STREAMING() \
__asm__ __volatile__( \
	".set    push\n\t" \
	".set    noreorder\n\t" \
	"b       1f\n\t" \
	"nop\n\t" \
	"1:\n\t" \
	".set pop" \
	)

/* TX39H-style cache flush routines. */
static void tx39h_flush_icache_all(void)
{
	unsigned long flags, config;

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

static void tx39h_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	/* Catch bad driver code */
	BUG_ON(size == 0);

	iob();
	blast_inv_dcache_range(addr, addr + size);
}


/* TX39H2,TX39H3 */
static inline void tx39_blast_dcache_page(unsigned long addr)
{
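	/*
	 * The TX3912 D-cache is probed with 4-byte lines (see
	 * tx39_probe_cache()), so the 16-byte-line blast below is
	 * presumably skipped there because the line size would not
	 * match; the TX3912 is handled by the tx39h_* routines instead.
	 */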
	if (current_cpu_type() != CPU_TX3912)
		blast_dcache16_page(addr);
}

static inline void tx39_blast_dcache_page_indexed(unsigned long addr)
{
	blast_dcache16_page_indexed(addr);
}

static inline void tx39_blast_dcache(void)
{
	blast_dcache16();
}

static inline void tx39_blast_icache_page(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache_page_indexed(unsigned long addr)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16_page_indexed(addr);
	write_c0_conf(config);
	local_irq_restore(flags);
}

static inline void tx39_blast_icache(void)
{
	unsigned long flags, config;
	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	blast_icache16();
	write_c0_conf(config);
	local_irq_restore(flags);
}

static void tx39__flush_cache_vmap(void)
{
	tx39_blast_dcache();
}

static void tx39__flush_cache_vunmap(void)
{
	tx39_blast_dcache();
}

static inline void tx39_flush_cache_all(void)
{
	if (!cpu_has_dc_aliases)
		return;

	tx39_blast_dcache();
}

static inline void tx39___flush_cache_all(void)
{
	tx39_blast_dcache();
	tx39_blast_icache();
}

static void tx39_flush_cache_mm(struct mm_struct *mm)
{
	if (!cpu_has_dc_aliases)
		return;

	if (cpu_context(smp_processor_id(), mm) != 0)
		tx39_blast_dcache();
}

static void tx39_flush_cache_range(struct vm_area_struct *vma,
	unsigned long start, unsigned long end)
{
	if (!cpu_has_dc_aliases)
		return;
	if (!(cpu_context(smp_processor_id(), vma->vm_mm)))
		return;

	tx39_blast_dcache();
}

static void tx39_flush_cache_page(struct vm_area_struct *vma, unsigned long page, unsigned long pfn)
{
	int exec = vma->vm_flags & VM_EXEC;
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	/*
	 * If the mm owns no valid ASID yet, it cannot possibly have
	 * gotten this page into the cache.
	 */
	if (cpu_context(smp_processor_id(), mm) == 0)
		return;

	page &= PAGE_MASK;
	pgdp = pgd_offset(mm, page);
	pudp = pud_offset(pgdp, page);
	pmdp = pmd_offset(pudp, page);
	ptep = pte_offset(pmdp, page);

	/*
	 * If the page isn't marked valid, the page cannot possibly be
	 * in the cache.
	 */
	if (!(pte_val(*ptep) & _PAGE_PRESENT))
		return;

	/*
	 * Doing flushes for another ASID than the current one is
	 * too difficult since stupid R4k caches do a TLB translation
	 * for every cache flush operation.  So we do indexed flushes
	 * in that case, which doesn't over-flush the cache.
	 */
	if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) {
		if (cpu_has_dc_aliases || exec)
			tx39_blast_dcache_page(page);
		if (exec)
			tx39_blast_icache_page(page);

		return;
	}

	/*
	 * Do indexed flush, too much work to get the (possible) TLB refills
	 * to work correctly.
	 */
	if (cpu_has_dc_aliases || exec)
		tx39_blast_dcache_page_indexed(page);
	if (exec)
		tx39_blast_icache_page_indexed(page);
}

static void local_tx39_flush_data_cache_page(void *addr)
{
	tx39_blast_dcache_page((unsigned long)addr);
}

static void tx39_flush_data_cache_page(unsigned long addr)
{
	tx39_blast_dcache_page(addr);
}

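/*
 * On the write-back TX39H2/H3 cores, newly written instructions may
 * still be sitting in the D-cache, so the range is written back from
 * the D-cache before the corresponding I-cache lines are invalidated.
 */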
static void tx39_flush_icache_range(unsigned long start, unsigned long end)
{
	if (end - start > dcache_size)
		tx39_blast_dcache();
	else
		protected_blast_dcache_range(start, end);

	if (end - start > icache_size)
		tx39_blast_icache();
	else {
		unsigned long flags, config;
		/* disable icache (set ICE#) */
		local_irq_save(flags);
		config = read_c0_conf();
		write_c0_conf(config & ~TX39_CONF_ICE);
		TX39_STOP_STREAMING();
		protected_blast_icache_range(start, end);
		write_c0_conf(config);
		local_irq_restore(flags);
	}
}

static void tx39_flush_kernel_vmap_range(unsigned long vaddr, int size)
{
	BUG();
}

static void tx39_dma_cache_wback_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

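	/*
	 * Whole, page-aligned requests are flushed page by page; anything
	 * bigger than the entire D-cache is cheaper to flush in full;
	 * smaller or unaligned requests are flushed by address range.
	 */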
	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while (addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_dcache_range(addr, addr + size);
	}
}

static void tx39_dma_cache_inv(unsigned long addr, unsigned long size)
{
	unsigned long end;

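	/*
	 * Same size/alignment strategy as tx39_dma_cache_wback_inv(); only
	 * the sub-page case differs by using the invalidate-only range op.
	 */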
	if (((size | addr) & (PAGE_SIZE - 1)) == 0) {
		end = addr + size;
		do {
			tx39_blast_dcache_page(addr);
			addr += PAGE_SIZE;
		} while (addr != end);
	} else if (size > dcache_size) {
		tx39_blast_dcache();
	} else {
		blast_inv_dcache_range(addr, addr + size);
	}
}

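/*
 * Make a signal trampoline written to the user stack visible to the
 * CPU: write back the D-cache line that holds it, then invalidate the
 * matching I-cache line with the I-cache briefly disabled.
 */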
static void tx39_flush_cache_sigtramp(unsigned long addr)
{
	unsigned long ic_lsize = current_cpu_data.icache.linesz;
	unsigned long dc_lsize = current_cpu_data.dcache.linesz;
	unsigned long config;
	unsigned long flags;

	protected_writeback_dcache_line(addr & ~(dc_lsize - 1));

	/* disable icache (set ICE#) */
	local_irq_save(flags);
	config = read_c0_conf();
	write_c0_conf(config & ~TX39_CONF_ICE);
	TX39_STOP_STREAMING();
	protected_flush_icache_line(addr & ~(ic_lsize - 1));
	write_c0_conf(config);
	local_irq_restore(flags);
}

static __init void tx39_probe_cache(void)
{
	unsigned long config;

	config = read_c0_conf();

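	/*
	 * The ICS/DCS fields of the Config register give the cache sizes
	 * as a power of two, in units of 1kB (i.e. size = 1kB << field).
	 */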
	icache_size = 1 << (10 + ((config & TX39_CONF_ICS_MASK) >>
				  TX39_CONF_ICS_SHIFT));
	dcache_size = 1 << (10 + ((config & TX39_CONF_DCS_MASK) >>
				  TX39_CONF_DCS_SHIFT));

	current_cpu_data.icache.linesz = 16;
	switch (current_cpu_type()) {
	case CPU_TX3912:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 4;
		break;

	case CPU_TX3927:
		current_cpu_data.icache.ways = 2;
		current_cpu_data.dcache.ways = 2;
		current_cpu_data.dcache.linesz = 16;
		break;

	case CPU_TX3922:
	default:
		current_cpu_data.icache.ways = 1;
		current_cpu_data.dcache.ways = 1;
		current_cpu_data.dcache.linesz = 16;
		break;
	}
}

void __cpuinit tx39_cache_init(void)
{
	extern void build_clear_page(void);
	extern void build_copy_page(void);
	unsigned long config;

	config = read_c0_conf();
	config &= ~TX39_CONF_WBON;
	write_c0_conf(config);

	tx39_probe_cache();

	switch (current_cpu_type()) {
	case CPU_TX3912:
		/* TX39/H core (writethru direct-map cache) */
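		/*
		 * With a write-through D-cache there is nothing to write
		 * back, so every flush hook below only needs to invalidate
		 * the I-cache.
		 */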
		__flush_cache_vmap	= tx39__flush_cache_vmap;
		__flush_cache_vunmap	= tx39__flush_cache_vunmap;
		flush_cache_all	= tx39h_flush_icache_all;
		__flush_cache_all	= tx39h_flush_icache_all;
		flush_cache_mm		= (void *) tx39h_flush_icache_all;
		flush_cache_range	= (void *) tx39h_flush_icache_all;
		flush_cache_page	= (void *) tx39h_flush_icache_all;
		flush_icache_range	= (void *) tx39h_flush_icache_all;
		local_flush_icache_range = (void *) tx39h_flush_icache_all;

		flush_cache_sigtramp	= (void *) tx39h_flush_icache_all;
		local_flush_data_cache_page	= (void *) tx39h_flush_icache_all;
		flush_data_cache_page	= (void *) tx39h_flush_icache_all;

		_dma_cache_wback_inv	= tx39h_dma_cache_wback_inv;

		shm_align_mask		= PAGE_SIZE - 1;

		break;

	case CPU_TX3922:
	case CPU_TX3927:
	default:
		/* TX39/H2,H3 core (writeback 2way-set-associative cache) */
		r3k_have_wired_reg = 1;
		write_c0_wired(0);	/* Wired is set to 8 on reset... */
		/* board-dependent init code may set WBON */

		__flush_cache_vmap	= tx39__flush_cache_vmap;
		__flush_cache_vunmap	= tx39__flush_cache_vunmap;

		flush_cache_all = tx39_flush_cache_all;
		__flush_cache_all = tx39___flush_cache_all;
		flush_cache_mm = tx39_flush_cache_mm;
		flush_cache_range = tx39_flush_cache_range;
		flush_cache_page = tx39_flush_cache_page;
		flush_icache_range = tx39_flush_icache_range;
		local_flush_icache_range = tx39_flush_icache_range;

		__flush_kernel_vmap_range = tx39_flush_kernel_vmap_range;

		flush_cache_sigtramp = tx39_flush_cache_sigtramp;
		local_flush_data_cache_page = local_tx39_flush_data_cache_page;
		flush_data_cache_page = tx39_flush_data_cache_page;

		_dma_cache_wback_inv = tx39_dma_cache_wback_inv;
		_dma_cache_wback = tx39_dma_cache_wback_inv;
		_dma_cache_inv = tx39_dma_cache_inv;

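		/*
		 * Shared mappings must be aligned to the size of one cache
		 * way, so the same data cannot end up in two differently
		 * indexed D-cache lines via aliased virtual addresses.
		 */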
		shm_align_mask = max_t(unsigned long,
		                       (dcache_size / current_cpu_data.dcache.ways) - 1,
		                       PAGE_SIZE - 1);

		break;
	}

	current_cpu_data.icache.waysize = icache_size / current_cpu_data.icache.ways;
	current_cpu_data.dcache.waysize = dcache_size / current_cpu_data.dcache.ways;

	current_cpu_data.icache.sets =
		current_cpu_data.icache.waysize / current_cpu_data.icache.linesz;
	current_cpu_data.dcache.sets =
		current_cpu_data.dcache.waysize / current_cpu_data.dcache.linesz;

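	/*
	 * If one D-cache way spans more than a page, two virtual addresses
	 * mapping the same physical page can index different cache lines,
	 * so virtual aliases are possible.
	 */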
	if (current_cpu_data.dcache.waysize > PAGE_SIZE)
		current_cpu_data.dcache.flags |= MIPS_CACHE_ALIASES;

	current_cpu_data.icache.waybit = 0;
	current_cpu_data.dcache.waybit = 0;

	printk("Primary instruction cache %ldkB, linesize %d bytes\n",
		icache_size >> 10, current_cpu_data.icache.linesz);
	printk("Primary data cache %ldkB, linesize %d bytes\n",
		dcache_size >> 10, current_cpu_data.dcache.linesz);

	build_clear_page();
	build_copy_page();
	tx39h_flush_icache_all();
}