/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);
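/* Callers are expected to serialise through the purge_tlb_start()/
 * purge_tlb_end() helpers in <asm/tlbflush.h>, which take this lock
 * with interrupts disabled around each TLB purge sequence.
 */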

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
	on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
	on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
	flush_instruction_cache_local(NULL);
	flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

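/*
 * Deferred D-cache flush: flush_dcache_page() below may only mark a
 * page-cache page dirty (PG_dcache_dirty) instead of flushing it.
 * When a translation for such a page is installed, this hook performs
 * the flush that was deferred, so userspace never sees stale data.
 */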
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	struct page *page = pte_page(*ptep);

	if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
	    test_bit(PG_dcache_dirty, &page->flags)) {

		flush_kernel_dcache_page(page);
		clear_bit(PG_dcache_dirty, &page->flags);
	} else if (parisc_requires_coherency())
		flush_kernel_dcache_page(page);
}

void
show_cache_info(struct seq_file *m)
{
	char buf[32];

	seq_printf(m, "I-cache\t\t: %ld KB\n",
		cache_info.ic_size/1024);
	if (cache_info.dc_loop != 1)
		snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
	seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
		cache_info.dc_size/1024,
		(cache_info.dc_conf.cc_wt ? "WT" : "WB"),
		(cache_info.dc_conf.cc_sh ? ", shared I/D" : ""),
		((cache_info.dc_loop == 1) ? "direct mapped" : buf));
	seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
		cache_info.it_size,
		cache_info.dt_size,
		cache_info.dt_conf.tc_sh ? " - shared with ITLB" : ""
	);

#ifndef CONFIG_PA20
	/* BTLB - Block TLB */
	if (btlb_info.max_size == 0) {
		seq_printf(m, "BTLB\t\t: not supported\n");
	} else {
		seq_printf(m,
		"BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
		"BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
		"BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
		btlb_info.max_size, (int)4096,
		btlb_info.max_size>>8,
		btlb_info.fixed_range_info.num_i,
		btlb_info.fixed_range_info.num_d,
		btlb_info.fixed_range_info.num_comb,
		btlb_info.variable_range_info.num_i,
		btlb_info.variable_range_info.num_d,
		btlb_info.variable_range_info.num_comb
		);
	}
#endif
}

void __init
parisc_cache_init(void)
{
	if (pdc_cache_info(&cache_info) < 0)
		panic("parisc_cache_init: pdc_cache_info failed");

#if 0
	printk("ic_size %lx dc_size %lx it_size %lx\n",
		cache_info.ic_size,
		cache_info.dc_size,
		cache_info.it_size);

	printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.dc_base,
		cache_info.dc_stride,
		cache_info.dc_count,
		cache_info.dc_loop);

	printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.dc_conf),
		cache_info.dc_conf.cc_alias,
		cache_info.dc_conf.cc_block,
		cache_info.dc_conf.cc_line,
		cache_info.dc_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.dc_conf.cc_wt,
		cache_info.dc_conf.cc_sh,
		cache_info.dc_conf.cc_cst,
		cache_info.dc_conf.cc_hv);

	printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
		cache_info.ic_base,
		cache_info.ic_stride,
		cache_info.ic_count,
		cache_info.ic_loop);

	printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
		*(unsigned long *) (&cache_info.ic_conf),
		cache_info.ic_conf.cc_alias,
		cache_info.ic_conf.cc_block,
		cache_info.ic_conf.cc_line,
		cache_info.ic_conf.cc_shift);
	printk(" wt %d sh %d cst %d hv %d\n",
		cache_info.ic_conf.cc_wt,
		cache_info.ic_conf.cc_sh,
		cache_info.ic_conf.cc_cst,
		cache_info.ic_conf.cc_hv);

	printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.dt_conf.tc_sh,
		cache_info.dt_conf.tc_page,
		cache_info.dt_conf.tc_cst,
		cache_info.dt_conf.tc_aid,
		cache_info.dt_conf.tc_pad1);

	printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
		cache_info.it_conf.tc_sh,
		cache_info.it_conf.tc_page,
		cache_info.it_conf.tc_cst,
		cache_info.it_conf.tc_aid,
		cache_info.it_conf.tc_pad1);
#endif

	split_tlb = 0;
	if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
		if (cache_info.dt_conf.tc_sh == 2)
			printk(KERN_WARNING "Unexpected TLB configuration. "
				"Will flush I/D separately (could be optimized).\n");

		split_tlb = 1;
	}

	/* "New and Improved" version from Jim Hull
	 *	(1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
	 * The following CAFL_STRIDE is an optimized version, see
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
	 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
	 */
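	/* For reference, the two forms are algebraically identical:
	 *   (1 << (cc_block-1)) * (cc_line << (4 + cc_shift))
	 *     = cc_line << ((cc_block-1) + 4 + cc_shift)
	 *     = cc_line << (3 + cc_block + cc_shift)
	 */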
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
	dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
	icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

#ifndef CONFIG_PA20
	if (pdc_btlb_info(&btlb_info) < 0) {
		memset(&btlb_info, 0, sizeof btlb_info);
	}
#endif

	if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
						PDC_MODEL_NVA_UNSUPPORTED) {
		printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
		panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
	}
}

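/*
 * Space-register hashing folds space-ID bits into the hash the
 * hardware uses to index caches/TLB.  Linux needs it off; the
 * CPU-specific assembly sequence below disables it, and PDC is then
 * queried to verify that no space-ID bits remain in use.
 */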
void disable_sr_hashing(void)
{
	int srhash_type, retval;
	unsigned long space_bits = 0;	/* stays 0 if PDC lacks the call */

	switch (boot_cpu_data.cpu_type) {
	case pcx: /* We shouldn't get this far.  setup.c should prevent it. */
		BUG();
		return;

	case pcxs:
	case pcxt:
	case pcxt_:
		srhash_type = SRHASH_PCXST;
		break;

	case pcxl:
		srhash_type = SRHASH_PCXL;
		break;

	case pcxl2: /* pcxl2 doesn't support space register hashing */
		return;

	default: /* Currently all PA2.0 machines use the same ins. sequence */
		srhash_type = SRHASH_PA20;
		break;
	}

	disable_sr_hashing_asm(srhash_type);

	retval = pdc_spaceid_bits(&space_bits);
	/* If this procedure isn't implemented, don't panic. */
	if (retval < 0 && retval != PDC_BAD_OPTION)
		panic("pdc_spaceid_bits call failed.\n");
	if (space_bits != 0)
		panic("SpaceID hashing is still on!\n");
}

static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
		   unsigned long physaddr)
{
	flush_dcache_page_asm(physaddr, vmaddr);
	if (vma->vm_flags & VM_EXEC)
		flush_icache_page_asm(physaddr, vmaddr);
}

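/*
 * Called when the kernel has dirtied a page-cache page (or is about to
 * expose it to user mappings).  If the page's mapping has no user
 * mappings yet, the flush is simply deferred via PG_dcache_dirty and
 * picked up later by update_mmu_cache(); otherwise each user alias of
 * the page is brought into coherence below.
 */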
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	struct vm_area_struct *mpnt;
	struct prio_tree_iter iter;
	unsigned long offset;
	unsigned long addr, old_addr = 0;
	pgoff_t pgoff;

	if (mapping && !mapping_mapped(mapping)) {
		set_bit(PG_dcache_dirty, &page->flags);
		return;
	}

	flush_kernel_dcache_page(page);

	if (!mapping)
		return;

	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

	/* We have carefully arranged in arch_get_unmapped_area() that
	 * *any* mappings of a file are always congruently mapped (whether
	 * declared as MAP_PRIVATE or MAP_SHARED), so we only need
	 * to flush one address here for them all to become coherent */

	flush_dcache_mmap_lock(mapping);
	vma_prio_tree_foreach(mpnt, &iter, &mapping->i_mmap, pgoff, pgoff) {
		offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
		addr = mpnt->vm_start + offset;

		/* The TLB is the engine of coherence on parisc: the
		 * CPU is entitled to speculate any page with a TLB
		 * mapping, so here we kill the mapping then flush the
		 * page along a special flush-only alias mapping.
		 * This guarantees that the page is no longer in the
		 * cache for any process, nor may it be speculatively
		 * read in (until the user or kernel specifically
		 * accesses it, of course) */

		flush_tlb_page(mpnt, addr);
		if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
			__flush_cache_page(mpnt, addr, page_to_phys(page));
			if (old_addr)
				printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
					old_addr, addr,
					mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
			old_addr = addr;
		}
	}
	flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

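/* Clear a user page through the assembly helper in pacache.S.  The
 * purge_tlb_start()/purge_tlb_end() bracket serialises the TLB
 * manipulation the helper performs (see pa_tlb_lock above).
 */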
void clear_user_page_asm(void *page, unsigned long vaddr)
{
	unsigned long flags;
	/* This function is implemented in assembly in pacache.S */
	extern void __clear_user_page_asm(void *page, unsigned long vaddr);

	purge_tlb_start(flags);
	__clear_user_page_asm(page, vaddr);
	purge_tlb_end(flags);
}

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

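/*
 * Calibrate parisc_cache_flush_threshold at boot: time a full D-cache
 * flush against a line-by-line flush of the kernel image using the
 * CR16 interval timer (mfctl(16)), then derive the range size at which
 * flushing the whole cache becomes cheaper than flushing the range.
 */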
void __init parisc_setup_cache_timing(void)
{
	unsigned long rangetime, alltime;
	unsigned long size;

	alltime = mfctl(16);
	flush_data_cache();
	alltime = mfctl(16) - alltime;

	size = (unsigned long)(_end - _text);
	rangetime = mfctl(16);
	flush_kernel_dcache_range((unsigned long)_text, size);
	rangetime = mfctl(16) - rangetime;

	printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
		alltime, size, rangetime);

	/* Racy, but if we see an intermediate value, it's ok too... */
	parisc_cache_flush_threshold = size * alltime / rangetime;

	parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
	if (!parisc_cache_flush_threshold)
		parisc_cache_flush_threshold = FLUSH_THRESHOLD;

	if (parisc_cache_flush_threshold > cache_info.dc_size)
		parisc_cache_flush_threshold = cache_info.dc_size;

	printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
		parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page(unsigned long);
extern void clear_user_page_asm(void *page, unsigned long vaddr);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	unsigned long flags;

	purge_kernel_dcache_page((unsigned long)page);
	purge_tlb_start(flags);
	pdtlb_kernel(page);
	purge_tlb_end(flags);
	clear_user_page_asm(page, vaddr);
}
EXPORT_SYMBOL(clear_user_page);

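/* Flush a page through its kernel mapping, then purge the kernel TLB
 * entry: as noted in flush_dcache_page() above, the CPU may
 * speculatively reload any page that still has a translation, so the
 * purge keeps the page out of the cache until it is explicitly
 * accessed again.
 */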
void flush_kernel_dcache_page_addr(void *addr)
{
	unsigned long flags;

	flush_kernel_dcache_page_asm(addr);
	purge_tlb_start(flags);
	pdtlb_kernel(addr);
	purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	/* no coherency needed (all in kmap/kunmap) */
	copy_user_page_asm(vto, vfrom);
	if (!parisc_requires_coherency())
		flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
	if (parisc_requires_coherency())
		flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

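/* Purge the TLB for a user address range.  'sid' is the space ID of
 * the mm being flushed; it is loaded into space register %sr1 via
 * mtsp(sid, 1), through which the pdtlb/pitlb purge macros below
 * implicitly address.
 */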
void __flush_tlb_range(unsigned long sid, unsigned long start,
		       unsigned long end)
{
	unsigned long npages;

	npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
		flush_tlb_all();
	else {
		unsigned long flags;

		mtsp(sid, 1);
		purge_tlb_start(flags);
		if (split_tlb) {
			while (npages--) {
				pdtlb(start);
				pitlb(start);
				start += PAGE_SIZE;
			}
		} else {
			while (npages--) {
				pdtlb(start);
				start += PAGE_SIZE;
			}
		}
		purge_tlb_end(flags);
	}
}

static void cacheflush_h_tmp_function(void *dummy)
{
	flush_cache_all_local();
}

void flush_cache_all(void)
{
	on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

void flush_cache_mm(struct mm_struct *mm)
{
#ifdef CONFIG_SMP
	flush_cache_all();
#else
	flush_cache_all_local();
#endif
}

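/* For ranges below the calibrated threshold a line-by-line flush is
 * cheaper; beyond it (see parisc_setup_cache_timing()) the whole
 * cache is flushed instead.
 */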
void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_dcache_range_asm(start, end);
	else
		flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
	if ((end - start) < parisc_cache_flush_threshold)
		flush_user_icache_range_asm(start, end);
	else
		flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	int sr3;

	BUG_ON(!vma->vm_mm->context);

	/* Fast path only if the range belongs to the current address
	 * space, i.e. its space ID is live in %sr3. */
	sr3 = mfsp(3);
	if (vma->vm_mm->context == sr3) {
		flush_user_dcache_range(start, end);
		flush_user_icache_range(start, end);
	} else {
		flush_cache_all();
	}
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
	BUG_ON(!vma->vm_mm->context);

	flush_tlb_page(vma, vmaddr);
	__flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}