/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */

#ifndef _ASM_TILE_CACHEFLUSH_H
#define _ASM_TILE_CACHEFLUSH_H

#include <arch/chip.h>

/* Keep includes the same across arches.  */
#include <linux/mm.h>
#include <linux/cache.h>
#include <asm/system.h>
#include <arch/icache.h>

/* Caches are physically indexed and so don't need special treatment. */
#define flush_cache_all()			do { } while (0)
#define flush_cache_mm(mm)			do { } while (0)
#define flush_cache_dup_mm(mm)			do { } while (0)
#define flush_cache_range(vma, start, end)	do { } while (0)
#define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
#define flush_dcache_page(page)			do { } while (0)
#define flush_dcache_mmap_lock(mapping)		do { } while (0)
#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)
#define flush_icache_page(vma, pg)		do { } while (0)
#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)

/* Flush the icache just on this cpu. */
extern void __flush_icache_range(unsigned long start, unsigned long end);

/* Flush the entire icache on this cpu. */
#define __flush_icache() __flush_icache_range(0, CHIP_L1I_CACHE_SIZE())

#ifdef CONFIG_SMP
/*
 * When the kernel writes to its own text we need to do an SMP
 * broadcast to make the L1I coherent everywhere.  This includes
 * module load and single step.
 */
extern void flush_icache_range(unsigned long start, unsigned long end);
#else
#define flush_icache_range __flush_icache_range
#endif
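
/*
 * Illustrative sketch (not part of the original API): a caller that
 * patches kernel text must flush the icache over the modified range
 * before the new instructions may execute.  The function and its
 * arguments are hypothetical, for demonstration only.
 */
static inline void example_patch_kernel_text(unsigned long *site,
					     unsigned long new_insn)
{
	*site = new_insn;		/* update the text word */
	flush_icache_range((unsigned long)site,
			   (unsigned long)(site + 1));
}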

/*
 * An update to an executable user page requires icache flushing.
 * We could carefully update only tiles that are running this process,
 * and rely on the fact that we flush the icache on every context
 * switch to avoid doing extra work here.  But for now, we'll be
 * conservative and just do a global icache flush.
 */
static inline void copy_to_user_page(struct vm_area_struct *vma,
				     struct page *page, unsigned long vaddr,
				     void *dst, void *src, int len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC) {
		flush_icache_range((unsigned long) dst,
				   (unsigned long) dst + len);
	}
}

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy((dst), (src), (len))
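
/*
 * Illustrative sketch of the kind of caller copy_to_user_page()
 * serves: ptrace-style access that writes instructions into another
 * process's page through a kernel mapping (the real caller is the
 * generic access_process_vm() machinery).  All names below are
 * hypothetical.
 */
static inline void example_poke_user_text(struct vm_area_struct *vma,
					  struct page *page,
					  unsigned long vaddr,
					  const void *insn, int len)
{
	/* Kernel-visible address of the target bytes within the page. */
	void *dst = page_address(page) + (vaddr & ~PAGE_MASK);

	copy_to_user_page(vma, page, vaddr, dst, (void *)insn, len);
}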

/*
 * Invalidate a VA range; pads to L2 cacheline boundaries.
 *
 * Note that on TILE64, __inv_buffer() actually flushes modified
 * cache lines in addition to invalidating them, i.e., it's the
 * same as __finv_buffer().
 */
static inline void __inv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_inv(next);
		next += CHIP_INV_STRIDE();
	}
}

/* Flush a VA range; pads to L2 cacheline boundaries. */
static inline void __flush_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_flush(next);
		next += CHIP_FLUSH_STRIDE();
	}
}

/* Flush & invalidate a VA range; pads to L2 cacheline boundaries. */
static inline void __finv_buffer(void *buffer, size_t size)
{
	char *next = (char *)((long)buffer & -L2_CACHE_BYTES);
	char *finish = (char *)L2_CACHE_ALIGN((long)buffer + size);
	while (next < finish) {
		__insn_finv(next);
		next += CHIP_FINV_STRIDE();
	}
}


/* Invalidate a VA range and wait for the invalidation to complete. */
static inline void inv_buffer(void *buffer, size_t size)
{
	__inv_buffer(buffer, size);
	mb();
}
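
/*
 * Illustrative sketch, with hypothetical names: invalidate a buffer
 * that a device has just DMAed into memory before the CPU reads it,
 * so the loads below see the new data rather than stale cached lines.
 */
static inline unsigned char example_read_first_dma_byte(void *dma_buf,
							size_t size)
{
	inv_buffer(dma_buf, size);	/* discard any stale cached copies */
	return *(unsigned char *)dma_buf;
}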

/*
 * Flush a locally-homecached VA range and wait for the evicted
 * cachelines to hit memory.
 */
static inline void flush_buffer_local(void *buffer, size_t size)
{
	__flush_buffer(buffer, size);
	mb_incoherent();
}
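
/*
 * Illustrative sketch, with hypothetical names: push freshly written
 * data out to memory before handing the buffer to an agent that reads
 * memory directly, e.g. a device about to DMA from it.
 */
static inline void example_publish_buffer(void *buf, const void *src,
					  size_t size)
{
	memcpy(buf, src, size);
	flush_buffer_local(buf, size);	/* data is now visible in memory */
}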

/*
 * Flush and invalidate a locally-homecached VA range and wait for the
 * evicted cachelines to hit memory.
 */
static inline void finv_buffer_local(void *buffer, size_t size)
{
	__finv_buffer(buffer, size);
	mb_incoherent();
}

/*
 * Flush and invalidate a VA range that is homed remotely, waiting
 * until the memory controller holds the flushed values.  If "hfh" is
 * true, we will do a more expensive flush involving additional loads
 * to make sure we have touched all the possible home cpus of a buffer
 * that is homed with "hash for home".
 */
void finv_buffer_remote(void *buffer, size_t size, int hfh);
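
/*
 * Illustrative sketch, with hypothetical names: read data that a
 * device has DMAed into a remotely-homed buffer.  The caller passes
 * hfh != 0 when the buffer is homed "hash for home"; determining that
 * is outside the scope of this header, so it is taken as a parameter.
 */
static inline unsigned int example_read_remote_dma_word(void *buf,
							size_t size,
							int hfh)
{
	/*
	 * Ensure memory holds the final data and no cache on any
	 * possible home cpu retains a stale line, then read.
	 */
	finv_buffer_remote(buf, size, hfh);
	return *(unsigned int *)buf;
}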

#endif /* _ASM_TILE_CACHEFLUSH_H */