Lines Matching +full:data +full:- +full:mapping
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Copyright (C) 1995-2002 Russell King
48 : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero) in flush_pfn_alias()
55 unsigned long offset = vaddr & (PAGE_SIZE - 1); in flush_icache_alias()
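The two fragments above come from the alias-flush helpers (flush_pfn_alias() and flush_icache_alias()). On an aliasing VIPT cache the kernel cleans a user alias by mapping the physical page into a dedicated kernel window whose virtual address has the same cache colour as the user address, then cleaning and invalidating just that window. A minimal sketch of the technique, assuming the arch/arm helpers set_top_pte(), CACHE_COLOUR() and FLUSH_ALIAS_START; the asm operands match those shown at line 48:

	static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
	{
		/* Pick the alias-window slot matching the user colour. */
		unsigned long to = FLUSH_ALIAS_START +
				   (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
		const int zero = 0;

		set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));

		asm(	"mcrr	p15, 0, %1, %0, c14\n"	/* clean+invalidate D range */
		"	mcr	p15, 0, %2, c7, c10, 4"	/* drain write buffer */
		    :
		    : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
		    : "cc");
	}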
94 if (vma->vm_flags & VM_EXEC) in flush_cache_range()
110 if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged()) in flush_cache_page()
145 /* VIPT non-aliasing D-cache */ in __flush_ptrace_access()
163 if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) in flush_ptrace_access()
165 if (vma->vm_flags & VM_EXEC) in flush_ptrace_access()
179 * Copy user data from/to a page which is mapped into a different process's address space. We need to be careful that it stays cache coherent.
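That comment heads the ptrace copy path. Its shape follows from the flush_ptrace_access() fragments above (lines 163-165): write the bytes through the kernel mapping, then let flush_ptrace_access() restore coherency for the user alias and, for VM_EXEC mappings, the I-cache. A hedged sketch of copy_to_user_page():

	void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
			       unsigned long uaddr, void *dst, const void *src,
			       unsigned long len)
	{
	#ifdef CONFIG_SMP
		preempt_disable();	/* the flush must run on this CPU */
	#endif
		memcpy(dst, src, len);	/* write via the kernel mapping */
		flush_ptrace_access(vma, page, uaddr, dst, len);
	#ifdef CONFIG_SMP
		preempt_enable();
	#endif
	}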
199 void __flush_dcache_page(struct address_space *mapping, struct page *page) in __flush_dcache_page() argument
202 * Write back any data associated with the kernel mapping of this in __flush_dcache_page()
203 * page. This ensures that data in the physical page is mutually in __flush_dcache_page()
204 * coherent with the kernel's mapping. in __flush_dcache_page()
229 * we only need to do one flush, at the relevant userspace in __flush_dcache_page()
230 * colour, which is congruent with page->index. in __flush_dcache_page()
232 if (mapping && cache_is_vipt_aliasing()) in __flush_dcache_page()
234 page->index << PAGE_SHIFT); in __flush_dcache_page()
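Assembled from the fragments at lines 199-234 (and eliding the highmem path), __flush_dcache_page() first writes back the kernel alias, then, for a page cache page on an aliasing VIPT cache, flushes the single user-colour alias derived from page->index via the flush_pfn_alias() sketch above:

	void __flush_dcache_page(struct address_space *mapping, struct page *page)
	{
		/* Write back the kernel mapping so the physical page
		 * is coherent with the kernel's view of it. */
		if (!PageHighMem(page))
			__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);

		/* A page cache page on an aliasing VIPT cache needs one
		 * more flush, at the userspace colour (page->index). */
		if (mapping && cache_is_vipt_aliasing())
			flush_pfn_alias(page_to_pfn(page),
					page->index << PAGE_SHIFT);
	}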
237 static void __flush_dcache_aliases(struct address_space *mapping, struct page *page) in __flush_dcache_aliases() argument
239 struct mm_struct *mm = current->active_mm; in __flush_dcache_aliases()
245 * - VIVT cache: we also need to write back and invalidate all user in __flush_dcache_aliases()
246 * data in the current VM view associated with this page. in __flush_dcache_aliases()
247 * - aliasing VIPT: we only need to find one mapping of this page. in __flush_dcache_aliases()
249 pgoff = page->index; in __flush_dcache_aliases()
251 flush_dcache_mmap_lock(mapping); in __flush_dcache_aliases()
252 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { in __flush_dcache_aliases()
258 if (mpnt->vm_mm != mm) in __flush_dcache_aliases()
260 if (!(mpnt->vm_flags & VM_MAYSHARE)) in __flush_dcache_aliases()
262 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; in __flush_dcache_aliases()
263 flush_cache_page(mpnt, mpnt->vm_start + offset, page_to_pfn(page)); in __flush_dcache_aliases()
265 flush_dcache_mmap_unlock(mapping); in __flush_dcache_aliases()
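The fragments at lines 237-265 reassemble into the VIVT walk: every VMA in the file's interval tree that maps this page offset, belongs to the current mm, and is shareable gets its user alias flushed. Roughly:

	static void __flush_dcache_aliases(struct address_space *mapping,
					   struct page *page)
	{
		struct mm_struct *mm = current->active_mm;
		struct vm_area_struct *mpnt;
		pgoff_t pgoff = page->index;

		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
			unsigned long offset;

			/* Only aliases in the current VM view matter. */
			if (mpnt->vm_mm != mm)
				continue;
			if (!(mpnt->vm_flags & VM_MAYSHARE))
				continue;
			offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
			flush_cache_page(mpnt, mpnt->vm_start + offset,
					 page_to_pfn(page));
		}
		flush_dcache_mmap_unlock(mapping);
	}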
273 struct address_space *mapping; in __sync_icache_dcache() local
276 /* only flush non-aliasing VIPT caches for exec mappings */ in __sync_icache_dcache()
284 mapping = page_mapping_file(page); in __sync_icache_dcache()
286 mapping = NULL; in __sync_icache_dcache()
288 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) in __sync_icache_dcache()
289 __flush_dcache_page(mapping, page); in __sync_icache_dcache()
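Lines 273-289 belong to the set_pte hook. PG_dcache_clean acts as a once-only latch: the first time a dirty-cached page is mapped, the D-cache gets flushed; executable mappings additionally invalidate the I-cache. A sketch, assuming the usual pte_exec() and __flush_icache_all() helpers:

	void __sync_icache_dcache(pte_t pteval)
	{
		unsigned long pfn = pte_pfn(pteval);
		struct address_space *mapping;
		struct page *page;

		/* only flush non-aliasing VIPT caches for exec mappings */
		if (cache_is_vipt_nonaliasing() && !pte_exec(pteval))
			return;
		if (!pfn_valid(pfn))
			return;

		page = pfn_to_page(pfn);
		if (cache_is_vipt_aliasing())
			mapping = page_mapping_file(page);
		else
			mapping = NULL;

		if (!test_and_set_bit(PG_dcache_clean, &page->flags))
			__flush_dcache_page(mapping, page);

		if (pte_exec(pteval))
			__flush_icache_all();	/* let the I-cache see it */
	}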
297 * Ensure cache coherency between kernel mapping and userspace mapping
301 * - VIPT non-aliasing cache: fully coherent so nothing required.
302 * - VIVT: fully aliasing, so we need to handle every alias in our current VM view.
304 * - VIPT aliasing: need to handle one alias in our current VM view.
317 struct address_space *mapping; in flush_dcache_page() local
327 if (test_bit(PG_dcache_clean, &page->flags)) in flush_dcache_page()
328 clear_bit(PG_dcache_clean, &page->flags); in flush_dcache_page()
332 mapping = page_mapping_file(page); in flush_dcache_page()
335 mapping && !page_mapcount(page)) in flush_dcache_page()
336 clear_bit(PG_dcache_clean, &page->flags); in flush_dcache_page()
338 __flush_dcache_page(mapping, page); in flush_dcache_page()
339 if (mapping && cache_is_vivt()) in flush_dcache_page()
340 __flush_dcache_aliases(mapping, page); in flush_dcache_page()
341 else if (mapping) in flush_dcache_page()
343 set_bit(PG_dcache_clean, &page->flags); in flush_dcache_page()
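Pulling the flush_dcache_page() fragments (lines 317-343) together, the decision flow is: on a non-aliasing VIPT cache just note the page may be dirty-cached; otherwise either defer (a page cache page not yet mapped into userspace) or flush now, handling VIVT aliases or the I-cache as the list above describes. A sketch, assuming cache_ops_need_broadcast() as in arch/arm:

	void flush_dcache_page(struct page *page)
	{
		struct address_space *mapping;

		if (!cache_ops_need_broadcast() && cache_is_vipt_nonaliasing()) {
			/* Coherent D-cache: just note it may be dirty. */
			if (test_bit(PG_dcache_clean, &page->flags))
				clear_bit(PG_dcache_clean, &page->flags);
			return;
		}

		mapping = page_mapping_file(page);

		if (!cache_ops_need_broadcast() &&
		    mapping && !page_mapcount(page)) {
			/* Not mapped into userspace yet: defer the flush. */
			clear_bit(PG_dcache_clean, &page->flags);
		} else {
			__flush_dcache_page(mapping, page);
			if (mapping && cache_is_vivt())
				__flush_dcache_aliases(mapping, page);
			else if (mapping)
				__flush_icache_all();
			set_bit(PG_dcache_clean, &page->flags);
		}
	}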
349 * Ensure cache coherency for the kernel mapping of this page. We can
353 * space mappings, this is a no-op since the page was already marked
360 struct address_space *mapping; in flush_kernel_dcache_page() local
362 mapping = page_mapping_file(page); in flush_kernel_dcache_page()
364 if (!mapping || mapping_mapped(mapping)) { in flush_kernel_dcache_page()
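flush_kernel_dcache_page() (lines 360-364) only needs to act on caches that can alias; in that case, if the page has no file mapping, or the mapping is visible in userspace, the kernel alias is flushed immediately. A sketch for the low-memory case (highmem would need a kmap first):

	void flush_kernel_dcache_page(struct page *page)
	{
		if (cache_is_vivt() || cache_is_vipt_aliasing()) {
			struct address_space *mapping;

			mapping = page_mapping_file(page);

			if (!mapping || mapping_mapped(mapping)) {
				/* Flush dirty kernel cache lines now so a
				 * user alias cannot miss them later. */
				void *addr = page_address(page);

				if (addr)
					__cpuc_flush_dcache_area(addr, PAGE_SIZE);
			}
		}
	}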
383 * can safely access the data. The expected sequence is:
386 * -> flush_anon_page
394 /* VIPT non-aliasing caches need do nothing */ in __flush_anon_page()
399 * Write back and invalidate userspace mapping. in __flush_anon_page()
414 * Invalidate kernel mapping. No data should be contained in __flush_anon_page()
415 * in this mapping of the page. FIXME: this is overkill in __flush_anon_page()
416 * since we actually ask for a write-back and invalidate. in __flush_anon_page()
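The closing fragments (lines 394-416) describe __flush_anon_page(): nothing to do on non-aliasing VIPT, a user-alias flush otherwise, then a clean+invalidate of the kernel mapping that the FIXME itself calls overkill. Reconstructed as a sketch:

	void __flush_anon_page(struct vm_area_struct *vma, struct page *page,
			       unsigned long vmaddr)
	{
		unsigned long pfn;

		/* VIPT non-aliasing caches need do nothing */
		if (cache_is_vipt_nonaliasing())
			return;

		/* Write back and invalidate the userspace mapping. */
		pfn = page_to_pfn(page);
		if (cache_is_vivt()) {
			flush_cache_page(vma, vmaddr, pfn);
		} else {
			/* Aliasing VIPT: flushing one alias congruent
			 * with the user address is enough. */
			flush_pfn_alias(pfn, vmaddr);
			__flush_icache_all();
		}

		/* Invalidate the kernel mapping; no data should live
		 * there (per the FIXME, this clean+invalidate is
		 * stronger than strictly required). */
		__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
	}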