
1 /* SPDX-License-Identifier: GPL-2.0-only */
5 * Copyright (C) 1999-2002 Russell King
12 #include <asm/glue-cache.h>
17 #define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
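A virtual address's cache colour is simply which page-sized slot it occupies within an SHMLBA-sized window; on an aliasing VIPT D-cache, two mappings of the same physical page are only safe if they share a colour. A standalone sketch of the computation, assuming PAGE_SHIFT = 12 and SHMLBA = 4 * PAGE_SIZE (the real values come from the kernel headers):

/*
 * Illustrative only: user-space recomputation of CACHE_COLOUR() with
 * assumed values of PAGE_SHIFT and SHMLBA.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define SHMLBA		(4 * PAGE_SIZE)

#define CACHE_COLOUR(vaddr)	((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)

int main(void)
{
	unsigned long a = 0x40001000UL;	/* colour 1 */
	unsigned long b = 0x40003000UL;	/* colour 3 */

	printf("colour(a)=%lu colour(b)=%lu\n", CACHE_COLOUR(a), CACHE_COLOUR(b));
	return 0;
}

Run as an ordinary program this prints colour(a)=1 colour(b)=3, i.e. those two user addresses could not safely share data through a common physical page on an aliasing cache without explicit maintenance.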
29 * The arch/arm/mm/cache-*.S and arch/arm/mm/proc-*.S files
35 * See Documentation/core-api/cachetlb.rst for more information.
37 * effects are cache-type (VIVT/VIPT/PIPT) specific.
42 * Currently only needed for cache-v6.S and cache-v7.S, see
52 * inner shareable and invalidate the I-cache.
65 * - start - user start address (inclusive, page aligned)
66 * - end - user end address (exclusive, page aligned)
67 * - flags - vma->vm_flags field
72 * region described by start, end. If you have non-snooping
74 * - start - virtual start address
75 * - end - virtual end address
80 * region described by start, end. If you have non-snooping
82 * - start - virtual start address
83 * - end - virtual end address
88 * - kaddr - page address
89 * - size - region size
97 * - start - virtual start address
98 * - end - virtual end address
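Taken together, the prototypes documented above (flush_user_range, the coherent ranges, flush_kern_dcache_area and the dmac_* ranges) are the methods of the per-CPU cache vector. A rough sketch of that table for the MULTI_CACHE case, reconstructed from memory rather than copied from any particular kernel version (with a single cache type the same names are resolved at build time through glue-cache.h instead):

#include <linux/types.h>

struct cpu_cache_fns {
	void (*flush_icache_all)(void);
	void (*flush_kern_all)(void);
	void (*flush_kern_louis)(void);
	void (*flush_user_all)(void);
	void (*flush_user_range)(unsigned long start, unsigned long end,
				 unsigned int flags);
	void (*coherent_kern_range)(unsigned long start, unsigned long end);
	int  (*coherent_user_range)(unsigned long start, unsigned long end);
	void (*flush_kern_dcache_area)(void *kaddr, size_t size);
	void (*dma_map_area)(const void *start, size_t size, int dir);
	void (*dma_unmap_area)(const void *start, size_t size, int dir);
	void (*dma_flush_range)(const void *start, const void *end);
};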
135 * These are private to the dma-mapping API. Do not use directly.
154 * These are private to the dma-mapping API. Do not use directly.
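Because these dmac_*/dma_map_area helpers are private to the dma-mapping implementation, drivers are expected to use the generic DMA API, which performs whatever D-cache maintenance the transfer direction requires. A minimal hedged sketch; 'example_start_tx', 'dev', 'buf' and 'len' are placeholders for a real driver's state:

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_start_tx(struct device *dev, void *buf, size_t len)
{
	dma_addr_t handle;

	/* The DMA API does the required cache clean/invalidate internally. */
	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... hand 'handle' to the device; once the transfer completes: ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
	return 0;
}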
179 /* Invalidate I-cache */
184 /* Invalidate I-cache inner shareable */
190 * Optimized __flush_icache_all for the common cases. Note that UP ARMv7
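Both fragments above are about whole-I-cache invalidation: the generic variant acts on the local CPU only, while the v7 SMP variant broadcasts across the inner shareable domain (UP ARMv7 falls back to the generic one). A sketch of the two macros, reconstructed from memory using the architectural ICIALLU/ICIALLUIS CP15 operations; the macro names are the ones this header is believed to use:

/* Invalidate the entire I-cache on this CPU (ICIALLU). */
#define __flush_icache_all_generic()				\
	asm("mcr	p15, 0, %0, c7, c5, 0"			\
	    : : "r" (0));

/* Invalidate the entire I-cache across the inner shareable domain (ICIALLUIS). */
#define __flush_icache_all_v7_smp()				\
	asm("mcr	p15, 0, %0, c7, c1, 0"			\
	    : : "r" (0));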
227	struct mm_struct *mm = vma->vm_mm;	/* in vivt_flush_cache_range() */
231			vma->vm_flags);	/* in vivt_flush_cache_range() */
237	struct mm_struct *mm = vma->vm_mm;	/* in vivt_flush_cache_page() */
241	__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);	/* in vivt_flush_cache_page() */
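The fragments at file lines 227-241 belong to the inline VIVT-only helpers. Filled out roughly (the mm_cpumask() guard and the page rounding are reconstructed from memory, so treat this as a sketch rather than the exact source):

static inline void vivt_flush_cache_range(struct vm_area_struct *vma,
					  unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	/* Only flush if this mm has been active on the current CPU. */
	if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
		__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
					vma->vm_flags);
}

static inline void vivt_flush_cache_page(struct vm_area_struct *vma,
					 unsigned long user_addr, unsigned long pfn)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm && cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
		unsigned long addr = user_addr & PAGE_MASK;

		__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
	}
}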
281 * cache page at virtual address page->virtual.
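flush_dcache_page() is the hook the rest of the kernel calls after writing to a page-cache page that user space may also have mapped, so every user-visible alias sees the new data. A minimal hedged usage sketch; 'example_fill_page' and its arguments are placeholders for a real caller:

#include <linux/highmem.h>
#include <linux/string.h>

static void example_fill_page(struct page *page, const void *src, size_t len)
{
	void *dst = kmap_local_page(page);

	memcpy(dst, src, len);
	kunmap_local(dst);

	/* Push the new contents out to any user-space mappings of the page. */
	flush_dcache_page(page);
}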
318 #define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
319 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
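The lock/unlock pair brackets walks over a file's user mappings while aliases of a page are flushed. Roughly the pattern used by the alias-flushing code in arch/arm/mm/flush.c, reconstructed from memory and simplified (the real code also skips mappings that do not belong to the current mm or are not shared):

#include <linux/mm.h>
#include <linux/pagemap.h>

static void example_flush_aliases(struct address_space *mapping,
				  struct page *page)
{
	struct vm_area_struct *vma;
	pgoff_t pgoff = page->index;

	flush_dcache_mmap_lock(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long addr = vma->vm_start +
				     ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);

		/* Flush the user alias of this page in each mapping. */
		flush_cache_page(vma, addr, page_to_pfn(page));
	}
	flush_dcache_mmap_unlock(mapping);
}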
329 * vmalloc, ioremap etc) in kernel space for pages. On non-VIPT
330 * caches, since the direct-mappings of these pages may contain cached
353 * Memory synchronization helpers for mixed cached vs non cached accesses.
367 * adjacent non-cached writer, each state variable must be located to
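In practice that means padding each shared state variable out to the cache writeback granule and cleaning it immediately after every cached update. A sketch, assuming the sync_cache_w() helper and the __CACHE_WRITEBACK_GRANULE constant defined near this comment in the header:

/* One flag written with the cache on, later read by a CPU running uncached. */
struct boot_flag {
	unsigned long val;
} __aligned(__CACHE_WRITEBACK_GRANULE);	/* keep it on its own cache line */

static struct boot_flag boot_flag;

static void publish_boot_flag(unsigned long v)
{
	boot_flag.val = v;
	sync_cache_w(&boot_flag.val);	/* clean the write to main memory right away */
}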
428 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
431 * - Clear the SCTLR.C bit to prevent further cache allocations
432 * - Flush the desired level of cache
433 * - Clear the ACTLR "SMP" bit to disable local coherency
438 * WARNING -- After this has been called:
440 * - No ldrex/strex (and similar) instructions must be used.
441 * - The CPU is obviously no longer coherent with the other CPUs.
442 * - This is unlikely to work as expected if Linux is running non-secure.
446 * - This is known to apply to several ARMv7 processor implementations,
449 * - The clobber list is dictated by the call to v7_flush_dcache_*.
452 * CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
453 * trampolines are inserted by the linker and to keep sp 64-bit aligned.
457 ".arch armv7-a \n\t" \