Lines Matching full:stride
241 * __flush_tlb_range(vma, start, end, stride, last_level, tlb_level)
245 * determined by 'stride' and only affect any walk-cache entries
362 * @stride: Flush granularity
370 * entries one by one at the granularity of 'stride'. If the TLB
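In other words, 'stride' is the distance between the entries being invalidated, so a region mapped with 2MiB block entries needs one TLBI per block rather than one per 4KiB page. A minimal caller sketch, assuming a PMD-mapped VMA (arm64's flush_pmd_tlb_range() wrapper is defined along these lines):

/* Sketch, not from the listing: one TLBI per 2MiB block.
 * last_level = false also drops intermediate walk-cache entries;
 * tlb_level = 2 says the leaf entries sit at level 2. */
static inline void flush_pmd_range_sketch(struct vm_area_struct *vma,
					  unsigned long addr,
					  unsigned long end)
{
	__flush_tlb_range(vma, addr, end, PMD_SIZE, false, 2);
}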
389 #define __flush_tlb_range_op(op, start, pages, stride, \    [argument]
407 __flush_start += stride; \
408 __flush_pages -= stride >> PAGE_SHIFT; \
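The two loop-advance lines above are the tail of the macro's per-entry loop: step forward by 'stride' bytes per invalidation and retire stride/PAGE_SIZE pages from the remaining count. A simplified sketch of that loop, where invalidate_one() is a hypothetical stand-in for the __tlbi(op, ...) issue (the real macro also has a TLBI-by-range fast path on FEAT_TLBIRANGE hardware, elided here):

/* invalidate_one() is a hypothetical stand-in for __tlbi(op, ...). */
static void flush_loop_sketch(unsigned long start, unsigned long pages,
			      unsigned long stride, unsigned long asid)
{
	while (pages > 0) {
		invalidate_one(start, asid);	/* one TLBI per stride */
		start += stride;
		pages -= stride >> PAGE_SHIFT;
	}
}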
426 #define __flush_s2_tlb_range_op(op, start, pages, stride, tlb_level) \    [argument]
427 __flush_tlb_range_op(op, start, pages, stride, 0, tlb_level, false, kvm_lpa2_is_enabled());
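The stage-2 wrapper pins the fields that do not apply to guest IPA invalidation: stage-2 entries are not ASID-tagged (the hard-coded 0) and have no user variant (the hard-coded false), while the LPA2 encoding follows kvm_lpa2_is_enabled(). A hedged usage sketch, assuming KVM's broadcast IPA invalidation op ipas2e1is and hypothetical ipa_start/ipa_pages values:

/* Sketch, not from the listing: invalidate a run of guest IPAs at stage 2. */
__flush_s2_tlb_range_op(ipas2e1is, ipa_start, ipa_pages, stride, tlb_level);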
430 unsigned long end, unsigned long pages, unsigned long stride)    [in __flush_tlb_range_limit_excess(), argument]
439 (end - start) >= (MAX_DVM_OPS * stride)) ||
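Only one clause of the test matches 'stride'; reconstructed in full (a sketch: system_supports_tlb_range() and MAX_TLBI_RANGE_PAGES are the arm64 identifiers this check is built from, but the surrounding lines are not in this listing):

static inline bool limit_excess_sketch(unsigned long start, unsigned long end,
				       unsigned long pages, unsigned long stride)
{
	/* Without FEAT_TLBIRANGE, each stride costs one broadcast DVM
	 * operation, so cap the count at MAX_DVM_OPS; with range ops,
	 * cap at what a single range TLBI can encode. Past either limit,
	 * tell the caller a full flush is the cheaper option. */
	if ((!system_supports_tlb_range() &&
	     (end - start) >= (MAX_DVM_OPS * stride)) ||
	    pages > MAX_TLBI_RANGE_PAGES)
		return true;
	return false;
}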
448 unsigned long stride, bool last_level,    [in __flush_tlb_range_nosync(), argument]
453 start = round_down(start, stride);
454 end = round_up(end, stride);
457 if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
466 __flush_tlb_range_op(vale1is, start, pages, stride, asid,
469 __flush_tlb_range_op(vae1is, start, pages, stride, asid,
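Pieced together from the matches above, the function reads roughly as follows. This is a reconstruction, not the verbatim source; in particular the ASID extraction and the lpa2_is_enabled() argument are filled in from the surrounding code rather than from this listing:

static void range_nosync_sketch(struct mm_struct *mm, unsigned long start,
				unsigned long end, unsigned long stride,
				bool last_level, int tlb_level)
{
	unsigned long asid, pages;

	/* Align the edges so partial strides are fully covered. */
	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/* Too many entries: dropping the whole ASID is cheaper. */
	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_mm(mm);
		return;
	}

	asid = ASID(mm);
	if (last_level)		/* leaf entries only, keep walk-cache entries */
		__flush_tlb_range_op(vale1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());
	else			/* also drop intermediate walk-cache entries */
		__flush_tlb_range_op(vae1is, start, pages, stride, asid,
				     tlb_level, true, lpa2_is_enabled());
}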
477 unsigned long stride, bool last_level,    [in __flush_tlb_range(), argument]
480 __flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
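The non-_nosync wrapper only adds the barrier that waits for the broadcast invalidations to complete; a sketch, assuming the usual dsb(ish) placement:

__flush_tlb_range_nosync(vma->vm_mm, start, end, stride,
			 last_level, tlb_level);
dsb(ish);	/* wait for the broadcast invalidations to complete */

Callers that batch several flushes can use the _nosync variant directly and pay for a single completion barrier at the end.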
499 const unsigned long stride = PAGE_SIZE;    [in flush_tlb_kernel_range(), local]
502 start = round_down(start, stride);
503 end = round_up(end, stride);
506 if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
512 __flush_tlb_range_op(vaale1is, start, pages, stride, 0,
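Kernel mappings are global rather than ASID-tagged, so this path passes ASID 0 and uses the all-ASIDs op vaale1is at a fixed PAGE_SIZE stride. Reconstructed flow (a sketch; the flush_tlb_all() fallback and the barrier sequence are assumptions from the surrounding code):

static void kernel_range_sketch(unsigned long start, unsigned long end)
{
	const unsigned long stride = PAGE_SIZE;
	unsigned long pages;

	start = round_down(start, stride);
	end = round_up(end, stride);
	pages = (end - start) >> PAGE_SHIFT;

	/* Oversized range: flush the whole TLB instead. */
	if (__flush_tlb_range_limit_excess(start, end, pages, stride)) {
		flush_tlb_all();
		return;
	}

	dsb(ishst);	/* publish the page-table update first */
	__flush_tlb_range_op(vaale1is, start, pages, stride, 0,
			     TLBI_TTL_UNKNOWN, false, lpa2_is_enabled());
	dsb(ish);	/* wait for completion */
	isb();
}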