Lines matching "active" and "semi" (Linux, mm/mmu_gather.c; context between hits elided)
In tlb_next_batch():

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
	...
	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
	...
	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;
	...
	tlb->active->next = batch;
	tlb->active = batch;
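
These hits trace how the mmu_gather batch list grows: advance to an already-chained batch if one exists, otherwise allocate a new one, refusing once a count limit is hit so the caller flushes instead. A minimal userspace sketch of that pattern, with simplified stand-ins for the kernel's struct mmu_gather_batch (the real definition lives in include/asm-generic/tlb.h); the sizes here are assumptions, not the kernel's values:

#include <stdbool.h>
#include <stdlib.h>

/* Simplified stand-ins; both limits are assumed, not the kernel's values. */
#define MAX_GATHER_BATCH	64
#define MAX_GATHER_BATCH_COUNT	16

struct batch {
	struct batch	*next;
	unsigned int	nr;		/* pages currently queued */
	unsigned int	max;		/* capacity of pages[] */
	void		*pages[MAX_GATHER_BATCH];
};

struct gather {
	struct batch	local;		/* embedded first batch, like tlb->local */
	struct batch	*active;	/* batch currently being filled */
	unsigned int	batch_count;
};

/* Mirrors the tlb_next_batch() logic above: reuse an already allocated
 * batch if one is chained on, otherwise allocate a new one, and refuse
 * once the count limit is reached so the caller flushes instead. */
static bool next_batch(struct gather *g)
{
	struct batch *batch = g->active;

	if (batch->next) {
		g->active = batch->next;
		return true;
	}

	if (g->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = calloc(1, sizeof(*batch));
	if (!batch)
		return false;

	g->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	g->active->next = batch;	/* chain and switch to the new batch */
	g->active = batch;
	return true;
}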
In tlb_batch_pages_flush():

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
		free_pages_and_swap_cache(batch->pages, batch->nr);
		batch->nr = 0;
	}
	tlb->active = &tlb->local;
In tlb_batch_list_free():

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		...
	}
	tlb->local.next = NULL;
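
Flushing and freeing are deliberately separate: tlb_batch_pages_flush() empties every batch but keeps the allocations for reuse, while tlb_batch_list_free() tears down only the dynamically allocated tail, since tlb->local is embedded in the mmu_gather itself. Continuing the sketch above (same struct gather/struct batch types; release_pages() is a hypothetical stand-in for free_pages_and_swap_cache()):

/* Hypothetical stand-in for free_pages_and_swap_cache(). */
static void release_pages(void **pages, unsigned int nr)
{
	(void)pages;
	(void)nr;
}

/* Like tlb_batch_pages_flush(): drop the queued pages, keep the
 * batches allocated, and rewind to the embedded first batch. */
static void batch_pages_flush(struct gather *g)
{
	struct batch *batch;

	for (batch = &g->local; batch && batch->nr; batch = batch->next) {
		release_pages(batch->pages, batch->nr);
		batch->nr = 0;
	}
	g->active = &g->local;
}

/* Like tlb_batch_list_free(): free only the allocated chain; the
 * embedded g->local must never be passed to free(). */
static void batch_list_free(struct gather *g)
{
	struct batch *batch, *next;

	for (batch = g->local.next; batch; batch = next) {
		next = batch->next;
		free(batch);
	}
	g->local.next = NULL;
}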
In __tlb_remove_page_size():

	VM_BUG_ON(!tlb->end);
	...
	VM_WARN_ON(tlb->page_size != page_size);
	...
	batch = tlb->active;
	...
	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		...
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max, page);
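
The control flow here is "append, then report fullness": the page is stored first, and the return value tells the caller whether every batch is now full and a flush is required before queueing more. In the same userspace sketch:

#include <assert.h>

/* Like __tlb_remove_page_size(): queue one page; returns true when no
 * batch space remains and the caller must flush before retrying. */
static bool gather_remove_page(struct gather *g, void *page)
{
	struct batch *batch = g->active;

	batch->pages[batch->nr++] = page;
	if (batch->nr == batch->max) {
		if (!next_batch(g))
			return true;	/* full: force a flush */
		batch = g->active;
	}
	assert(batch->nr <= batch->max);	/* VM_BUG_ON_PAGE analogue */

	return false;
}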
In __tlb_remove_table_free():

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);
From the comment block introducing semi-RCU table freeing:

	 * Semi RCU freeing of the page directories.
	...
	 * gup_fast() and other software pagetable walkers do a lockless page-table
	...
In tlb_remove_table_sync_one():

	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	...
	 * It is however sufficient for software page-table walkers that rely on
	...
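
What makes this only "semi" RCU: the synchronization is an IPI broadcast, not a grace period. Because gup_fast()-style walkers keep IRQs disabled for the entire lockless walk, the IPI cannot be delivered to a CPU mid-walk, so completion of the broadcast proves every walk that could still see the table has finished. The shape of the mechanism, paraphrased from the same file (details vary across kernel versions):

/* The payload is intentionally empty: delivery of the interrupt is
 * itself the synchronization event. */
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/* Wait until every CPU has taken the IPI; any IRQs-disabled
	 * walk that started before this point has now drained. */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}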
In tlb_remove_table_free():

	call_rcu(&batch->rcu, tlb_remove_table_rcu);
In tlb_table_invalidate():

	 * Invalidate page-table caches used by hardware walkers. Then
	 * we still need to RCU-sched wait while freeing the pages
	 * because software walkers can still be in-flight.
In tlb_table_flush():

	struct mmu_table_batch **batch = &tlb->batch;
In tlb_remove_table():

	struct mmu_table_batch **batch = &tlb->batch;
	...
		(*batch)->nr = 0;
	...
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
	...
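
Taken together, the table path accumulates page tables in a struct mmu_table_batch, hands a full batch to call_rcu() (whose callback walks it, as in __tlb_remove_table_free() above), and falls back to an immediate synchronized free when the batch cannot be allocated. A compact userspace model of that pipeline, where free() stands in for both the RCU deferral and __tlb_remove_table(), and MAX_TABLE_BATCH is an assumed size (the kernel derives it from the page size):

#include <stdlib.h>

#define MAX_TABLE_BATCH	32	/* assumed; the kernel sizes this from a page */

struct table_batch {
	/* the kernel's version starts with a struct rcu_head for call_rcu() */
	unsigned int	nr;
	void		*tables[MAX_TABLE_BATCH];
};

/* Stand-in for call_rcu(&batch->rcu, tlb_remove_table_rcu): pretend
 * the grace period has already elapsed and free every queued table. */
static void table_batch_free(struct table_batch *batch)
{
	for (unsigned int i = 0; i < batch->nr; i++)
		free(batch->tables[i]);	/* __tlb_remove_table() analogue */
	free(batch);
}

/* Like tlb_remove_table(): lazily allocate the batch, append, and
 * dispatch once MAX_TABLE_BATCH entries have accumulated. */
static void remove_table(struct table_batch **batchp, void *table)
{
	if (*batchp == NULL) {
		*batchp = malloc(sizeof(**batchp));
		if (*batchp == NULL) {
			/* kernel fallback: sync all CPUs, free immediately */
			free(table);
			return;
		}
		(*batchp)->nr = 0;
	}

	(*batchp)->tables[(*batchp)->nr++] = table;
	if ((*batchp)->nr == MAX_TABLE_BATCH) {
		table_batch_free(*batchp);
		*batchp = NULL;
	}
}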
In tlb_table_init():

	tlb->batch = NULL;
Kernel-doc for tlb_gather_mmu():

	 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
	...
	 * @start: start of the region that will be removed from the page-table
	 * @end: end of the region that will be removed from the page-table
	...
	 * Called to initialize an (on-stack) mmu_gather structure for page-table
	 * tear-down from @mm. The @start and @end are set to 0 and -1
	...
In tlb_gather_mmu():

	tlb->mm = mm;
	...
	tlb->fullmm = !(start | (end+1));
	...
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;
	...
	tlb->page_size = 0;
	...
	inc_tlb_flush_pending(tlb->mm);
Kernel-doc for tlb_finish_mmu():

	 * tlb_finish_mmu - finish an mmu_gather structure
	...
	 * @start: start of the region that will be removed from the page-table
	 * @end: end of the region that will be removed from the page-table
In tlb_finish_mmu():

	 * under non-exclusive lock (e.g., mmap_lock read-side) but defer TLB
	...
	if (mm_tlb_flush_nested(tlb->mm)) {
		...
		 * On x86 non-fullmm doesn't yield significant difference
		...
		tlb->fullmm = 1;
		...
		tlb->freed_tables = 1;
	}
	...
	dec_tlb_flush_pending(tlb->mm);
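
For context, the calling convention documented above (older kernels, where tlb_gather_mmu() and tlb_finish_mmu() still take start/end explicitly) brackets the unmap work between the two functions. A sketch of the shape of a caller; the middle steps are illustrative, not a verbatim call site:

	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm, start, end);
	/* ... clear PTEs, feeding freed pages to the gather via
	 * __tlb_remove_page_size() and freed page tables via
	 * tlb_remove_table() ... */
	tlb_finish_mmu(&tlb, start, end);	/* flush TLBs, free the batches */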