Lines matching refs: batch (all hits fall in mm/mmu_gather.c)
In tlb_next_batch() (batch is a local):

        struct mmu_gather_batch *batch;
        ...
        batch = tlb->active;
        if (batch->next) {
                tlb->active = batch->next;
        ...
        batch = (void *)__get_free_page(GFP_NOWAIT);
        if (!batch)
        ...
        batch->next = NULL;
        batch->nr = 0;
        batch->max = MAX_GATHER_BATCH;
        ...
        tlb->active->next = batch;
        tlb->active = batch;
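The hits above trace the batch-chaining logic: reuse an already-linked next batch if one exists, otherwise allocate a fresh page-sized batch with GFP_NOWAIT (so the allocation may fail under memory pressure) and splice it in behind the active one. A minimal userspace sketch of the same pattern, with illustrative names (struct gather, next_batch, and BATCH_MAX are not the kernel's):

        #include <stdbool.h>
        #include <stdlib.h>

        #define BATCH_MAX 64                    /* illustrative; the kernel sizes a batch to one page */

        struct batch {
                struct batch *next;
                unsigned int nr, max;
                void *slots[BATCH_MAX];
        };

        struct gather {
                struct batch local;             /* embedded first batch, like tlb->local */
                struct batch *active;
        };

        /* Mirrors the tlb_next_batch() flow: reuse a spare batch, else allocate and link. */
        static bool next_batch(struct gather *g)
        {
                struct batch *batch = g->active;

                if (batch->next) {              /* a spare batch already exists: reuse it */
                        g->active = batch->next;
                        return true;
                }

                batch = malloc(sizeof(*batch)); /* kernel: __get_free_page(GFP_NOWAIT) */
                if (!batch)
                        return false;           /* caller flushes early instead of growing */

                batch->next = NULL;
                batch->nr = 0;
                batch->max = BATCH_MAX;

                g->active->next = batch;
                g->active = batch;
                return true;
        }

Returning false on allocation failure, rather than blocking, is the point of the design: the caller can always fall back to flushing what it has gathered so far.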
In tlb_flush_rmap_batch() (batch is an argument):

        static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)
        ...
                struct encoded_page **pages = batch->encoded_pages;
        ...
                for (int i = 0; i < batch->nr; i++) {
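The encoded_pages array holds tagged pointers: page pointers are aligned, so their low bits are free to carry per-entry flags such as the delayed-rmap marker this loop tests. A hedged sketch of that encode/decode scheme (FLAG_DELAY_RMAP and handle_delayed() are stand-ins, not the kernel's names):

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        #define FLAG_MASK       0x3UL   /* low two bits of an aligned pointer are spare */
        #define FLAG_DELAY_RMAP 0x1UL   /* stand-in for ENCODED_PAGE_BIT_DELAY_RMAP */

        static void *encode_ptr(void *p, unsigned long flags)
        {
                assert(((uintptr_t)p & FLAG_MASK) == 0);        /* pointer must be aligned */
                return (void *)((uintptr_t)p | (flags & FLAG_MASK));
        }

        static unsigned long encoded_flags(void *enc)
        {
                return (uintptr_t)enc & FLAG_MASK;
        }

        static void *encoded_ptr(void *enc)
        {
                return (void *)((uintptr_t)enc & ~FLAG_MASK);
        }

        static void handle_delayed(void *page)                  /* hypothetical callback */
        {
                printf("delayed rmap removal for %p\n", page);
        }

        /* tlb_flush_rmap_batch()-style walk: act only on entries queued with the flag. */
        static void flush_delayed(void **pages, unsigned int nr)
        {
                for (unsigned int i = 0; i < nr; i++)
                        if (encoded_flags(pages[i]) & FLAG_DELAY_RMAP)
                                handle_delayed(encoded_ptr(pages[i]));
        }

        int main(void)
        {
                static int page;                /* stands in for a struct page */
                void *entries[1] = { encode_ptr(&page, FLAG_DELAY_RMAP) };

                flush_delayed(entries, 1);
                return 0;
        }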
In __tlb_batch_free_encoded_pages() (batch is an argument):

        static void __tlb_batch_free_encoded_pages(struct mmu_table_batch *batch)
        ...
                struct encoded_page **pages = batch->encoded_pages;
        ...
                while (batch->nr) {
                ...
                        nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr);
                        ...
                             nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE;
                        ...
                        batch->nr -= nr;
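The freeing loop is deliberately chunked: at most MAX_NR_FOLIOS_PER_FREE entries (or, on the branch shown at the for-loop condition, that many pages, for when per-page work like poisoning makes cost proportional to page count) go to the freeing routine per pass, so the loop can reschedule between chunks. A simplified userspace sketch of the chunking, assuming stand-in helpers free_chunk() and yield_cpu() in place of free_pages_and_swap_cache() and cond_resched():

        #include <stdio.h>

        #define MAX_PER_FREE 512u       /* stand-in for MAX_NR_FOLIOS_PER_FREE */

        static void free_chunk(void **pages, unsigned int nr)   /* stand-in */
        {
                printf("freeing %u entries starting at %p\n", nr, (void *)pages);
        }

        static void yield_cpu(void)     /* stand-in for cond_resched() */
        {
        }

        /* __tlb_batch_free_encoded_pages()-style loop: free in bounded chunks and
         * yield between them so one huge batch cannot monopolize the CPU. */
        static void free_batch(void **pages, unsigned int nr)
        {
                while (nr) {
                        unsigned int n = nr < MAX_PER_FREE ? nr : MAX_PER_FREE;

                        free_chunk(pages, n);
                        pages += n;     /* advance past the chunk just freed */
                        nr -= n;
                        yield_cpu();
                }
        }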
In tlb_batch_pages_flush() (batch is a local):

        struct mmu_gather_batch *batch;
        ...
        for (batch = &tlb->local; batch && batch->nr; batch = batch->next)
                __tlb_batch_free_encoded_pages(batch);
In tlb_batch_list_free() (batch is a local):

        struct mmu_gather_batch *batch, *next;
        ...
        for (batch = tlb->local.next; batch; batch = next) {
                next = batch->next;
                free_pages((unsigned long)batch, 0);
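tlb_batch_pages_flush() walks the chain from the embedded tlb->local batch and empties each one; tlb_batch_list_free() then frees the dynamically allocated batch pages themselves. Note how it saves next before freeing, since batch may not be dereferenced once released. A compact sketch of that teardown (types are a minimal echo of the first sketch's, not the kernel's):

        #include <stdlib.h>

        struct batch {
                struct batch *next;
                unsigned int nr;
        };

        struct gather {
                struct batch local;     /* embedded first batch; never freed */
        };

        /* tlb_batch_list_free()-style teardown: grab 'next' before freeing, because
         * 'batch' is invalid once it has been returned to the allocator. */
        static void batch_list_free(struct gather *g)
        {
                struct batch *batch, *next;

                for (batch = g->local.next; batch; batch = next) {
                        next = batch->next;
                        free(batch);    /* kernel: free_pages((unsigned long)batch, 0) */
                }
                g->local.next = NULL;
        }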
In __tlb_remove_folio_pages_size() (batch is a local):

        struct mmu_gather_batch *batch;
        ...
        batch = tlb->active;
        ...
        batch->encoded_pages[batch->nr++] = encode_page(page, flags);
        ...
        batch->encoded_pages[batch->nr++] = encode_page(page, flags);
        batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages);
        ...
        if (batch->nr >= batch->max - 1) {
        ...
                batch = tlb->active;
        ...
        VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page);
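A multi-page entry occupies two consecutive slots: the encoded page pointer (flagged so the reader knows a count follows) and then the page count from encode_nr_pages(). That is why the fullness test reserves two slots (batch->nr >= batch->max - 1) rather than one. A sketch of the two-slot convention, with illustrative names (SLOT_NR_NEXT stands in for ENCODED_PAGE_BIT_NR_PAGES_NEXT):

        #include <stdbool.h>
        #include <stdint.h>

        #define SLOT_NR_NEXT 0x2UL      /* stand-in for ENCODED_PAGE_BIT_NR_PAGES_NEXT */

        /* Queue one slot for a single page, or two when a count follows; return
         * true when the caller must chain a new batch because fewer than two
         * free slots remain (mirroring batch->nr >= batch->max - 1). */
        static bool queue_pages(uintptr_t *slots, unsigned int *nr, unsigned int max,
                                void *page, unsigned int nr_pages)
        {
                if (nr_pages == 1) {
                        slots[(*nr)++] = (uintptr_t)page;
                } else {
                        slots[(*nr)++] = (uintptr_t)page | SLOT_NR_NEXT;
                        slots[(*nr)++] = nr_pages;      /* second slot holds the count */
                }
                return *nr >= max - 1;
        }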
In __tlb_remove_table_free() (batch is an argument):

        static void __tlb_remove_table_free(struct mmu_table_batch *batch)
        ...
                for (i = 0; i < batch->nr; i++)
                        __tlb_remove_table(batch->tables[i]);
        ...
                free_page((unsigned long)batch);
In tlb_remove_table_free() (batch is an argument; two variants, RCU-deferred and direct):

        static void tlb_remove_table_free(struct mmu_table_batch *batch)
        ...
                call_rcu(&batch->rcu, tlb_remove_table_rcu);
        ...
        static void tlb_remove_table_free(struct mmu_table_batch *batch)
        ...
                __tlb_remove_table_free(batch);
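The two variants differ only in when the batch dies: when lockless page-table walkers may still be traversing the tables, the free must be deferred through an RCU grace period; otherwise it can happen immediately. A hedged kernel-style sketch of the deferral (struct table_batch and its callback are illustrative; call_rcu() and container_of() are the real APIs):

        #include <linux/kernel.h>
        #include <linux/rcupdate.h>
        #include <linux/slab.h>

        struct table_batch {                    /* illustrative echo of mmu_table_batch */
                struct rcu_head rcu;            /* links the object into RCU's queue */
                unsigned int nr;
                void *tables[];
        };

        /* Runs only after every pre-existing RCU read-side section has finished,
         * so no lockless walker can still hold a pointer into these tables. */
        static void table_batch_rcu_cb(struct rcu_head *head)
        {
                struct table_batch *batch = container_of(head, struct table_batch, rcu);

                /* ...free each batch->tables[i] here, as __tlb_remove_table_free() does... */
                kfree(batch);
        }

        static void table_batch_free_deferred(struct table_batch *batch)
        {
                call_rcu(&batch->rcu, table_batch_rcu_cb);      /* deferred, like the RCU variant */
        }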
In tlb_table_flush() (batch is a local):

        struct mmu_table_batch **batch = &tlb->batch;
        ...
        if (*batch) {
        ...
                tlb_remove_table_free(*batch);
                *batch = NULL;
In tlb_remove_table() (batch is a local):

        struct mmu_table_batch **batch = &tlb->batch;
        ...
        if (*batch == NULL) {
                *batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT);
                if (*batch == NULL) {
                ...
                (*batch)->nr = 0;
        ...
        (*batch)->tables[(*batch)->nr++] = table;
        if ((*batch)->nr == MAX_TABLE_BATCH)
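tlb_remove_table() shows the allocate-lazily, degrade-gracefully pattern: the batch page is only allocated on first use, a failed GFP_NOWAIT allocation falls back to handling the single table immediately, and a full batch triggers tlb_table_flush(). A userspace sketch of the same control flow, with stand-in names throughout:

        #include <stdlib.h>

        #define TABLE_BATCH_MAX 4               /* stand-in for MAX_TABLE_BATCH */

        struct table_batch {
                unsigned int nr;
                void *tables[TABLE_BATCH_MAX];
        };

        static void free_table_now(void *table) /* stand-in for the immediate fallback path */
        {
                free(table);
        }

        static void table_flush(struct table_batch **batch);

        /* tlb_remove_table()-style queuing: allocate the batch lazily, fall back to
         * an immediate free if allocation fails, flush once the batch is full. */
        static void remove_table(struct table_batch **batch, void *table)
        {
                if (*batch == NULL) {
                        *batch = calloc(1, sizeof(**batch));
                        if (*batch == NULL) {
                                free_table_now(table);  /* degrade gracefully */
                                return;
                        }
                }

                (*batch)->tables[(*batch)->nr++] = table;
                if ((*batch)->nr == TABLE_BATCH_MAX)
                        table_flush(batch);
        }

        /* tlb_table_flush()-style drain: free everything queued, then reset. */
        static void table_flush(struct table_batch **batch)
        {
                if (*batch) {
                        for (unsigned int i = 0; i < (*batch)->nr; i++)
                                free_table_now((*batch)->tables[i]);
                        free(*batch);
                        *batch = NULL;
                }
        }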
In tlb_table_init():

        tlb->batch = NULL;