#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mmdebug.h>
#include <linux/mm_types.h>
#include <linux/mm_inline.h>
#include <linux/pagemap.h>
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/swap.h>
#include <linux/rmap.h>

#include <asm/pgalloc.h>
#include <asm/tlb.h>

#ifndef CONFIG_MMU_GATHER_NO_GATHER

static bool tlb_next_batch(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	/* Limit batching if we have delayed rmaps pending */
	if (tlb->delayed_rmap && tlb->active != &tlb->local)
		return false;

	batch = tlb->active;
	if (batch->next) {
		tlb->active = batch->next;
		return true;
	}

	if (tlb->batch_count == MAX_GATHER_BATCH_COUNT)
		return false;

	batch = (void *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
	if (!batch)
		return false;

	tlb->batch_count++;
	batch->next = NULL;
	batch->nr = 0;
	batch->max = MAX_GATHER_BATCH;

	tlb->active->next = batch;
	tlb->active = batch;

	return true;
}
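
/*
 * Illustrative sketch only (not from this file): the gather keeps an
 * embedded "local" batch and grows a singly linked chain of page-sized
 * batches behind it, with ->active pointing at the batch currently
 * being filled:
 *
 *	tlb->local -> batch1 -> batch2 -> NULL
 *	                          ^
 *	                     tlb->active
 *
 * tlb_next_batch() either advances ->active along an existing chain or
 * appends a freshly allocated batch; on allocation failure the caller
 * falls back to flushing early instead.
 */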

#ifdef CONFIG_SMP
static void tlb_flush_rmap_batch(struct mmu_gather_batch *batch, struct vm_area_struct *vma)
{
	struct encoded_page **pages = batch->encoded_pages;

	for (int i = 0; i < batch->nr; i++) {
		struct encoded_page *enc = pages[i];

		if (encoded_page_flags(enc) & ENCODED_PAGE_BIT_DELAY_RMAP) {
			struct page *page = encoded_page_ptr(enc);
			unsigned int nr_pages = 1;

			if (unlikely(encoded_page_flags(enc) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr_pages = encoded_nr_pages(pages[++i]);

			folio_remove_rmap_ptes(page_folio(page), page, nr_pages,
					       vma);
		}
	}
}

/**
 * tlb_flush_rmaps - do pending rmap removals after we have flushed the TLB
 * @tlb: the current mmu_gather
 * @vma: The memory area from which the pages are being removed.
 *
 * Note that because of how tlb_next_batch() above works, we will
 * never start multiple new batches with pending delayed rmaps, so
 * we only need to walk through the current active batch and the
 * original local one.
 */
void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (!tlb->delayed_rmap)
		return;

	tlb_flush_rmap_batch(&tlb->local, vma);
	if (tlb->active != &tlb->local)
		tlb_flush_rmap_batch(tlb->active, vma);
	tlb->delayed_rmap = 0;
}
#endif

/*
 * We might end up freeing a lot of pages. Reschedule on a regular
 * basis to avoid soft lockups in configurations without full
 * preemption enabled. The magic number of 512 folios seems to work.
 */
#define MAX_NR_FOLIOS_PER_FREE		512

static void __tlb_batch_free_encoded_pages(struct mmu_gather_batch *batch)
{
	struct encoded_page **pages = batch->encoded_pages;
	unsigned int nr, nr_pages;

	while (batch->nr) {
		if (!page_poisoning_enabled_static() && !want_init_on_free()) {
			nr = min(MAX_NR_FOLIOS_PER_FREE, batch->nr);

			/*
			 * Make sure we cover page + nr_pages, and don't leave
			 * nr_pages behind when capping the number of entries.
			 */
			if (unlikely(encoded_page_flags(pages[nr - 1]) &
				     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
				nr++;
		} else {
			/*
			 * With page poisoning and init_on_free, the time it
			 * takes to free memory grows proportionally with the
			 * actual memory size. Therefore, limit based on the
			 * actual memory size and not the number of involved
			 * folios.
			 */
			for (nr = 0, nr_pages = 0;
			     nr < batch->nr && nr_pages < MAX_NR_FOLIOS_PER_FREE;
			     nr++) {
				if (unlikely(encoded_page_flags(pages[nr]) &
					     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
					nr_pages += encoded_nr_pages(pages[++nr]);
				else
					nr_pages++;
			}
		}

		free_pages_and_swap_cache(pages, nr);
		pages += nr;
		batch->nr -= nr;

		cond_resched();
	}
}
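
/*
 * Worked example of the capping above (illustrative, assuming
 * MAX_NR_FOLIOS_PER_FREE == 512): if pages[511] is an encoded page with
 * ENCODED_PAGE_BIT_NR_PAGES_NEXT set, its count lives in pages[512], so
 * the cap is bumped from 512 to 513 entries; otherwise the next chunk
 * would start with an orphaned nr_pages entry that decodes as garbage.
 */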

static void tlb_batch_pages_flush(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch;

	for (batch = &tlb->local; batch && batch->nr; batch = batch->next)
		__tlb_batch_free_encoded_pages(batch);
	tlb->active = &tlb->local;
}

static void tlb_batch_list_free(struct mmu_gather *tlb)
{
	struct mmu_gather_batch *batch, *next;

	for (batch = tlb->local.next; batch; batch = next) {
		next = batch->next;
		free_pages((unsigned long)batch, 0);
	}
	tlb->local.next = NULL;
}

static bool __tlb_remove_folio_pages_size(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap,
		int page_size)
{
	int flags = delay_rmap ? ENCODED_PAGE_BIT_DELAY_RMAP : 0;
	struct mmu_gather_batch *batch;

	VM_BUG_ON(!tlb->end);

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	VM_WARN_ON(tlb->page_size != page_size);
	VM_WARN_ON_ONCE(nr_pages != 1 && page_size != PAGE_SIZE);
	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));
#endif

	batch = tlb->active;
	/*
	 * Add the page and check if we are full. If so
	 * force a flush.
	 */
	if (likely(nr_pages == 1)) {
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
	} else {
		flags |= ENCODED_PAGE_BIT_NR_PAGES_NEXT;
		batch->encoded_pages[batch->nr++] = encode_page(page, flags);
		batch->encoded_pages[batch->nr++] = encode_nr_pages(nr_pages);
	}
	/*
	 * Make sure that we can always add another "page" + "nr_pages",
	 * requiring two entries instead of only a single one.
	 */
	if (batch->nr >= batch->max - 1) {
		if (!tlb_next_batch(tlb))
			return true;
		batch = tlb->active;
	}
	VM_BUG_ON_PAGE(batch->nr > batch->max - 1, page);

	return false;
}
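
/*
 * Illustrative layout only (not from this file): a batch holding a
 * single page followed by a multi-page folio run looks like
 *
 *	encoded_pages[0] = encode_page(page0, flags)
 *	encoded_pages[1] = encode_page(page1, flags | NR_PAGES_NEXT)
 *	encoded_pages[2] = encode_nr_pages(nr)
 *
 * which is why the fill check above always reserves room for two
 * entries before declaring the batch full.
 */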

bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
		unsigned int nr_pages, bool delay_rmap)
{
	return __tlb_remove_folio_pages_size(tlb, page, nr_pages, delay_rmap,
					     PAGE_SIZE);
}

bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
		bool delay_rmap, int page_size)
{
	return __tlb_remove_folio_pages_size(tlb, page, 1, delay_rmap, page_size);
}

#endif /* MMU_GATHER_NO_GATHER */

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

static void __tlb_remove_table_free(struct mmu_table_batch *batch)
{
	int i;

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE

/*
 * Semi RCU freeing of the page directories.
 *
 * This is needed by some architectures to implement software pagetable walkers.
 *
 * gup_fast() and other software pagetable walkers do a lockless page-table
 * walk and therefore need some synchronization with the freeing of the page
 * directories. The chosen means to accomplish that is by disabling IRQs over
 * the walk.
 *
 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
 * since we unlink the page, flush TLBs, free the page. Since the disabling of
 * IRQs delays the completion of the TLB flush we can never observe an already
 * freed page.
 *
 * Not all systems IPI every CPU for this purpose:
 *
 * - Some architectures have HW support for cross-CPU synchronisation of TLB
 *   flushes, so there's no IPI at all.
 *
 * - Paravirt guests can do this TLB flushing in the hypervisor, or coordinate
 *   with the hypervisor to defer flushing on preempted vCPUs.
 *
 * Such systems need to delay the freeing by some other means; this is that
 * means.
 *
 * What we do is batch the freed directory pages (tables) and RCU free them.
 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
 * holds off grace periods.
 *
 * However, in order to batch these pages we need to allocate storage; this
 * allocation is deep inside the MM code and can thus easily fail on memory
 * pressure. To guarantee progress we fall back to single table freeing, see
 * the implementation of tlb_remove_table_one().
 */
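
/*
 * A minimal sketch (illustrative only, not part of this file) of the
 * walker side of this contract: a lockless walk keeps IRQs disabled for
 * its whole duration, which holds off both the TLB-flush IPI and the
 * RCU-sched grace period that precede the actual freeing:
 *
 *	local_irq_save(flags);
 *	// walk pgd/p4d/pud/pmd/pte levels; any directory page observed
 *	// here cannot be freed until local_irq_restore() below
 *	local_irq_restore(flags);
 */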

static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void tlb_remove_table_sync_one(void)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely on
	 * IRQ disabling.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
}
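
/*
 * Callers that free a page table outside the mmu_gather machinery can
 * call tlb_remove_table_sync_one() first: the IPI above cannot complete
 * while any lockless walker still has IRQs disabled, so once it returns
 * no such walker can be touching the table.
 */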

static void tlb_remove_table_rcu(struct rcu_head *head)
{
	__tlb_remove_table_free(container_of(head, struct mmu_table_batch, rcu));
}

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	call_rcu(&batch->rcu, tlb_remove_table_rcu);
}

#else /* !CONFIG_MMU_GATHER_RCU_TABLE_FREE */

static void tlb_remove_table_free(struct mmu_table_batch *batch)
{
	__tlb_remove_table_free(batch);
}

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */

/*
 * If we want tlb_remove_table() to imply TLB invalidates.
 */
static inline void tlb_table_invalidate(struct mmu_gather *tlb)
{
	if (tlb_needs_table_invalidate()) {
		/*
		 * Invalidate page-table caches used by hardware walkers. Then
		 * we still need to RCU-sched wait while freeing the pages
		 * because software walkers can still be in-flight.
		 */
		tlb_flush_mmu_tlbonly(tlb);
	}
}

#ifdef CONFIG_PT_RECLAIM
static inline void __tlb_remove_table_one_rcu(struct rcu_head *head)
{
	struct ptdesc *ptdesc;

	ptdesc = container_of(head, struct ptdesc, pt_rcu_head);
	__tlb_remove_table(ptdesc);
}

static inline void __tlb_remove_table_one(void *table)
{
	struct ptdesc *ptdesc;

	ptdesc = table;
	call_rcu(&ptdesc->pt_rcu_head, __tlb_remove_table_one_rcu);
}
#else
static inline void __tlb_remove_table_one(void *table)
{
	tlb_remove_table_sync_one();
	__tlb_remove_table(table);
}
#endif /* CONFIG_PT_RECLAIM */

static void tlb_remove_table_one(void *table)
{
	__tlb_remove_table_one(table);
}

static void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		tlb_table_invalidate(tlb);
		tlb_remove_table_free(*batch);
		*batch = NULL;
	}
}

void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			tlb_table_invalidate(tlb);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}

	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_table_flush(tlb);
}
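
/*
 * Illustrative only: architectures typically funnel their page-table
 * freeing hooks through tlb_remove_table() so that freed directories
 * get the semi-RCU treatment above, along the lines of this
 * hypothetical arch glue:
 *
 *	#define __pte_free_tlb(tlb, ptep, address)	\
 *		tlb_remove_table((tlb), (ptep))
 */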

static inline void tlb_table_init(struct mmu_gather *tlb)
{
	tlb->batch = NULL;
}

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_table_flush(struct mmu_gather *tlb) { }
static inline void tlb_table_init(struct mmu_gather *tlb) { }

#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
	tlb_table_flush(tlb);
#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_pages_flush(tlb);
#endif
}

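/*
 * The order below matters: the TLB must be invalidated before the
 * gathered pages (and batched page tables) are freed, so that no CPU
 * can still translate through a mapping to memory that has been reused.
 */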
void tlb_flush_mmu(struct mmu_gather *tlb)
{
	tlb_flush_mmu_tlbonly(tlb);
	tlb_flush_mmu_free(tlb);
}

static void __tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm,
			     bool fullmm)
{
	tlb->mm = mm;
	tlb->fullmm = fullmm;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb->need_flush_all = 0;
	tlb->local.next = NULL;
	tlb->local.nr = 0;
	tlb->local.max = ARRAY_SIZE(tlb->__pages);
	tlb->active = &tlb->local;
	tlb->batch_count = 0;
#endif
	tlb->delayed_rmap = 0;

	tlb_table_init(tlb);
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	tlb->page_size = 0;
#endif

	__tlb_reset_range(tlb);
	inc_tlb_flush_pending(tlb->mm);
}

/**
 * tlb_gather_mmu - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, false);
}
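
/*
 * A minimal usage sketch (illustrative only; the real unmap paths live
 * in mm/memory.c): tear-down brackets all page removal between gather
 * and finish, with the gather deciding when to actually flush and free:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	// for each present pte in the range: clear the pte, then hand
 *	// its page to the gather:
 *	//	__tlb_remove_page_size(&tlb, page, false, PAGE_SIZE);
 *	tlb_finish_mmu(&tlb);
 */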

/**
 * tlb_gather_mmu_fullmm - initialize an mmu_gather structure for page-table tear-down
 * @tlb: the mmu_gather structure to initialize
 * @mm: the mm_struct of the target address space
 *
 * In this case, @mm is without users and we're going to destroy the
 * full address space (exit/execve).
 *
 * Called to initialize an (on-stack) mmu_gather structure for page-table
 * tear-down from @mm.
 */
void tlb_gather_mmu_fullmm(struct mmu_gather *tlb, struct mm_struct *mm)
{
	__tlb_gather_mmu(tlb, mm, true);
}

/**
 * tlb_finish_mmu - finish an mmu_gather structure
 * @tlb: the mmu_gather structure to finish
 *
 * Called at the end of the shootdown operation to free up any resources that
 * were required.
 */
void tlb_finish_mmu(struct mmu_gather *tlb)
{
	/*
	 * If parallel threads are doing PTE changes on the same range under a
	 * non-exclusive lock (e.g., mmap_lock read-side) but defer the TLB
	 * flush by batching, one thread may end up seeing inconsistent PTEs
	 * and be left with stale TLB entries. So flush the TLB forcefully if
	 * we detect parallel PTE batching threads.
	 *
	 * However, some syscalls, e.g. munmap(), may free page tables; this
	 * needs to force flush everything in the given range. Otherwise we
	 * may end up with stale TLB entries on architectures, e.g. aarch64,
	 * that can specify which level of the TLB to flush.
	 */
	if (mm_tlb_flush_nested(tlb->mm)) {
		/*
		 * aarch64 yields better performance with fullmm by avoiding
		 * multiple CPUs spamming TLBI messages at the same time.
		 *
		 * On x86, non-fullmm doesn't yield a significant difference
		 * against fullmm.
		 */
		tlb->fullmm = 1;
		__tlb_reset_range(tlb);
		tlb->freed_tables = 1;
	}

	tlb_flush_mmu(tlb);

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	tlb_batch_list_free(tlb);
#endif
	dec_tlb_flush_pending(tlb->mm);
}
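
/*
 * Illustrative timeline only (not from this file) of the nested-flush
 * race that tlb_finish_mmu() guards against, with two threads batching
 * on the same range under mmap_lock held for read:
 *
 *	CPU 0				CPU 1
 *	tlb_gather_mmu()
 *					tlb_gather_mmu()
 *	clear ptes, batch pages
 *					observes a mix of old/new ptes
 *	tlb_finish_mmu()		tlb_finish_mmu()
 *
 * mm_tlb_flush_nested() detects the overlap, so both finishers flush
 * the full mm rather than trusting their (possibly stale) ranges.
 */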