// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_folio_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_ORDER_CEILING	5

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
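
/*
 * vma->swap_readahead_info packs three fields into one word: bits
 * [0, SWAP_RA_WIN_SHIFT) hold the readahead hit count, bits
 * [SWAP_RA_WIN_SHIFT, PAGE_SHIFT) the window size, and the remaining
 * high bits the page-aligned fault address. With 4K pages, for
 * example, hits live in bits 0-5 and the window in bits 6-11.
 */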

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

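/*
 * Reclaim may leave a shadow entry (an xarray value) in place of a
 * folio; it encodes workingset data used to detect refaults.
 */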
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	void *shadow;

	shadow = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(shadow))
		return shadow;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and 'swap' instead of mapping and index.
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

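	/*
	 * On xarray allocation failure, xas_nomem() allocates memory
	 * outside the lock and the loop retries the whole insertion.
	 */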
	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list;
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

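	/*
	 * Shadows may span several swap address spaces; walk them one
	 * SWAP_ADDRESS_SPACE_PAGES chunk at a time.
	 */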
	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, index);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr = ALIGN((curr + 1), SWAP_ADDRESS_SPACE_PAGES);
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 * - Marcelo
 */
void free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	free_swap_cache(folio);
	if (!is_huge_zero_folio(folio))
		folio_put(folio);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them. They are removed from the LRU and freed if this is their last use.
 */
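/*
 * An entry with ENCODED_PAGE_BIT_NR_PAGES_NEXT set is followed by an
 * entry carrying the number of references to drop for that folio,
 * rather than another page pointer.
 */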
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	struct folio_batch folios;
	unsigned int refs[PAGEVEC_SIZE];

	folio_batch_init(&folios);
	for (int i = 0; i < nr; i++) {
		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

		free_swap_cache(folio);
		refs[folios.nr] = 1;
		if (unlikely(encoded_page_flags(pages[i]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			refs[folios.nr] = encoded_nr_pages(pages[++i]);

		if (folio_batch_add(&folios, folio) == 0)
			folios_put_refs(&folios, refs);
	}
	if (folios.nr)
		folios_put_refs(&folios, refs);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR().
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swap_cache_index(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si = swp_swap_info(entry);
	struct folio *folio;
	struct folio *new_folio = NULL;
	struct folio *result = NULL;
	void *shadow = NULL;

	*new_page_allocated = false;
	for (;;) {
		int err;
		/*
		 * First check the swap cache. Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					  swap_cache_index(entry));
		if (!IS_ERR(folio))
			goto got_folio;

		/*
		 * Just skip read ahead for unused swap slot.
		 */
		if (!swap_entry_swapped(si, entry))
			goto put_and_return;

		/*
		 * Get a new folio to read into from swap. Allocate it now
		 * if new_folio doesn't exist, before marking swap_map
		 * SWAP_HAS_CACHE: once that is set, -EEXIST will cause any
		 * racers to loop around until we add it to the cache.
		 */
		if (!new_folio) {
			new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
			if (!new_folio)
				goto put_and_return;
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry, 1);
		if (!err)
			break;
		else if (err != -EEXIST)
			goto put_and_return;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not in the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto put_and_return;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared. Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new folio.
	 */
	__folio_set_locked(new_folio);
	__folio_set_swapbacked(new_folio);

	if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	memcg1_swapin(entry, 1);

	if (shadow)
		workingset_refault(new_folio, shadow);

	/* Caller will initiate read into locked new_folio */
	folio_add_lru(new_folio);
	*new_page_allocated = true;
	folio = new_folio;
got_folio:
	result = folio;
	goto put_and_return;

fail_unlock:
	put_swap_folio(new_folio, entry);
	folio_unlock(new_folio);
put_and_return:
	if (!(*new_page_allocated) && new_folio)
		folio_put(new_folio);
	return result;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug)
{
	struct swap_info_struct *si;
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	si = get_swap_device(entry);
	if (!si)
		return NULL;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_read_folio(folio, plug);

	put_swap_device(si);
	return folio;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
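	/*
	 * Example: hits == 3 gives pages = 5, rounded up to the next
	 * power of two, 8; the result is capped at max_pages and never
	 * drops below half the previous window.
	 */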
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time. We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx)
{
	struct folio *folio;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
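	/* e.g. mask == 7: a fault at offset 0x12345 covers 0x12340..0x12347 */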
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		folio = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (offset != entry_offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

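	/* One address_space per SWAP_ADDRESS_SPACE_PAGES worth of entries. */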
	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
			   unsigned long *end)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, prev_faddr, left, right;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
	if (max_win == 1)
		return 1;

	faddr = vmf->address;
	ra_val = GET_SWAP_RA_VAL(vma);
	prev_faddr = SWAP_RA_ADDR(ra_val);
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
				max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return 1;

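	/*
	 * Place the window: extend forward after a forward sequential
	 * fault, backward after a backward sequential fault, otherwise
	 * centre it on the faulting address.
	 */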
	if (faddr == prev_faddr + PAGE_SIZE)
		left = faddr;
	else if (prev_faddr == faddr + PAGE_SIZE)
		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
	else
		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
	right = left + (win << PAGE_SHIFT);
	if ((long)left < 0)
		left = 0;
	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);

	return win;
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct folio *folio;
	pte_t *pte = NULL, pentry;
	int win;
	unsigned long start, end, addr;
	swp_entry_t entry;
	pgoff_t ilx;
	bool page_allocated;

	win = swap_vma_ra_win(vmf, &start, &end);
	if (win == 1)
		goto skip;

	ilx = targ_ilx - PFN_DOWN(vmf->address - start);

	blk_start_plug(&plug);
	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
						&page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (addr != vmf->address) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The folio was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. physical,
 * disk-offset based) or vma-based (i.e. based on virtual addresses
 * around the fault address).
 */
struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			       struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	folio = swap_use_vma_readahead() ?
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);

	return folio;
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead));
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif