1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * linux/mm/swapfile.c
4 *
5 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6 * Swap reorganised 29.12.95, Stephen Tweedie
7 */
8
9 #include <linux/blkdev.h>
10 #include <linux/mm.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/task.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mman.h>
15 #include <linux/slab.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/swap.h>
18 #include <linux/vmalloc.h>
19 #include <linux/pagemap.h>
20 #include <linux/namei.h>
21 #include <linux/shmem_fs.h>
22 #include <linux/blk-cgroup.h>
23 #include <linux/random.h>
24 #include <linux/writeback.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/init.h>
28 #include <linux/ksm.h>
29 #include <linux/rmap.h>
30 #include <linux/security.h>
31 #include <linux/backing-dev.h>
32 #include <linux/mutex.h>
33 #include <linux/capability.h>
34 #include <linux/syscalls.h>
35 #include <linux/memcontrol.h>
36 #include <linux/poll.h>
37 #include <linux/oom.h>
38 #include <linux/swapfile.h>
39 #include <linux/export.h>
40 #include <linux/sort.h>
41 #include <linux/completion.h>
42 #include <linux/suspend.h>
43 #include <linux/zswap.h>
44 #include <linux/plist.h>
45
46 #include <asm/tlbflush.h>
47 #include <linux/swapops.h>
48 #include <linux/swap_cgroup.h>
49 #include "internal.h"
50 #include "swap.h"
51
52 static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
53 unsigned char);
54 static void free_swap_count_continuations(struct swap_info_struct *);
55 static void swap_entry_range_free(struct swap_info_struct *si,
56 struct swap_cluster_info *ci,
57 swp_entry_t entry, unsigned int nr_pages);
58 static void swap_range_alloc(struct swap_info_struct *si,
59 unsigned int nr_entries);
60 static bool folio_swapcache_freeable(struct folio *folio);
61 static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
62 unsigned long offset);
63 static inline void unlock_cluster(struct swap_cluster_info *ci);
64
65 static DEFINE_SPINLOCK(swap_lock);
66 static unsigned int nr_swapfiles;
67 atomic_long_t nr_swap_pages;
68 /*
69 * Some modules use swappable objects and may try to swap them out under
70 * memory pressure (via the shrinker). Before doing so, they may wish to
71 * check to see if any swap space is available.
72 */
73 EXPORT_SYMBOL_GPL(nr_swap_pages);
74 /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
75 long total_swap_pages;
76 static int least_priority = -1;
77 unsigned long swapfile_maximum_size;
78 #ifdef CONFIG_MIGRATION
79 bool swap_migration_ad_supported;
80 #endif /* CONFIG_MIGRATION */
81
82 static const char Bad_file[] = "Bad swap file entry ";
83 static const char Unused_file[] = "Unused swap file entry ";
84 static const char Bad_offset[] = "Bad swap offset entry ";
85 static const char Unused_offset[] = "Unused swap offset entry ";
86
87 /*
88 * all active swap_info_structs
89 * protected with swap_lock, and ordered by priority.
90 */
91 static PLIST_HEAD(swap_active_head);
92
93 /*
94 * all available (active, not full) swap_info_structs
95 * protected with swap_avail_lock, ordered by priority.
96 * This is used by folio_alloc_swap() instead of swap_active_head
97 * because swap_active_head includes all swap_info_structs,
98 * but folio_alloc_swap() doesn't need to look at full ones.
99 * This uses its own lock instead of swap_lock because when a
100 * swap_info_struct changes between not-full/full, it needs to
101 * add/remove itself to/from this list, but the swap_info_struct->lock
102 * is held and the locking order requires swap_lock to be taken
103 * before any swap_info_struct->lock.
104 */
105 static struct plist_head *swap_avail_heads;
106 static DEFINE_SPINLOCK(swap_avail_lock);
107
108 static struct swap_info_struct *swap_info[MAX_SWAPFILES];
109
110 static DEFINE_MUTEX(swapon_mutex);
111
112 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
113 /* Activity counter to indicate that a swapon or swapoff has occurred */
114 static atomic_t proc_poll_event = ATOMIC_INIT(0);
115
116 atomic_t nr_rotate_swap = ATOMIC_INIT(0);
117
118 struct percpu_swap_cluster {
119 struct swap_info_struct *si[SWAP_NR_ORDERS];
120 unsigned long offset[SWAP_NR_ORDERS];
121 local_lock_t lock;
122 };
123
124 static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = {
125 .si = { NULL },
126 .offset = { SWAP_ENTRY_INVALID },
127 .lock = INIT_LOCAL_LOCK(),
128 };
129
130 static struct swap_info_struct *swap_type_to_swap_info(int type)
131 {
132 if (type >= MAX_SWAPFILES)
133 return NULL;
134
135 return READ_ONCE(swap_info[type]); /* rcu_dereference() */
136 }
137
138 static inline unsigned char swap_count(unsigned char ent)
139 {
140 return ent & ~SWAP_HAS_CACHE; /* may include COUNT_CONTINUED flag */
141 }
142
143 /*
144 * Use the second highest bit of the inuse_pages counter as an indicator
145 * of whether a swap device is on the available plist, so the atomic can
146 * still be updated arithmetically while having special data embedded.
147 *
148 * The inuse_pages counter is the only thing indicating whether a device
149 * should be on avail_lists or not (except swapon / swapoff). By embedding the
150 * off-list bit in the atomic counter, updates no longer need any lock
151 * to check the list status.
152 *
153 * This bit will be set if the device is not on the plist and not
154 * usable, and will be cleared if the device is on the plist.
155 */
156 #define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2))
157 #define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT)
158 static long swap_usage_in_pages(struct swap_info_struct *si)
159 {
160 return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK;
161 }
162
163 /* Reclaim the swap entry anyway if possible */
164 #define TTRS_ANYWAY 0x1
165 /*
166 * Reclaim the swap entry if there are no more mappings of the
167 * corresponding page
168 */
169 #define TTRS_UNMAPPED 0x2
170 /* Reclaim the swap entry if swap is getting full */
171 #define TTRS_FULL 0x4
172
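/*
 * Check whether every slot in [offset, offset + nr_pages) is held only
 * by the swap cache (SWAP_HAS_CACHE set with no remaining map count).
 */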
173 static bool swap_only_has_cache(struct swap_info_struct *si,
174 unsigned long offset, int nr_pages)
175 {
176 unsigned char *map = si->swap_map + offset;
177 unsigned char *map_end = map + nr_pages;
178
179 do {
180 VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
181 if (*map != SWAP_HAS_CACHE)
182 return false;
183 } while (++map < map_end);
184
185 return true;
186 }
187
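/*
 * Check whether all nr_pages slots starting at @offset carry exactly one
 * map reference with the same cache state; *has_cache reports whether
 * that state includes SWAP_HAS_CACHE.
 */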
188 static bool swap_is_last_map(struct swap_info_struct *si,
189 unsigned long offset, int nr_pages, bool *has_cache)
190 {
191 unsigned char *map = si->swap_map + offset;
192 unsigned char *map_end = map + nr_pages;
193 unsigned char count = *map;
194
195 if (swap_count(count) != 1)
196 return false;
197
198 while (++map < map_end) {
199 if (*map != count)
200 return false;
201 }
202
203 *has_cache = !!(count & SWAP_HAS_CACHE);
204 return true;
205 }
206
207 /*
208 * returns number of pages in the folio that backs the swap entry. If positive,
209 * the folio was reclaimed. If negative, the folio was not reclaimed. If 0, no
210 * folio was associated with the swap entry.
211 */
212 static int __try_to_reclaim_swap(struct swap_info_struct *si,
213 unsigned long offset, unsigned long flags)
214 {
215 swp_entry_t entry = swp_entry(si->type, offset);
216 struct address_space *address_space = swap_address_space(entry);
217 struct swap_cluster_info *ci;
218 struct folio *folio;
219 int ret, nr_pages;
220 bool need_reclaim;
221
222 again:
223 folio = filemap_get_folio(address_space, swap_cache_index(entry));
224 if (IS_ERR(folio))
225 return 0;
226
227 nr_pages = folio_nr_pages(folio);
228 ret = -nr_pages;
229
230 /*
231 * This function can be called from scan_swap_map_slots(), which is
232 * invoked by vmscan.c while reclaiming folios, so a folio lock may
233 * already be held. Use trylock here to avoid deadlock. This is a
234 * special case; in usual operations use folio_free_swap() with an
235 * explicit folio_lock().
236 */
237 if (!folio_trylock(folio))
238 goto out;
239
240 /*
241 * Offset could point to the middle of a large folio, or folio
242 * may no longer point to the expected offset before it's locked.
243 */
244 entry = folio->swap;
245 if (offset < swp_offset(entry) || offset >= swp_offset(entry) + nr_pages) {
246 folio_unlock(folio);
247 folio_put(folio);
248 goto again;
249 }
250 offset = swp_offset(entry);
251
252 need_reclaim = ((flags & TTRS_ANYWAY) ||
253 ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
254 ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
255 if (!need_reclaim || !folio_swapcache_freeable(folio))
256 goto out_unlock;
257
258 /*
259 * It's safe to delete the folio from swap cache only if the folio's
260 * swap_map is HAS_CACHE only, which means the slots have no page table
261 * reference or pending writeback, and can't be allocated to others.
262 */
263 ci = lock_cluster(si, offset);
264 need_reclaim = swap_only_has_cache(si, offset, nr_pages);
265 unlock_cluster(ci);
266 if (!need_reclaim)
267 goto out_unlock;
268
269 delete_from_swap_cache(folio);
270 folio_set_dirty(folio);
271 ret = nr_pages;
272 out_unlock:
273 folio_unlock(folio);
274 out:
275 folio_put(folio);
276 return ret;
277 }
278
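/* Walk the rbtree of swap extents: first extent, then in-order successor. */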
279 static inline struct swap_extent *first_se(struct swap_info_struct *sis)
280 {
281 struct rb_node *rb = rb_first(&sis->swap_extent_root);
282 return rb_entry(rb, struct swap_extent, rb_node);
283 }
284
285 static inline struct swap_extent *next_se(struct swap_extent *se)
286 {
287 struct rb_node *rb = rb_next(&se->rb_node);
288 return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
289 }
290
291 /*
292 * swapon tells the device that all the old swap contents can be discarded,
293 * to allow the swap device to optimize its wear-levelling.
294 */
295 static int discard_swap(struct swap_info_struct *si)
296 {
297 struct swap_extent *se;
298 sector_t start_block;
299 sector_t nr_blocks;
300 int err = 0;
301
302 /* Do not discard the swap header page! */
303 se = first_se(si);
304 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
305 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
306 if (nr_blocks) {
307 err = blkdev_issue_discard(si->bdev, start_block,
308 nr_blocks, GFP_KERNEL);
309 if (err)
310 return err;
311 cond_resched();
312 }
313
314 for (se = next_se(se); se; se = next_se(se)) {
315 start_block = se->start_block << (PAGE_SHIFT - 9);
316 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);
317
318 err = blkdev_issue_discard(si->bdev, start_block,
319 nr_blocks, GFP_KERNEL);
320 if (err)
321 break;
322
323 cond_resched();
324 }
325 return err; /* That will often be -EOPNOTSUPP */
326 }
327
328 static struct swap_extent *
329 offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
330 {
331 struct swap_extent *se;
332 struct rb_node *rb;
333
334 rb = sis->swap_extent_root.rb_node;
335 while (rb) {
336 se = rb_entry(rb, struct swap_extent, rb_node);
337 if (offset < se->start_page)
338 rb = rb->rb_left;
339 else if (offset >= se->start_page + se->nr_pages)
340 rb = rb->rb_right;
341 else
342 return se;
343 }
344 /* It *must* be present */
345 BUG();
346 }
347
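/*
 * Map the folio's swap entry to the backing device sector
 * (in 512-byte units) using the swap extent tree.
 */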
348 sector_t swap_folio_sector(struct folio *folio)
349 {
350 struct swap_info_struct *sis = swp_swap_info(folio->swap);
351 struct swap_extent *se;
352 sector_t sector;
353 pgoff_t offset;
354
355 offset = swp_offset(folio->swap);
356 se = offset_to_swap_extent(sis, offset);
357 sector = se->start_block + (offset - se->start_page);
358 return sector << (PAGE_SHIFT - 9);
359 }
360
361 /*
362 * swap allocation tells the device that a cluster of swap can now be discarded,
363 * to allow the swap device to optimize its wear-levelling.
364 */
365 static void discard_swap_cluster(struct swap_info_struct *si,
366 pgoff_t start_page, pgoff_t nr_pages)
367 {
368 struct swap_extent *se = offset_to_swap_extent(si, start_page);
369
370 while (nr_pages) {
371 pgoff_t offset = start_page - se->start_page;
372 sector_t start_block = se->start_block + offset;
373 sector_t nr_blocks = se->nr_pages - offset;
374
375 if (nr_blocks > nr_pages)
376 nr_blocks = nr_pages;
377 start_page += nr_blocks;
378 nr_pages -= nr_blocks;
379
380 start_block <<= PAGE_SHIFT - 9;
381 nr_blocks <<= PAGE_SHIFT - 9;
382 if (blkdev_issue_discard(si->bdev, start_block,
383 nr_blocks, GFP_NOIO))
384 break;
385
386 se = next_se(se);
387 }
388 }
389
390 #ifdef CONFIG_THP_SWAP
391 #define SWAPFILE_CLUSTER HPAGE_PMD_NR
392
393 #define swap_entry_order(order) (order)
394 #else
395 #define SWAPFILE_CLUSTER 256
396
397 /*
398 * Define swap_entry_order() as a constant to let the compiler optimize
399 * out some code if !CONFIG_THP_SWAP
400 */
401 #define swap_entry_order(order) 0
402 #endif
403 #define LATENCY_LIMIT 256
404
405 static inline bool cluster_is_empty(struct swap_cluster_info *info)
406 {
407 return info->count == 0;
408 }
409
410 static inline bool cluster_is_discard(struct swap_cluster_info *info)
411 {
412 return info->flags == CLUSTER_FLAG_DISCARD;
413 }
414
415 static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order)
416 {
417 if (unlikely(ci->flags > CLUSTER_FLAG_USABLE))
418 return false;
419 if (!order)
420 return true;
421 return cluster_is_empty(ci) || order == ci->order;
422 }
423
424 static inline unsigned int cluster_index(struct swap_info_struct *si,
425 struct swap_cluster_info *ci)
426 {
427 return ci - si->cluster_info;
428 }
429
430 static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si,
431 unsigned long offset)
432 {
433 return &si->cluster_info[offset / SWAPFILE_CLUSTER];
434 }
435
436 static inline unsigned int cluster_offset(struct swap_info_struct *si,
437 struct swap_cluster_info *ci)
438 {
439 return cluster_index(si, ci) * SWAPFILE_CLUSTER;
440 }
441
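/* Lock and return the cluster that contains the given swap offset. */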
442 static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
443 unsigned long offset)
444 {
445 struct swap_cluster_info *ci;
446
447 ci = offset_to_cluster(si, offset);
448 spin_lock(&ci->lock);
449
450 return ci;
451 }
452
453 static inline void unlock_cluster(struct swap_cluster_info *ci)
454 {
455 spin_unlock(&ci->lock);
456 }
457
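/*
 * Move a cluster to the tail of @list and update its flags. The si lock
 * protects the list heads; frag cluster counters are kept in sync here.
 */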
458 static void move_cluster(struct swap_info_struct *si,
459 struct swap_cluster_info *ci, struct list_head *list,
460 enum swap_cluster_flags new_flags)
461 {
462 VM_WARN_ON(ci->flags == new_flags);
463
464 BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
465 lockdep_assert_held(&ci->lock);
466
467 spin_lock(&si->lock);
468 if (ci->flags == CLUSTER_FLAG_NONE)
469 list_add_tail(&ci->list, list);
470 else
471 list_move_tail(&ci->list, list);
472 spin_unlock(&si->lock);
473
474 if (ci->flags == CLUSTER_FLAG_FRAG)
475 atomic_long_dec(&si->frag_cluster_nr[ci->order]);
476 else if (new_flags == CLUSTER_FLAG_FRAG)
477 atomic_long_inc(&si->frag_cluster_nr[ci->order]);
478 ci->flags = new_flags;
479 }
480
481 /* Add a cluster to discard list and schedule it to do discard */
482 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
483 struct swap_cluster_info *ci)
484 {
485 VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
486 move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
487 schedule_work(&si->discard_work);
488 }
489
490 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
491 {
492 lockdep_assert_held(&ci->lock);
493 move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
494 ci->order = 0;
495 }
496
497 /*
498 * Isolate and lock the first cluster on a list that is not contended,
499 * clearing its flag before it is taken off-list. The cluster flag must be
500 * in sync with the list status, so cluster updaters can always know the
501 * cluster list status without touching the si lock.
502 *
503 * Note it's possible that all clusters on a list are contended, so
504 * this may return NULL for a non-empty list.
505 */
506 static struct swap_cluster_info *isolate_lock_cluster(
507 struct swap_info_struct *si, struct list_head *list)
508 {
509 struct swap_cluster_info *ci, *ret = NULL;
510
511 spin_lock(&si->lock);
512
513 if (unlikely(!(si->flags & SWP_WRITEOK)))
514 goto out;
515
516 list_for_each_entry(ci, list, list) {
517 if (!spin_trylock(&ci->lock))
518 continue;
519
520 /* We may only isolate and clear flags of following lists */
521 VM_BUG_ON(!ci->flags);
522 VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE &&
523 ci->flags != CLUSTER_FLAG_FULL);
524
525 list_del(&ci->list);
526 ci->flags = CLUSTER_FLAG_NONE;
527 ret = ci;
528 break;
529 }
530 out:
531 spin_unlock(&si->lock);
532
533 return ret;
534 }
535
536 /*
537 * Do the actual discard. After a cluster discard is finished, the cluster
538 * will be added to the free cluster list. Discard clusters are a bit special
539 * as they don't participate in allocation or reclaim, so clusters marked as
540 * CLUSTER_FLAG_DISCARD must remain off-list or on the discard list.
541 */
542 static bool swap_do_scheduled_discard(struct swap_info_struct *si)
543 {
544 struct swap_cluster_info *ci;
545 bool ret = false;
546 unsigned int idx;
547
548 spin_lock(&si->lock);
549 while (!list_empty(&si->discard_clusters)) {
550 ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
551 /*
552 * Delete the cluster from the list to prepare for discard, but keep
553 * the CLUSTER_FLAG_DISCARD flag: percpu_swap_cluster could still be
554 * pointing to it, or relocate_cluster() could run into it.
555 */
556 list_del(&ci->list);
557 idx = cluster_index(si, ci);
558 spin_unlock(&si->lock);
559 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
560 SWAPFILE_CLUSTER);
561
562 spin_lock(&ci->lock);
563 /*
564 * Discard is done, clear its flags as it's off-list, then
565 * return the cluster to allocation list.
566 */
567 ci->flags = CLUSTER_FLAG_NONE;
568 __free_cluster(si, ci);
569 spin_unlock(&ci->lock);
570 ret = true;
571 spin_lock(&si->lock);
572 }
573 spin_unlock(&si->lock);
574 return ret;
575 }
576
577 static void swap_discard_work(struct work_struct *work)
578 {
579 struct swap_info_struct *si;
580
581 si = container_of(work, struct swap_info_struct, discard_work);
582
583 swap_do_scheduled_discard(si);
584 }
585
586 static void swap_users_ref_free(struct percpu_ref *ref)
587 {
588 struct swap_info_struct *si;
589
590 si = container_of(ref, struct swap_info_struct, users);
591 complete(&si->comp);
592 }
593
594 /*
595 * Must be called after freeing if ci->count == 0, moves the cluster to free
596 * or discard list.
597 */
598 static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
599 {
600 VM_BUG_ON(ci->count != 0);
601 VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
602 lockdep_assert_held(&ci->lock);
603
604 /*
605 * If the swap is discardable, prepare to discard the cluster
606 * instead of freeing it immediately. The cluster will be freed
607 * after discard.
608 */
609 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
610 (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
611 swap_cluster_schedule_discard(si, ci);
612 return;
613 }
614
615 __free_cluster(si, ci);
616 }
617
618 /*
619 * Must be called after freeing if ci->count != 0, moves the cluster to
620 * nonfull list.
621 */
622 static void partial_free_cluster(struct swap_info_struct *si,
623 struct swap_cluster_info *ci)
624 {
625 VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER);
626 lockdep_assert_held(&ci->lock);
627
628 if (ci->flags != CLUSTER_FLAG_NONFULL)
629 move_cluster(si, ci, &si->nonfull_clusters[ci->order],
630 CLUSTER_FLAG_NONFULL);
631 }
632
633 /*
634 * Must be called after allocation, moves the cluster to full or frag list.
635 * Note: allocation doesn't acquire si lock, and may drop the ci lock for
636 * reclaim, so the cluster could be anywhere when called.
637 */
638 static void relocate_cluster(struct swap_info_struct *si,
639 struct swap_cluster_info *ci)
640 {
641 lockdep_assert_held(&ci->lock);
642
643 /* Discard cluster must remain off-list or on discard list */
644 if (cluster_is_discard(ci))
645 return;
646
647 if (!ci->count) {
648 if (ci->flags != CLUSTER_FLAG_FREE)
649 free_cluster(si, ci);
650 } else if (ci->count != SWAPFILE_CLUSTER) {
651 if (ci->flags != CLUSTER_FLAG_FRAG)
652 move_cluster(si, ci, &si->frag_clusters[ci->order],
653 CLUSTER_FLAG_FRAG);
654 } else {
655 if (ci->flags != CLUSTER_FLAG_FULL)
656 move_cluster(si, ci, &si->full_clusters,
657 CLUSTER_FLAG_FULL);
658 }
659 }
660
661 /*
662 * The cluster corresponding to page_nr will be used. The cluster will not be
663 * added to free cluster list and its usage counter will be increased by 1.
664 * Only used for initialization.
665 */
666 static void inc_cluster_info_page(struct swap_info_struct *si,
667 struct swap_cluster_info *cluster_info, unsigned long page_nr)
668 {
669 unsigned long idx = page_nr / SWAPFILE_CLUSTER;
670 struct swap_cluster_info *ci;
671
672 ci = cluster_info + idx;
673 ci->count++;
674
675 VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
676 VM_BUG_ON(ci->flags);
677 }
678
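/*
 * Try to reclaim swap-cache-only slots in [start, end). Drops and retakes
 * ci->lock around the reclaim, then rechecks that the whole range is free.
 */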
679 static bool cluster_reclaim_range(struct swap_info_struct *si,
680 struct swap_cluster_info *ci,
681 unsigned long start, unsigned long end)
682 {
683 unsigned char *map = si->swap_map;
684 unsigned long offset = start;
685 int nr_reclaim;
686
687 spin_unlock(&ci->lock);
688 do {
689 switch (READ_ONCE(map[offset])) {
690 case 0:
691 offset++;
692 break;
693 case SWAP_HAS_CACHE:
694 nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
695 if (nr_reclaim > 0)
696 offset += nr_reclaim;
697 else
698 goto out;
699 break;
700 default:
701 goto out;
702 }
703 } while (offset < end);
704 out:
705 spin_lock(&ci->lock);
706 /*
707 * Recheck the range whether reclaim succeeded or not: the slots
708 * could have been freed while we were not holding the lock.
709 */
710 for (offset = start; offset < end; offset++)
711 if (READ_ONCE(map[offset]))
712 return false;
713
714 return true;
715 }
716
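/*
 * Check whether [start, start + nr_pages) is free for allocation. Sets
 * *need_reclaim if some slots are swap-cache-only and may be reclaimable.
 */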
717 static bool cluster_scan_range(struct swap_info_struct *si,
718 struct swap_cluster_info *ci,
719 unsigned long start, unsigned int nr_pages,
720 bool *need_reclaim)
721 {
722 unsigned long offset, end = start + nr_pages;
723 unsigned char *map = si->swap_map;
724
725 if (cluster_is_empty(ci))
726 return true;
727
728 for (offset = start; offset < end; offset++) {
729 switch (READ_ONCE(map[offset])) {
730 case 0:
731 continue;
732 case SWAP_HAS_CACHE:
733 if (!vm_swap_full())
734 return false;
735 *need_reclaim = true;
736 continue;
737 default:
738 return false;
739 }
740 }
741
742 return true;
743 }
744
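/*
 * Mark 1 << order slots starting at @start as allocated with @usage, and
 * update the cluster count and device usage counters accordingly.
 */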
745 static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
746 unsigned int start, unsigned char usage,
747 unsigned int order)
748 {
749 unsigned int nr_pages = 1 << order;
750
751 lockdep_assert_held(&ci->lock);
752
753 if (!(si->flags & SWP_WRITEOK))
754 return false;
755
756 /*
757 * The first allocation in a cluster makes the
758 * cluster exclusive to this order
759 */
760 if (cluster_is_empty(ci))
761 ci->order = order;
762
763 memset(si->swap_map + start, usage, nr_pages);
764 swap_range_alloc(si, nr_pages);
765 ci->count += nr_pages;
766
767 return true;
768 }
769
770 /* Try use a new cluster for current CPU and allocate from it. */
771 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
772 struct swap_cluster_info *ci,
773 unsigned long offset,
774 unsigned int order,
775 unsigned char usage)
776 {
777 unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
778 unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
779 unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
780 unsigned int nr_pages = 1 << order;
781 bool need_reclaim, ret;
782
783 lockdep_assert_held(&ci->lock);
784
785 if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER)
786 goto out;
787
788 for (end -= nr_pages; offset <= end; offset += nr_pages) {
789 need_reclaim = false;
790 if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
791 continue;
792 if (need_reclaim) {
793 ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
794 /*
795 * Reclaim drops ci->lock, so the cluster could be used
796 * by another order. The flag is not checked here because an
797 * off-list cluster has no flag set, and a change of list
798 * won't cause fragmentation.
799 */
800 if (!cluster_is_usable(ci, order))
801 goto out;
802 if (cluster_is_empty(ci))
803 offset = start;
804 /* Reclaim failed but cluster is usable, try next */
805 if (!ret)
806 continue;
807 }
808 if (!cluster_alloc_range(si, ci, offset, usage, order))
809 break;
810 found = offset;
811 offset += nr_pages;
812 if (ci->count < SWAPFILE_CLUSTER && offset <= end)
813 next = offset;
814 break;
815 }
816 out:
817 relocate_cluster(si, ci);
818 unlock_cluster(ci);
819 if (si->flags & SWP_SOLIDSTATE) {
820 this_cpu_write(percpu_swap_cluster.offset[order], next);
821 this_cpu_write(percpu_swap_cluster.si[order], si);
822 } else {
823 si->global_cluster->next[order] = next;
824 }
825 return found;
826 }
827
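/*
 * Walk the full clusters list and reclaim swap-cache-only slots. Scans
 * roughly one cluster, or, if @force, up to the whole device's worth of
 * in-use clusters.
 */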
828 static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
829 {
830 long to_scan = 1;
831 unsigned long offset, end;
832 struct swap_cluster_info *ci;
833 unsigned char *map = si->swap_map;
834 int nr_reclaim;
835
836 if (force)
837 to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER;
838
839 while ((ci = isolate_lock_cluster(si, &si->full_clusters))) {
840 offset = cluster_offset(si, ci);
841 end = min(si->max, offset + SWAPFILE_CLUSTER);
842 to_scan--;
843
844 while (offset < end) {
845 if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
846 spin_unlock(&ci->lock);
847 nr_reclaim = __try_to_reclaim_swap(si, offset,
848 TTRS_ANYWAY);
849 spin_lock(&ci->lock);
850 if (nr_reclaim) {
851 offset += abs(nr_reclaim);
852 continue;
853 }
854 }
855 offset++;
856 }
857
858 /* in case no swap cache is reclaimed */
859 if (ci->flags == CLUSTER_FLAG_NONE)
860 relocate_cluster(si, ci);
861
862 unlock_cluster(ci);
863 if (to_scan <= 0)
864 break;
865 }
866 }
867
868 static void swap_reclaim_work(struct work_struct *work)
869 {
870 struct swap_info_struct *si;
871
872 si = container_of(work, struct swap_info_struct, reclaim_work);
873
874 swap_reclaim_full_clusters(si, true);
875 }
876
877 /*
878 * Try to allocate swap entries with the specified order and try to set a
879 * new cluster for the current CPU too.
880 */
881 static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
882 unsigned char usage)
883 {
884 struct swap_cluster_info *ci;
885 unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
886
887 /*
888 * The swapfile is not a block device, so large
889 * entries cannot be allocated.
890 */
891 if (order && !(si->flags & SWP_BLKDEV))
892 return 0;
893
894 if (!(si->flags & SWP_SOLIDSTATE)) {
895 /* Serialize HDD SWAP allocation for each device. */
896 spin_lock(&si->global_cluster_lock);
897 offset = si->global_cluster->next[order];
898 if (offset == SWAP_ENTRY_INVALID)
899 goto new_cluster;
900
901 ci = lock_cluster(si, offset);
902 /* Cluster could have been used by another order */
903 if (cluster_is_usable(ci, order)) {
904 if (cluster_is_empty(ci))
905 offset = cluster_offset(si, ci);
906 found = alloc_swap_scan_cluster(si, ci, offset,
907 order, usage);
908 } else {
909 unlock_cluster(ci);
910 }
911 if (found)
912 goto done;
913 }
914
915 new_cluster:
916 ci = isolate_lock_cluster(si, &si->free_clusters);
917 if (ci) {
918 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
919 order, usage);
920 if (found)
921 goto done;
922 }
923
924 /* Try reclaim from full clusters if free clusters list is drained */
925 if (vm_swap_full())
926 swap_reclaim_full_clusters(si, false);
927
928 if (order < PMD_ORDER) {
929 unsigned int frags = 0, frags_existing;
930
931 while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
932 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
933 order, usage);
934 if (found)
935 goto done;
936 /* Clusters failed to allocate are moved to frag_clusters */
937 frags++;
938 }
939
940 frags_existing = atomic_long_read(&si->frag_cluster_nr[order]);
941 while (frags < frags_existing &&
942 (ci = isolate_lock_cluster(si, &si->frag_clusters[order]))) {
943 atomic_long_dec(&si->frag_cluster_nr[order]);
944 /*
945 * Rotate the frag list to iterate: these clusters all
946 * failed high order allocation or were moved here due to
947 * per-CPU usage, but they could contain newly released
948 * reclaimable (e.g. lazy-freed swap cache) slots.
949 */
950 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
951 order, usage);
952 if (found)
953 goto done;
954 frags++;
955 }
956 }
957
958 /*
959 * We don't have a free cluster, but some clusters are being
960 * discarded; do the discard now and reclaim them, then
961 * retry the allocation since we dropped the si lock.
962 */
963 if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si))
964 goto new_cluster;
965
966 if (order)
967 goto done;
968
969 /* Order 0 stealing from higher order */
970 for (int o = 1; o < SWAP_NR_ORDERS; o++) {
971 /*
972 * Clusters here have at least one usable slot and can't fail order 0
973 * allocation, but reclaim may drop si->lock and race with another user.
974 */
975 while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
976 atomic_long_dec(&si->frag_cluster_nr[o]);
977 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
978 0, usage);
979 if (found)
980 goto done;
981 }
982
983 while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
984 found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
985 0, usage);
986 if (found)
987 goto done;
988 }
989 }
990 done:
991 if (!(si->flags & SWP_SOLIDSTATE))
992 spin_unlock(&si->global_cluster_lock);
993 return found;
994 }
995
996 /* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */
997 static void del_from_avail_list(struct swap_info_struct *si, bool swapoff)
998 {
999 int nid;
1000 unsigned long pages;
1001
1002 spin_lock(&swap_avail_lock);
1003
1004 if (swapoff) {
1005 /*
1006 * Forcefully remove it. Clear the SWP_WRITEOK flags for
1007 * swapoff here so it's synchronized by both si->lock and
1008 * swap_avail_lock, to ensure the result can be seen by
1009 * add_to_avail_list.
1010 */
1011 lockdep_assert_held(&si->lock);
1012 si->flags &= ~SWP_WRITEOK;
1013 atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
1014 } else {
1015 /*
1016 * If not called by swapoff, take it off-list only if it's
1017 * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly,
1018 * si->inuse_pages == pages). Any concurrent slot freeing,
1019 * or the device already having been removed from the plist
1020 * by someone else, will make the cmpxchg below fail.
1021 */
1022 pages = si->pages;
1023 if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
1024 pages | SWAP_USAGE_OFFLIST_BIT))
1025 goto skip;
1026 }
1027
1028 for_each_node(nid)
1029 plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);
1030
1031 skip:
1032 spin_unlock(&swap_avail_lock);
1033 }
1034
1035 /* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */
1036 static void add_to_avail_list(struct swap_info_struct *si, bool swapon)
1037 {
1038 int nid;
1039 long val;
1040 unsigned long pages;
1041
1042 spin_lock(&swap_avail_lock);
1043
1044 /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */
1045 if (swapon) {
1046 lockdep_assert_held(&si->lock);
1047 si->flags |= SWP_WRITEOK;
1048 } else {
1049 if (!(READ_ONCE(si->flags) & SWP_WRITEOK))
1050 goto skip;
1051 }
1052
1053 if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT))
1054 goto skip;
1055
1056 val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
1057
1058 /*
1059 * When the device is full and on the plist, only one updater will
1060 * see (inuse_pages == si->pages) and will call del_from_avail_list. If
1061 * that updater happens to be here, just skip adding.
1062 */
1063 pages = si->pages;
1064 if (val == pages) {
1065 /* Just like the cmpxchg in del_from_avail_list */
1066 if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
1067 pages | SWAP_USAGE_OFFLIST_BIT))
1068 goto skip;
1069 }
1070
1071 for_each_node(nid)
1072 plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);
1073
1074 skip:
1075 spin_unlock(&swap_avail_lock);
1076 }
1077
1078 /*
1079 * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock
1080 * within each cluster, so the total contribution to the global counter should
1081 * always be positive and cannot exceed the total number of usable slots.
1082 */
1083 static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries)
1084 {
1085 long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages);
1086
1087 /*
1088 * If device is full, and SWAP_USAGE_OFFLIST_BIT is not set,
1089 * remove it from the plist.
1090 */
1091 if (unlikely(val == si->pages)) {
1092 del_from_avail_list(si, false);
1093 return true;
1094 }
1095
1096 return false;
1097 }
1098
1099 static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries)
1100 {
1101 long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages);
1102
1103 /*
1104 * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set,
1105 * add it to the plist.
1106 */
1107 if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
1108 add_to_avail_list(si, false);
1109 }
1110
1111 static void swap_range_alloc(struct swap_info_struct *si,
1112 unsigned int nr_entries)
1113 {
1114 if (swap_usage_add(si, nr_entries)) {
1115 if (vm_swap_full())
1116 schedule_work(&si->reclaim_work);
1117 }
1118 }
1119
1120 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
1121 unsigned int nr_entries)
1122 {
1123 unsigned long begin = offset;
1124 unsigned long end = offset + nr_entries - 1;
1125 void (*swap_slot_free_notify)(struct block_device *, unsigned long);
1126 unsigned int i;
1127
1128 /*
1129 * Use atomic clear_bit operations only on zeromap instead of non-atomic
1130 * bitmap_clear to prevent adjacent bits corruption due to simultaneous writes.
1131 */
1132 for (i = 0; i < nr_entries; i++) {
1133 clear_bit(offset + i, si->zeromap);
1134 zswap_invalidate(swp_entry(si->type, offset + i));
1135 }
1136
1137 if (si->flags & SWP_BLKDEV)
1138 swap_slot_free_notify =
1139 si->bdev->bd_disk->fops->swap_slot_free_notify;
1140 else
1141 swap_slot_free_notify = NULL;
1142 while (offset <= end) {
1143 arch_swap_invalidate_page(si->type, offset);
1144 if (swap_slot_free_notify)
1145 swap_slot_free_notify(si->bdev, offset);
1146 offset++;
1147 }
1148 clear_shadow_from_swap_cache(si->type, begin, end);
1149
1150 /*
1151 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
1152 * only after the above cleanups are done.
1153 */
1154 smp_wmb();
1155 atomic_long_add(nr_entries, &nr_swap_pages);
1156 swap_usage_sub(si, nr_entries);
1157 }
1158
1159 static bool get_swap_device_info(struct swap_info_struct *si)
1160 {
1161 if (!percpu_ref_tryget_live(&si->users))
1162 return false;
1163 /*
1164 * Guarantee that si->users is checked before accessing other
1165 * fields of swap_info_struct, and that si->flags (SWP_WRITEOK) is
1166 * up to date.
1167 *
1168 * Paired with the spin_unlock() after setup_swap_info() in
1169 * enable_swap_info(), and smp_wmb() in swapoff.
1170 */
1171 smp_rmb();
1172 return true;
1173 }
1174
1175 /*
1176 * Fast path: try to get swap entries with the specified order from current
1177 * CPU's swap entry pool (a cluster).
1178 */
1179 static bool swap_alloc_fast(swp_entry_t *entry,
1180 int order)
1181 {
1182 struct swap_cluster_info *ci;
1183 struct swap_info_struct *si;
1184 unsigned int offset, found = SWAP_ENTRY_INVALID;
1185
1186 /*
1187 * Once allocated, swap_info_struct will never be completely freed,
1188 * so checking its liveness with get_swap_device_info() is enough.
1189 */
1190 si = this_cpu_read(percpu_swap_cluster.si[order]);
1191 offset = this_cpu_read(percpu_swap_cluster.offset[order]);
1192 if (!si || !offset || !get_swap_device_info(si))
1193 return false;
1194
1195 ci = lock_cluster(si, offset);
1196 if (cluster_is_usable(ci, order)) {
1197 if (cluster_is_empty(ci))
1198 offset = cluster_offset(si, ci);
1199 found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE);
1200 if (found)
1201 *entry = swp_entry(si->type, found);
1202 } else {
1203 unlock_cluster(ci);
1204 }
1205
1206 put_swap_device(si);
1207 return !!found;
1208 }
1209
1210 /* Rotate the device and switch to a new cluster */
1211 static bool swap_alloc_slow(swp_entry_t *entry,
1212 int order)
1213 {
1214 int node;
1215 unsigned long offset;
1216 struct swap_info_struct *si, *next;
1217
1218 node = numa_node_id();
1219 spin_lock(&swap_avail_lock);
1220 start_over:
1221 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
1222 /* Rotate the device and switch to a new cluster */
1223 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
1224 spin_unlock(&swap_avail_lock);
1225 if (get_swap_device_info(si)) {
1226 offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE);
1227 put_swap_device(si);
1228 if (offset) {
1229 *entry = swp_entry(si->type, offset);
1230 return true;
1231 }
1232 if (order)
1233 return false;
1234 }
1235
1236 spin_lock(&swap_avail_lock);
1237 /*
1238 * if we got here, it's likely that si was almost full before,
1239 * and since scan_swap_map_slots() can drop the si->lock,
1240 * multiple callers probably all tried to get a page from the
1241 * same si and it filled up before we could get one; or, the si
1242 * filled up between us dropping swap_avail_lock and taking
1243 * si->lock. Since we dropped the swap_avail_lock, the
1244 * swap_avail_head list may have been modified; so if next is
1245 * still in the swap_avail_head list then try it, otherwise
1246 * start over if we have not gotten any slots.
1247 */
1248 if (plist_node_empty(&next->avail_lists[node]))
1249 goto start_over;
1250 }
1251 spin_unlock(&swap_avail_lock);
1252 return false;
1253 }
1254
1255 /**
1256 * folio_alloc_swap - allocate swap space for a folio
1257 * @folio: folio we want to move to swap
1258 * @gfp: gfp mask for shadow nodes
1259 *
1260 * Allocate swap space for the folio and add the folio to the
1261 * swap cache.
1262 *
1263 * Context: Caller needs to hold the folio lock.
1264 * Return: Whether the folio was added to the swap cache.
1265 */
1266 int folio_alloc_swap(struct folio *folio, gfp_t gfp)
1267 {
1268 unsigned int order = folio_order(folio);
1269 unsigned int size = 1 << order;
1270 swp_entry_t entry = {};
1271
1272 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1273 VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
1274
1275 if (order) {
1276 /*
1277 * Reject large allocations when THP_SWAP is disabled;
1278 * the caller should split the folio and try again.
1279 */
1280 if (!IS_ENABLED(CONFIG_THP_SWAP))
1281 return -EAGAIN;
1282
1283 /*
1284 * Allocation size should never exceed cluster size
1285 * (HPAGE_PMD_SIZE).
1286 */
1287 if (size > SWAPFILE_CLUSTER) {
1288 VM_WARN_ON_ONCE(1);
1289 return -EINVAL;
1290 }
1291 }
1292
1293 local_lock(&percpu_swap_cluster.lock);
1294 if (!swap_alloc_fast(&entry, order))
1295 swap_alloc_slow(&entry, order);
1296 local_unlock(&percpu_swap_cluster.lock);
1297
1298 /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */
1299 if (mem_cgroup_try_charge_swap(folio, entry))
1300 goto out_free;
1301
1302 if (!entry.val)
1303 return -ENOMEM;
1304
1305 /*
1306 * XArray node allocations from PF_MEMALLOC contexts could
1307 * completely exhaust the page allocator. __GFP_NOMEMALLOC
1308 * stops emergency reserves from being allocated.
1309 *
1310 * TODO: this could cause a theoretical memory reclaim
1311 * deadlock in the swap out path.
1312 */
1313 if (add_to_swap_cache(folio, entry, gfp | __GFP_NOMEMALLOC, NULL))
1314 goto out_free;
1315
1316 atomic_long_sub(size, &nr_swap_pages);
1317 return 0;
1318
1319 out_free:
1320 put_swap_folio(folio, entry);
1321 return -ENOMEM;
1322 }
1323
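/*
 * Sanity-check a swap entry and return its swap_info_struct, or NULL
 * (with an error message) if the entry or device is invalid. This does
 * not pin the device against swapoff; see get_swap_device() for that.
 */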
1324 static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
1325 {
1326 struct swap_info_struct *si;
1327 unsigned long offset;
1328
1329 if (!entry.val)
1330 goto out;
1331 si = swp_swap_info(entry);
1332 if (!si)
1333 goto bad_nofile;
1334 if (data_race(!(si->flags & SWP_USED)))
1335 goto bad_device;
1336 offset = swp_offset(entry);
1337 if (offset >= si->max)
1338 goto bad_offset;
1339 if (data_race(!si->swap_map[swp_offset(entry)]))
1340 goto bad_free;
1341 return si;
1342
1343 bad_free:
1344 pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
1345 goto out;
1346 bad_offset:
1347 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1348 goto out;
1349 bad_device:
1350 pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
1351 goto out;
1352 bad_nofile:
1353 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1354 out:
1355 return NULL;
1356 }
1357
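/*
 * Drop one reference of type @usage from the slot at @offset and return
 * the remaining usage. When nothing remains, zero is returned while the
 * slot is temporarily left as SWAP_HAS_CACHE until it is actually freed.
 */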
1358 static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
1359 unsigned long offset,
1360 unsigned char usage)
1361 {
1362 unsigned char count;
1363 unsigned char has_cache;
1364
1365 count = si->swap_map[offset];
1366
1367 has_cache = count & SWAP_HAS_CACHE;
1368 count &= ~SWAP_HAS_CACHE;
1369
1370 if (usage == SWAP_HAS_CACHE) {
1371 VM_BUG_ON(!has_cache);
1372 has_cache = 0;
1373 } else if (count == SWAP_MAP_SHMEM) {
1374 /*
1375 * Or we could insist on shmem.c using a special
1376 * swap_shmem_free() and free_shmem_swap_and_cache()...
1377 */
1378 count = 0;
1379 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
1380 if (count == COUNT_CONTINUED) {
1381 if (swap_count_continued(si, offset, count))
1382 count = SWAP_MAP_MAX | COUNT_CONTINUED;
1383 else
1384 count = SWAP_MAP_MAX;
1385 } else
1386 count--;
1387 }
1388
1389 usage = count | has_cache;
1390 if (usage)
1391 WRITE_ONCE(si->swap_map[offset], usage);
1392 else
1393 WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);
1394
1395 return usage;
1396 }
1397
1398 /*
1399 * When we get a swap entry, if there isn't some other way to
1400 * prevent swapoff, such as the folio in the swap cache being locked,
1401 * the RCU reader side being locked, etc., the swap entry may become invalid
1402 * because of swapoff. Then, we need to enclose all swap related
1403 * functions with get_swap_device() and put_swap_device(), unless the
1404 * swap functions call get/put_swap_device() by themselves.
1405 *
1406 * RCU reader side lock (including any spinlock) is sufficient to
1407 * prevent swapoff, because synchronize_rcu() is called in swapoff()
1408 * before freeing data structures.
1409 *
1410 * Check whether swap entry is valid in the swap device. If so,
1411 * return pointer to swap_info_struct, and keep the swap entry valid
1412 * via preventing the swap device from being swapoff, until
1413 * put_swap_device() is called. Otherwise return NULL.
1414 *
1415 * Notice that swapoff or swapoff+swapon can still happen before the
1416 * percpu_ref_tryget_live() in get_swap_device() or after the
1417 * percpu_ref_put() in put_swap_device() if there isn't any other way
1418 * to prevent swapoff. The caller must be prepared for that. For
1419 * example, the following situation is possible.
1420 *
1421 * CPU1 CPU2
1422 * do_swap_page()
1423 * ... swapoff+swapon
1424 * __read_swap_cache_async()
1425 * swapcache_prepare()
1426 * __swap_duplicate()
1427 * // check swap_map
1428 * // verify PTE not changed
1429 *
1430 * In __swap_duplicate(), the swap_map needs to be checked before
1431 * changing, partly because the specified swap entry may be for another
1432 * swap device which has been swapoff. And in do_swap_page(), after
1433 * the page is read from the swap device, the PTE is verified not
1434 * changed with the page table locked to check whether the swap device
1435 * has been swapoff or swapoff+swapon.
1436 */
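/*
 * A minimal illustrative sketch of the usual calling pattern, assuming
 * the caller holds no other protection against swapoff:
 *
 *	si = get_swap_device(entry);
 *	if (!si)
 *		return;		// stale entry or device went away
 *	... access si->swap_map etc. ...
 *	put_swap_device(si);
 */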
1437 struct swap_info_struct *get_swap_device(swp_entry_t entry)
1438 {
1439 struct swap_info_struct *si;
1440 unsigned long offset;
1441
1442 if (!entry.val)
1443 goto out;
1444 si = swp_swap_info(entry);
1445 if (!si)
1446 goto bad_nofile;
1447 if (!get_swap_device_info(si))
1448 goto out;
1449 offset = swp_offset(entry);
1450 if (offset >= si->max)
1451 goto put_out;
1452
1453 return si;
1454 bad_nofile:
1455 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
1456 out:
1457 return NULL;
1458 put_out:
1459 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
1460 percpu_ref_put(&si->users);
1461 return NULL;
1462 }
1463
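/*
 * Drop one map reference from a single entry and free the slot entirely
 * if no usage remains; returns the remaining usage.
 */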
1464 static unsigned char __swap_entry_free(struct swap_info_struct *si,
1465 swp_entry_t entry)
1466 {
1467 struct swap_cluster_info *ci;
1468 unsigned long offset = swp_offset(entry);
1469 unsigned char usage;
1470
1471 ci = lock_cluster(si, offset);
1472 usage = __swap_entry_free_locked(si, offset, 1);
1473 if (!usage)
1474 swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
1475 unlock_cluster(ci);
1476
1477 return usage;
1478 }
1479
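/*
 * Drop the last map reference of @nr contiguous entries in one batch when
 * they all sit in one cluster with a count of one; otherwise fall back to
 * freeing them one by one. Returns true if any entry is left with only
 * the swap cache holding it.
 */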
1480 static bool __swap_entries_free(struct swap_info_struct *si,
1481 swp_entry_t entry, int nr)
1482 {
1483 unsigned long offset = swp_offset(entry);
1484 unsigned int type = swp_type(entry);
1485 struct swap_cluster_info *ci;
1486 bool has_cache = false;
1487 unsigned char count;
1488 int i;
1489
1490 if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
1491 goto fallback;
1492 /* cross into another cluster */
1493 if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
1494 goto fallback;
1495
1496 ci = lock_cluster(si, offset);
1497 if (!swap_is_last_map(si, offset, nr, &has_cache)) {
1498 unlock_cluster(ci);
1499 goto fallback;
1500 }
1501 for (i = 0; i < nr; i++)
1502 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
1503 if (!has_cache)
1504 swap_entry_range_free(si, ci, entry, nr);
1505 unlock_cluster(ci);
1506
1507 return has_cache;
1508
1509 fallback:
1510 for (i = 0; i < nr; i++) {
1511 if (data_race(si->swap_map[offset + i])) {
1512 count = __swap_entry_free(si, swp_entry(type, offset + i));
1513 if (count == SWAP_HAS_CACHE)
1514 has_cache = true;
1515 } else {
1516 WARN_ON_ONCE(1);
1517 }
1518 }
1519 return has_cache;
1520 }
1521
1522 /*
1523 * Drop the last HAS_CACHE flag of swap entries; the caller has to
1524 * ensure all entries belong to the same cgroup.
1525 */
1526 static void swap_entry_range_free(struct swap_info_struct *si,
1527 struct swap_cluster_info *ci,
1528 swp_entry_t entry, unsigned int nr_pages)
1529 {
1530 unsigned long offset = swp_offset(entry);
1531 unsigned char *map = si->swap_map + offset;
1532 unsigned char *map_end = map + nr_pages;
1533
1534 /* It should never free entries across different clusters */
1535 VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
1536 VM_BUG_ON(cluster_is_empty(ci));
1537 VM_BUG_ON(ci->count < nr_pages);
1538
1539 ci->count -= nr_pages;
1540 do {
1541 VM_BUG_ON(*map != SWAP_HAS_CACHE);
1542 *map = 0;
1543 } while (++map < map_end);
1544
1545 mem_cgroup_uncharge_swap(entry, nr_pages);
1546 swap_range_free(si, offset, nr_pages);
1547
1548 if (!ci->count)
1549 free_cluster(si, ci);
1550 else
1551 partial_free_cluster(si, ci);
1552 }
1553
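/*
 * Drop @usage from nr_pages slots within a single cluster, freeing any
 * slot whose usage drops to zero.
 */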
1554 static void cluster_swap_free_nr(struct swap_info_struct *si,
1555 unsigned long offset, int nr_pages,
1556 unsigned char usage)
1557 {
1558 struct swap_cluster_info *ci;
1559 unsigned long end = offset + nr_pages;
1560
1561 ci = lock_cluster(si, offset);
1562 do {
1563 if (!__swap_entry_free_locked(si, offset, usage))
1564 swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
1565 } while (++offset < end);
1566 unlock_cluster(ci);
1567 }
1568
1569 /*
1570 * Caller has made sure that the swap device corresponding to entry
1571 * is still around or has not been recycled.
1572 */
1573 void swap_free_nr(swp_entry_t entry, int nr_pages)
1574 {
1575 int nr;
1576 struct swap_info_struct *sis;
1577 unsigned long offset = swp_offset(entry);
1578
1579 sis = _swap_info_get(entry);
1580 if (!sis)
1581 return;
1582
1583 while (nr_pages) {
1584 nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
1585 cluster_swap_free_nr(sis, offset, nr, 1);
1586 offset += nr;
1587 nr_pages -= nr;
1588 }
1589 }
1590
1591 /*
1592 * Called after dropping swapcache to decrease refcnt to swap entries.
1593 */
1594 void put_swap_folio(struct folio *folio, swp_entry_t entry)
1595 {
1596 unsigned long offset = swp_offset(entry);
1597 struct swap_cluster_info *ci;
1598 struct swap_info_struct *si;
1599 int size = 1 << swap_entry_order(folio_order(folio));
1600
1601 si = _swap_info_get(entry);
1602 if (!si)
1603 return;
1604
1605 ci = lock_cluster(si, offset);
1606 if (swap_only_has_cache(si, offset, size))
1607 swap_entry_range_free(si, ci, entry, size);
1608 else {
1609 for (int i = 0; i < size; i++, entry.val++) {
1610 if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE))
1611 swap_entry_range_free(si, ci, entry, 1);
1612 }
1613 }
1614 unlock_cluster(ci);
1615 }
1616
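/*
 * Return the map count of a single entry without taking the cluster lock;
 * the caller must ensure the swap device cannot go away.
 */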
1617 int __swap_count(swp_entry_t entry)
1618 {
1619 struct swap_info_struct *si = swp_swap_info(entry);
1620 pgoff_t offset = swp_offset(entry);
1621
1622 return swap_count(si->swap_map[offset]);
1623 }
1624
1625 /*
1626 * How many references to @entry are currently swapped out?
1627 * This does not give an exact answer when swap count is continued,
1628 * but does include the high COUNT_CONTINUED flag to allow for that.
1629 */
1630 bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry)
1631 {
1632 pgoff_t offset = swp_offset(entry);
1633 struct swap_cluster_info *ci;
1634 int count;
1635
1636 ci = lock_cluster(si, offset);
1637 count = swap_count(si->swap_map[offset]);
1638 unlock_cluster(ci);
1639 return !!count;
1640 }
1641
1642 /*
1643 * How many references to @entry are currently swapped out?
1644 * This considers COUNT_CONTINUED so it returns exact answer.
1645 */
1646 int swp_swapcount(swp_entry_t entry)
1647 {
1648 int count, tmp_count, n;
1649 struct swap_info_struct *si;
1650 struct swap_cluster_info *ci;
1651 struct page *page;
1652 pgoff_t offset;
1653 unsigned char *map;
1654
1655 si = _swap_info_get(entry);
1656 if (!si)
1657 return 0;
1658
1659 offset = swp_offset(entry);
1660
1661 ci = lock_cluster(si, offset);
1662
1663 count = swap_count(si->swap_map[offset]);
1664 if (!(count & COUNT_CONTINUED))
1665 goto out;
1666
1667 count &= ~COUNT_CONTINUED;
1668 n = SWAP_MAP_MAX + 1;
1669
1670 page = vmalloc_to_page(si->swap_map + offset);
1671 offset &= ~PAGE_MASK;
1672 VM_BUG_ON(page_private(page) != SWP_CONTINUED);
1673
1674 do {
1675 page = list_next_entry(page, lru);
1676 map = kmap_local_page(page);
1677 tmp_count = map[offset];
1678 kunmap_local(map);
1679
1680 count += (tmp_count & ~COUNT_CONTINUED) * n;
1681 n *= (SWAP_CONT_MAX + 1);
1682 } while (tmp_count & COUNT_CONTINUED);
1683 out:
1684 unlock_cluster(ci);
1685 return count;
1686 }
1687
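/*
 * Check whether any page of the naturally aligned 1 << order range that
 * contains @entry still has a swap map count (i.e. is swapped out).
 */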
1688 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1689 swp_entry_t entry, int order)
1690 {
1691 struct swap_cluster_info *ci;
1692 unsigned char *map = si->swap_map;
1693 unsigned int nr_pages = 1 << order;
1694 unsigned long roffset = swp_offset(entry);
1695 unsigned long offset = round_down(roffset, nr_pages);
1696 int i;
1697 bool ret = false;
1698
1699 ci = lock_cluster(si, offset);
1700 if (nr_pages == 1) {
1701 if (swap_count(map[roffset]))
1702 ret = true;
1703 goto unlock_out;
1704 }
1705 for (i = 0; i < nr_pages; i++) {
1706 if (swap_count(map[offset + i])) {
1707 ret = true;
1708 break;
1709 }
1710 }
1711 unlock_out:
1712 unlock_cluster(ci);
1713 return ret;
1714 }
1715
1716 static bool folio_swapped(struct folio *folio)
1717 {
1718 swp_entry_t entry = folio->swap;
1719 struct swap_info_struct *si = _swap_info_get(entry);
1720
1721 if (!si)
1722 return false;
1723
1724 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1725 return swap_entry_swapped(si, entry);
1726
1727 return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
1728 }
1729
1730 static bool folio_swapcache_freeable(struct folio *folio)
1731 {
1732 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1733
1734 if (!folio_test_swapcache(folio))
1735 return false;
1736 if (folio_test_writeback(folio))
1737 return false;
1738
1739 /*
1740 * Once hibernation has begun to create its image of memory,
1741 * there's a danger that one of the calls to folio_free_swap()
1742 * - most probably a call from __try_to_reclaim_swap() while
1743 * hibernation is allocating its own swap pages for the image,
1744 * but conceivably even a call from memory reclaim - will free
1745 * the swap from a folio which has already been recorded in the
1746 * image as a clean swapcache folio, and then reuse its swap for
1747 * another page of the image. On waking from hibernation, the
1748 * original folio might be freed under memory pressure, then
1749 * later read back in from swap, now with the wrong data.
1750 *
1751 * Hibernation suspends storage while it is writing the image
1752 * to disk so check that here.
1753 */
1754 if (pm_suspended_storage())
1755 return false;
1756
1757 return true;
1758 }
1759
1760 /**
1761 * folio_free_swap() - Free the swap space used for this folio.
1762 * @folio: The folio to remove.
1763 *
1764 * If swap is getting full, or if there are no more mappings of this folio,
1765 * then call folio_free_swap to free its swap space.
1766 *
1767 * Return: true if we were able to release the swap space.
1768 */
1769 bool folio_free_swap(struct folio *folio)
1770 {
1771 if (!folio_swapcache_freeable(folio))
1772 return false;
1773 if (folio_swapped(folio))
1774 return false;
1775
1776 delete_from_swap_cache(folio);
1777 folio_set_dirty(folio);
1778 return true;
1779 }
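/*
 * A minimal, hypothetical caller sketch for folio_free_swap(); the folio
 * must already be locked, as folio_swapcache_freeable() asserts:
 *
 *	folio_lock(folio);
 *	if (folio_free_swap(folio))
 *		freed++;	// slot released, folio left dirty
 *	folio_unlock(folio);
 *
 * The folio is redirtied on success so its data is not lost: the on-disk
 * copy has just been freed, so reclaim must write it out again before it
 * can drop the folio.
 */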
1780
1781 /**
1782 * free_swap_and_cache_nr() - Release reference on range of swap entries and
1783 * reclaim their cache if no more references remain.
1784 * @entry: First entry of range.
1785 * @nr: Number of entries in range.
1786 *
1787 * For each swap entry in the contiguous range, release a reference. If any swap
1788 * entries become free, try to reclaim their underlying folios, if present. The
1789 * offset range is defined by [entry.offset, entry.offset + nr).
1790 */
1791 void free_swap_and_cache_nr(swp_entry_t entry, int nr)
1792 {
1793 const unsigned long start_offset = swp_offset(entry);
1794 const unsigned long end_offset = start_offset + nr;
1795 struct swap_info_struct *si;
1796 bool any_only_cache = false;
1797 unsigned long offset;
1798
1799 si = get_swap_device(entry);
1800 if (!si)
1801 return;
1802
1803 if (WARN_ON(end_offset > si->max))
1804 goto out;
1805
1806 /*
1807 * First free all entries in the range.
1808 */
1809 any_only_cache = __swap_entries_free(si, entry, nr);
1810
1811 /*
1812 * Short-circuit the below loop if none of the entries had their
1813 * reference drop to zero.
1814 */
1815 if (!any_only_cache)
1816 goto out;
1817
1818 /*
1819 * Now go back over the range trying to reclaim the swap cache. This is
1820 * more efficient for large folios because we will only try to reclaim
1821 * the swap once per folio in the common case. If we do
1822 * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the
1823 * latter will get a reference and lock the folio for every individual
1824 * page but will only succeed once the swap slot for every subpage is
1825 * zero.
1826 */
1827 for (offset = start_offset; offset < end_offset; offset += nr) {
1828 nr = 1;
1829 if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
1830 /*
1831 * Folios are always naturally aligned in swap so
1832 * advance forward to the next boundary. Zero means no
1833 * folio was found for the swap entry, so advance by 1
1834 * in this case. Negative value means folio was found
1835 * but could not be reclaimed. Here we can still advance
1836 * to the next boundary.
1837 */
1838 nr = __try_to_reclaim_swap(si, offset,
1839 TTRS_UNMAPPED | TTRS_FULL);
1840 if (nr == 0)
1841 nr = 1;
1842 else if (nr < 0)
1843 nr = -nr;
1844 nr = ALIGN(offset + 1, nr) - offset;
1845 }
1846 }
1847
1848 out:
1849 put_swap_device(si);
1850 }
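/*
 * Example of the stride logic above, assuming a 16-page folio occupying
 * offsets 32..47: if __try_to_reclaim_swap() reports 16 for offset 32
 * (or -16 if the folio could not be reclaimed), then
 * ALIGN(33, 16) - 32 == 16, so the next iteration jumps straight to
 * offset 48 instead of probing each subpage individually.
 */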
1851
1852 #ifdef CONFIG_HIBERNATION
1853
1854 swp_entry_t get_swap_page_of_type(int type)
1855 {
1856 struct swap_info_struct *si = swap_type_to_swap_info(type);
1857 unsigned long offset;
1858 swp_entry_t entry = {0};
1859
1860 if (!si)
1861 goto fail;
1862
1863 /* This is called for allocating swap entry, not cache */
1864 if (get_swap_device_info(si)) {
1865 if (si->flags & SWP_WRITEOK) {
1866 offset = cluster_alloc_swap_entry(si, 0, 1);
1867 if (offset) {
1868 entry = swp_entry(si->type, offset);
1869 atomic_long_dec(&nr_swap_pages);
1870 }
1871 }
1872 put_swap_device(si);
1873 }
1874 fail:
1875 return entry;
1876 }
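/*
 * Used by the hibernation code to carve swap slots for the image out of
 * one specific device; the slot is charged against nr_swap_pages like a
 * normal allocation and is expected to be handed back via swap_free()
 * once the image has been written or discarded.
 */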
1877
1878 /*
1879 * Find the swap type that corresponds to given device (if any).
1880 *
1881 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1882 * from 0, in which the swap header is expected to be located.
1883 *
1884 * This is needed for the suspend to disk (aka swsusp).
1885 */
1886 int swap_type_of(dev_t device, sector_t offset)
1887 {
1888 int type;
1889
1890 if (!device)
1891 return -1;
1892
1893 spin_lock(&swap_lock);
1894 for (type = 0; type < nr_swapfiles; type++) {
1895 struct swap_info_struct *sis = swap_info[type];
1896
1897 if (!(sis->flags & SWP_WRITEOK))
1898 continue;
1899
1900 if (device == sis->bdev->bd_dev) {
1901 struct swap_extent *se = first_se(sis);
1902
1903 if (se->start_block == offset) {
1904 spin_unlock(&swap_lock);
1905 return type;
1906 }
1907 }
1908 }
1909 spin_unlock(&swap_lock);
1910 return -ENODEV;
1911 }
1912
1913 int find_first_swap(dev_t *device)
1914 {
1915 int type;
1916
1917 spin_lock(&swap_lock);
1918 for (type = 0; type < nr_swapfiles; type++) {
1919 struct swap_info_struct *sis = swap_info[type];
1920
1921 if (!(sis->flags & SWP_WRITEOK))
1922 continue;
1923 *device = sis->bdev->bd_dev;
1924 spin_unlock(&swap_lock);
1925 return type;
1926 }
1927 spin_unlock(&swap_lock);
1928 return -ENODEV;
1929 }
1930
1931 /*
1932 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
1933 * corresponding to given index in swap_info (swap type).
1934 */
1935 sector_t swapdev_block(int type, pgoff_t offset)
1936 {
1937 struct swap_info_struct *si = swap_type_to_swap_info(type);
1938 struct swap_extent *se;
1939
1940 if (!si || !(si->flags & SWP_WRITEOK))
1941 return 0;
1942 se = offset_to_swap_extent(si, offset);
1943 return se->start_block + (offset - se->start_page);
1944 }
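/*
 * Example of the extent lookup above (hypothetical numbers): if an extent
 * covers start_page = 100, nr_pages = 50 and begins at start_block = 9000,
 * then offset 130 maps to block 9000 + (130 - 100) = 9030.
 */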
1945
1946 /*
1947 * Return either the total number of swap pages of given type, or the number
1948 * of free pages of that type (depending on @free)
1949 *
1950 * This is needed for software suspend
1951 */
1952 unsigned int count_swap_pages(int type, int free)
1953 {
1954 unsigned int n = 0;
1955
1956 spin_lock(&swap_lock);
1957 if ((unsigned int)type < nr_swapfiles) {
1958 struct swap_info_struct *sis = swap_info[type];
1959
1960 spin_lock(&sis->lock);
1961 if (sis->flags & SWP_WRITEOK) {
1962 n = sis->pages;
1963 if (free)
1964 n -= swap_usage_in_pages(sis);
1965 }
1966 spin_unlock(&sis->lock);
1967 }
1968 spin_unlock(&swap_lock);
1969 return n;
1970 }
1971 #endif /* CONFIG_HIBERNATION */
1972
1973 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1974 {
1975 return pte_same(pte_swp_clear_flags(pte), swp_pte);
1976 }
1977
1978 /*
1979 * No need to decide whether this PTE shares the swap entry with others,
1980 * just let do_wp_page work it out if a write is requested later - to
1981 * force COW, vm_page_prot omits write permission from any private vma.
1982 */
1983 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1984 unsigned long addr, swp_entry_t entry, struct folio *folio)
1985 {
1986 struct page *page;
1987 struct folio *swapcache;
1988 spinlock_t *ptl;
1989 pte_t *pte, new_pte, old_pte;
1990 bool hwpoisoned = false;
1991 int ret = 1;
1992
1993 swapcache = folio;
1994 folio = ksm_might_need_to_copy(folio, vma, addr);
1995 if (unlikely(!folio))
1996 return -ENOMEM;
1997 else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
1998 hwpoisoned = true;
1999 folio = swapcache;
2000 }
2001
2002 page = folio_file_page(folio, swp_offset(entry));
2003 if (PageHWPoison(page))
2004 hwpoisoned = true;
2005
2006 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
2007 if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
2008 swp_entry_to_pte(entry)))) {
2009 ret = 0;
2010 goto out;
2011 }
2012
2013 old_pte = ptep_get(pte);
2014
2015 if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
2016 swp_entry_t swp_entry;
2017
2018 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2019 if (hwpoisoned) {
2020 swp_entry = make_hwpoison_entry(page);
2021 } else {
2022 swp_entry = make_poisoned_swp_entry();
2023 }
2024 new_pte = swp_entry_to_pte(swp_entry);
2025 ret = 0;
2026 goto setpte;
2027 }
2028
2029 /*
2030 * Some architectures may have to restore extra metadata to the page
2031 * when reading from swap. This metadata may be indexed by swap entry
2032 * so this must be called before swap_free().
2033 */
2034 arch_swap_restore(folio_swap(entry, folio), folio);
2035
2036 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2037 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
2038 folio_get(folio);
2039 if (folio == swapcache) {
2040 rmap_t rmap_flags = RMAP_NONE;
2041
2042 /*
2043 * See do_swap_page(): writeback would be problematic.
2044 * However, we do a folio_wait_writeback() just before this
2045 * call and have the folio locked.
2046 */
2047 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2048 if (pte_swp_exclusive(old_pte))
2049 rmap_flags |= RMAP_EXCLUSIVE;
2050 /*
2051 * We currently only expect small !anon folios, which are either
2052 * fully exclusive or fully shared. If we ever get large folios
2053 * here, we have to be careful.
2054 */
2055 if (!folio_test_anon(folio)) {
2056 VM_WARN_ON_ONCE(folio_test_large(folio));
2057 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2058 folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
2059 } else {
2060 folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
2061 }
2062 } else { /* ksm created a completely new copy */
2063 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
2064 folio_add_lru_vma(folio, vma);
2065 }
2066 new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
2067 if (pte_swp_soft_dirty(old_pte))
2068 new_pte = pte_mksoft_dirty(new_pte);
2069 if (pte_swp_uffd_wp(old_pte))
2070 new_pte = pte_mkuffd_wp(new_pte);
2071 setpte:
2072 set_pte_at(vma->vm_mm, addr, pte, new_pte);
2073 swap_free(entry);
2074 out:
2075 if (pte)
2076 pte_unmap_unlock(pte, ptl);
2077 if (folio != swapcache) {
2078 folio_unlock(folio);
2079 folio_put(folio);
2080 }
2081 return ret;
2082 }
2083
2084 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
2085 unsigned long addr, unsigned long end,
2086 unsigned int type)
2087 {
2088 pte_t *pte = NULL;
2089 struct swap_info_struct *si;
2090
2091 si = swap_info[type];
2092 do {
2093 struct folio *folio;
2094 unsigned long offset;
2095 unsigned char swp_count;
2096 swp_entry_t entry;
2097 int ret;
2098 pte_t ptent;
2099
2100 if (!pte++) {
2101 pte = pte_offset_map(pmd, addr);
2102 if (!pte)
2103 break;
2104 }
2105
2106 ptent = ptep_get_lockless(pte);
2107
2108 if (!is_swap_pte(ptent))
2109 continue;
2110
2111 entry = pte_to_swp_entry(ptent);
2112 if (swp_type(entry) != type)
2113 continue;
2114
2115 offset = swp_offset(entry);
2116 pte_unmap(pte);
2117 pte = NULL;
2118
2119 folio = swap_cache_get_folio(entry, vma, addr);
2120 if (!folio) {
2121 struct vm_fault vmf = {
2122 .vma = vma,
2123 .address = addr,
2124 .real_address = addr,
2125 .pmd = pmd,
2126 };
2127
2128 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
2129 &vmf);
2130 }
2131 if (!folio) {
2132 swp_count = READ_ONCE(si->swap_map[offset]);
2133 if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
2134 continue;
2135 return -ENOMEM;
2136 }
2137
2138 folio_lock(folio);
2139 folio_wait_writeback(folio);
2140 ret = unuse_pte(vma, pmd, addr, entry, folio);
2141 if (ret < 0) {
2142 folio_unlock(folio);
2143 folio_put(folio);
2144 return ret;
2145 }
2146
2147 folio_free_swap(folio);
2148 folio_unlock(folio);
2149 folio_put(folio);
2150 } while (addr += PAGE_SIZE, addr != end);
2151
2152 if (pte)
2153 pte_unmap(pte);
2154 return 0;
2155 }
2156
2157 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2158 unsigned long addr, unsigned long end,
2159 unsigned int type)
2160 {
2161 pmd_t *pmd;
2162 unsigned long next;
2163 int ret;
2164
2165 pmd = pmd_offset(pud, addr);
2166 do {
2167 cond_resched();
2168 next = pmd_addr_end(addr, end);
2169 ret = unuse_pte_range(vma, pmd, addr, next, type);
2170 if (ret)
2171 return ret;
2172 } while (pmd++, addr = next, addr != end);
2173 return 0;
2174 }
2175
2176 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2177 unsigned long addr, unsigned long end,
2178 unsigned int type)
2179 {
2180 pud_t *pud;
2181 unsigned long next;
2182 int ret;
2183
2184 pud = pud_offset(p4d, addr);
2185 do {
2186 next = pud_addr_end(addr, end);
2187 if (pud_none_or_clear_bad(pud))
2188 continue;
2189 ret = unuse_pmd_range(vma, pud, addr, next, type);
2190 if (ret)
2191 return ret;
2192 } while (pud++, addr = next, addr != end);
2193 return 0;
2194 }
2195
2196 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2197 unsigned long addr, unsigned long end,
2198 unsigned int type)
2199 {
2200 p4d_t *p4d;
2201 unsigned long next;
2202 int ret;
2203
2204 p4d = p4d_offset(pgd, addr);
2205 do {
2206 next = p4d_addr_end(addr, end);
2207 if (p4d_none_or_clear_bad(p4d))
2208 continue;
2209 ret = unuse_pud_range(vma, p4d, addr, next, type);
2210 if (ret)
2211 return ret;
2212 } while (p4d++, addr = next, addr != end);
2213 return 0;
2214 }
2215
2216 static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
2217 {
2218 pgd_t *pgd;
2219 unsigned long addr, end, next;
2220 int ret;
2221
2222 addr = vma->vm_start;
2223 end = vma->vm_end;
2224
2225 pgd = pgd_offset(vma->vm_mm, addr);
2226 do {
2227 next = pgd_addr_end(addr, end);
2228 if (pgd_none_or_clear_bad(pgd))
2229 continue;
2230 ret = unuse_p4d_range(vma, pgd, addr, next, type);
2231 if (ret)
2232 return ret;
2233 } while (pgd++, addr = next, addr != end);
2234 return 0;
2235 }
2236
2237 static int unuse_mm(struct mm_struct *mm, unsigned int type)
2238 {
2239 struct vm_area_struct *vma;
2240 int ret = 0;
2241 VMA_ITERATOR(vmi, mm, 0);
2242
2243 mmap_read_lock(mm);
2244 for_each_vma(vmi, vma) {
2245 if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
2246 ret = unuse_vma(vma, type);
2247 if (ret)
2248 break;
2249 }
2250
2251 cond_resched();
2252 }
2253 mmap_read_unlock(mm);
2254 return ret;
2255 }
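/*
 * The helpers above mirror the page table layout: unuse_mm() walks each
 * anonymous VMA, and unuse_vma()/unuse_p4d_range()/unuse_pud_range()/
 * unuse_pmd_range() descend level by level until unuse_pte_range() can
 * inspect individual PTEs for entries belonging to the swap type being
 * switched off.
 */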
2256
2257 /*
2258 * Scan swap_map from current position to next entry still in use.
2259 * Return 0 if there are no in-use entries from prev to the end of
2260 * the map.
2261 */
2262 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2263 unsigned int prev)
2264 {
2265 unsigned int i;
2266 unsigned char count;
2267
2268 /*
2269 * No need for swap_lock here: we're just looking
2270 * for whether an entry is in use, not modifying it; false
2271 * hits are okay, and sys_swapoff() has already prevented new
2272 * allocations from this area (while holding swap_lock).
2273 */
2274 for (i = prev + 1; i < si->max; i++) {
2275 count = READ_ONCE(si->swap_map[i]);
2276 if (count && swap_count(count) != SWAP_MAP_BAD)
2277 break;
2278 if ((i % LATENCY_LIMIT) == 0)
2279 cond_resched();
2280 }
2281
2282 if (i == si->max)
2283 i = 0;
2284
2285 return i;
2286 }
2287
2288 static int try_to_unuse(unsigned int type)
2289 {
2290 struct mm_struct *prev_mm;
2291 struct mm_struct *mm;
2292 struct list_head *p;
2293 int retval = 0;
2294 struct swap_info_struct *si = swap_info[type];
2295 struct folio *folio;
2296 swp_entry_t entry;
2297 unsigned int i;
2298
2299 if (!swap_usage_in_pages(si))
2300 goto success;
2301
2302 retry:
2303 retval = shmem_unuse(type);
2304 if (retval)
2305 return retval;
2306
2307 prev_mm = &init_mm;
2308 mmget(prev_mm);
2309
2310 spin_lock(&mmlist_lock);
2311 p = &init_mm.mmlist;
2312 while (swap_usage_in_pages(si) &&
2313 !signal_pending(current) &&
2314 (p = p->next) != &init_mm.mmlist) {
2315
2316 mm = list_entry(p, struct mm_struct, mmlist);
2317 if (!mmget_not_zero(mm))
2318 continue;
2319 spin_unlock(&mmlist_lock);
2320 mmput(prev_mm);
2321 prev_mm = mm;
2322 retval = unuse_mm(mm, type);
2323 if (retval) {
2324 mmput(prev_mm);
2325 return retval;
2326 }
2327
2328 /*
2329 * Make sure that we aren't completely killing
2330 * interactive performance.
2331 */
2332 cond_resched();
2333 spin_lock(&mmlist_lock);
2334 }
2335 spin_unlock(&mmlist_lock);
2336
2337 mmput(prev_mm);
2338
2339 i = 0;
2340 while (swap_usage_in_pages(si) &&
2341 !signal_pending(current) &&
2342 (i = find_next_to_unuse(si, i)) != 0) {
2343
2344 entry = swp_entry(type, i);
2345 folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
2346 if (IS_ERR(folio))
2347 continue;
2348
2349 /*
2350 * It is conceivable that a racing task removed this folio from
2351 * swap cache just before we acquired the page lock. The folio
2352 * might even be back in swap cache on another swap area. But
2353 * that is okay, folio_free_swap() only removes stale folios.
2354 */
2355 folio_lock(folio);
2356 folio_wait_writeback(folio);
2357 folio_free_swap(folio);
2358 folio_unlock(folio);
2359 folio_put(folio);
2360 }
2361
2362 /*
2363 * Let's check again to see if there are still swap entries in the map.
2364 * If so, we need to retry the unuse logic.
2365 * Under global memory pressure, swap entries can be reinserted back
2366 * into process space after the mmlist loop above passes over them.
2367 *
2368 * Limit the number of retries? No: when mmget_not_zero()
2369 * above fails, that mm is likely to be freeing swap from
2370 * exit_mmap(), which proceeds at its own independent pace;
2371 * and even shmem_writepage() could have been preempted after
2372 * folio_alloc_swap(), temporarily hiding that swap. It's easy
2373 * and robust (though cpu-intensive) just to keep retrying.
2374 */
2375 if (swap_usage_in_pages(si)) {
2376 if (!signal_pending(current))
2377 goto retry;
2378 return -EINTR;
2379 }
2380
2381 success:
2382 /*
2383 * Make sure that further cleanups after try_to_unuse() returns happen
2384 * after swap_range_free() reduces si->inuse_pages to 0.
2385 */
2386 smp_mb();
2387 return 0;
2388 }
2389
2390 /*
2391 * After a successful try_to_unuse, if no swap is now in use, we know
2392 * we can empty the mmlist. swap_lock must be held on entry and exit.
2393 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2394 * added to the mmlist just after page_duplicate - before would be racy.
2395 */
2396 static void drain_mmlist(void)
2397 {
2398 struct list_head *p, *next;
2399 unsigned int type;
2400
2401 for (type = 0; type < nr_swapfiles; type++)
2402 if (swap_usage_in_pages(swap_info[type]))
2403 return;
2404 spin_lock(&mmlist_lock);
2405 list_for_each_safe(p, next, &init_mm.mmlist)
2406 list_del_init(p);
2407 spin_unlock(&mmlist_lock);
2408 }
2409
2410 /*
2411 * Free all of a swapdev's extent information
2412 */
2413 static void destroy_swap_extents(struct swap_info_struct *sis)
2414 {
2415 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2416 struct rb_node *rb = sis->swap_extent_root.rb_node;
2417 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2418
2419 rb_erase(rb, &sis->swap_extent_root);
2420 kfree(se);
2421 }
2422
2423 if (sis->flags & SWP_ACTIVATED) {
2424 struct file *swap_file = sis->swap_file;
2425 struct address_space *mapping = swap_file->f_mapping;
2426
2427 sis->flags &= ~SWP_ACTIVATED;
2428 if (mapping->a_ops->swap_deactivate)
2429 mapping->a_ops->swap_deactivate(swap_file);
2430 }
2431 }
2432
2433 /*
2434 * Add a block range (and the corresponding page range) into this swapdev's
2435 * extent tree.
2436 *
2437 * This function rather assumes that it is called in ascending page order.
2438 */
2439 int
2440 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2441 unsigned long nr_pages, sector_t start_block)
2442 {
2443 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2444 struct swap_extent *se;
2445 struct swap_extent *new_se;
2446
2447 /*
2448 * place the new node at the rightmost position since the
2449 * function is called in ascending page order.
2450 */
2451 while (*link) {
2452 parent = *link;
2453 link = &parent->rb_right;
2454 }
2455
2456 if (parent) {
2457 se = rb_entry(parent, struct swap_extent, rb_node);
2458 BUG_ON(se->start_page + se->nr_pages != start_page);
2459 if (se->start_block + se->nr_pages == start_block) {
2460 /* Merge it */
2461 se->nr_pages += nr_pages;
2462 return 0;
2463 }
2464 }
2465
2466 /* No merge, insert a new extent. */
2467 new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2468 if (new_se == NULL)
2469 return -ENOMEM;
2470 new_se->start_page = start_page;
2471 new_se->nr_pages = nr_pages;
2472 new_se->start_block = start_block;
2473
2474 rb_link_node(&new_se->rb_node, parent, link);
2475 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2476 return 1;
2477 }
2478 EXPORT_SYMBOL_GPL(add_swap_extent);
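/*
 * Merge example for the function above (hypothetical numbers): after
 * add_swap_extent(sis, 0, 256, 1000), a call with start_page == 256 and
 * start_block == 1256 only bumps nr_pages of the existing rightmost extent
 * to 512, because 1000 + 256 == 1256; a call with start_block == 2000
 * instead inserts a new extent, since the on-disk range is not contiguous.
 */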
2479
2480 /*
2481 * A `swap extent' is a simple thing which maps a contiguous range of pages
2482 * onto a contiguous range of disk blocks. A rbtree of swap extents is
2483 * built at swapon time and is then used at swap_writepage/swap_read_folio
2484 * time for locating where on disk a page belongs.
2485 *
2486 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2487 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2488 * swap files identically.
2489 *
2490 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2491 * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
2492 * swapfiles are handled *identically* after swapon time.
2493 *
2494 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2495 * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray
2496 * blocks are found which do not fall within the PAGE_SIZE alignment
2497 * requirements, they are simply tossed out - we will never use those blocks
2498 * for swapping.
2499 *
2500 * For all swap devices we set S_SWAPFILE across the life of the swapon. This
2501 * prevents users from writing to the swap device, which will corrupt memory.
2502 *
2503 * The amount of disk space which a single swap extent represents varies.
2504 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
2505 * extents in the rbtree. - akpm.
2506 */
2507 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2508 {
2509 struct file *swap_file = sis->swap_file;
2510 struct address_space *mapping = swap_file->f_mapping;
2511 struct inode *inode = mapping->host;
2512 int ret;
2513
2514 if (S_ISBLK(inode->i_mode)) {
2515 ret = add_swap_extent(sis, 0, sis->max, 0);
2516 *span = sis->pages;
2517 return ret;
2518 }
2519
2520 if (mapping->a_ops->swap_activate) {
2521 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2522 if (ret < 0)
2523 return ret;
2524 sis->flags |= SWP_ACTIVATED;
2525 if ((sis->flags & SWP_FS_OPS) &&
2526 sio_pool_init() != 0) {
2527 destroy_swap_extents(sis);
2528 return -ENOMEM;
2529 }
2530 return ret;
2531 }
2532
2533 return generic_swapfile_activate(sis, swap_file, span);
2534 }
2535
2536 static int swap_node(struct swap_info_struct *si)
2537 {
2538 struct block_device *bdev;
2539
2540 if (si->bdev)
2541 bdev = si->bdev;
2542 else
2543 bdev = si->swap_file->f_inode->i_sb->s_bdev;
2544
2545 return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2546 }
2547
2548 static void setup_swap_info(struct swap_info_struct *si, int prio,
2549 unsigned char *swap_map,
2550 struct swap_cluster_info *cluster_info,
2551 unsigned long *zeromap)
2552 {
2553 int i;
2554
2555 if (prio >= 0)
2556 si->prio = prio;
2557 else
2558 si->prio = --least_priority;
2559 /*
2560 * the plist prio is negated because plist ordering is
2561 * low-to-high, while swap ordering is high-to-low
2562 */
2563 si->list.prio = -si->prio;
2564 for_each_node(i) {
2565 if (si->prio >= 0)
2566 si->avail_lists[i].prio = -si->prio;
2567 else {
2568 if (swap_node(si) == i)
2569 si->avail_lists[i].prio = 1;
2570 else
2571 si->avail_lists[i].prio = -si->prio;
2572 }
2573 }
2574 si->swap_map = swap_map;
2575 si->cluster_info = cluster_info;
2576 si->zeromap = zeromap;
2577 }
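/*
 * Priority example for setup_swap_info(): a device enabled with
 * "swapon -p 5" gets si->prio == 5 and plist prio -5 on every list.
 * A device with no explicit priority gets the next auto value (-2, -3, ...);
 * its avail list on the node backing the device is boosted to prio 1 so
 * local allocations prefer it, while the other nodes use -prio (2, 3, ...).
 */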
2578
2579 static void _enable_swap_info(struct swap_info_struct *si)
2580 {
2581 atomic_long_add(si->pages, &nr_swap_pages);
2582 total_swap_pages += si->pages;
2583
2584 assert_spin_locked(&swap_lock);
2585 /*
2586 * both lists are plists, and thus priority ordered.
2587 * swap_active_head needs to be priority ordered for swapoff(),
2588 * which on removal of any swap_info_struct with an auto-assigned
2589 * (i.e. negative) priority increments the auto-assigned priority
2590 * of any lower-priority swap_info_structs.
2591 * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2592 * which allocates swap pages from the highest available priority
2593 * swap_info_struct.
2594 */
2595 plist_add(&si->list, &swap_active_head);
2596
2597 /* Add back to available list */
2598 add_to_avail_list(si, true);
2599 }
2600
2601 static void enable_swap_info(struct swap_info_struct *si, int prio,
2602 unsigned char *swap_map,
2603 struct swap_cluster_info *cluster_info,
2604 unsigned long *zeromap)
2605 {
2606 spin_lock(&swap_lock);
2607 spin_lock(&si->lock);
2608 setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
2609 spin_unlock(&si->lock);
2610 spin_unlock(&swap_lock);
2611 /*
2612 * Finished initializing swap device, now it's safe to reference it.
2613 */
2614 percpu_ref_resurrect(&si->users);
2615 spin_lock(&swap_lock);
2616 spin_lock(&si->lock);
2617 _enable_swap_info(si);
2618 spin_unlock(&si->lock);
2619 spin_unlock(&swap_lock);
2620 }
2621
2622 static void reinsert_swap_info(struct swap_info_struct *si)
2623 {
2624 spin_lock(&swap_lock);
2625 spin_lock(&si->lock);
2626 setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
2627 _enable_swap_info(si);
2628 spin_unlock(&si->lock);
2629 spin_unlock(&swap_lock);
2630 }
2631
2632 /*
2633 * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range()
2634 * sees the updated flags, so there will be no more allocations.
2635 */
2636 static void wait_for_allocation(struct swap_info_struct *si)
2637 {
2638 unsigned long offset;
2639 unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER);
2640 struct swap_cluster_info *ci;
2641
2642 BUG_ON(si->flags & SWP_WRITEOK);
2643
2644 for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {
2645 ci = lock_cluster(si, offset);
2646 unlock_cluster(ci);
2647 }
2648 }
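/*
 * Taking and dropping each cluster lock in turn acts as a barrier: any
 * allocator that sampled SWP_WRITEOK before it was cleared must still be
 * holding its cluster lock, so once every lock has been cycled no stale
 * allocation can be in flight.
 */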
2649
2650 /*
2651 * Called after swap device's reference count is dead, so
2652 * neither scan nor allocation will use it.
2653 */
2654 static void flush_percpu_swap_cluster(struct swap_info_struct *si)
2655 {
2656 int cpu, i;
2657 struct swap_info_struct **pcp_si;
2658
2659 for_each_possible_cpu(cpu) {
2660 pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu);
2661 /*
2662 * Invalidate the percpu swap cluster cache: si->users
2663 * is dead, so no new user will point to it; just flush
2664 * any existing users.
2665 */
2666 for (i = 0; i < SWAP_NR_ORDERS; i++)
2667 cmpxchg(&pcp_si[i], si, NULL);
2668 }
2669 }
2670
2671
2672 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2673 {
2674 struct swap_info_struct *p = NULL;
2675 unsigned char *swap_map;
2676 unsigned long *zeromap;
2677 struct swap_cluster_info *cluster_info;
2678 struct file *swap_file, *victim;
2679 struct address_space *mapping;
2680 struct inode *inode;
2681 struct filename *pathname;
2682 int err, found = 0;
2683
2684 if (!capable(CAP_SYS_ADMIN))
2685 return -EPERM;
2686
2687 BUG_ON(!current->mm);
2688
2689 pathname = getname(specialfile);
2690 if (IS_ERR(pathname))
2691 return PTR_ERR(pathname);
2692
2693 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2694 err = PTR_ERR(victim);
2695 if (IS_ERR(victim))
2696 goto out;
2697
2698 mapping = victim->f_mapping;
2699 spin_lock(&swap_lock);
2700 plist_for_each_entry(p, &swap_active_head, list) {
2701 if (p->flags & SWP_WRITEOK) {
2702 if (p->swap_file->f_mapping == mapping) {
2703 found = 1;
2704 break;
2705 }
2706 }
2707 }
2708 if (!found) {
2709 err = -EINVAL;
2710 spin_unlock(&swap_lock);
2711 goto out_dput;
2712 }
2713 if (!security_vm_enough_memory_mm(current->mm, p->pages))
2714 vm_unacct_memory(p->pages);
2715 else {
2716 err = -ENOMEM;
2717 spin_unlock(&swap_lock);
2718 goto out_dput;
2719 }
2720 spin_lock(&p->lock);
2721 del_from_avail_list(p, true);
2722 if (p->prio < 0) {
2723 struct swap_info_struct *si = p;
2724 int nid;
2725
2726 plist_for_each_entry_continue(si, &swap_active_head, list) {
2727 si->prio++;
2728 si->list.prio--;
2729 for_each_node(nid) {
2730 if (si->avail_lists[nid].prio != 1)
2731 si->avail_lists[nid].prio--;
2732 }
2733 }
2734 least_priority++;
2735 }
2736 plist_del(&p->list, &swap_active_head);
2737 atomic_long_sub(p->pages, &nr_swap_pages);
2738 total_swap_pages -= p->pages;
2739 spin_unlock(&p->lock);
2740 spin_unlock(&swap_lock);
2741
2742 wait_for_allocation(p);
2743
2744 set_current_oom_origin();
2745 err = try_to_unuse(p->type);
2746 clear_current_oom_origin();
2747
2748 if (err) {
2749 /* re-insert swap space back into swap_list */
2750 reinsert_swap_info(p);
2751 goto out_dput;
2752 }
2753
2754 /*
2755 * Wait for swap operations protected by get/put_swap_device()
2756 * to complete. Because of synchronize_rcu() here, all swap
2757 * operations protected by RCU reader side lock (including any
2758 * spinlock) will be waited for as well. This makes it easy to
2759 * prevent folio_test_swapcache() and the following swap cache
2760 * operations from racing with swapoff.
2761 */
2762 percpu_ref_kill(&p->users);
2763 synchronize_rcu();
2764 wait_for_completion(&p->comp);
2765
2766 flush_work(&p->discard_work);
2767 flush_work(&p->reclaim_work);
2768 flush_percpu_swap_cluster(p);
2769
2770 destroy_swap_extents(p);
2771 if (p->flags & SWP_CONTINUED)
2772 free_swap_count_continuations(p);
2773
2774 if (!p->bdev || !bdev_nonrot(p->bdev))
2775 atomic_dec(&nr_rotate_swap);
2776
2777 mutex_lock(&swapon_mutex);
2778 spin_lock(&swap_lock);
2779 spin_lock(&p->lock);
2780 drain_mmlist();
2781
2782 swap_file = p->swap_file;
2783 p->swap_file = NULL;
2784 p->max = 0;
2785 swap_map = p->swap_map;
2786 p->swap_map = NULL;
2787 zeromap = p->zeromap;
2788 p->zeromap = NULL;
2789 cluster_info = p->cluster_info;
2790 p->cluster_info = NULL;
2791 spin_unlock(&p->lock);
2792 spin_unlock(&swap_lock);
2793 arch_swap_invalidate_area(p->type);
2794 zswap_swapoff(p->type);
2795 mutex_unlock(&swapon_mutex);
2796 kfree(p->global_cluster);
2797 p->global_cluster = NULL;
2798 vfree(swap_map);
2799 kvfree(zeromap);
2800 kvfree(cluster_info);
2801 /* Destroy swap account information */
2802 swap_cgroup_swapoff(p->type);
2803 exit_swap_address_space(p->type);
2804
2805 inode = mapping->host;
2806
2807 inode_lock(inode);
2808 inode->i_flags &= ~S_SWAPFILE;
2809 inode_unlock(inode);
2810 filp_close(swap_file, NULL);
2811
2812 /*
2813 * Clear the SWP_USED flag after all resources are freed so that swapon
2814 * can reuse this swap_info in alloc_swap_info() safely. It is ok to
2815 * not hold p->lock after we cleared its SWP_WRITEOK.
2816 */
2817 spin_lock(&swap_lock);
2818 p->flags = 0;
2819 spin_unlock(&swap_lock);
2820
2821 err = 0;
2822 atomic_inc(&proc_poll_event);
2823 wake_up_interruptible(&proc_poll_wait);
2824
2825 out_dput:
2826 filp_close(victim, NULL);
2827 out:
2828 putname(pathname);
2829 return err;
2830 }
2831
2832 #ifdef CONFIG_PROC_FS
2833 static __poll_t swaps_poll(struct file *file, poll_table *wait)
2834 {
2835 struct seq_file *seq = file->private_data;
2836
2837 poll_wait(file, &proc_poll_wait, wait);
2838
2839 if (seq->poll_event != atomic_read(&proc_poll_event)) {
2840 seq->poll_event = atomic_read(&proc_poll_event);
2841 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2842 }
2843
2844 return EPOLLIN | EPOLLRDNORM;
2845 }
2846
2847 /* iterator */
2848 static void *swap_start(struct seq_file *swap, loff_t *pos)
2849 {
2850 struct swap_info_struct *si;
2851 int type;
2852 loff_t l = *pos;
2853
2854 mutex_lock(&swapon_mutex);
2855
2856 if (!l)
2857 return SEQ_START_TOKEN;
2858
2859 for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2860 if (!(si->flags & SWP_USED) || !si->swap_map)
2861 continue;
2862 if (!--l)
2863 return si;
2864 }
2865
2866 return NULL;
2867 }
2868
2869 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2870 {
2871 struct swap_info_struct *si = v;
2872 int type;
2873
2874 if (v == SEQ_START_TOKEN)
2875 type = 0;
2876 else
2877 type = si->type + 1;
2878
2879 ++(*pos);
2880 for (; (si = swap_type_to_swap_info(type)); type++) {
2881 if (!(si->flags & SWP_USED) || !si->swap_map)
2882 continue;
2883 return si;
2884 }
2885
2886 return NULL;
2887 }
2888
2889 static void swap_stop(struct seq_file *swap, void *v)
2890 {
2891 mutex_unlock(&swapon_mutex);
2892 }
2893
2894 static int swap_show(struct seq_file *swap, void *v)
2895 {
2896 struct swap_info_struct *si = v;
2897 struct file *file;
2898 int len;
2899 unsigned long bytes, inuse;
2900
2901 if (si == SEQ_START_TOKEN) {
2902 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2903 return 0;
2904 }
2905
2906 bytes = K(si->pages);
2907 inuse = K(swap_usage_in_pages(si));
2908
2909 file = si->swap_file;
2910 len = seq_file_path(swap, file, " \t\n\\");
2911 seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2912 len < 40 ? 40 - len : 1, " ",
2913 S_ISBLK(file_inode(file)->i_mode) ?
2914 "partition" : "file\t",
2915 bytes, bytes < 10000000 ? "\t" : "",
2916 inuse, inuse < 10000000 ? "\t" : "",
2917 si->prio);
2918 return 0;
2919 }
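/*
 * A /proc/swaps data line produced above might look like (hypothetical
 * device and sizes, Size and Used in kilobytes, Priority from si->prio):
 *
 *	/dev/sda2                               partition	8388604	1048576	-2
 */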
2920
2921 static const struct seq_operations swaps_op = {
2922 .start = swap_start,
2923 .next = swap_next,
2924 .stop = swap_stop,
2925 .show = swap_show
2926 };
2927
2928 static int swaps_open(struct inode *inode, struct file *file)
2929 {
2930 struct seq_file *seq;
2931 int ret;
2932
2933 ret = seq_open(file, &swaps_op);
2934 if (ret)
2935 return ret;
2936
2937 seq = file->private_data;
2938 seq->poll_event = atomic_read(&proc_poll_event);
2939 return 0;
2940 }
2941
2942 static const struct proc_ops swaps_proc_ops = {
2943 .proc_flags = PROC_ENTRY_PERMANENT,
2944 .proc_open = swaps_open,
2945 .proc_read = seq_read,
2946 .proc_lseek = seq_lseek,
2947 .proc_release = seq_release,
2948 .proc_poll = swaps_poll,
2949 };
2950
2951 static int __init procswaps_init(void)
2952 {
2953 proc_create("swaps", 0, NULL, &swaps_proc_ops);
2954 return 0;
2955 }
2956 __initcall(procswaps_init);
2957 #endif /* CONFIG_PROC_FS */
2958
2959 #ifdef MAX_SWAPFILES_CHECK
2960 static int __init max_swapfiles_check(void)
2961 {
2962 MAX_SWAPFILES_CHECK();
2963 return 0;
2964 }
2965 late_initcall(max_swapfiles_check);
2966 #endif
2967
2968 static struct swap_info_struct *alloc_swap_info(void)
2969 {
2970 struct swap_info_struct *p;
2971 struct swap_info_struct *defer = NULL;
2972 unsigned int type;
2973 int i;
2974
2975 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2976 if (!p)
2977 return ERR_PTR(-ENOMEM);
2978
2979 if (percpu_ref_init(&p->users, swap_users_ref_free,
2980 PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2981 kvfree(p);
2982 return ERR_PTR(-ENOMEM);
2983 }
2984
2985 spin_lock(&swap_lock);
2986 for (type = 0; type < nr_swapfiles; type++) {
2987 if (!(swap_info[type]->flags & SWP_USED))
2988 break;
2989 }
2990 if (type >= MAX_SWAPFILES) {
2991 spin_unlock(&swap_lock);
2992 percpu_ref_exit(&p->users);
2993 kvfree(p);
2994 return ERR_PTR(-EPERM);
2995 }
2996 if (type >= nr_swapfiles) {
2997 p->type = type;
2998 /*
2999 * Publish the swap_info_struct after initializing it.
3000 * Note that kvzalloc() above zeroes all its fields.
3001 */
3002 smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
3003 nr_swapfiles++;
3004 } else {
3005 defer = p;
3006 p = swap_info[type];
3007 /*
3008 * Do not memset this entry: a racing procfs swap_next()
3009 * would be relying on p->type to remain valid.
3010 */
3011 }
3012 p->swap_extent_root = RB_ROOT;
3013 plist_node_init(&p->list, 0);
3014 for_each_node(i)
3015 plist_node_init(&p->avail_lists[i], 0);
3016 p->flags = SWP_USED;
3017 spin_unlock(&swap_lock);
3018 if (defer) {
3019 percpu_ref_exit(&defer->users);
3020 kvfree(defer);
3021 }
3022 spin_lock_init(&p->lock);
3023 spin_lock_init(&p->cont_lock);
3024 atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT);
3025 init_completion(&p->comp);
3026
3027 return p;
3028 }
3029
3030 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
3031 {
3032 if (S_ISBLK(inode->i_mode)) {
3033 si->bdev = I_BDEV(inode);
3034 /*
3035 * Zoned block devices contain zones that have a sequential
3036 * write only restriction. Hence zoned block devices are not
3037 * suitable for swapping. Disallow them here.
3038 */
3039 if (bdev_is_zoned(si->bdev))
3040 return -EINVAL;
3041 si->flags |= SWP_BLKDEV;
3042 } else if (S_ISREG(inode->i_mode)) {
3043 si->bdev = inode->i_sb->s_bdev;
3044 }
3045
3046 return 0;
3047 }
3048
3049
3050 /*
3051 * Find out how many pages are allowed for a single swap device. There
3052 * are two limiting factors:
3053 * 1) the number of bits for the swap offset in the swp_entry_t type, and
3054 * 2) the number of bits in the swap pte, as defined by the different
3055 * architectures.
3056 *
3057 * In order to find the largest possible bit mask, a swap entry with
3058 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
3059 * decoded to a swp_entry_t again, and finally the swap offset is
3060 * extracted.
3061 *
3062 * This will mask all the bits from the initial ~0UL mask that can't
3063 * be encoded in either the swp_entry_t or the architecture definition
3064 * of a swap pte.
3065 */
3066 unsigned long generic_max_swapfile_size(void)
3067 {
3068 return swp_offset(pte_to_swp_entry(
3069 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
3070 }
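/*
 * Worked example, assuming an architecture whose swap PTE can encode a
 * 50-bit offset: swp_entry(0, ~0UL) round-trips through the PTE format,
 * comes back with only the low 50 offset bits set, and the function
 * returns 2^50, i.e. the device may cover at most 2^50 PAGE_SIZE slots.
 */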
3071
3072 /* Can be overridden by an architecture for additional checks. */
3073 __weak unsigned long arch_max_swapfile_size(void)
3074 {
3075 return generic_max_swapfile_size();
3076 }
3077
3078 static unsigned long read_swap_header(struct swap_info_struct *si,
3079 union swap_header *swap_header,
3080 struct inode *inode)
3081 {
3082 int i;
3083 unsigned long maxpages;
3084 unsigned long swapfilepages;
3085 unsigned long last_page;
3086
3087 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
3088 pr_err("Unable to find swap-space signature\n");
3089 return 0;
3090 }
3091
3092 /* swap partition endianness hack... */
3093 if (swab32(swap_header->info.version) == 1) {
3094 swab32s(&swap_header->info.version);
3095 swab32s(&swap_header->info.last_page);
3096 swab32s(&swap_header->info.nr_badpages);
3097 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3098 return 0;
3099 for (i = 0; i < swap_header->info.nr_badpages; i++)
3100 swab32s(&swap_header->info.badpages[i]);
3101 }
3102 /* Check the swap header's sub-version */
3103 if (swap_header->info.version != 1) {
3104 pr_warn("Unable to handle swap header version %d\n",
3105 swap_header->info.version);
3106 return 0;
3107 }
3108
3109 maxpages = swapfile_maximum_size;
3110 last_page = swap_header->info.last_page;
3111 if (!last_page) {
3112 pr_warn("Empty swap-file\n");
3113 return 0;
3114 }
3115 if (last_page > maxpages) {
3116 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
3117 K(maxpages), K(last_page));
3118 }
3119 if (maxpages > last_page) {
3120 maxpages = last_page + 1;
3121 /* p->max is an unsigned int: don't overflow it */
3122 if ((unsigned int)maxpages == 0)
3123 maxpages = UINT_MAX;
3124 }
3125
3126 if (!maxpages)
3127 return 0;
3128 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3129 if (swapfilepages && maxpages > swapfilepages) {
3130 pr_warn("Swap area shorter than signature indicates\n");
3131 return 0;
3132 }
3133 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3134 return 0;
3135 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3136 return 0;
3137
3138 return maxpages;
3139 }
3140
3141 static int setup_swap_map_and_extents(struct swap_info_struct *si,
3142 union swap_header *swap_header,
3143 unsigned char *swap_map,
3144 unsigned long maxpages,
3145 sector_t *span)
3146 {
3147 unsigned int nr_good_pages;
3148 unsigned long i;
3149 int nr_extents;
3150
3151 nr_good_pages = maxpages - 1; /* omit header page */
3152
3153 for (i = 0; i < swap_header->info.nr_badpages; i++) {
3154 unsigned int page_nr = swap_header->info.badpages[i];
3155 if (page_nr == 0 || page_nr > swap_header->info.last_page)
3156 return -EINVAL;
3157 if (page_nr < maxpages) {
3158 swap_map[page_nr] = SWAP_MAP_BAD;
3159 nr_good_pages--;
3160 }
3161 }
3162
3163 if (nr_good_pages) {
3164 swap_map[0] = SWAP_MAP_BAD;
3165 si->max = maxpages;
3166 si->pages = nr_good_pages;
3167 nr_extents = setup_swap_extents(si, span);
3168 if (nr_extents < 0)
3169 return nr_extents;
3170 nr_good_pages = si->pages;
3171 }
3172 if (!nr_good_pages) {
3173 pr_warn("Empty swap-file\n");
3174 return -EINVAL;
3175 }
3176
3177 return nr_extents;
3178 }
3179
3180 #define SWAP_CLUSTER_INFO_COLS \
3181 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3182 #define SWAP_CLUSTER_SPACE_COLS \
3183 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3184 #define SWAP_CLUSTER_COLS \
3185 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3186
3187 static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
3188 union swap_header *swap_header,
3189 unsigned long maxpages)
3190 {
3191 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3192 struct swap_cluster_info *cluster_info;
3193 unsigned long i, j, idx;
3194 int err = -ENOMEM;
3195
3196 cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
3197 if (!cluster_info)
3198 goto err;
3199
3200 for (i = 0; i < nr_clusters; i++)
3201 spin_lock_init(&cluster_info[i].lock);
3202
3203 if (!(si->flags & SWP_SOLIDSTATE)) {
3204 si->global_cluster = kmalloc(sizeof(*si->global_cluster),
3205 GFP_KERNEL);
3206 if (!si->global_cluster)
3207 goto err_free;
3208 for (i = 0; i < SWAP_NR_ORDERS; i++)
3209 si->global_cluster->next[i] = SWAP_ENTRY_INVALID;
3210 spin_lock_init(&si->global_cluster_lock);
3211 }
3212
3213 /*
3214 * Mark unusable pages as unavailable. The clusters aren't
3215 * marked free yet, so no list operations are involved yet.
3216 *
3217 * See setup_swap_map_and_extents(): header page, bad pages,
3218 * and the EOF part of the last cluster.
3219 */
3220 inc_cluster_info_page(si, cluster_info, 0);
3221 for (i = 0; i < swap_header->info.nr_badpages; i++)
3222 inc_cluster_info_page(si, cluster_info,
3223 swap_header->info.badpages[i]);
3224 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3225 inc_cluster_info_page(si, cluster_info, i);
3226
3227 INIT_LIST_HEAD(&si->free_clusters);
3228 INIT_LIST_HEAD(&si->full_clusters);
3229 INIT_LIST_HEAD(&si->discard_clusters);
3230
3231 for (i = 0; i < SWAP_NR_ORDERS; i++) {
3232 INIT_LIST_HEAD(&si->nonfull_clusters[i]);
3233 INIT_LIST_HEAD(&si->frag_clusters[i]);
3234 atomic_long_set(&si->frag_cluster_nr[i], 0);
3235 }
3236
3237 /*
3238 * Reduce false sharing between cluster_info entries in the same cache
3239 * line and between clusters backed by the same address space block.
3240 */
3241 for (j = 0; j < SWAP_CLUSTER_COLS; j++) {
3242 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3243 struct swap_cluster_info *ci;
3244 idx = i * SWAP_CLUSTER_COLS + j;
3245 ci = cluster_info + idx;
3246 if (idx >= nr_clusters)
3247 continue;
3248 if (ci->count) {
3249 ci->flags = CLUSTER_FLAG_NONFULL;
3250 list_add_tail(&ci->list, &si->nonfull_clusters[0]);
3251 continue;
3252 }
3253 ci->flags = CLUSTER_FLAG_FREE;
3254 list_add_tail(&ci->list, &si->free_clusters);
3255 }
3256 }
3257
3258 return cluster_info;
3259
3260 err_free:
3261 kvfree(cluster_info);
3262 err:
3263 return ERR_PTR(err);
3264 }
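/*
 * The column interleaving above spreads list insertion across the
 * cluster_info array: with SWAP_CLUSTER_COLS == 4 (a hypothetical value),
 * clusters are visited in the order 0, 4, 8, ... then 1, 5, 9, ... so that
 * cluster_info entries which share a cache line, or a swap address space
 * block, do not all end up adjacent on the free list.
 */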
3265
3266 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3267 {
3268 struct swap_info_struct *si;
3269 struct filename *name;
3270 struct file *swap_file = NULL;
3271 struct address_space *mapping;
3272 struct dentry *dentry;
3273 int prio;
3274 int error;
3275 union swap_header *swap_header;
3276 int nr_extents;
3277 sector_t span;
3278 unsigned long maxpages;
3279 unsigned char *swap_map = NULL;
3280 unsigned long *zeromap = NULL;
3281 struct swap_cluster_info *cluster_info = NULL;
3282 struct folio *folio = NULL;
3283 struct inode *inode = NULL;
3284 bool inced_nr_rotate_swap = false;
3285
3286 if (swap_flags & ~SWAP_FLAGS_VALID)
3287 return -EINVAL;
3288
3289 if (!capable(CAP_SYS_ADMIN))
3290 return -EPERM;
3291
3292 if (!swap_avail_heads)
3293 return -ENOMEM;
3294
3295 si = alloc_swap_info();
3296 if (IS_ERR(si))
3297 return PTR_ERR(si);
3298
3299 INIT_WORK(&si->discard_work, swap_discard_work);
3300 INIT_WORK(&si->reclaim_work, swap_reclaim_work);
3301
3302 name = getname(specialfile);
3303 if (IS_ERR(name)) {
3304 error = PTR_ERR(name);
3305 name = NULL;
3306 goto bad_swap;
3307 }
3308 swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
3309 if (IS_ERR(swap_file)) {
3310 error = PTR_ERR(swap_file);
3311 swap_file = NULL;
3312 goto bad_swap;
3313 }
3314
3315 si->swap_file = swap_file;
3316 mapping = swap_file->f_mapping;
3317 dentry = swap_file->f_path.dentry;
3318 inode = mapping->host;
3319
3320 error = claim_swapfile(si, inode);
3321 if (unlikely(error))
3322 goto bad_swap;
3323
3324 inode_lock(inode);
3325 if (d_unlinked(dentry) || cant_mount(dentry)) {
3326 error = -ENOENT;
3327 goto bad_swap_unlock_inode;
3328 }
3329 if (IS_SWAPFILE(inode)) {
3330 error = -EBUSY;
3331 goto bad_swap_unlock_inode;
3332 }
3333
3334 /*
3335 * The swap subsystem needs a major overhaul to support this.
3336 * It doesn't work yet so just disable it for now.
3337 */
3338 if (mapping_min_folio_order(mapping) > 0) {
3339 error = -EINVAL;
3340 goto bad_swap_unlock_inode;
3341 }
3342
3343 /*
3344 * Read the swap header.
3345 */
3346 if (!mapping->a_ops->read_folio) {
3347 error = -EINVAL;
3348 goto bad_swap_unlock_inode;
3349 }
3350 folio = read_mapping_folio(mapping, 0, swap_file);
3351 if (IS_ERR(folio)) {
3352 error = PTR_ERR(folio);
3353 goto bad_swap_unlock_inode;
3354 }
3355 swap_header = kmap_local_folio(folio, 0);
3356
3357 maxpages = read_swap_header(si, swap_header, inode);
3358 if (unlikely(!maxpages)) {
3359 error = -EINVAL;
3360 goto bad_swap_unlock_inode;
3361 }
3362
3363 /* OK, set up the swap map and apply the bad block list */
3364 swap_map = vzalloc(maxpages);
3365 if (!swap_map) {
3366 error = -ENOMEM;
3367 goto bad_swap_unlock_inode;
3368 }
3369
3370 error = swap_cgroup_swapon(si->type, maxpages);
3371 if (error)
3372 goto bad_swap_unlock_inode;
3373
3374 nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
3375 maxpages, &span);
3376 if (unlikely(nr_extents < 0)) {
3377 error = nr_extents;
3378 goto bad_swap_unlock_inode;
3379 }
3380
3381 /*
3382 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
3383 * be above MAX_PAGE_ORDER in case of a large swap file.
3384 */
3385 zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
3386 GFP_KERNEL | __GFP_ZERO);
3387 if (!zeromap) {
3388 error = -ENOMEM;
3389 goto bad_swap_unlock_inode;
3390 }
3391
3392 if (si->bdev && bdev_stable_writes(si->bdev))
3393 si->flags |= SWP_STABLE_WRITES;
3394
3395 if (si->bdev && bdev_synchronous(si->bdev))
3396 si->flags |= SWP_SYNCHRONOUS_IO;
3397
3398 if (si->bdev && bdev_nonrot(si->bdev)) {
3399 si->flags |= SWP_SOLIDSTATE;
3400 } else {
3401 atomic_inc(&nr_rotate_swap);
3402 inced_nr_rotate_swap = true;
3403 }
3404
3405 cluster_info = setup_clusters(si, swap_header, maxpages);
3406 if (IS_ERR(cluster_info)) {
3407 error = PTR_ERR(cluster_info);
3408 cluster_info = NULL;
3409 goto bad_swap_unlock_inode;
3410 }
3411
3412 if ((swap_flags & SWAP_FLAG_DISCARD) &&
3413 si->bdev && bdev_max_discard_sectors(si->bdev)) {
3414 /*
3415 * When discard is enabled for swap with no particular
3416 * policy flagged, we set all swap discard flags here in
3417 * order to sustain backward compatibility with older
3418 * swapon(8) releases.
3419 */
3420 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3421 SWP_PAGE_DISCARD);
3422
3423 /*
3424 * By flagging sys_swapon, a sysadmin can tell us to
3425 * either do single-time area discards only, or to just
3426 * perform discards for released swap page-clusters.
3427 * Now it's time to adjust the p->flags accordingly.
3428 */
3429 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3430 si->flags &= ~SWP_PAGE_DISCARD;
3431 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3432 si->flags &= ~SWP_AREA_DISCARD;
3433
3434 /* issue a swapon-time discard if it's still required */
3435 if (si->flags & SWP_AREA_DISCARD) {
3436 int err = discard_swap(si);
3437 if (unlikely(err))
3438 pr_err("swapon: discard_swap(%p): %d\n",
3439 si, err);
3440 }
3441 }
3442
3443 error = init_swap_address_space(si->type, maxpages);
3444 if (error)
3445 goto bad_swap_unlock_inode;
3446
3447 error = zswap_swapon(si->type, maxpages);
3448 if (error)
3449 goto free_swap_address_space;
3450
3451 /*
3452 * Flush any pending IO and dirty mappings before we start using this
3453 * swap device.
3454 */
3455 inode->i_flags |= S_SWAPFILE;
3456 error = inode_drain_writes(inode);
3457 if (error) {
3458 inode->i_flags &= ~S_SWAPFILE;
3459 goto free_swap_zswap;
3460 }
3461
3462 mutex_lock(&swapon_mutex);
3463 prio = -1;
3464 if (swap_flags & SWAP_FLAG_PREFER)
3465 prio = swap_flags & SWAP_FLAG_PRIO_MASK;
3466 enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
3467
3468 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
3469 K(si->pages), name->name, si->prio, nr_extents,
3470 K((unsigned long long)span),
3471 (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
3472 (si->flags & SWP_DISCARDABLE) ? "D" : "",
3473 (si->flags & SWP_AREA_DISCARD) ? "s" : "",
3474 (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
3475
3476 mutex_unlock(&swapon_mutex);
3477 atomic_inc(&proc_poll_event);
3478 wake_up_interruptible(&proc_poll_wait);
3479
3480 error = 0;
3481 goto out;
3482 free_swap_zswap:
3483 zswap_swapoff(si->type);
3484 free_swap_address_space:
3485 exit_swap_address_space(si->type);
3486 bad_swap_unlock_inode:
3487 inode_unlock(inode);
3488 bad_swap:
3489 kfree(si->global_cluster);
3490 si->global_cluster = NULL;
3491 inode = NULL;
3492 destroy_swap_extents(si);
3493 swap_cgroup_swapoff(si->type);
3494 spin_lock(&swap_lock);
3495 si->swap_file = NULL;
3496 si->flags = 0;
3497 spin_unlock(&swap_lock);
3498 vfree(swap_map);
3499 kvfree(zeromap);
3500 kvfree(cluster_info);
3501 if (inced_nr_rotate_swap)
3502 atomic_dec(&nr_rotate_swap);
3503 if (swap_file)
3504 filp_close(swap_file, NULL);
3505 out:
3506 if (!IS_ERR_OR_NULL(folio))
3507 folio_release_kmap(folio, swap_header);
3508 if (name)
3509 putname(name);
3510 if (inode)
3511 inode_unlock(inode);
3512 return error;
3513 }
3514
3515 void si_swapinfo(struct sysinfo *val)
3516 {
3517 unsigned int type;
3518 unsigned long nr_to_be_unused = 0;
3519
3520 spin_lock(&swap_lock);
3521 for (type = 0; type < nr_swapfiles; type++) {
3522 struct swap_info_struct *si = swap_info[type];
3523
3524 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3525 nr_to_be_unused += swap_usage_in_pages(si);
3526 }
3527 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3528 val->totalswap = total_swap_pages + nr_to_be_unused;
3529 spin_unlock(&swap_lock);
3530 }
3531
3532 /*
3533 * Verify that nr swap entries are valid and increment their swap map counts.
3534 *
3535 * Returns an error code in the following cases:
3536 * - success -> 0
3537 * - swp_entry is invalid -> EINVAL
3538 * - swap-cache reference is requested but there is already one. -> EEXIST
3539 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3540 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3541 */
3542 static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
3543 {
3544 struct swap_info_struct *si;
3545 struct swap_cluster_info *ci;
3546 unsigned long offset;
3547 unsigned char count;
3548 unsigned char has_cache;
3549 int err, i;
3550
3551 si = swp_swap_info(entry);
3552 if (WARN_ON_ONCE(!si)) {
3553 pr_err("%s%08lx\n", Bad_file, entry.val);
3554 return -EINVAL;
3555 }
3556
3557 offset = swp_offset(entry);
3558 VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
3559 VM_WARN_ON(usage == 1 && nr > 1);
3560 ci = lock_cluster(si, offset);
3561
3562 err = 0;
3563 for (i = 0; i < nr; i++) {
3564 count = si->swap_map[offset + i];
3565
3566 /*
3567 * swapin_readahead() doesn't check if a swap entry is valid, so the
3568 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3569 */
3570 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3571 err = -ENOENT;
3572 goto unlock_out;
3573 }
3574
3575 has_cache = count & SWAP_HAS_CACHE;
3576 count &= ~SWAP_HAS_CACHE;
3577
3578 if (!count && !has_cache) {
3579 err = -ENOENT;
3580 } else if (usage == SWAP_HAS_CACHE) {
3581 if (has_cache)
3582 err = -EEXIST;
3583 } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
3584 err = -EINVAL;
3585 }
3586
3587 if (err)
3588 goto unlock_out;
3589 }
3590
3591 for (i = 0; i < nr; i++) {
3592 count = si->swap_map[offset + i];
3593 has_cache = count & SWAP_HAS_CACHE;
3594 count &= ~SWAP_HAS_CACHE;
3595
3596 if (usage == SWAP_HAS_CACHE)
3597 has_cache = SWAP_HAS_CACHE;
3598 else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3599 count += usage;
3600 else if (swap_count_continued(si, offset + i, count))
3601 count = COUNT_CONTINUED;
3602 else {
3603 /*
3604 * Don't need to rollback changes, because if
3605 * usage == 1, there must be nr == 1.
3606 */
3607 err = -ENOMEM;
3608 goto unlock_out;
3609 }
3610
3611 WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
3612 }
3613
3614 unlock_out:
3615 unlock_cluster(ci);
3616 return err;
3617 }
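/*
 * The usage argument above distinguishes the three kinds of references:
 * 1 for an extra page table mapping (swap_duplicate()), SWAP_MAP_SHMEM for
 * a shmem/tmpfs-owned entry (swap_shmem_alloc()), and SWAP_HAS_CACHE when
 * the swap cache takes its single cache reference (swapcache_prepare()).
 */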
3618
3619 /*
3620 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3621 * (in which case its reference count is never incremented).
3622 */
3623 void swap_shmem_alloc(swp_entry_t entry, int nr)
3624 {
3625 __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
3626 }
3627
/*
 * Increase reference count of swap entry by 1.
 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
 * but could not be atomically allocated. Returns 0, just as if it succeeded,
 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which
 * might occur if a page table entry has become corrupted.
 */
int swap_duplicate(swp_entry_t entry)
{
	int err = 0;

	while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
		err = add_swap_count_continuation(entry, GFP_ATOMIC);
	return err;
}

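/*
 * A sketch of the expected calling pattern (hypothetical caller, not
 * copied from any real one): swap_duplicate() may be reached with page
 * table locks held, which is why it only tries GFP_ATOMIC continuations;
 * on -ENOMEM the caller is expected to drop its locks, preallocate the
 * continuation with GFP_KERNEL and retry:
 *
 *	if (swap_duplicate(entry) == -ENOMEM) {
 *		... drop the page table lock ...
 *		if (add_swap_count_continuation(entry, GFP_KERNEL))
 *			return -ENOMEM;
 *		... relock and retry copying the swap pte ...
 *	}
 */
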
/*
 * @entry: first swap entry from which we allocate nr swap cache entries.
 *
 * Called when allocating swap cache for existing swap entries.
 * Returns 0 on success, or an error code otherwise.
 * -EEXIST means a swap cache already exists for the entry.
 * Note: the return codes differ from swap_duplicate().
 */
int swapcache_prepare(swp_entry_t entry, int nr)
{
	return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
}

void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
	unsigned long offset = swp_offset(entry);

	cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
}

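/*
 * swapcache_prepare() and swapcache_clear() are used as a pair:
 * prepare sets SWAP_HAS_CACHE on @nr entries before a folio is installed
 * in the swap cache, and clear drops the bit again (via
 * cluster_swap_free_nr()) once the cache reference is no longer needed,
 * e.g. when the caller has to back out.  A -EEXIST from prepare means
 * someone else already holds the swap-cache reference, typically a
 * racing swapin.
 */
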
struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return swap_type_to_swap_info(swp_type(entry));
}

/*
 * out-of-line methods to avoid include hell.
 */
struct address_space *swapcache_mapping(struct folio *folio)
{
	return swp_swap_info(folio->swap)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(swapcache_mapping);

pgoff_t __folio_swap_cache_index(struct folio *folio)
{
	return swap_cache_index(folio->swap);
}
EXPORT_SYMBOL_GPL(__folio_swap_cache_index);

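/*
 * Both helpers above rely on folio->swap holding the folio's swp_entry_t
 * while it sits in the swap cache: the backing address_space is the swap
 * file's f_mapping, and the page-cache index is derived from the entry
 * via swap_cache_index().
 */
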
/*
 * add_swap_count_continuation - called when a swap count is duplicated
 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's
 * page of the original vmalloc'ed swap_map, to hold the continuation count
 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
 *
 * These continuation pages are seldom referenced: the common paths all work
 * on the original swap_map, only referring to a continuation page when the
 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
 *
 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
 * can be called after dropping locks.
 */
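/*
 * A sketch of the resulting layout: swap_map[offset] is the least
 * significant "digit" of an entry's count.  "head" below is the page of
 * the vmalloc'ed swap_map that contains that byte, and each continuation
 * page chained on head->lru holds, at the same offset within the page,
 * one more significant digit for the same entry.  COUNT_CONTINUED in a
 * digit means "there is a further digit on the next page";
 * swap_count_continued() does the actual carrying and borrowing.
 */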
int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
{
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	struct page *head;
	struct page *page;
	struct page *list_page;
	pgoff_t offset;
	unsigned char count;
	int ret = 0;

	/*
	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
	 */
	page = alloc_page(gfp_mask | __GFP_HIGHMEM);

	si = get_swap_device(entry);
	if (!si) {
		/*
		 * An acceptable race has occurred since the failing
		 * __swap_duplicate(): the swap device may have been swapped off.
		 */
		goto outer;
	}

	offset = swp_offset(entry);

	ci = lock_cluster(si, offset);

	count = swap_count(si->swap_map[offset]);

	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
		/*
		 * The higher the swap count, the more likely it is that tasks
		 * will race to add swap count continuation: we need to avoid
		 * over-provisioning.
		 */
		goto out;
	}

	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	head = vmalloc_to_page(si->swap_map + offset);
	offset &= ~PAGE_MASK;

	spin_lock(&si->cont_lock);
	/*
	 * Page allocation does not initialize the page's lru field,
	 * but it does always reset its private field.
	 */
	if (!page_private(head)) {
		BUG_ON(count & COUNT_CONTINUED);
		INIT_LIST_HEAD(&head->lru);
		set_page_private(head, SWP_CONTINUED);
		si->flags |= SWP_CONTINUED;
	}

	list_for_each_entry(list_page, &head->lru, lru) {
		unsigned char *map;

		/*
		 * If the previous map said no continuation, but we've found
		 * a continuation page, free our allocation and use this one.
		 */
		if (!(count & COUNT_CONTINUED))
			goto out_unlock_cont;

		map = kmap_local_page(list_page) + offset;
		count = *map;
		kunmap_local(map);

		/*
		 * If this continuation count now has some space in it,
		 * free our allocation and use this one.
		 */
		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
			goto out_unlock_cont;
	}

	list_add_tail(&page->lru, &head->lru);
	page = NULL;			/* now it's attached, don't free it */
out_unlock_cont:
	spin_unlock(&si->cont_lock);
out:
	unlock_cluster(ci);
	put_swap_device(si);
outer:
	if (page)
		__free_page(page);
	return ret;
}

/*
 * swap_count_continued - when the original swap_map count is incremented
 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
 * into, carry if so, or else fail until a new continuation page is allocated;
 * when the original swap_map count is decremented from 0 with continuation,
 * borrow from the continuation and report whether it still holds more.
 * Called while __swap_duplicate() or caller of __swap_entry_free_locked()
 * holds cluster lock.
 */
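/*
 * In the "999 + 1" analogy used below: continuation digits already at
 * their maximum (SWAP_CONT_MAX | COUNT_CONTINUED) are skipped on the way
 * up and rewritten as COUNT_CONTINUED (a wrapped digit with a carry
 * flag) on the way back, while the first digit with room simply gains
 * one.  Decrementing is the mirror image: COUNT_CONTINUED digits are
 * borrowed through and rewritten as SWAP_CONT_MAX, with COUNT_CONTINUED
 * kept set as long as a more significant digit remains.
 */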
static bool swap_count_continued(struct swap_info_struct *si,
				 pgoff_t offset, unsigned char count)
{
	struct page *head;
	struct page *page;
	unsigned char *map;
	bool ret;

	head = vmalloc_to_page(si->swap_map + offset);
	if (page_private(head) != SWP_CONTINUED) {
		BUG_ON(count & COUNT_CONTINUED);
		return false;		/* need to add count continuation */
	}

	spin_lock(&si->cont_lock);
	offset &= ~PAGE_MASK;
	page = list_next_entry(head, lru);
	map = kmap_local_page(page) + offset;

	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
		goto init_map;		/* jump over SWAP_CONT_MAX checks */

	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
		/*
		 * Think of how you add 1 to 999
		 */
		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
			kunmap_local(map);
			page = list_next_entry(page, lru);
			BUG_ON(page == head);
			map = kmap_local_page(page) + offset;
		}
		if (*map == SWAP_CONT_MAX) {
			kunmap_local(map);
			page = list_next_entry(page, lru);
			if (page == head) {
				ret = false;	/* add count continuation */
				goto out;
			}
			map = kmap_local_page(page) + offset;
init_map:		*map = 0;		/* we didn't zero the page */
		}
		*map += 1;
		kunmap_local(map);
		while ((page = list_prev_entry(page, lru)) != head) {
			map = kmap_local_page(page) + offset;
			*map = COUNT_CONTINUED;
			kunmap_local(map);
		}
		ret = true;			/* incremented */

	} else {				/* decrementing */
		/*
		 * Think of how you subtract 1 from 1000
		 */
		BUG_ON(count != COUNT_CONTINUED);
		while (*map == COUNT_CONTINUED) {
			kunmap_local(map);
			page = list_next_entry(page, lru);
			BUG_ON(page == head);
			map = kmap_local_page(page) + offset;
		}
		BUG_ON(*map == 0);
		*map -= 1;
		if (*map == 0)
			count = 0;
		kunmap_local(map);
		while ((page = list_prev_entry(page, lru)) != head) {
			map = kmap_local_page(page) + offset;
			*map = SWAP_CONT_MAX | count;
			count = COUNT_CONTINUED;
			kunmap_local(map);
		}
		ret = count == COUNT_CONTINUED;
	}
out:
	spin_unlock(&si->cont_lock);
	return ret;
}

/*
 * free_swap_count_continuations - called by swapoff to free all the
 * continuation pages appended to the swap_map, after the swap_map is
 * quiesced and before vfree'ing it.
 */
static void free_swap_count_continuations(struct swap_info_struct *si)
{
	pgoff_t offset;

	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
		struct page *head;
		head = vmalloc_to_page(si->swap_map + offset);
		if (page_private(head)) {
			struct page *page, *next;

			list_for_each_entry_safe(page, next, &head->lru, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
		}
	}
}

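/*
 * The loop above advances by PAGE_SIZE entries per iteration because
 * swap_map uses one byte per swap entry: each step lands on the next
 * page of the vmalloc'ed map, and any continuation pages for the entries
 * covered by that page hang off its ->lru list.
 */
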
#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
static bool __has_usable_swap(void)
{
	return !plist_head_empty(&swap_active_head);
}

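/*
 * Roughly: if the caller is allowed to do IO, swap is available and the
 * task's blk-cgroup is congested, schedule writeback throttling for
 * current against the first swap device on this node that has a backing
 * bdev; the current->throttle_disk check short-circuits the walk when a
 * throttle has already been scheduled.
 */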
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	struct swap_info_struct *si, *next;
	int nid = folio_nid(folio);

	if (!(gfp & __GFP_IO))
		return;

	if (!__has_usable_swap())
		return;

	if (!blk_cgroup_congested())
		return;

	/*
	 * We've already scheduled a throttle, avoid taking the global swap
	 * lock.
	 */
	if (current->throttle_disk)
		return;

	spin_lock(&swap_avail_lock);
	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
				  avail_lists[nid]) {
		if (si->bdev) {
			blkcg_schedule_throttle(si->bdev->bd_disk, true);
			break;
		}
	}
	spin_unlock(&swap_avail_lock);
}
#endif

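/*
 * Early init: allocate one plist head per NUMA node for the per-node
 * lists of available swap devices, record the architecture's maximum
 * swapfile size, and (under CONFIG_MIGRATION) note whether swap offsets
 * are wide enough for migration entries to also carry young/dirty bits
 * (swap_migration_ad_supported).
 */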
static int __init swapfile_init(void)
{
	int nid;

	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
					 GFP_KERNEL);
	if (!swap_avail_heads) {
		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
		return -ENOMEM;
	}

	for_each_node(nid)
		plist_head_init(&swap_avail_heads[nid]);

	swapfile_maximum_size = arch_max_swapfile_size();

#ifdef CONFIG_MIGRATION
	if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
		swap_migration_ad_supported = true;
#endif	/* CONFIG_MIGRATION */

	return 0;
}
subsys_initcall(swapfile_init);