// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>

#ifdef CONFIG_KMAP_LOCAL
static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif
#endif /* CONFIG_KMAP_LOCAL */

/*
 * pkmap_count[] is not a pure reference "count".
 * 0 means that the entry is not mapped, and has not been mapped
 * since a TLB flush - it is usable.
 * 1 means that there are no users, but it has been mapped
 * since the last TLB flush - so we can't use it.
 * n means that there are (n-1) current users of it.
 */
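
/*
 * Illustrative lifecycle of a single pkmap_count[] entry, as implied by
 * the code below (map_new_virtual() installs the PTE with a count of 1
 * and kmap_high() then bumps it for the first user):
 *
 *   0 -> 2        first kmap_high() of the page
 *   2 -> 3        another kmap_high() of the same page
 *   3 -> 2 -> 1   matching kunmap_high() calls
 *   1 -> 0        flush_all_zero_pkmaps(): PTE cleared, TLB flushed, reusable
 */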
#ifdef CONFIG_HIGHMEM

/*
 * An architecture with an aliasing data cache may define the following
 * family of helper functions in its asm/highmem.h to control the cache
 * color of the virtual addresses where physical memory pages are mapped
 * by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(const struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
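
/*
 * For illustration only: an architecture with an aliasing data cache might
 * override the hooks above in its asm/highmem.h along these lines. The
 * constant DCACHE_N_COLORS is hypothetical here, standing in for however
 * the architecture expresses its number of cache colors; real per-arch
 * implementations may partition the PKMAP region quite differently.
 *
 *	static inline unsigned int get_pkmap_color(const struct page *page)
 *	{
 *		return page_to_pfn(page) % DCACHE_N_COLORS;
 *	}
 *	#define get_pkmap_color get_pkmap_color
 */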

unsigned long __nr_free_highpages(void)
{
	unsigned long pages = 0;
	struct zone *zone;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}

unsigned long __totalhigh_pages(void)
{
	unsigned long pages = 0;
	struct zone *zone;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_managed_pages(zone);
	}

	return pages;
}
EXPORT_SYMBOL(__totalhigh_pages);

static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;

/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the IRQ disabling out of the locking in that case to avoid potentially
 * useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif

struct page *__kmap_to_page(void *vaddr)
{
	unsigned long base = (unsigned long) vaddr & PAGE_MASK;
	struct kmap_ctrl *kctrl = &current->kmap_ctrl;
	unsigned long addr = (unsigned long)vaddr;
	int i;

	/* kmap() mappings */
	if (WARN_ON_ONCE(addr >= PKMAP_ADDR(0) &&
			 addr < PKMAP_ADDR(LAST_PKMAP)))
		return pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(addr)]));

	/* kmap_local_page() mappings */
	if (WARN_ON_ONCE(base >= __fix_to_virt(FIX_KMAP_END) &&
			 base < __fix_to_virt(FIX_KMAP_BEGIN))) {
		for (i = 0; i < kctrl->idx; i++) {
			unsigned long base_addr;
			int idx;
			pte_t pteval = kctrl->pteval[i];

			idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
			base_addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

			if (base_addr == base)
				return pte_page(pteval);
		}
	}

	return virt_to_page(vaddr);
}
EXPORT_SYMBOL(__kmap_to_page);

static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;
		pte_t ptent;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		ptent = ptep_get(&pkmap_page_table[i]);
		BUG_ON(pte_none(ptent));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(ptent);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);

		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}

void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}

static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}

/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *) vaddr;
}
EXPORT_SYMBOL(kmap_high);
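
/*
 * A minimal usage sketch: callers normally reach kmap_high() through the
 * kmap()/kunmap() wrappers rather than calling it directly. The mapping
 * may sleep, so this is process context only, and every kmap() needs a
 * matching kunmap().
 *
 *	void *vaddr = kmap(page);
 *
 *	memcpy(vaddr, data, len);
 *	kunmap(page);
 */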

#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned, a matching call
 * to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(const struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *) vaddr;
}
#endif
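
/*
 * A usage sketch for the kmap_high_get() path: probe for an existing
 * mapping from a context that must not block, and only drop the extra
 * reference if one was actually taken. The body of the "if" is purely
 * illustrative.
 *
 *	void *vaddr = kmap_high_get(page);
 *
 *	if (vaddr) {
 *		operate_on(vaddr);
 *		kunmap_high(page);
 *	}
 */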

/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(const struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);

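/*
 * zero_user_segments() below zeroes two byte ranges, [start1, end1) and
 * [start2, end2), given as offsets from the start of a (possibly compound)
 * page. Each subpage is mapped with kmap_local_page() only when one of the
 * ranges actually touches it, and flush_dcache_page() is called for every
 * subpage that was written.
 */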
void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
		unsigned start2, unsigned end2)
{
	unsigned int i;

	BUG_ON(end1 > page_size(page) || end2 > page_size(page));

	if (start1 >= end1)
		start1 = end1 = 0;
	if (start2 >= end2)
		start2 = end2 = 0;

	for (i = 0; i < compound_nr(page); i++) {
		void *kaddr = NULL;

		if (start1 >= PAGE_SIZE) {
			start1 -= PAGE_SIZE;
			end1 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end1, PAGE_SIZE);

			if (end1 > start1) {
				kaddr = kmap_local_page(page + i);
				memset(kaddr + start1, 0, this_end - start1);
			}
			end1 -= this_end;
			start1 = 0;
		}

		if (start2 >= PAGE_SIZE) {
			start2 -= PAGE_SIZE;
			end2 -= PAGE_SIZE;
		} else {
			unsigned this_end = min_t(unsigned, end2, PAGE_SIZE);

			if (end2 > start2) {
				if (!kaddr)
					kaddr = kmap_local_page(page + i);
				memset(kaddr + start2, 0, this_end - start2);
			}
			end2 -= this_end;
			start2 = 0;
		}

		if (kaddr) {
			kunmap_local(kaddr);
			flush_dcache_page(page + i);
		}

		if (!end1 && !end2)
			break;
	}

	BUG_ON((start1 | start2 | end1 | end2) != 0);
}
EXPORT_SYMBOL(zero_user_segments);
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

/*
 * With DEBUG_KMAP_LOCAL the stack depth is doubled and every second
 * slot is unused which acts as a guard page
 */
#ifdef CONFIG_DEBUG_KMAP_LOCAL
# define KM_INCR	2
#else
# define KM_INCR	1
#endif
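
/*
 * Resulting per-task slot layout with CONFIG_DEBUG_KMAP_LOCAL enabled
 * (derived from kmap_local_idx_push() below): each push advances idx by
 * KM_INCR == 2 and returns idx - 1, so live mappings occupy the odd slots
 * 1, 3, 5, ... while the even slots stay pte_none() and act as guards -
 * which is exactly what the (i & 0x01) checks in __kmap_local_sched_out()
 * and __kmap_local_sched_in() assert.
 */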

static inline int kmap_local_idx_push(void)
{
	WARN_ON_ONCE(in_hardirq() && !irqs_disabled());
	current->kmap_ctrl.idx += KM_INCR;
	BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX);
	return current->kmap_ctrl.idx - 1;
}

static inline int kmap_local_idx(void)
{
	return current->kmap_ctrl.idx - 1;
}

static inline void kmap_local_idx_pop(void)
{
	current->kmap_ctrl.idx -= KM_INCR;
	BUG_ON(current->kmap_ctrl.idx < 0);
}

#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(const struct page *page)
{
	return NULL;
}
#endif

#ifndef arch_kmap_local_set_pte
#define arch_kmap_local_set_pte(mm, vaddr, ptep, ptev)	\
	set_pte_at(mm, vaddr, ptep, ptev)
#endif

/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline bool kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		kunmap_high(pte_page(ptep_get(&pkmap_page_table[PKMAP_NR(vaddr)])));
		return true;
	}
#endif
	return false;
}

static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(unsigned long vaddr, int idx)
{
	if (IS_ENABLED(CONFIG_KMAP_LOCAL_NON_LINEAR_PTE_ARRAY))
		/*
		 * Set by the arch if __kmap_pte[-idx] does not produce
		 * the correct entry.
		 */
		return virt_to_kpte(vaddr);
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return &__kmap_pte[-idx];
}

void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte;
	unsigned long vaddr;
	int idx;

	/*
	 * Disable migration so resulting virtual address is stable
	 * across preemption.
	 */
	migrate_disable();
	preempt_disable();
	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	kmap_pte = kmap_get_pte(vaddr, idx);
	BUG_ON(!pte_none(ptep_get(kmap_pte)));
	pteval = pfn_pte(pfn, prot);
	arch_kmap_local_set_pte(&init_mm, vaddr, kmap_pte, pteval);
	arch_kmap_local_post_map(vaddr, pteval);
	current->kmap_ctrl.pteval[kmap_local_idx()] = pteval;
	preempt_enable();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);

void *__kmap_local_page_prot(const struct page *page, pgprot_t prot)
{
	void *kmap;

	/*
	 * To broaden the usage of the actual kmap_local() machinery always map
	 * pages when debugging is enabled and the architecture has no problems
	 * with alias mappings.
	 */
	if (!IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP) && !PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
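
/*
 * A minimal usage sketch for the kmap_local interface: mappings are
 * per-task and stack based, so nested mappings must be released in the
 * reverse order of acquisition, and the returned address is only valid
 * within the acquiring context.
 *
 *	void *src = kmap_local_page(src_page);
 *	void *dst = kmap_local_page(dst_page);
 *
 *	memcpy(dst, src, PAGE_SIZE);
 *
 *	kunmap_local(dst);
 *	kunmap_local(src);
 */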

void kunmap_local_indexed(const void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte;
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL_FORCE_MAP)) {
			/* This _should_ never happen! See above. */
			WARN_ON_ONCE(1);
			return;
		}
		/*
		 * Handle mappings which were obtained by kmap_high_get()
		 * first as the virtual address of such mappings is below
		 * PAGE_OFFSET. Warn for all other addresses which are in
		 * the user space part of the virtual address space.
		 */
		if (!kmap_high_unmap_local(addr))
			WARN_ON_ONCE(addr < PAGE_OFFSET);
		return;
	}

	preempt_disable();
	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	kmap_pte = kmap_get_pte(addr, idx);
	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte);
	arch_kmap_local_post_unmap(addr);
	current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0);
	kmap_local_idx_pop();
	preempt_enable();
	migrate_enable();
}
EXPORT_SYMBOL(kunmap_local_indexed);

/*
 * Invoked before switch_to(). This is safe even when during or after
 * clearing the maps an interrupt which needs a kmap_local happens because
 * the task::kmap_ctrl.idx is not modified by the unmapping code so a
 * nested kmap_local will use the next unused index and restore the index
 * on unmap. The already cleared kmaps of the outgoing task are irrelevant
 * because the interrupt context does not know about them. The same applies
 * when scheduling back in for an interrupt which happens before the
 * restore is complete.
 */
void __kmap_local_sched_out(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Clear kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(pte_val(pteval) != 0);
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/*
		 * This is a horrible hack for XTENSA to calculate the
		 * coloured PTE index. Uses the PFN encoded into the pteval
		 * and the map index calculation because the actual mapped
		 * virtual address is not stored in task::kmap_ctrl.
		 * For any sane architecture this is optimized out.
		 */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));

		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		arch_kmap_local_pre_unmap(addr);
		pte_clear(&init_mm, addr, kmap_pte);
		arch_kmap_local_post_unmap(addr);
	}
}

void __kmap_local_sched_in(void)
{
	struct task_struct *tsk = current;
	pte_t *kmap_pte;
	int i;

	/* Restore kmaps */
	for (i = 0; i < tsk->kmap_ctrl.idx; i++) {
		pte_t pteval = tsk->kmap_ctrl.pteval[i];
		unsigned long addr;
		int idx;

		/* With debug all even slots are unmapped and act as guard */
		if (IS_ENABLED(CONFIG_DEBUG_KMAP_LOCAL) && !(i & 0x01)) {
			WARN_ON_ONCE(pte_val(pteval) != 0);
			continue;
		}
		if (WARN_ON_ONCE(pte_none(pteval)))
			continue;

		/* See comment in __kmap_local_sched_out() */
		idx = arch_kmap_local_map_idx(i, pte_pfn(pteval));
		addr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
		kmap_pte = kmap_get_pte(addr, idx);
		set_pte_at(&init_mm, addr, kmap_pte, pteval);
		arch_kmap_local_post_map(addr, pteval);
	}
}

void kmap_local_fork(struct task_struct *tsk)
{
	if (WARN_ON_ONCE(tsk->kmap_ctrl.idx))
		memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl));
}

#endif

#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}

/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				break;
			}
		}
	}

	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);

/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				break;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
}

void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */