Lines Matching +full:use +full:- +full:guard +full:- +full:pages
1 // SPDX-License-Identifier: GPL-2.0
9 * Redesigned the x86 32-bit VM architecture to deal with
10 * 64-bit physical space. With current x86 CPUs this
47 * since a TLB flush - it is usable.
49 * since the last TLB flush - so we can't use it.
50 * n means that there are (n-1) current users of it.
57 * addresses where physical memory pages are mapped by kmap.
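The matched comment at source lines 47-50 spells out the pkmap_count convention: 0 means the slot is unmapped and already flushed from the TLB (immediately reusable), 1 means unmapped but possibly still cached in the TLB since the last flush, and n means the mapping has n-1 current users. A minimal userspace sketch of that convention, assuming nothing beyond what the comment states; the demo_* names are illustrative, not kernel symbols:

#include <stdio.h>

/*
 * One counter per pkmap slot:
 *   0 -> not mapped, already flushed from the TLB: reusable right away
 *   1 -> not mapped, but possibly still in the TLB: flush before reuse
 *   n -> (n - 1) current users of the mapping
 */
static int demo_pkmap_count;

static void demo_kmap(void)
{
        if (demo_pkmap_count == 0)
                demo_pkmap_count = 1;   /* slot freshly taken over */
        demo_pkmap_count++;             /* one more user */
}

static void demo_kunmap(void)
{
        /* dropping to 1 means "no users left, but TLB-stale until a flush" */
        demo_pkmap_count--;
}

int main(void)
{
        demo_kmap();
        demo_kmap();            /* a second user of the same slot */
        printf("users: %d\n", demo_pkmap_count - 1);

        demo_kunmap();
        demo_kunmap();
        printf("count: %d (unmapped, needs a TLB flush before reuse)\n",
               demo_pkmap_count);
        return 0;
}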
116 unsigned long pages = 0; in __nr_free_highpages() local
121 pages += zone_page_state(zone, NR_FREE_PAGES); in __nr_free_highpages()
124 return pages; in __nr_free_highpages()
129 unsigned long pages = 0; in __totalhigh_pages() local
134 pages += zone_managed_pages(zone); in __totalhigh_pages()
137 return pages; in __totalhigh_pages()
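Both counting helpers matched above have the same shape: walk the zones and accumulate one per-zone counter, NR_FREE_PAGES for __nr_free_highpages() and the managed-page count for __totalhigh_pages(). A self-contained sketch of that accumulation pattern; struct demo_zone and its highmem flag are stand-ins for the kernel's zone machinery, and restricting the sum to highmem zones is an assumption since the filter is not part of the matched lines:

#include <stdbool.h>
#include <stdio.h>

struct demo_zone {
        bool            highmem;        /* stand-in for "is this a highmem zone?" */
        unsigned long   free_pages;     /* stand-in for the NR_FREE_PAGES counter */
        unsigned long   managed_pages;  /* stand-in for the managed-page count */
};

static struct demo_zone demo_zones[] = {
        { .highmem = false, .free_pages = 1000, .managed_pages = 4000 },
        { .highmem = true,  .free_pages =  300, .managed_pages = 2000 },
        { .highmem = true,  .free_pages =  150, .managed_pages = 1000 },
};

/* Sum one counter across all (assumed) highmem zones, as the helpers do. */
static unsigned long demo_nr_free_highpages(void)
{
        unsigned long pages = 0;

        for (size_t i = 0; i < sizeof(demo_zones) / sizeof(demo_zones[0]); i++)
                if (demo_zones[i].highmem)
                        pages += demo_zones[i].free_pages;

        return pages;
}

static unsigned long demo_totalhigh_pages(void)
{
        unsigned long pages = 0;

        for (size_t i = 0; i < sizeof(demo_zones) / sizeof(demo_zones[0]); i++)
                if (demo_zones[i].highmem)
                        pages += demo_zones[i].managed_pages;

        return pages;
}

int main(void)
{
        printf("free high pages:  %lu\n", demo_nr_free_highpages());
        printf("total high pages: %lu\n", demo_totalhigh_pages());
        return 0;
}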
147 * Most architectures have no use for kmap_high_get(), so let's abstract
168 struct kmap_ctrl *kctrl = &current->kmap_ctrl; in __kmap_to_page()
180 for (i = 0; i < kctrl->idx; i++) { in __kmap_to_page()
188 return pte_page(kctrl->pteval[i]); in __kmap_to_page()
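The __kmap_to_page() fragment resolves a kmap_local address by scanning the calling task's kmap_ctrl: walk pteval[] up to idx and return the page behind the matching entry. How the match is made is not visible in the matched lines, so the sketch below simply compares a recorded virtual address; the demo_* types are hypothetical:

#include <stdio.h>

#define DEMO_KM_MAX_IDX 16

struct demo_entry {
        void *vaddr;    /* where this slot is currently mapped */
        long  page;     /* stand-in for the struct page behind the pte */
};

struct demo_kmap_ctrl {
        int               idx;                          /* number of live slots */
        struct demo_entry pteval[DEMO_KM_MAX_IDX];      /* per-task saved entries */
};

/* Linear scan of the live slots, like the loop over kctrl->pteval above. */
static long demo_kmap_to_page(struct demo_kmap_ctrl *kctrl, void *vaddr)
{
        for (int i = 0; i < kctrl->idx; i++)
                if (kctrl->pteval[i].vaddr == vaddr)
                        return kctrl->pteval[i].page;
        return -1;      /* not a local kmap of this task */
}

int main(void)
{
        static char slot0[4096], slot1[4096];
        struct demo_kmap_ctrl ctrl = {
                .idx = 2,
                .pteval = { { slot0, 10 }, { slot1, 42 } },
        };

        printf("page behind slot1: %ld\n", demo_kmap_to_page(&ctrl, slot1));
        return 0;
}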
209 * >1 means that it is still in use. Only in flush_all_zero_pkmaps()
222 * Don't need an atomic fetch-and-clear op here; in flush_all_zero_pkmaps()
223 * no-one has the page mapped, and cannot get at in flush_all_zero_pkmaps()
263 if (--count) in map_new_virtual()
285 /* Re-start */ in map_new_virtual()
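flush_all_zero_pkmaps() reclaims every slot whose count has dropped to 1 (mapped, but no users left), resetting it to 0 once a TLB flush makes it reusable, and map_new_virtual() round-robins through the array looking for a zero count, flushing when its scan wraps and sleeping (then re-starting) when every slot is busy. A compact userspace sketch of that reclaim-and-search loop, with the sleep reduced to a comment; all demo_* names are illustrative:

#include <stdio.h>

#define DEMO_LAST_PKMAP 8

static int demo_pkmap_count[DEMO_LAST_PKMAP] = { 2, 1, 3, 1, 0, 2, 1, 2 };
static unsigned int demo_last_pkmap_nr;

/* Reclaim slots with no users left (count == 1) in one pass. */
static void demo_flush_all_zero_pkmaps(void)
{
        for (int nr = 0; nr < DEMO_LAST_PKMAP; nr++) {
                if (demo_pkmap_count[nr] != 1)
                        continue;       /* 0: already free, >1: still in use */
                /*
                 * No atomic fetch-and-clear is needed here either: nobody
                 * has the slot mapped, and the array lock keeps new users
                 * out while we reset it.
                 */
                demo_pkmap_count[nr] = 0;
        }
        /* the kernel issues a TLB flush for the whole pkmap range here */
}

/* Round-robin search for a free slot, flushing when the scan wraps. */
static int demo_map_new_virtual(void)
{
        int count = DEMO_LAST_PKMAP;

        for (;;) {
                demo_last_pkmap_nr = (demo_last_pkmap_nr + 1) % DEMO_LAST_PKMAP;
                if (demo_last_pkmap_nr == 0) {
                        demo_flush_all_zero_pkmaps();
                        count = DEMO_LAST_PKMAP;
                }
                if (demo_pkmap_count[demo_last_pkmap_nr] == 0)
                        return demo_last_pkmap_nr;      /* usable slot */
                if (--count)
                        continue;
                /* the kernel sleeps on pkmap_map_wait here and re-starts */
                return -1;
        }
}

int main(void)
{
        printf("picked pkmap slot %d\n", demo_map_new_virtual());
        return 0;
}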
300 * kmap_high - map a highmem page into memory
312 * For highmem pages, we can't trust "virtual" until in kmap_high()
328 * kmap_high_get - pin a highmem page into memory
353 * kunmap_high - unmap a highmem page
378 switch (--pkmap_count[nr]) { in kunmap_high()
386 * The tasks queued in the wait-queue are guarded in kunmap_high()
387 * by both the lock in the wait-queue-head and by in kunmap_high()
389 * no need for the wait-queue-head's lock. Simply in kunmap_high()
397 /* do wake-up, if needed, race-free outside of the spin lock */ in kunmap_high()
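kunmap_high() drops the slot's count under the pkmap lock, merely notes whether a waiter needs waking, and issues the wake-up only after the lock is released, as the matched comment says. A userspace sketch of that decide-under-the-lock, wake-outside-it pattern using a pthread mutex and condition variable in place of the kernel's spinlock and wait queue (the demo_* names are not kernel symbols):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  demo_map_wait = PTHREAD_COND_INITIALIZER;
static int demo_pkmap_count = 3;        /* two users plus the base mapping */
static bool demo_waiters;               /* someone is waiting for a free slot */

static void demo_kunmap_high(void)
{
        bool need_wakeup = false;

        pthread_mutex_lock(&demo_lock);
        switch (--demo_pkmap_count) {
        case 0:
                /* the count never drops below 1 while the page is mapped */
                fprintf(stderr, "unbalanced unmap\n");
                break;
        case 1:
                /*
                 * Last user gone: the slot becomes reclaimable at the next
                 * flush, so remember whether any waiter should be kicked.
                 */
                need_wakeup = demo_waiters;
                break;
        default:
                break;          /* still in use by someone else */
        }
        pthread_mutex_unlock(&demo_lock);

        /* do the wake-up, if needed, race-free outside of the lock */
        if (need_wakeup)
                pthread_cond_signal(&demo_map_wait);
}

int main(void)
{
        demo_waiters = true;
        demo_kunmap_high();     /* 3 -> 2: another user still holds it */
        demo_kunmap_high();     /* 2 -> 1: last user, waiter gets signalled */
        printf("final count: %d\n", demo_pkmap_count);
        return 0;
}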
419 start1 -= PAGE_SIZE; in zero_user_segments()
420 end1 -= PAGE_SIZE; in zero_user_segments()
426 memset(kaddr + start1, 0, this_end - start1); in zero_user_segments()
428 end1 -= this_end; in zero_user_segments()
433 start2 -= PAGE_SIZE; in zero_user_segments()
434 end2 -= PAGE_SIZE; in zero_user_segments()
441 memset(kaddr + start2, 0, this_end - start2); in zero_user_segments()
443 end2 -= this_end; in zero_user_segments()
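zero_user_segments() zeroes up to two byte ranges inside a (possibly compound, possibly highmem) page by walking it one PAGE_SIZE chunk at a time: ranges that start in a later chunk are slid down by PAGE_SIZE, ranges overlapping the current chunk are clamped and memset(), and the walk stops once both ranges are exhausted. A self-contained sketch of that clamping walk over an ordinary buffer, with a tiny page size so the steps are visible; the per-chunk kmap is reduced to pointer arithmetic and demo_zero_segments is a hypothetical name:

#include <stdio.h>
#include <string.h>

#define DEMO_PAGE_SIZE 8U       /* tiny "page" so the walk is easy to follow */
#define DEMO_NR_PAGES  4U

/* Zero [start1, end1) and [start2, end2) inside a chunked buffer. */
static void demo_zero_segments(char *buf, unsigned start1, unsigned end1,
                               unsigned start2, unsigned end2)
{
        for (unsigned i = 0; i < DEMO_NR_PAGES; i++) {
                /* stand-in for mapping chunk i with kmap_local_page() */
                char *kaddr = buf + i * DEMO_PAGE_SIZE;

                if (start1 >= DEMO_PAGE_SIZE) {
                        /* range 1 starts in a later chunk: slide it down */
                        start1 -= DEMO_PAGE_SIZE;
                        end1 -= DEMO_PAGE_SIZE;
                } else {
                        unsigned this_end = end1 < DEMO_PAGE_SIZE ? end1 : DEMO_PAGE_SIZE;

                        if (end1 > start1)
                                memset(kaddr + start1, 0, this_end - start1);
                        end1 -= this_end;
                        start1 = 0;     /* any remainder begins the next chunk */
                }

                if (start2 >= DEMO_PAGE_SIZE) {
                        start2 -= DEMO_PAGE_SIZE;
                        end2 -= DEMO_PAGE_SIZE;
                } else {
                        unsigned this_end = end2 < DEMO_PAGE_SIZE ? end2 : DEMO_PAGE_SIZE;

                        if (end2 > start2)
                                memset(kaddr + start2, 0, this_end - start2);
                        end2 -= this_end;
                        start2 = 0;
                }

                if (!end1 && !end2)
                        break;          /* both ranges fully handled */
        }
}

int main(void)
{
        char buf[DEMO_PAGE_SIZE * DEMO_NR_PAGES];

        memset(buf, 'x', sizeof(buf));
        demo_zero_segments(buf, 3, 12, 20, 26); /* zero [3,12) and [20,26) */

        for (unsigned i = 0; i < sizeof(buf); i++)
                putchar(buf[i] ? buf[i] : '.');
        putchar('\n');
        return 0;
}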
467 * slot is unused which acts as a guard page
478 current->kmap_ctrl.idx += KM_INCR; in kmap_local_idx_push()
479 BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX); in kmap_local_idx_push()
480 return current->kmap_ctrl.idx - 1; in kmap_local_idx_push()
485 return current->kmap_ctrl.idx - 1; in kmap_local_idx()
490 current->kmap_ctrl.idx -= KM_INCR; in kmap_local_idx_pop()
491 BUG_ON(current->kmap_ctrl.idx < 0); in kmap_local_idx_pop()
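kmap_local slots are handed out stack-fashion per task: every push advances kmap_ctrl.idx by KM_INCR and every pop retreats it, so nested kmap_local mappings must be released in reverse order. With debugging enabled the increment leaves every even slot unused as a guard page (source lines 467 and 657). A small sketch of that index discipline; the demo_* names are illustrative and KM_INCR = 2 models the debug layout:

#include <assert.h>
#include <stdio.h>

#define DEMO_KM_INCR    2       /* debug layout: every other slot is a guard */
#define DEMO_KM_MAX_IDX 16

struct demo_kmap_ctrl {
        int idx;
};

static int demo_idx_push(struct demo_kmap_ctrl *kctrl)
{
        kctrl->idx += DEMO_KM_INCR;
        assert(kctrl->idx < DEMO_KM_MAX_IDX);   /* BUG_ON() in the kernel */
        return kctrl->idx - 1;                  /* the slot actually used */
}

static int demo_idx(struct demo_kmap_ctrl *kctrl)
{
        return kctrl->idx - 1;                  /* most recently used slot */
}

static void demo_idx_pop(struct demo_kmap_ctrl *kctrl)
{
        kctrl->idx -= DEMO_KM_INCR;
        assert(kctrl->idx >= 0);
}

int main(void)
{
        struct demo_kmap_ctrl ctrl = { 0 };

        int outer = demo_idx_push(&ctrl);       /* slot 1, slot 0 is a guard */
        int inner = demo_idx_push(&ctrl);       /* slot 3, slot 2 is a guard */

        printf("outer=%d inner=%d current=%d\n", outer, inner, demo_idx(&ctrl));

        demo_idx_pop(&ctrl);                    /* innermost mapping goes first */
        printf("after pop, current=%d\n", demo_idx(&ctrl));
        demo_idx_pop(&ctrl);
        return 0;
}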
540 * Set by the arch if __kmap_pte[-idx] does not produce in kmap_get_pte()
546 return &__kmap_pte[-idx]; in kmap_get_pte()
568 current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; in __kmap_local_pfn_prot()
581 * pages when debugging is enabled and the architecture has no problems in __kmap_local_page_prot()
628 current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); in kunmap_local_indexed()
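kmap_get_pte() indexes the PTE array downwards, &__kmap_pte[-idx], because on most configurations the kmap fixmap slots sit at decreasing virtual addresses as the index grows; __kmap_local_pfn_prot() additionally records the freshly installed pteval in the task's kmap_ctrl so it can be replayed after a context switch, and kunmap_local_indexed() clears that record again. A tiny demo of indexing backwards from the top of a table; demo_kmap_pte_top and friends are hypothetical:

#include <stdio.h>

#define DEMO_NR_SLOTS 8

/* A PTE table in which a larger kmap index lives at a *lower* array index. */
static unsigned long demo_ptes[DEMO_NR_SLOTS];

/* Base pointer taken at the topmost slot, like the cached base pte. */
static unsigned long *demo_kmap_pte_top = &demo_ptes[DEMO_NR_SLOTS - 1];

static unsigned long *demo_kmap_get_pte(int idx)
{
        /* &top[-idx] walks downwards through the table as idx grows */
        return &demo_kmap_pte_top[-idx];
}

int main(void)
{
        *demo_kmap_get_pte(0) = 0xaa;   /* topmost slot */
        *demo_kmap_get_pte(3) = 0xbb;   /* three slots below it */

        printf("idx 0 -> %p, idx 3 -> %p\n",
               (void *)demo_kmap_get_pte(0), (void *)demo_kmap_get_pte(3));
        printf("demo_ptes[%d] = %#lx\n",
               DEMO_NR_SLOTS - 1 - 3, demo_ptes[DEMO_NR_SLOTS - 1 - 3]);
        return 0;
}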
639 * nested kmap_local will use the next unused index and restore the index
652 for (i = 0; i < tsk->kmap_ctrl.idx; i++) { in __kmap_local_sched_out()
653 pte_t pteval = tsk->kmap_ctrl.pteval[i]; in __kmap_local_sched_out()
657 /* With debug all even slots are unmapped and act as guard */ in __kmap_local_sched_out()
689 for (i = 0; i < tsk->kmap_ctrl.idx; i++) { in __kmap_local_sched_in()
690 pte_t pteval = tsk->kmap_ctrl.pteval[i]; in __kmap_local_sched_in()
694 /* With debug all even slots are unmapped and act as guard */ in __kmap_local_sched_in()
713 if (WARN_ON_ONCE(tsk->kmap_ctrl.idx)) in kmap_local_fork()
714 memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl)); in kmap_local_fork()
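Because kmap_local mappings are strictly per task, the scheduler hooks walk the outgoing task's kmap_ctrl.pteval[] and tear those entries down, and the incoming task's hook re-installs its own saved ones; under debug the even guard slots are skipped, and kmap_local_fork() warns and wipes kmap_ctrl if a task somehow forks with live local mappings. A schematic userspace sketch of that save/skip/restore walk; demo_installed[] stands in for the CPU's view of the slots and every demo_* name is illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define DEMO_KM_MAX_IDX 8

struct demo_task {
        int             idx;                            /* live slots, guards included */
        unsigned long   pteval[DEMO_KM_MAX_IDX];        /* 0 == nothing recorded */
};

static bool demo_installed[DEMO_KM_MAX_IDX];    /* the "CPU" view of the slots */

static void demo_sched_out(struct demo_task *tsk)
{
        for (int i = 0; i < tsk->idx; i++) {
                /* with debug, all even slots are unmapped and act as guards */
                if (!(i & 0x01))
                        continue;
                if (!tsk->pteval[i])
                        continue;               /* nothing recorded here */
                demo_installed[i] = false;      /* tear the mapping down */
        }
}

static void demo_sched_in(struct demo_task *tsk)
{
        for (int i = 0; i < tsk->idx; i++) {
                if (!(i & 0x01))
                        continue;               /* guard slot stays unmapped */
                if (!tsk->pteval[i])
                        continue;
                demo_installed[i] = true;       /* re-establish the saved pte */
        }
}

static void demo_fork(const struct demo_task *parent, struct demo_task *child)
{
        *child = *parent;       /* the task copy brings kmap_ctrl along */
        /* forking with live local kmaps is unexpected: warn and wipe */
        if (child->idx) {
                fprintf(stderr, "fork with live kmap_local slots\n");
                memset(child, 0, sizeof(*child));
        }
}

int main(void)
{
        struct demo_task a = { .idx = 4, .pteval = { 0, 0x1000, 0, 0x2000 } };
        struct demo_task child;

        demo_sched_in(&a);
        printf("slot 3 installed: %d\n", demo_installed[3]);
        demo_sched_out(&a);
        printf("slot 3 installed after sched out: %d\n", demo_installed[3]);

        demo_fork(&a, &child);
        printf("child idx after fork: %d\n", child.idx);
        return 0;
}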
724 * Describes one page->virtual association
748 * page_address - get the mapped virtual address of a page
764 spin_lock_irqsave(&pas->lock, flags); in page_address()
765 if (!list_empty(&pas->lh)) { in page_address()
768 list_for_each_entry(pam, &pas->lh, list) { in page_address()
769 if (pam->page == page) { in page_address()
770 ret = pam->virtual; in page_address()
776 spin_unlock_irqrestore(&pas->lock, flags); in page_address()
782 * set_page_address - set a page's virtual address
784 * @virtual: virtual address to use
797 pam->page = page; in set_page_address()
798 pam->virtual = virtual; in set_page_address()
800 spin_lock_irqsave(&pas->lock, flags); in set_page_address()
801 list_add_tail(&pam->list, &pas->lh); in set_page_address()
802 spin_unlock_irqrestore(&pas->lock, flags); in set_page_address()
804 spin_lock_irqsave(&pas->lock, flags); in set_page_address()
805 list_for_each_entry(pam, &pas->lh, list) { in set_page_address()
806 if (pam->page == page) { in set_page_address()
807 list_del(&pam->list); in set_page_address()
811 spin_unlock_irqrestore(&pas->lock, flags); in set_page_address()
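For highmem pages the page->virtual association is kept in small hash buckets: page_address() locks the page's bucket and walks its list looking for a matching entry, while set_page_address() either appends a new page/virtual pair under the same lock or finds the existing entry and unlinks it. A self-contained userspace sketch of that bucket-and-list scheme, with a plain singly linked list and a pthread mutex in place of the kernel's list_head and spinlock, and a crude hash in place of the kernel's pointer hash; all demo_* names are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_HASH_SIZE 16

struct demo_page_address_map {          /* one page -> virtual association */
        const void *page;
        void *virtual;
        struct demo_page_address_map *next;
};

struct demo_page_address_slot {         /* one hash bucket */
        struct demo_page_address_map *head;
        pthread_mutex_t lock;
};

static struct demo_page_address_slot demo_slots[DEMO_HASH_SIZE];

/* crude stand-in for hashing the page pointer into a bucket */
static struct demo_page_address_slot *demo_slot_for(const void *page)
{
        return &demo_slots[((unsigned long)page >> 4) % DEMO_HASH_SIZE];
}

static void *demo_page_address(const void *page)
{
        struct demo_page_address_slot *pas = demo_slot_for(page);
        void *ret = NULL;

        pthread_mutex_lock(&pas->lock);
        for (struct demo_page_address_map *pam = pas->head; pam; pam = pam->next) {
                if (pam->page == page) {
                        ret = pam->virtual;
                        break;
                }
        }
        pthread_mutex_unlock(&pas->lock);
        return ret;
}

static void demo_set_page_address(const void *page, void *virtual)
{
        struct demo_page_address_slot *pas = demo_slot_for(page);

        pthread_mutex_lock(&pas->lock);
        if (virtual) {
                /* record a new page -> virtual association */
                struct demo_page_address_map *pam = malloc(sizeof(*pam));

                pam->page = page;
                pam->virtual = virtual;
                pam->next = pas->head;
                pas->head = pam;
        } else {
                /* find the existing association and unlink it */
                for (struct demo_page_address_map **link = &pas->head; *link;
                     link = &(*link)->next) {
                        if ((*link)->page == page) {
                                struct demo_page_address_map *dead = *link;

                                *link = dead->next;
                                free(dead);
                                break;
                        }
                }
        }
        pthread_mutex_unlock(&pas->lock);
}

int main(void)
{
        int page;               /* any address serves as a fake "struct page" */
        char mapping[64];

        for (int i = 0; i < DEMO_HASH_SIZE; i++)
                pthread_mutex_init(&demo_slots[i].lock, NULL);

        demo_set_page_address(&page, mapping);
        printf("mapped at %p\n", demo_page_address(&page));
        demo_set_page_address(&page, NULL);
        printf("after unmap: %p\n", demo_page_address(&page));
        return 0;
}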