Lines Matching +full:use +full:- +full:guard +full:- +full:pages

1 // SPDX-License-Identifier: GPL-2.0
9 * Redesigned the x86 32-bit VM architecture to deal with
10 * 64-bit physical space. With current x86 CPUs this
47 * since a TLB flush - it is usable.
49 * since the last TLB flush - so we can't use it.
50 * n means that there are (n-1) current users of it.
57 * addresses where physical memory pages are mapped by kmap.
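The counting convention described in the comment above (0 = free and already flushed, 1 = free but possibly still cached in the TLB, n = n-1 current users) can be illustrated with a small standalone model; the names below are hypothetical, not the kernel's:

#include <assert.h>
#include <stdio.h>

#define NR_SLOTS 8

/* Userspace model of the pkmap_count convention:
 *   0      slot unused and not present in any TLB, reusable right away
 *   1      slot unused, but a stale TLB entry may remain, needs a flush
 *   n >= 2 the mapping has (n - 1) current users
 */
static int slot_count[NR_SLOTS];

/* Take a reference: a free or stale-but-unused slot becomes "one user" (2),
 * an already-mapped slot simply gains another user. */
static void get_slot(int i)
{
    if (slot_count[i] < 2)
        slot_count[i] = 2;
    else
        slot_count[i]++;
}

/* Drop a reference: when the last user leaves, the count rests at 1, i.e.
 * "unused, but the TLB may still cache it" -- exactly the state a later
 * flush pass is allowed to reclaim. */
static void put_slot(int i)
{
    assert(slot_count[i] >= 2);
    slot_count[i]--;
}

int main(void)
{
    get_slot(3);                     /* first user: count becomes 2 */
    get_slot(3);                     /* second user of the same mapping: 3 */
    put_slot(3);
    put_slot(3);                     /* back to 1: unused but not yet flushed */
    printf("slot 3 count = %d (%d users)\n", slot_count[3], slot_count[3] - 1);
    return 0;
}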
120 unsigned int pages = 0; in __nr_free_highpages() local
124 pages += zone_page_state(zone, NR_FREE_PAGES); in __nr_free_highpages()
127 return pages; in __nr_free_highpages()
136 * Most architectures have no use for kmap_high_get(), so let's abstract
157 struct kmap_ctrl *kctrl = &current->kmap_ctrl; in __kmap_to_page()
169 for (i = 0; i < kctrl->idx; i++) { in __kmap_to_page()
177 return pte_page(kctrl->pteval[i]); in __kmap_to_page()
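These lines walk the calling task's kmap_ctrl slots to translate a local-mapping address back into its page. A standalone model of that reverse scan, with hypothetical names and an integer id standing in for struct page:

#include <stdio.h>

#define MAX_SLOTS 16

/* Userspace model of the __kmap_to_page() scan over the task's kmap_ctrl:
 * each active local mapping remembers what it mapped, and a reverse lookup
 * walks the active slots comparing addresses. */
struct slot { void *vaddr; int page_id; };

static struct slot slots[MAX_SLOTS];
static int nr_slots;                     /* models current->kmap_ctrl.idx */

static void push_mapping(void *vaddr, int page_id)
{
    slots[nr_slots].vaddr = vaddr;
    slots[nr_slots].page_id = page_id;
    nr_slots++;
}

static int vaddr_to_page_id(void *vaddr)
{
    for (int i = 0; i < nr_slots; i++)
        if (slots[i].vaddr == vaddr)
            return slots[i].page_id;     /* models pte_page(kctrl->pteval[i]) */
    return -1;
}

int main(void)
{
    char a, b;

    push_mapping(&a, 1);
    push_mapping(&b, 2);
    printf("page id for &b: %d\n", vaddr_to_page_id(&b));
    return 0;
}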
198 * >1 means that it is still in use. Only in flush_all_zero_pkmaps()
211 * Don't need an atomic fetch-and-clear op here; in flush_all_zero_pkmaps()
212 * no-one has the page mapped, and cannot get at in flush_all_zero_pkmaps()
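The comments above explain why the flush pass can clear the count with plain stores: nothing can take a new reference on a slot without first acquiring the same lock. A small userspace sketch of that sweep-then-flush-once pattern (hypothetical names, a printf standing in for the single batched TLB flush):

#include <pthread.h>
#include <stdio.h>

#define NR_SLOTS 8

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int slot_count[NR_SLOTS];

/* Every slot that dropped to "unused but possibly stale" (count == 1) is
 * reset with a plain store -- safe because anyone who could raise the count
 * again must take table_lock first -- and the expensive global operation
 * (a full TLB flush in the kernel) is paid only once, and only if something
 * was actually cleared. */
static void flush_unused_slots(void)
{
    int need_flush = 0;

    pthread_mutex_lock(&table_lock);
    for (int i = 0; i < NR_SLOTS; i++) {
        if (slot_count[i] == 1) {
            slot_count[i] = 0;          /* no atomic fetch-and-clear needed */
            need_flush = 1;
        }
    }
    if (need_flush)
        printf("would flush the whole kmap range once here\n");
    pthread_mutex_unlock(&table_lock);
}

int main(void)
{
    slot_count[2] = 1;      /* one stale, unused slot */
    slot_count[5] = 3;      /* still has two users: must be left alone */
    flush_unused_slots();
    printf("slot 2 -> %d, slot 5 -> %d\n", slot_count[2], slot_count[5]);
    return 0;
}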
252 if (--count) in map_new_virtual()
274 /* Re-start */ in map_new_virtual()
289 * kmap_high - map a highmem page into memory
301 * For highmem pages, we can't trust "virtual" until in kmap_high()
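kmap_high() is normally reached through the kmap() wrapper. A hedged sketch of the usual calling pattern (kernel-style code, not standalone; the helper name is made up for illustration):

#include <linux/highmem.h>
#include <linux/string.h>
#include <linux/types.h>

/* Sketch of typical kmap()/kunmap() usage; for a highmem page kmap() ends
 * up in kmap_high() and may sleep, so this can only run in process context. */
static void copy_from_page_sketch(struct page *page, void *dst, size_t len)
{
	void *vaddr = kmap(page);	/* takes a pkmap reference */

	memcpy(dst, vaddr, len);
	kunmap(page);			/* drops it (kunmap_high() for highmem pages) */
}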
317 * kmap_high_get - pin a highmem page into memory
342 * kunmap_high - unmap a highmem page
367 switch (--pkmap_count[nr]) { in kunmap_high()
375 * The tasks queued in the wait-queue are guarded in kunmap_high()
376 * by both the lock in the wait-queue-head and by in kunmap_high()
378 * no need for the wait-queue-head's lock. Simply in kunmap_high()
386 /* do wake-up, if needed, race-free outside of the spin lock */ in kunmap_high()
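The lines above describe deciding about the wake-up while holding the spinlock but issuing it only after the lock is dropped, so a woken waiter never contends on the lock. A hedged kernel-style sketch of that pattern (not the actual kunmap_high() code; the names are made up):

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(demo_lock);
static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* waiters sleep here via wait_event() */
static int demo_count;

static void demo_put(void)
{
	unsigned long flags;
	int need_wakeup = 0;

	spin_lock_irqsave(&demo_lock, flags);
	if (--demo_count == 0)
		need_wakeup = waitqueue_active(&demo_wq);
	spin_unlock_irqrestore(&demo_lock, flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(&demo_wq);
}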
408 start1 -= PAGE_SIZE; in zero_user_segments()
409 end1 -= PAGE_SIZE; in zero_user_segments()
415 memset(kaddr + start1, 0, this_end - start1); in zero_user_segments()
417 end1 -= this_end; in zero_user_segments()
422 start2 -= PAGE_SIZE; in zero_user_segments()
423 end2 -= PAGE_SIZE; in zero_user_segments()
430 memset(kaddr + start2, 0, this_end - start2); in zero_user_segments()
432 end2 -= this_end; in zero_user_segments()
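The fragments above come from the loop that zeroes two byte ranges across the pages of a (possibly compound) page, sliding both ranges down by one page per iteration. A standalone userspace model of that loop, with a flat buffer standing in for kmap_local_page():

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 4096u                  /* stands in for PAGE_SIZE */

static void zero_two_ranges(unsigned char *buf, unsigned int npages,
                            unsigned int start1, unsigned int end1,
                            unsigned int start2, unsigned int end2)
{
    for (unsigned int i = 0; i < npages; i++) {
        unsigned char *kaddr = buf + (size_t)i * PAGE_SZ;

        if (start1 >= PAGE_SZ) {
            start1 -= PAGE_SZ;         /* range 1 starts beyond this page */
            end1 -= PAGE_SZ;
        } else {
            unsigned int this_end = end1 < PAGE_SZ ? end1 : PAGE_SZ;

            if (end1 > start1)
                memset(kaddr + start1, 0, this_end - start1);
            end1 -= this_end;
            start1 = 0;
        }

        if (start2 >= PAGE_SZ) {
            start2 -= PAGE_SZ;         /* range 2 starts beyond this page */
            end2 -= PAGE_SZ;
        } else {
            unsigned int this_end = end2 < PAGE_SZ ? end2 : PAGE_SZ;

            if (end2 > start2)
                memset(kaddr + start2, 0, this_end - start2);
            end2 -= this_end;
            start2 = 0;
        }

        if (!end1 && !end2)            /* both ranges fully consumed */
            break;
    }
}

int main(void)
{
    static unsigned char buf[2 * PAGE_SZ];

    memset(buf, 0xff, sizeof(buf));
    /* Zero [100, 200) in the first page and [PAGE_SZ+10, PAGE_SZ+20) in the second. */
    zero_two_ranges(buf, 2, 100, 200, PAGE_SZ + 10, PAGE_SZ + 20);
    printf("buf[150] = %d, buf[PAGE_SZ + 15] = %d\n", buf[150], buf[PAGE_SZ + 15]);
    return 0;
}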
456 * slot is unused which acts as a guard page
467 current->kmap_ctrl.idx += KM_INCR; in kmap_local_idx_push()
468 BUG_ON(current->kmap_ctrl.idx >= KM_MAX_IDX); in kmap_local_idx_push()
469 return current->kmap_ctrl.idx - 1; in kmap_local_idx_push()
474 return current->kmap_ctrl.idx - 1; in kmap_local_idx()
479 current->kmap_ctrl.idx -= KM_INCR; in kmap_local_idx_pop()
480 BUG_ON(current->kmap_ctrl.idx < 0); in kmap_local_idx_pop()
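These lines maintain the per-task kmap_local index; with debugging enabled the index advances by two, so only the odd slots are handed out and every even slot stays unmapped as a guard page. A standalone model of that push/pop scheme (names and the DEBUG_GUARDS switch are hypothetical):

#include <assert.h>
#include <stdio.h>

#define KM_MAX_IDX 16

#ifdef DEBUG_GUARDS
# define KM_INCR 2      /* every even slot stays unmapped as a guard */
#else
# define KM_INCR 1
#endif

static int kmap_idx;                 /* models current->kmap_ctrl.idx */

static int idx_push(void)
{
    kmap_idx += KM_INCR;
    assert(kmap_idx < KM_MAX_IDX);   /* models the BUG_ON() overflow check */
    return kmap_idx - 1;             /* the slot used for the new mapping */
}

static void idx_pop(void)
{
    kmap_idx -= KM_INCR;
    assert(kmap_idx >= 0);           /* models the BUG_ON() underflow check */
}

int main(void)
{
    /* With -DDEBUG_GUARDS this prints slots 1 and 3 (0 and 2 stay guards);
     * without it, the usual dense slots 0 and 1. */
    printf("first mapping uses slot %d\n", idx_push());
    printf("nested mapping uses slot %d\n", idx_push());
    idx_pop();
    idx_pop();
    return 0;
}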
529 * Set by the arch if __kmap_pte[-idx] does not produce in kmap_get_pte()
535 return &__kmap_pte[-idx]; in kmap_get_pte()
557 current->kmap_ctrl.pteval[kmap_local_idx()] = pteval; in __kmap_local_pfn_prot()
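The negative index here reflects that the kmap PTEs are reached by indexing downward from a base pointer (the underlying fixmap slots grow toward lower virtual addresses on many architectures). A trivial standalone illustration of base[-idx] addressing, with hypothetical names:

#include <stdio.h>

#define NSLOTS 8

static int slots[NSLOTS];

int main(void)
{
    int *top = &slots[NSLOTS - 1];   /* models __kmap_pte pointing at the top entry */

    for (int idx = 0; idx < NSLOTS; idx++)
        top[-idx] = idx;             /* slot idx lives idx entries below the top */

    printf("slots[%d] = %d (written via top[-%d])\n",
           NSLOTS - 1 - 3, slots[NSLOTS - 1 - 3], 3);
    return 0;
}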
570 * pages when debugging is enabled and the architecture has no problems in __kmap_local_page_prot()
617 current->kmap_ctrl.pteval[kmap_local_idx()] = __pte(0); in kunmap_local_indexed()
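For completeness, a hedged kernel-style sketch of how a caller uses the kmap_local path these lines implement; the mappings are per task, strictly nested, and must be released in reverse order (the helper name is made up):

#include <linux/highmem.h>
#include <linux/types.h>

/* Sketch of typical kmap_local_page()/kunmap_local() usage. */
static u32 read_first_word_sketch(struct page *page)
{
	u32 *vaddr = kmap_local_page(page);	/* pushes a per-task kmap_local slot */
	u32 val = *vaddr;

	kunmap_local(vaddr);			/* pops it again */
	return val;
}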
628 * nested kmap_local will use the next unused index and restore the index
641 for (i = 0; i < tsk->kmap_ctrl.idx; i++) { in __kmap_local_sched_out()
642 pte_t pteval = tsk->kmap_ctrl.pteval[i]; in __kmap_local_sched_out()
646 /* With debug all even slots are unmapped and act as guard */ in __kmap_local_sched_out()
678 for (i = 0; i < tsk->kmap_ctrl.idx; i++) { in __kmap_local_sched_in()
679 pte_t pteval = tsk->kmap_ctrl.pteval[i]; in __kmap_local_sched_in()
683 /* With debug all even slots are unmapped and act as guard */ in __kmap_local_sched_in()
702 if (WARN_ON_ONCE(tsk->kmap_ctrl.idx)) in kmap_local_fork()
703 memset(&tsk->kmap_ctrl, 0, sizeof(tsk->kmap_ctrl)); in kmap_local_fork()
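The loops above save and restore a task's local mappings across a context switch: switch-out tears down the shared slots, switch-in re-installs the saved entries so the task's pointers stay valid, and fork clears the child's kmap_ctrl if it was copied with mappings still active. A standalone model of that save/restore idea (hypothetical names, integers standing in for PTEs):

#include <stdio.h>

#define MAX_SLOTS 8

struct task_model {
    int idx;                      /* models tsk->kmap_ctrl.idx */
    int pteval[MAX_SLOTS];        /* models tsk->kmap_ctrl.pteval[] (0 == none) */
};

static int shared_window[MAX_SLOTS];   /* models the shared kmap_local slots */

static void sched_out(struct task_model *tsk)
{
    for (int i = 0; i < tsk->idx; i++) {
        if (!tsk->pteval[i])           /* guard slot: nothing mapped */
            continue;
        shared_window[i] = 0;          /* tear the mapping down */
    }
}

static void sched_in(struct task_model *tsk)
{
    for (int i = 0; i < tsk->idx; i++) {
        if (!tsk->pteval[i])
            continue;
        shared_window[i] = tsk->pteval[i];   /* re-establish the saved mapping */
    }
}

int main(void)
{
    struct task_model a = { .idx = 2, .pteval = { 11, 22 } };
    struct task_model b = { .idx = 1, .pteval = { 33 } };

    sched_in(&a);
    sched_out(&a);          /* context switch: a -> b */
    sched_in(&b);
    printf("slot 0 now holds %d (b's mapping); a still remembers %d\n",
           shared_window[0], a.pteval[0]);
    return 0;
}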
713 * Describes one page->virtual association
737 * page_address - get the mapped virtual address of a page
753 spin_lock_irqsave(&pas->lock, flags); in page_address()
754 if (!list_empty(&pas->lh)) { in page_address()
757 list_for_each_entry(pam, &pas->lh, list) { in page_address()
758 if (pam->page == page) { in page_address()
759 ret = pam->virtual; in page_address()
765 spin_unlock_irqrestore(&pas->lock, flags); in page_address()
771 * set_page_address - set a page's virtual address
773 * @virtual: virtual address to use
786 pam->page = page; in set_page_address()
787 pam->virtual = virtual; in set_page_address()
789 spin_lock_irqsave(&pas->lock, flags); in set_page_address()
790 list_add_tail(&pam->list, &pas->lh); in set_page_address()
791 spin_unlock_irqrestore(&pas->lock, flags); in set_page_address()
793 spin_lock_irqsave(&pas->lock, flags); in set_page_address()
794 list_for_each_entry(pam, &pas->lh, list) { in set_page_address()
795 if (pam->page == page) { in set_page_address()
796 list_del(&pam->list); in set_page_address()
800 spin_unlock_irqrestore(&pas->lock, flags); in set_page_address()
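page_address() and set_page_address() maintain a small hash of page->virtual associations, each bucket a spinlock-protected list, as the lines above show. A standalone userspace model of that table (hypothetical names, pointers standing in for struct page, locking omitted):

#include <stdio.h>

#define NBUCKETS 16
#define POOL 32

struct pv_map {
    const void *page;
    void *virtual;
    struct pv_map *next;
};

static struct pv_map pool[POOL];
static int pool_used;
static struct pv_map *buckets[NBUCKETS];

static unsigned int hash_page(const void *page)
{
    return ((unsigned long)page >> 4) % NBUCKETS;
}

/* Models page_address(): walk the bucket looking for the page. */
static void *lookup_virtual(const void *page)
{
    for (struct pv_map *pam = buckets[hash_page(page)]; pam; pam = pam->next)
        if (pam->page == page)
            return pam->virtual;
    return NULL;
}

/* Models set_page_address(): a non-NULL virtual adds an association,
 * a NULL virtual removes the existing one. */
static void set_virtual(const void *page, void *virtual)
{
    unsigned int h = hash_page(page);

    if (virtual) {
        struct pv_map *pam = &pool[pool_used++];

        pam->page = page;
        pam->virtual = virtual;
        pam->next = buckets[h];
        buckets[h] = pam;
    } else {
        for (struct pv_map **pp = &buckets[h]; *pp; pp = &(*pp)->next) {
            if ((*pp)->page == page) {
                *pp = (*pp)->next;
                break;
            }
        }
    }
}

int main(void)
{
    int page;                     /* stands in for a struct page */
    char mapping[64];

    set_virtual(&page, mapping);
    printf("virtual for page: %p\n", lookup_virtual(&page));
    set_virtual(&page, NULL);
    printf("after clearing:   %p\n", lookup_virtual(&page));
    return 0;
}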