Lines matching: page, -, level
1 // SPDX-License-Identifier: GPL-2.0-only
39 * The current flushing context - we pass it instead of 5 arguments:
53 struct page **pages;
67 * entries change the page attribute in parallel to some other cpu
68 * splitting a large page entry along with changing the attribute.
86 void update_page_count(int level, unsigned long pages) in update_page_count() argument
90 direct_pages_count[level] += pages; in update_page_count()
94 static void split_page_count(int level) in split_page_count() argument
96 if (direct_pages_count[level] == 0) in split_page_count()
99 direct_pages_count[level]--; in split_page_count()
101 if (level == PG_LEVEL_2M) in split_page_count()
103 else if (level == PG_LEVEL_1G) in split_page_count()
106 direct_pages_count[level - 1] += PTRS_PER_PTE; in split_page_count()
109 static void collapse_page_count(int level) in collapse_page_count() argument
111 direct_pages_count[level]++; in collapse_page_count()
113 if (level == PG_LEVEL_2M) in collapse_page_count()
115 else if (level == PG_LEVEL_1G) in collapse_page_count()
118 direct_pages_count[level - 1] -= PTRS_PER_PTE; in collapse_page_count()
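The two helpers above only do counter arithmetic. A minimal user-space model of that bookkeeping (abbreviated pg_level enum, PTRS_PER_PTE assumed to be 512 as on x86-64, and the vm-event debug counters omitted):

#include <stdio.h>

enum pg_level { PG_LEVEL_NONE, PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G, PG_LEVEL_NUM };

#define PTRS_PER_PTE 512UL	/* assumed: x86-64 value */

static unsigned long direct_pages_count[PG_LEVEL_NUM];

/* Splitting one large page yields PTRS_PER_PTE entries one level down. */
static void split_page_count(int level)
{
	if (direct_pages_count[level] == 0)
		return;

	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

/* Collapsing re-forms one large page from PTRS_PER_PTE smaller entries. */
static void collapse_page_count(int level)
{
	direct_pages_count[level]++;
	direct_pages_count[level - 1] -= PTRS_PER_PTE;
}

int main(void)
{
	direct_pages_count[PG_LEVEL_2M] = 4;

	split_page_count(PG_LEVEL_2M);		/* 3 x 2M, 512 x 4K */
	collapse_page_count(PG_LEVEL_2M);	/* back to 4 x 2M   */

	printf("2M: %lu  4K: %lu\n",
	       direct_pages_count[PG_LEVEL_2M], direct_pages_count[PG_LEVEL_4K]);
	return 0;
}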
137 static inline void split_page_count(int level) { } in split_page_count() argument
138 static inline void collapse_page_count(int level) { } in collapse_page_count() argument
166 static inline void cpa_inc_lp_sameprot(int level) in cpa_inc_lp_sameprot() argument
168 if (level == PG_LEVEL_1G) in cpa_inc_lp_sameprot()
174 static inline void cpa_inc_lp_preserved(int level) in cpa_inc_lp_preserved() argument
176 if (level == PG_LEVEL_1G) in cpa_inc_lp_preserved()
190 seq_printf(m, "4K pages set-checked: %16lu\n", cpa_4k_install); in cpastats_show()
217 static inline void cpa_inc_lp_sameprot(int level) { } in cpa_inc_lp_sameprot() argument
218 static inline void cpa_inc_lp_preserved(int level) { } in cpa_inc_lp_preserved() argument
244 * points to #2, but almost all physical-to-virtual translations point to #1.
261 return __pa_symbol(roundup(_brk_end, PMD_SIZE) - 1) >> PAGE_SHIFT; in highmap_end_pfn()
277 /* There is no highmap on 32-bit */ in __cpa_pfn_in_highmap()
290 * works fine for the code that updates the page tables. But at the end of the
291 * process we need to flush the TLB and cache and the non-canonical address
295 * will fix the top bit if needed and is a no-op otherwise.
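The fragment above refers to repairing the top bit of an address before flushing. One way to do that, shown here as a user-space sketch rather than the kernel's own helper, is to copy bit 62 into bit 63, which restores a kernel address whose sign bit was stripped and leaves already-canonical addresses untouched:

#include <stdint.h>
#include <stdio.h>

/* Copy bit 62 into bit 63: repairs a kernel address whose top bit was
 * lost and is a no-op for addresses that are already canonical. */
static uint64_t fix_top_bit(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 1) >> 1);
}

int main(void)
{
	uint64_t aliased = 0x7fff888001234567ULL;	/* bit 63 lost */

	printf("%#llx -> %#llx\n", (unsigned long long)aliased,
	       (unsigned long long)fix_top_bit(aliased));	/* -> 0xffff888001234567 */
	return 0;
}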
308 if (cpa->flags & CPA_PAGES_ARRAY) { in __cpa_addr()
309 struct page *page = cpa->pages[idx]; in __cpa_addr() local
311 if (unlikely(PageHighMem(page))) in __cpa_addr()
314 return (unsigned long)page_address(page); in __cpa_addr()
317 if (cpa->flags & CPA_ARRAY) in __cpa_addr()
318 return cpa->vaddr[idx]; in __cpa_addr()
320 return *cpa->vaddr + idx * PAGE_SIZE; in __cpa_addr()
330 void *p = (void *)((unsigned long)vaddr & ~(clflush_size - 1)); in clflush_cache_range_opt()
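The mask above rounds the start of the range down to a cache-line boundary so every touched line gets flushed. A runnable sketch of that range walk (cache-line size assumed to be 64 bytes here; the kernel takes it from boot_cpu_data.x86_clflush_size):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uintptr_t clflush_size = 64;	/* assumed cache-line size */
	uintptr_t vaddr = 0x100033;		/* arbitrary unaligned start */
	size_t size = 300;

	uintptr_t p    = vaddr & ~(clflush_size - 1);	/* round down to a line */
	uintptr_t vend = vaddr + size;

	for (; p < vend; p += clflush_size)	/* one CLFLUSHOPT per line in the kernel */
		printf("flush line at %#lx\n", (unsigned long)p);
	return 0;
}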
341 * clflush_cache_range - flush a cache range with clflush
374 return -ENXIO; in cpu_cache_invalidate_memregion()
387 * large page flushing. in __cpa_flush_all()
407 for (i = 0; i < cpa->numpages; i++) in __cpa_flush_tlb()
421 if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) { in cpa_collapse_large_pages()
422 for (i = 0; i < cpa->numpages; i++) in cpa_collapse_large_pages()
428 end = addr + PAGE_SIZE * cpa->numpages; in cpa_collapse_large_pages()
440 list_del(&ptdesc->pt_list); in cpa_collapse_large_pages()
456 if (cpa->force_flush_all || cpa->numpages > tlb_single_page_flush_ceiling) in cpa_flush()
465 for (i = 0; i < cpa->numpages; i++) { in cpa_flush()
467 unsigned int level; in cpa_flush() local
469 pte_t *pte = lookup_address(addr, &level); in cpa_flush()
480 if (cpa->flags & CPA_COLLAPSE) in cpa_flush()
497 #define BIOS_PFN_END PFN_DOWN(BIOS_END - 1)
513 * The .rodata section needs to be read-only. Using the pfn catches all
522 * Note: __end_rodata is page aligned and not inclusive, so in protect_rodata()
525 epfn_ro = PFN_DOWN(__pa_symbol(__end_rodata)) - 1; in protect_rodata()
534 * _PAGE_NX. This protects only the high kernel mapping (_text -> _etext)
542 unsigned long t_end = (unsigned long)_etext - 1; in protect_kernel_text()
553 * kernel text mappings for the large page aligned text, rodata sections
554 * will always be read-only. For the kernel identity mappings covering the in protect_kernel_text_ro()
557 * This will preserve the large page mappings for kernel text/data at no
563 unsigned long t_end = (unsigned long)__end_rodata_hpage_align - 1; in protect_kernel_text_ro()
565 unsigned int level; in protect_kernel_text_ro() local
571 * the current mapping is already using small page mapping. No in protect_kernel_text_ro()
572 * need to work hard to preserve large page mappings in this case. in protect_kernel_text_ro()
575 * by unexpected read-only mappings for kernel identity in protect_kernel_text_ro()
577 * and the kernel identity mapping share the same page-table pages, in protect_kernel_text_ro()
581 if (lookup_address(start, &level) && (level != PG_LEVEL_4K)) in protect_kernel_text_ro()
611 pr_warn("CPA %8s %10s: 0x%016lx - 0x%016lx PFN %lx req %016llx prevent %016llx\n", in check_conflict()
631 * mapping is setting the page !PRESENT. in static_protections()
637 end = start + npg * PAGE_SIZE - 1; in static_protections()
644 * Special case to preserve a large page. If the change spans the in static_protections()
645 * full large page mapping then there is no point to split it in static_protections()
649 if (lpsize != (npg * PAGE_SIZE) || (start & (lpsize - 1))) { in static_protections()
656 res = protect_pci_bios(pfn, pfn + npg - 1); in static_protections()
660 res = protect_rodata(pfn, pfn + npg - 1); in static_protections()
677 * 32-bit has some unfixable W+X issues, like EFI code in verify_rwx()
678 * and writeable data being in the same page. Disable in verify_rwx()
694 /* Non-leaf translation entries can disable writing or execution. */ in verify_rwx()
698 end = start + npg * PAGE_SIZE - 1; in verify_rwx()
699 WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n", in verify_rwx()
713 * Lookup the page table entry for a virtual address in a specific pgd.
715 * the level of the entry, and the effective NX and RW bits of all
716 * page table levels.
719 unsigned int *level, bool *nx, bool *rw) in lookup_address_in_pgd_attr() argument
725 *level = PG_LEVEL_256T; in lookup_address_in_pgd_attr()
732 *level = PG_LEVEL_512G; in lookup_address_in_pgd_attr()
743 *level = PG_LEVEL_1G; in lookup_address_in_pgd_attr()
754 *level = PG_LEVEL_2M; in lookup_address_in_pgd_attr()
765 *level = PG_LEVEL_4K; in lookup_address_in_pgd_attr()
773 * Lookup the page table entry for a virtual address in a specific pgd.
774 * Return a pointer to the entry and the level of the mapping.
777 unsigned int *level) in lookup_address_in_pgd() argument
781 return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw); in lookup_address_in_pgd()
785 * Lookup the page table entry for a virtual address. Return a pointer
786 * to the entry and the level of the mapping.
791 pte_t *lookup_address(unsigned long address, unsigned int *level) in lookup_address() argument
793 return lookup_address_in_pgd(pgd_offset_k(address), address, level); in lookup_address()
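A minimal usage sketch of lookup_address() (kernel context assumed; addr is a placeholder kernel virtual address, and the level comes back as one of the PG_LEVEL_* values):

unsigned int level;
pte_t *pte = lookup_address(addr, &level);

if (pte && pte_present(*pte))
	pr_info("%#lx mapped, level %u\n", addr, level);
else
	pr_info("%#lx not mapped\n", addr);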
798 unsigned int *level, bool *nx, bool *rw) in _lookup_address_cpa() argument
802 if (!cpa->pgd) in _lookup_address_cpa()
805 pgd = cpa->pgd + pgd_index(address); in _lookup_address_cpa()
807 return lookup_address_in_pgd_attr(pgd, address, level, nx, rw); in _lookup_address_cpa()
838 * areas on 32-bit NUMA systems. The percpu areas can
841 * Note that as long as the PTEs are well-formed with correct PFNs, this
855 enum pg_level level; in slow_virt_to_phys() local
858 pte = lookup_address(virt_addr, &level); in slow_virt_to_phys()
863 * before being left-shifted PAGE_SHIFT bits -- this trick is to in slow_virt_to_phys()
864 * make the 32-bit PAE kernel work correctly. in slow_virt_to_phys()
866 switch (level) { in slow_virt_to_phys()
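The level-specific cases combine the large-page base pfn with the offset of the virtual address inside that page. A small runnable model for a 2 MiB mapping with made-up values shows why the pfn is widened to 64 bits before the shift:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21				/* 2 MiB large page assumed */

int main(void)
{
	uint64_t pfn   = 0x123400;			/* large-page base pfn (> 4 GiB once shifted) */
	uint64_t vaddr = 0xc0345678;			/* virtual address inside that mapping */
	uint64_t offset = vaddr & ((1ULL << PMD_SHIFT) - 1);

	/* Widen before shifting: on 32-bit PAE an unsigned long pfn shifted
	 * left by PAGE_SHIFT would truncate physical bits above 4 GiB. */
	uint64_t phys = (pfn << PAGE_SHIFT) + offset;

	printf("phys = %#llx\n", (unsigned long long)phys);	/* 0x123545678 */
	return 0;
}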
893 struct page *page; in __set_pmd_pte() local
895 list_for_each_entry(page, &pgd_list, lru) { in __set_pmd_pte()
901 pgd = (pgd_t *)page_address(page) + pgd_index(address); in __set_pmd_pte()
914 * _PAGE_GLOBAL means "global page" for present PTEs. in pgprot_clear_protnone_bits()
916 * for non-present PTEs. in pgprot_clear_protnone_bits()
919 * present to non-present is not confused as in pgprot_clear_protnone_bits()
934 enum pg_level level; in __should_split_large_page() local
938 * Check for races, another CPU might have split this page in __should_split_large_page()
941 tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw); in __should_split_large_page()
945 switch (level) { in __should_split_large_page()
957 return -EINVAL; in __should_split_large_page()
960 psize = page_level_size(level); in __should_split_large_page()
961 pmask = page_level_mask(level); in __should_split_large_page()
965 * page starting at address: in __should_split_large_page()
968 numpages = (lpaddr - address) >> PAGE_SHIFT; in __should_split_large_page()
969 if (numpages < cpa->numpages) in __should_split_large_page()
970 cpa->numpages = numpages; in __should_split_large_page()
974 * Convert protection attributes to 4k-format, as cpa->mask* are set in __should_split_large_page()
981 pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr); in __should_split_large_page()
982 pgprot_val(req_prot) |= pgprot_val(cpa->mask_set); in __should_split_large_page()
986 * page format: the caching mode includes the PAT bit located at in __should_split_large_page()
995 * old_pfn points to the large page base pfn. So we need to add the in __should_split_large_page()
998 pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT); in __should_split_large_page()
999 cpa->pfn = pfn; in __should_split_large_page()
1002 * Calculate the large page base address and the number of 4K pages in __should_split_large_page()
1003 * in the large page in __should_split_large_page()
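Taken together, the computations above are plain mask arithmetic over page_level_size()/page_level_mask(). A runnable sketch for a hypothetical PG_LEVEL_2M mapping (made-up address and pfn, 64-bit build assumed):

#include <stdio.h>

#define PAGE_SHIFT	12

int main(void)
{
	unsigned long psize   = 2UL << 20;		/* page_level_size(PG_LEVEL_2M) */
	unsigned long pmask   = ~(psize - 1);		/* page_level_mask(PG_LEVEL_2M) */
	unsigned long address = 0xffff888001abc000UL;
	unsigned long old_pfn = 0x1a00;			/* base pfn of the large page */

	/* 4K pages from 'address' up to the end of the enclosing large page: */
	unsigned long lpaddr   = (address + psize) & pmask;
	unsigned long numpages = (lpaddr - address) >> PAGE_SHIFT;

	/* pfn of the 4K page that 'address' falls into: */
	unsigned long pfn = old_pfn + ((address & (psize - 1)) >> PAGE_SHIFT);

	/* Base address and total number of 4K pages of the large page itself: */
	unsigned long lpbase  = address & pmask;
	unsigned long lppages = psize >> PAGE_SHIFT;

	printf("%lu pages to the boundary, pfn %#lx, base %#lx holds %lu pages\n",
	       numpages, pfn, lpbase, lppages);		/* 324, 0x1abc, ...a00000, 512 */
	return 0;
}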
1018 * Split the large page and tell the split code to in __should_split_large_page()
1021 cpa->force_static_prot = 1; in __should_split_large_page()
1027 * pgprot, then the large page can be preserved and no updates are in __should_split_large_page()
1035 cpa_inc_lp_sameprot(level); in __should_split_large_page()
1040 * If the requested range does not cover the full page, split it up in __should_split_large_page()
1042 if (address != lpaddr || cpa->numpages != numpages) in __should_split_large_page()
1047 * protection requirement in the large page. in __should_split_large_page()
1056 * If there is a conflict, split the large page. in __should_split_large_page()
1061 * preserve one large page occasionally, but it's really not worth the in __should_split_large_page()
1067 /* All checks passed. Update the large page mapping. */ in __should_split_large_page()
1070 cpa->flags |= CPA_FLUSHTLB; in __should_split_large_page()
1071 cpa_inc_lp_preserved(level); in __should_split_large_page()
1080 if (cpa->force_split) in should_split_large_page()
1101 if (!cpa->force_static_prot) in split_set_pte()
1128 struct page *base) in __split_large_page()
1132 unsigned int i, level; in __split_large_page() local
1139 * Check for races, another CPU might have split this page in __split_large_page()
1142 tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw); in __split_large_page()
1150 switch (level) { in __split_large_page()
1196 split_page_count(level); in __split_large_page()
1209 * Do a global flush tlb after splitting the large page in __split_large_page()
1210 * and before we do the actual change page attribute in the PTE. in __split_large_page()
1213 * "The TLBs may contain both ordinary and large-page in __split_large_page()
1214 * translations for a 4-KByte range of linear addresses. This in __split_large_page()
1216 * the page size used for the address range changes. If the two in __split_large_page()
1217 * translations differ with respect to page frame or attributes in __split_large_page()
1219 * be implementation-specific." in __split_large_page()
1223 * page attribute in parallel, that also falls into the in __split_large_page()
1224 * just split large page entry. in __split_large_page()
1235 struct page *base; in split_large_page()
1243 return -ENOMEM; in split_large_page()
1269 /* The page is 4k intentionally */ in collapse_pmd_page()
1287 /* Success: set up a large page */ in collapse_pmd_page()
1293 /* Queue the page table to be freed after TLB flush */ in collapse_pmd_page()
1294 list_add(&page_ptdesc(pmd_page(old_pmd))->pt_list, pgtables); in collapse_pmd_page()
1297 struct page *page; in collapse_pmd_page() local
1299 /* Update all PGD tables to use the same large page */ in collapse_pmd_page()
1300 list_for_each_entry(page, &pgd_list, lru) { in collapse_pmd_page()
1301 pgd_t *pgd = (pgd_t *)page_address(page) + pgd_index(addr); in collapse_pmd_page()
1333 * To restore the PUD page, all PMD entries must be large and in collapse_pud_page()
1341 * To restore PUD page, all following PMDs must be compatible with the in collapse_pud_page()
1355 /* Restore PUD page and queue page table to be freed after TLB flush */ in collapse_pud_page()
1356 list_add(&page_ptdesc(pud_page(*pud))->pt_list, pgtables); in collapse_pud_page()
1369 * Caller must flush TLB and free page tables queued on the list before
1461 * Not on a 2MB page boundary? in unmap_pmd_range()
1463 if (start & (PMD_SIZE - 1)) { in unmap_pmd_range()
1476 while (end - start >= PMD_SIZE) { in unmap_pmd_range()
1493 * Try again to free the PMD page if we haven't succeeded above. in unmap_pmd_range()
1505 * Not on a GB page boundary? in unmap_pud_range()
1507 if (start & (PUD_SIZE - 1)) { in unmap_pud_range()
1520 while (end - start >= PUD_SIZE) { in unmap_pud_range()
1538 * No need to try to free the PUD page because we'll free it in in unmap_pud_range()
1547 return -1; in alloc_pte_page()
1557 return -1; in alloc_pmd_page()
1573 while (num_pages-- && start < end) { in populate_pte()
1574 set_pte(pte, pfn_pte(cpa->pfn, pgprot)); in populate_pte()
1577 cpa->pfn++; in populate_pte()
1593 if (start & (PMD_SIZE - 1)) { in populate_pmd()
1598 cur_pages = (pre_end - start) >> PAGE_SHIFT; in populate_pmd()
1602 * Need a PTE page? in populate_pmd()
1607 return -1; in populate_pmd()
1622 while (end - start >= PMD_SIZE) { in populate_pmd()
1625 * We cannot use a 1G page so allocate a PMD page if needed. in populate_pmd()
1629 return -1; in populate_pmd()
1633 set_pmd(pmd, pmd_mkhuge(pfn_pmd(cpa->pfn, in populate_pmd()
1637 cpa->pfn += PMD_SIZE >> PAGE_SHIFT; in populate_pmd()
1648 return -1; in populate_pmd()
1650 populate_pte(cpa, start, end, num_pages - cur_pages, in populate_pmd()
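populate_pmd() and populate_pud() follow the usual head/body/tail pattern: 4K entries up to the next large-page boundary, large entries in the middle, 4K entries for the remainder. A runnable sketch of that page-count split for a 2 MiB step (made-up range, 64-bit build assumed):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long start = 0x100003000UL;
	unsigned long end   = 0x100a01000UL;
	unsigned long head = 0, large = 0, tail;

	/* Head: 4K PTEs up to the next 2 MiB boundary (or up to 'end'). */
	if (start & (PMD_SIZE - 1)) {
		unsigned long pre_end = (start + PMD_SIZE) & PMD_MASK;

		if (pre_end > end)
			pre_end = end;
		head  = (pre_end - start) >> PAGE_SHIFT;
		start = pre_end;
	}

	/* Body: whole 2 MiB PMD mappings. */
	while (end - start >= PMD_SIZE) {
		large++;
		start += PMD_SIZE;
	}

	/* Tail: whatever is left is mapped with 4K PTEs again. */
	tail = (end - start) >> PAGE_SHIFT;

	printf("head %lu x 4K, body %lu x 2M, tail %lu x 4K\n",
	       head, large, tail);		/* head 509, body 4, tail 1 */
	return 0;
}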
1664 end = start + (cpa->numpages << PAGE_SHIFT); in populate_pud()
1667 * Not on a GB page boundary? => map everything up to it with in populate_pud()
1670 if (start & (PUD_SIZE - 1)) { in populate_pud()
1675 cur_pages = (pre_end - start) >> PAGE_SHIFT; in populate_pud()
1676 cur_pages = min_t(int, (int)cpa->numpages, cur_pages); in populate_pud()
1681 * Need a PMD page? in populate_pud()
1685 return -1; in populate_pud()
1696 if (cpa->numpages == cur_pages) in populate_pud()
1705 while (boot_cpu_has(X86_FEATURE_GBPAGES) && end - start >= PUD_SIZE) { in populate_pud()
1706 set_pud(pud, pud_mkhuge(pfn_pud(cpa->pfn, in populate_pud()
1710 cpa->pfn += PUD_SIZE >> PAGE_SHIFT; in populate_pud()
1722 return -1; in populate_pud()
1724 tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages, in populate_pud()
1735 * Restrictions for kernel page table do not necessarily apply when mapping in
1746 pgd_entry = cpa->pgd + pgd_index(addr); in populate_pgd()
1751 return -1; in populate_pgd()
1757 * Allocate a PUD page and hand it down for mapping. in populate_pgd()
1763 return -1; in populate_pgd()
1768 pgprot_val(pgprot) &= ~pgprot_val(cpa->mask_clr); in populate_pgd()
1769 pgprot_val(pgprot) |= pgprot_val(cpa->mask_set); in populate_pgd()
1774 * Leave the PUD page in place in case some other CPU or thread in populate_pgd()
1779 addr + (cpa->numpages << PAGE_SHIFT)); in populate_pgd()
1783 cpa->numpages = ret; in populate_pgd()
1790 if (cpa->pgd) { in __cpa_process_fault()
1794 * provide a ->pgd value. This may change in the future. in __cpa_process_fault()
1803 cpa->numpages = 1; in __cpa_process_fault()
1811 * one virtual address page and its pfn. TBD: numpages can be set based in __cpa_process_fault()
1812 * on the initial value and the level returned by lookup_address(). in __cpa_process_fault()
1816 cpa->numpages = 1; in __cpa_process_fault()
1817 cpa->pfn = __pa(vaddr) >> PAGE_SHIFT; in __cpa_process_fault()
1820 } else if (__cpa_pfn_in_highmap(cpa->pfn)) { in __cpa_process_fault()
1822 return -EFAULT; in __cpa_process_fault()
1825 "vaddr = %lx cpa->vaddr = %lx\n", vaddr, in __cpa_process_fault()
1826 *cpa->vaddr); in __cpa_process_fault()
1828 return -EFAULT; in __cpa_process_fault()
1836 unsigned int level; in __change_page_attr() local
1840 address = __cpa_addr(cpa, cpa->curpage); in __change_page_attr()
1842 kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw); in __change_page_attr()
1850 if (level == PG_LEVEL_4K) { in __change_page_attr()
1856 pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr); in __change_page_attr()
1857 pgprot_val(new_prot) |= pgprot_val(cpa->mask_set); in __change_page_attr()
1875 cpa->pfn = pfn; in __change_page_attr()
1881 cpa->flags |= CPA_FLUSHTLB; in __change_page_attr()
1883 cpa->numpages = 1; in __change_page_attr()
1888 * Check whether we can keep the large page intact in __change_page_attr()
1893 * When the range fits into the existing large page, in __change_page_attr()
1894 * return. cpa->numpages and cpa->tlbflush have been updated in in __change_page_attr()
1901 * We have to split the large page: in __change_page_attr()
1918 unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT); in cpa_process_alias()
1922 if (!pfn_range_is_mapped(cpa->pfn, cpa->pfn + 1)) in cpa_process_alias()
1929 vaddr = __cpa_addr(cpa, cpa->curpage); in cpa_process_alias()
1944 cpa->force_flush_all = 1; in cpa_process_alias()
1958 __cpa_pfn_in_highmap(cpa->pfn)) { in cpa_process_alias()
1959 unsigned long temp_cpa_vaddr = (cpa->pfn << PAGE_SHIFT) + in cpa_process_alias()
1960 __START_KERNEL_map - phys_base; in cpa_process_alias()
1975 cpa->force_flush_all = 1; in cpa_process_alias()
1989 unsigned long numpages = cpa->numpages; in __change_page_attr_set_clr()
1996 if (!(pgprot_val(cpa->mask_set) | pgprot_val(cpa->mask_clr)) && in __change_page_attr_set_clr()
1997 !cpa->force_split) in __change_page_attr_set_clr()
2002 * Store the remaining nr of pages for the large page in __change_page_attr_set_clr()
2005 cpa->numpages = rempages; in __change_page_attr_set_clr()
2006 /* for array changes, we can't use large page */ in __change_page_attr_set_clr()
2007 if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY)) in __change_page_attr_set_clr()
2008 cpa->numpages = 1; in __change_page_attr_set_clr()
2018 if (primary && !(cpa->flags & CPA_NO_CHECK_ALIAS)) { in __change_page_attr_set_clr()
2026 * CPA operation. Either a large page has been in __change_page_attr_set_clr()
2027 * preserved or a single page update happened. in __change_page_attr_set_clr()
2029 BUG_ON(cpa->numpages > rempages || !cpa->numpages); in __change_page_attr_set_clr()
2030 rempages -= cpa->numpages; in __change_page_attr_set_clr()
2031 cpa->curpage += cpa->numpages; in __change_page_attr_set_clr()
2036 cpa->numpages = numpages; in __change_page_attr_set_clr()
2043 struct page **pages) in change_page_attr_set_clr()
2052 * feature. Clearing non-supported features is OK. in change_page_attr_set_clr()
2137 static inline int cpa_set_pages_array(struct page **pages, int numpages, in cpa_set_pages_array()
2144 static inline int cpa_clear_pages_array(struct page **pages, int numpages, in cpa_clear_pages_array()
2248 /* Prevent speculative access to a page by marking it not-present */
2262 * speculative access to the poison page because we'd have in set_mce_nospec()
2265 * Instead we get tricky. We create a non-canonical address in set_mce_nospec()
2364 * informed about "encryption" status via page tables.
2433 * Stop new private<->shared conversions.
2435 * Taking the exclusive mem_enc_lock waits for in-flight conversions to complete.
2458 return -EBUSY; in __set_memory_enc_dec()
2480 int set_pages_uc(struct page *page, int numpages) in set_pages_uc() argument
2482 unsigned long addr = (unsigned long)page_address(page); in set_pages_uc()
2488 static int _set_pages_array(struct page **pages, int numpages, in _set_pages_array()
2507 /* If WC, set to UC- first and then WC */ in _set_pages_array()
2531 return -EINVAL; in _set_pages_array()
2534 int set_pages_array_uc(struct page **pages, int numpages) in set_pages_array_uc()
2540 int set_pages_array_wc(struct page **pages, int numpages) in set_pages_array_wc()
2546 int set_pages_wb(struct page *page, int numpages) in set_pages_wb() argument
2548 unsigned long addr = (unsigned long)page_address(page); in set_pages_wb()
2554 int set_pages_array_wb(struct page **pages, int numpages) in set_pages_array_wb()
2579 int set_pages_ro(struct page *page, int numpages) in set_pages_ro() argument
2581 unsigned long addr = (unsigned long)page_address(page); in set_pages_ro()
2586 int set_pages_rw(struct page *page, int numpages) in set_pages_rw() argument
2588 unsigned long addr = (unsigned long)page_address(page); in set_pages_rw()
2593 static int __set_pages_p(struct page *page, int numpages) in __set_pages_p() argument
2595 unsigned long tempaddr = (unsigned long) page_address(page); in __set_pages_p()
2605 * we may need to break large pages for 64-bit kernel text in __set_pages_p()
2612 static int __set_pages_np(struct page *page, int numpages) in __set_pages_np() argument
2614 unsigned long tempaddr = (unsigned long) page_address(page); in __set_pages_np()
2624 * we may need to break large pages for 64-bit kernel text in __set_pages_np()
2631 int set_direct_map_invalid_noflush(struct page *page) in set_direct_map_invalid_noflush() argument
2633 return __set_pages_np(page, 1); in set_direct_map_invalid_noflush()
2636 int set_direct_map_default_noflush(struct page *page) in set_direct_map_default_noflush() argument
2638 return __set_pages_p(page, 1); in set_direct_map_default_noflush()
2641 int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid) in set_direct_map_valid_noflush() argument
2644 return __set_pages_p(page, nr); in set_direct_map_valid_noflush()
2646 return __set_pages_np(page, nr); in set_direct_map_valid_noflush()
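A hedged usage sketch (kernel context assumed; page is a placeholder struct page pointer): the *_noflush helpers only rewrite the direct-map entries, so the caller flushes the TLB itself, for example with flush_tlb_kernel_range():

unsigned long addr = (unsigned long)page_address(page);

/* Drop the page from the direct map, then flush the stale translation. */
if (!set_direct_map_invalid_noflush(page))
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

/* ... the page now has no linear-map alias ... */

/* Restore the default (present) direct-map entry and flush again. */
if (!set_direct_map_default_noflush(page))
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);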
2650 void __kernel_map_pages(struct page *page, int numpages, int enable) in __kernel_map_pages() argument
2652 if (PageHighMem(page)) in __kernel_map_pages()
2655 debug_check_no_locks_freed(page_address(page), in __kernel_map_pages()
2662 * and hence no memory allocations during large page split. in __kernel_map_pages()
2665 __set_pages_p(page, numpages); in __kernel_map_pages()
2667 __set_pages_np(page, numpages); in __kernel_map_pages()
2671 * but that can deadlock, so flush only the current CPU. in __kernel_map_pages()
2683 bool kernel_page_present(struct page *page) in kernel_page_present() argument
2685 unsigned int level; in kernel_page_present() local
2688 if (PageHighMem(page)) in kernel_page_present()
2691 pte = lookup_address((unsigned long)page_address(page), &level); in kernel_page_present()
2698 int retval = -EINVAL; in kernel_map_pages_in_pgd()
2766 #include "cpa-test.c"