Lines Matching +full:operating-points-v2
11 * GPL v2 and any later version */
26 * could probably consider launching Guests as non-root.
32 * We use two-level page tables for the Guest, or three-level with PAE. If
38 * called "shadow" page tables. Which is a very Guest-centric name: these are
58 #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)
65 #define SWITCHER_PMD_INDEX (PTRS_PER_PMD - 1)
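Together these two defines reserve the very top slot of every shadow page table for the Switcher. A small standalone sketch (not the lguest source; it assumes the classic non-PAE 10/10/12 split) of which virtual range that top index covers:

        #include <stdio.h>

        #define PGDIR_SHIFT   22                      /* non-PAE: 10+10+12 split */
        #define PTRS_PER_PGD  1024
        #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1) /* the slot lguest reserves */

        int main(void)
        {
                /* Each top-level entry covers 4 MB; index 1023 is the last 4 MB. */
                unsigned long start = (unsigned long)SWITCHER_PGD_INDEX << PGDIR_SHIFT;

                printf("Switcher PGD index: %d\n", SWITCHER_PGD_INDEX);
                printf("covers virtual    : %#010lx-0xffffffff (4 MB)\n", start);
                return 0;
        }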
89 * spgd_addr() takes the virtual address and returns a pointer to the top-level
106 return &cpu->lg->pgdirs[i].pgdir[index]; in spgd_addr()
164 return cpu->lg->pgdirs[cpu->cpu_pgd].gpgdir + index * sizeof(pgd_t); in gpgd_addr()
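These two helpers do the same index arithmetic but land in different worlds: spgd_addr() hands back a real host pointer into a shadow pgdir page we allocated, while gpgd_addr() only computes a guest-physical address that still has to be read out of Guest memory. A standalone model of the arithmetic, assuming the non-PAE layout (the names mirror the lguest helpers, but the code is illustrative only):

        #include <stdio.h>
        #include <stdint.h>

        #define PGDIR_SHIFT  22
        #define PTRS_PER_PGD 1024
        typedef uint32_t pgd_t;                       /* non-PAE entries are 32-bit */

        static unsigned int pgd_index(unsigned long vaddr)
        {
                return vaddr >> PGDIR_SHIFT;          /* top 10 bits of the address */
        }

        /* Host-side pointer into a shadow pgdir page we own. */
        static pgd_t *spgd_addr(pgd_t *shadow_pgdir, unsigned long vaddr)
        {
                return &shadow_pgdir[pgd_index(vaddr)];
        }

        /* Guest-physical address of the Guest's own top-level entry. */
        static unsigned long gpgd_addr(unsigned long gpgdir, unsigned long vaddr)
        {
                return gpgdir + pgd_index(vaddr) * sizeof(pgd_t);
        }

        int main(void)
        {
                pgd_t shadow[PTRS_PER_PGD] = { 0 };
                unsigned long vaddr = 0xC0100000;     /* a typical kernel address */

                printf("pgd_index = %u\n", pgd_index(vaddr));
                printf("spgd slot = %p\n", (void *)spgd_addr(shadow, vaddr));
                printf("gpgd gpa  = %#lx\n", gpgd_addr(0x1000, vaddr));
                return 0;
        }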
186 /* Follow the PGD to the PTE (no mid-level for !PAE). */
200 * an optimization (ie. pre-faulting).
207 * and the page is read-only, or the write flag was set and the page was
222 return -1UL; in get_pfn()
244 base = (unsigned long)cpu->lg->mem_base / PAGE_SIZE; in gpte_to_spte()
253 if (pfn == -1UL) { in gpte_to_spte()
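The Guest's frame numbers are offsets into the Launcher's memory, so turning a gpte into a shadow pte means shifting by mem_base before asking the host for the real frame; -1UL is the "not mapped" sentinel seen above. A toy model of that translation, with get_pfn() replaced by a made-up stub:

        #include <stdio.h>

        #define PAGE_SIZE 4096UL

        /*
         * Stand-in for get_pfn(): pretend the Launcher's mapping covers
         * host-virtual frames [0x10000, 0x20000) and those sit at physical
         * frames offset by 0x8000. Purely illustrative numbers.
         */
        static unsigned long get_pfn(unsigned long host_virt_pfn)
        {
                if (host_virt_pfn >= 0x10000 && host_virt_pfn < 0x20000)
                        return host_virt_pfn + 0x8000;
                return -1UL;                          /* page isn't mapped */
        }

        static unsigned long gpfn_to_spfn(unsigned long gpfn, unsigned long mem_base)
        {
                unsigned long base = mem_base / PAGE_SIZE;
                unsigned long pfn = get_pfn(base + gpfn);

                if (pfn == -1UL)
                        fprintf(stderr, "failed to get page %lu\n", gpfn);
                return pfn;
        }

        int main(void)
        {
                unsigned long mem_base = 0x10000000;  /* Launcher maps Guest here */

                printf("guest frame 5       -> host frame %#lx\n",
                       gpfn_to_spfn(5, mem_base));
                printf("guest frame 0x20000 -> host frame %#lx\n",
                       gpfn_to_spfn(0x20000, mem_base));
                return 0;
        }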
281 pte_pfn(gpte) >= cpu->lg->pfn_limit) in check_gpte()
288 (pgd_pfn(gpgd) >= cpu->lg->pfn_limit)) in check_gpgd()
296 (pmd_pfn(gpmd) >= cpu->lg->pfn_limit)) in check_gpmd()
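All three checks boil down to the same test: refuse any Guest-supplied entry that carries flag bits we don't allow or that points past pfn_limit, the amount of memory the Launcher gave the Guest. A standalone model of that test, with kill_guest() reduced to a printout:

        #include <stdio.h>

        #define PAGE_SHIFT 12
        #define PAGE_SIZE  (1UL << PAGE_SHIFT)

        struct guest { unsigned long pfn_limit; };

        static void check_entry(const struct guest *lg, unsigned long entry,
                                unsigned long allowed_flags, const char *what)
        {
                unsigned long flags = entry & (PAGE_SIZE - 1);
                unsigned long pfn   = entry >> PAGE_SHIFT;

                /* The real helpers call kill_guest() instead of printing. */
                if ((flags & ~allowed_flags) || pfn >= lg->pfn_limit)
                        fprintf(stderr, "bad %s entry %#lx\n", what, entry);
        }

        int main(void)
        {
                struct guest lg = { .pfn_limit = 0x8000 };        /* 128 MB Guest */

                check_entry(&lg, (0x1234UL << PAGE_SHIFT) | 0x067, 0x067, "pte");
                check_entry(&lg, (0x9000UL << PAGE_SHIFT) | 0x067, 0x067, "pte");
                return 0;
        }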
326 /* First step: get the top-level Guest page table entry. */ in demand_page()
327 if (unlikely(cpu->linear_pages)) { in demand_page()
338 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); in demand_page()
360 if (unlikely(cpu->linear_pages)) { in demand_page()
409 if (unlikely(cpu->linear_pages)) { in demand_page()
410 /* Linear? Make up a PTE which points to same page. */ in demand_page()
423 * read-only (bit 2 of errcode == write). in demand_page()
428 /* User access to a kernel-only page? (bit 3 == user access) */ in demand_page()
448 * This can happen with a write to a previously read-only entry. in demand_page()
471 if (likely(!cpu->linear_pages)) in demand_page()
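The errcode tests above come from the x86 page-fault error code: the value-2 bit is set for a write and the value-4 bit for a user-mode access (the source comments count bits from one). A compact model of the "would the Guest itself have allowed this?" check that demand_page() makes before filling in a shadow PTE:

        #include <stdbool.h>
        #include <stdio.h>

        #define _PAGE_PRESENT 0x001
        #define _PAGE_RW      0x002
        #define _PAGE_USER    0x004

        /* Would the Guest's own PTE have permitted this access? */
        static bool gpte_allows(unsigned long gpte_flags, unsigned long errcode)
        {
                if (!(gpte_flags & _PAGE_PRESENT))
                        return false;                   /* not mapped at all */
                if ((errcode & 2) && !(gpte_flags & _PAGE_RW))
                        return false;                   /* write to read-only page */
                if ((errcode & 4) && !(gpte_flags & _PAGE_USER))
                        return false;                   /* user touch of kernel page */
                return true;    /* otherwise we go on to fill the shadow PTE */
        }

        int main(void)
        {
                printf("user write to RW|USER page: %d\n",
                       gpte_allows(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER, 2 | 4));
                printf("user write to RO page:      %d\n",
                       gpte_allows(_PAGE_PRESENT | _PAGE_USER, 2 | 4));
                return 0;
        }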
503 spgd = spgd_addr(cpu, cpu->cpu_pgd, vaddr); in page_writable()
570 * If we chase down the release_pgd() code, the non-PAE version looks like
598 * hypercall and once in new_pgdir() when we re-used a top-level pgdir page.
605 for (i = 0; i < pgd_index(lg->kernel_address); i++) in flush_user_mappings()
606 release_pgd(lg->pgdirs[idx].pgdir + i); in flush_user_mappings()
618 flush_user_mappings(cpu->lg, cpu->cpu_pgd); in guest_pagetable_flush_user()
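flush_user_mappings() simply drops every top-level shadow entry below the slot containing kernel_address, so user mappings get rebuilt on demand while the kernel (and Switcher) slots survive. A standalone illustration, non-PAE layout assumed and release_pgd() reduced to zeroing the slot:

        #include <stdio.h>

        #define PGDIR_SHIFT  22
        #define PTRS_PER_PGD 1024

        static unsigned int pgd_index(unsigned long vaddr)
        {
                return vaddr >> PGDIR_SHIFT;
        }

        static void flush_user_mappings(unsigned long *pgdir,
                                        unsigned long kernel_address)
        {
                unsigned int i;

                /* Everything below the kernel's first slot is a user mapping. */
                for (i = 0; i < pgd_index(kernel_address); i++)
                        pgdir[i] = 0;          /* the real code calls release_pgd() */
        }

        int main(void)
        {
                unsigned long pgdir[PTRS_PER_PGD];
                unsigned int i, live = 0;

                for (i = 0; i < PTRS_PER_PGD; i++)
                        pgdir[i] = 1;                          /* pretend all mapped */
                flush_user_mappings(pgdir, 0xC0000000);        /* typical 3G split */

                for (i = 0; i < PTRS_PER_PGD; i++)
                        live += pgdir[i] != 0;
                printf("slots still mapped: %u of %d\n", live, PTRS_PER_PGD);
                return 0;
        }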
622 /* We walk down the guest page tables to get a guest-physical address */
632 if (unlikely(cpu->linear_pages)) in guest_pa()
635 /* First step: get the top-level Guest page table entry. */ in guest_pa()
640 return -1UL; in guest_pa()
659 * table (if any) corresponding to this top-level address the Guest has given
665 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) in find_pgdir()
666 if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable) in find_pgdir()
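find_pgdir() is a plain linear scan of the small shadow cache, keyed by the Guest's top-level guest-physical address, and it returns the array size to mean "not found". A self-contained model of that lookup:

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        struct pgdir {
                unsigned long gpgdir;   /* Guest-physical address of Guest's PGD */
                unsigned long *pgdir;   /* our shadow page, NULL if slot unused  */
        };

        struct guest { struct pgdir pgdirs[4]; };

        static unsigned int find_pgdir(const struct guest *lg, unsigned long pgtable)
        {
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++)
                        if (lg->pgdirs[i].pgdir && lg->pgdirs[i].gpgdir == pgtable)
                                break;
                return i;
        }

        int main(void)
        {
                unsigned long page = 0;
                struct guest lg = { .pgdirs = { [1] = { 0x7000, &page } } };

                printf("0x7000 -> slot %u\n", find_pgdir(&lg, 0x7000));
                printf("0x9000 -> slot %u (== not found)\n", find_pgdir(&lg, 0x9000));
                return 0;
        }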
689 next = random32() % ARRAY_SIZE(cpu->lg->pgdirs); in new_pgdir()
691 if (!cpu->lg->pgdirs[next].pgdir) { in new_pgdir()
692 cpu->lg->pgdirs[next].pgdir = in new_pgdir()
695 if (!cpu->lg->pgdirs[next].pgdir) in new_pgdir()
696 next = cpu->cpu_pgd; in new_pgdir()
705 free_page((long)cpu->lg->pgdirs[next].pgdir); in new_pgdir()
706 set_pgd(cpu->lg->pgdirs[next].pgdir, __pgd(0)); in new_pgdir()
707 next = cpu->cpu_pgd; in new_pgdir()
709 set_pgd(cpu->lg->pgdirs[next].pgdir + in new_pgdir()
724 cpu->lg->pgdirs[next].gpgdir = gpgdir; in new_pgdir()
725 /* Release all the non-kernel mappings. */ in new_pgdir()
726 flush_user_mappings(cpu->lg, next); in new_pgdir()
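new_pgdir() picks a victim slot at random, allocates a fresh page if the slot was empty, re-keys it to the Guest's new top level and flushes its user mappings. A toy model of that replacement policy (rand() stands in for the old kernel random32() seen above, and the PAE-only Switcher fix-up is omitted):

        #include <stdio.h>
        #include <stdlib.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
        #define PTRS_PER_PGD  1024

        struct pgdir {
                unsigned long gpgdir;
                unsigned long *pgdir;
        };

        static struct pgdir pgdirs[4];

        static unsigned int new_pgdir(unsigned long gpgdir, int *blank_pgdir)
        {
                unsigned int next = rand() % ARRAY_SIZE(pgdirs);

                if (!pgdirs[next].pgdir) {
                        pgdirs[next].pgdir = calloc(PTRS_PER_PGD,
                                                    sizeof(unsigned long));
                        if (!pgdirs[next].pgdir)
                                return 0;      /* toy fallback; the real code
                                                * falls back to the current slot */
                        *blank_pgdir = 1;      /* brand-new page: nothing mapped */
                }
                /* Re-key the slot; the real code also flushes its user mappings. */
                pgdirs[next].gpgdir = gpgdir;
                return next;
        }

        int main(void)
        {
                int blank = 0;
                unsigned int slot = new_pgdir(0x7000, &blank);

                printf("guest pgdir 0x7000 cached in slot %u (blank=%d)\n",
                       slot, blank);
                return 0;
        }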
741 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) in release_all_pagetables()
742 if (lg->pgdirs[i].pgdir) { in release_all_pagetables()
749 spgd = lg->pgdirs[i].pgdir + SWITCHER_PGD_INDEX; in release_all_pagetables()
761 release_pgd(lg->pgdirs[i].pgdir + j); in release_all_pagetables()
773 release_all_pagetables(cpu->lg); in guest_pagetable_clear_all()
782 * what happens when the Guest changes page tables (ie. changes the top-level
793 if (unlikely(cpu->linear_pages)) { in guest_new_pagetable()
794 release_all_pagetables(cpu->lg); in guest_new_pagetable()
795 cpu->linear_pages = false; in guest_new_pagetable()
797 newpgdir = ARRAY_SIZE(cpu->lg->pgdirs); in guest_new_pagetable()
800 newpgdir = find_pgdir(cpu->lg, pgtable); in guest_new_pagetable()
807 if (newpgdir == ARRAY_SIZE(cpu->lg->pgdirs)) in guest_new_pagetable()
810 cpu->cpu_pgd = newpgdir; in guest_new_pagetable()
824 * don't know, but the term "puissant code-fu" comes to mind.
836 * But Avi Kivity pointed out that most Operating Systems (Linux included) set
839 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
864 * now. This shaves 10% off a copy-on-write in do_set_pte()
865 * micro-benchmark. in do_set_pte()
904 if (vaddr >= cpu->lg->kernel_address) { in guest_set_pte()
906 for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++) in guest_set_pte()
907 if (cpu->lg->pgdirs[i].pgdir) in guest_set_pte()
911 int pgdir = find_pgdir(cpu->lg, gpgdir); in guest_set_pte()
912 if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs)) in guest_set_pte()
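The split above is the heart of guest_set_pte(): kernel mappings are shared by every shadow page table, so a change at or above kernel_address is mirrored into all of them, while a user mapping only touches the shadow keyed by that gpgdir, and only if it is cached at all. A model of that dispatch, with do_set_pte() reduced to a printf:

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        struct pgdir { unsigned long gpgdir; int in_use; };

        static struct pgdir pgdirs[4] = {
                { 0x7000, 1 }, { 0x0, 0 }, { 0xb000, 1 }, { 0x0, 0 },
        };

        static void do_set_pte(unsigned int slot, unsigned long vaddr)
        {
                printf("  update shadow slot %u for vaddr %#lx\n", slot, vaddr);
        }

        static void guest_set_pte(unsigned long gpgdir, unsigned long vaddr,
                                  unsigned long kernel_address)
        {
                unsigned int i;

                if (vaddr >= kernel_address) {
                        /* Kernel mapping: mirror it into every cached shadow. */
                        for (i = 0; i < ARRAY_SIZE(pgdirs); i++)
                                if (pgdirs[i].in_use)
                                        do_set_pte(i, vaddr);
                } else {
                        /* User mapping: only the matching shadow, if cached. */
                        for (i = 0; i < ARRAY_SIZE(pgdirs); i++)
                                if (pgdirs[i].in_use && pgdirs[i].gpgdir == gpgdir)
                                        do_set_pte(i, vaddr);
                }
        }

        int main(void)
        {
                puts("kernel PTE change:");
                guest_set_pte(0x7000, 0xC0123000, 0xC0000000);
                puts("user PTE change:");
                guest_set_pte(0xb000, 0x08048000, 0xC0000000);
                return 0;
        }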
930 * So with that in mind here's our code to update a (top-level) PGD entry:
941 if (pgdir < ARRAY_SIZE(lg->pgdirs)) in guest_set_pgd()
943 release_pgd(lg->pgdirs[pgdir].pgdir + idx); in guest_set_pgd()
947 /* For setting a mid-level, we just throw everything away. It's easy. */
950 guest_pagetable_clear_all(&lg->cpus[0]); in guest_set_pmd()
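The contrast between the two callbacks is deliberate: a PGD write releases just the one slot in the matching shadow, while a PAE PMD write takes the lazy path and throws every shadow page table away. A sketch of both (model code, not the kernel source; the real guest_set_pgd() also skips the Switcher slot):

        #include <stdio.h>

        #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

        struct pgdir { unsigned long gpgdir; unsigned long entries[4]; };
        static struct pgdir pgdirs[4] = { { .gpgdir = 0x7000 } };

        static unsigned int find_pgdir(unsigned long gpgdir)
        {
                unsigned int i;

                for (i = 0; i < ARRAY_SIZE(pgdirs); i++)
                        if (pgdirs[i].gpgdir == gpgdir)
                                break;
                return i;
        }

        /* Guest changed one top-level entry: drop just that shadow slot. */
        static void guest_set_pgd(unsigned long gpgdir, unsigned int idx)
        {
                unsigned int pgdir = find_pgdir(gpgdir);

                if (pgdir < ARRAY_SIZE(pgdirs))
                        pgdirs[pgdir].entries[idx] = 0;   /* release_pgd() stand-in */
        }

        /* Guest changed a PAE mid-level entry: give up and clear everything. */
        static void guest_set_pmd(void)
        {
                unsigned int i, j;

                for (i = 0; i < ARRAY_SIZE(pgdirs); i++)
                        for (j = 0; j < ARRAY_SIZE(pgdirs[i].entries); j++)
                                pgdirs[i].entries[j] = 0;
        }

        int main(void)
        {
                guest_set_pgd(0x7000, 2);
                guest_set_pmd();
                puts("shadow entries dropped");
                return 0;
        }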
964 struct lg_cpu *cpu = &lg->cpus[0]; in init_guest_pagetable()
967 /* lg (and lg->cpus[]) starts zeroed: this allocates a new pgdir */ in init_guest_pagetable()
968 cpu->cpu_pgd = new_pgdir(cpu, 0, &allocated); in init_guest_pagetable()
970 return -ENOMEM; in init_guest_pagetable()
973 cpu->linear_pages = true; in init_guest_pagetable()
981 if (get_user(cpu->lg->kernel_address, in page_table_guest_data_init()
982 &cpu->lg->lguest_data->kernel_address) in page_table_guest_data_init()
988 &cpu->lg->lguest_data->reserve_mem)) { in page_table_guest_data_init()
989 kill_guest(cpu, "bad guest page %p", cpu->lg->lguest_data); in page_table_guest_data_init()
995 * "pgd_index(lg->kernel_address)". This assumes it won't hit the in page_table_guest_data_init()
999 if (pgd_index(cpu->lg->kernel_address) == SWITCHER_PGD_INDEX && in page_table_guest_data_init()
1000 pmd_index(cpu->lg->kernel_address) == SWITCHER_PMD_INDEX) in page_table_guest_data_init()
1002 if (pgd_index(cpu->lg->kernel_address) >= SWITCHER_PGD_INDEX) in page_table_guest_data_init()
1005 cpu->lg->kernel_address); in page_table_guest_data_init()
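The check above refuses a kernel_address whose top-level slot would collide with the one the Switcher occupies (PAE gets an extra escape hatch because that top PGD entry is subdivided into PMDs). A standalone model of the non-PAE arithmetic, with kill_guest() replaced by a return value:

        #include <stdio.h>

        #define PGDIR_SHIFT        22
        #define PTRS_PER_PGD       1024
        #define SWITCHER_PGD_INDEX (PTRS_PER_PGD - 1)

        static unsigned int pgd_index(unsigned long vaddr)
        {
                return vaddr >> PGDIR_SHIFT;
        }

        static int kernel_address_ok(unsigned long kernel_address)
        {
                /* The real code calls kill_guest() instead of returning 0. */
                return pgd_index(kernel_address) < SWITCHER_PGD_INDEX;
        }

        int main(void)
        {
                printf("0xC0000000 ok? %d\n", kernel_address_ok(0xC0000000UL));
                printf("0xFFC00000 ok? %d\n", kernel_address_ok(0xFFC00000UL));
                return 0;
        }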
1016 for (i = 0; i < ARRAY_SIZE(lg->pgdirs); i++) in free_guest_pagetable()
1017 free_page((long)lg->pgdirs[i].pgdir); in free_guest_pagetable()
1042 pmd_table = __va(pgd_pfn(cpu->lg-> in map_switcher_in_guest()
1043 pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX]) in map_switcher_in_guest()
1056 cpu->lg->pgdirs[cpu->cpu_pgd].pgdir[SWITCHER_PGD_INDEX] = switcher_pgd; in map_switcher_in_guest()
1068 regs_pte = pfn_pte(__pa(cpu->regs_page) >> PAGE_SHIFT, PAGE_KERNEL); in map_switcher_in_guest()
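pfn_pte() just packs a physical frame number and protection bits into a PTE; here it is used to map the per-cpu regs page the Switcher needs for saving and restoring registers. A minimal sketch of that construction (the address and the simplified PAGE_KERNEL flag set are made up for illustration):

        #include <stdio.h>
        #include <stdint.h>

        #define PAGE_SHIFT    12
        #define _PAGE_PRESENT 0x001
        #define _PAGE_RW      0x002
        #define PAGE_KERNEL   (_PAGE_PRESENT | _PAGE_RW)   /* simplified flags */

        static uint64_t pfn_pte(unsigned long pfn, unsigned long prot)
        {
                /* PTE = frame number shifted into place, protection bits OR'd in. */
                return ((uint64_t)pfn << PAGE_SHIFT) | prot;
        }

        int main(void)
        {
                unsigned long regs_page_pa = 0x12345000;  /* hypothetical address */

                printf("regs_pte = %#llx\n",
                       (unsigned long long)pfn_pte(regs_page_pa >> PAGE_SHIFT,
                                                   PAGE_KERNEL));
                return 0;
        }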
1109 * read-only. in populate_switcher_pte_page()
1140 return -ENOMEM; in init_pagetables()