Lines matching full:hvc
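All of the hits below are in the user-mode Linux (UML) TLB-flush code; the function names match arch/um/kernel/tlb.c, though the exact file and kernel version are an assumption from the identifiers alone. hvc is a struct host_vm_change: a small batch of pending host mmap/munmap/mprotect operations that the page-table walkers queue up and do_ops() replays against the host, either for a user address space (hvc->userspace) or for the kernel's own mappings. The trailing "argument"/"local" tags are the indexer's note on how hvc is declared at that line.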
62 static int do_ops(struct host_vm_change *hvc, int end, in do_ops() argument
69 op = &hvc->ops[i]; in do_ops()
72 if (hvc->userspace) in do_ops()
73 ret = map(&hvc->mm->context.id, op->u.mmap.addr, in do_ops()
77 &hvc->data); in do_ops()
83 if (hvc->userspace) in do_ops()
84 ret = unmap(&hvc->mm->context.id, in do_ops()
87 &hvc->data); in do_ops()
95 if (hvc->userspace) in do_ops()
96 ret = protect(&hvc->mm->context.id, in do_ops()
100 finished, &hvc->data); in do_ops()
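For orientation, here is a minimal compilable sketch of the dispatch pattern the do_ops() hits show: walk the first `end` queued ops and stop at the first failure. This is not the kernel code; struct vm_change/vm_op merely mirror host_vm_change and host_vm_op, and map_op()/unmap_op()/protect_op() are hypothetical stubs standing in for the host-side map()/unmap()/protect() calls.

/*
 * Sketch of the do_ops() dispatch pattern above -- NOT the kernel code.
 */
enum op_type { OP_MMAP, OP_MUNMAP, OP_MPROTECT };

struct vm_op {
        enum op_type type;
        unsigned long addr, len;
        unsigned int prot;
};

struct vm_change {
        struct vm_op ops[16];   /* pending host operations */
        int index;              /* next free slot in ops[] */
        int userspace;          /* 1: a user mm, 0: kernel mappings */
        int force;              /* update even unchanged entries */
};

/* Hypothetical stubs for the host calls; they only model the shape. */
static int map_op(struct vm_op *op, int finished) { (void)op; (void)finished; return 0; }
static int unmap_op(struct vm_op *op, int finished) { (void)op; (void)finished; return 0; }
static int protect_op(struct vm_op *op, int finished) { (void)op; (void)finished; return 0; }

/* Replay the first `end` queued ops; stop at the first failure. */
static int do_ops_sketch(struct vm_change *vc, int end, int finished)
{
        int i, ret = 0;

        for (i = 0; i < end && !ret; i++) {
                struct vm_op *op = &vc->ops[i];

                switch (op->type) {
                case OP_MMAP:
                        ret = map_op(op, finished);
                        break;
                case OP_MUNMAP:
                        ret = unmap_op(op, finished);
                        break;
                case OP_MPROTECT:
                        ret = protect_op(op, finished);
                        break;
                }
        }
        return ret;
}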
122 unsigned int prot, struct host_vm_change *hvc) in add_mmap() argument
128 if (hvc->userspace) in add_mmap()
132 if (hvc->index != 0) { in add_mmap()
133 last = &hvc->ops[hvc->index - 1]; in add_mmap()
143 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mmap()
144 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mmap()
145 hvc->index = 0; in add_mmap()
148 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mmap()
160 struct host_vm_change *hvc) in add_munmap() argument
168 if (hvc->index != 0) { in add_munmap()
169 last = &hvc->ops[hvc->index - 1]; in add_munmap()
177 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_munmap()
178 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_munmap()
179 hvc->index = 0; in add_munmap()
182 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_munmap()
190 unsigned int prot, struct host_vm_change *hvc) in add_mprotect() argument
195 if (hvc->index != 0) { in add_mprotect()
196 last = &hvc->ops[hvc->index - 1]; in add_mprotect()
205 if (hvc->index == ARRAY_SIZE(hvc->ops)) { in add_mprotect()
206 ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0); in add_mprotect()
207 hvc->index = 0; in add_mprotect()
210 hvc->ops[hvc->index++] = ((struct host_vm_op) in add_mprotect()
222 struct host_vm_change *hvc) in update_pte_range() argument
243 if (hvc->force || pte_newpage(*pte)) { in update_pte_range()
247 PAGE_SIZE, prot, hvc); in update_pte_range()
249 ret = add_munmap(addr, PAGE_SIZE, hvc); in update_pte_range()
251 ret = add_mprotect(addr, PAGE_SIZE, prot, hvc); in update_pte_range()
259 struct host_vm_change *hvc) in update_pmd_range() argument
269 if (hvc->force || pmd_newpage(*pmd)) { in update_pmd_range()
270 ret = add_munmap(addr, next - addr, hvc); in update_pmd_range()
274 else ret = update_pte_range(pmd, addr, next, hvc); in update_pmd_range()
281 struct host_vm_change *hvc) in update_pud_range() argument
291 if (hvc->force || pud_newpage(*pud)) { in update_pud_range()
292 ret = add_munmap(addr, next - addr, hvc); in update_pud_range()
296 else ret = update_pmd_range(pud, addr, next, hvc); in update_pud_range()
303 struct host_vm_change *hvc) in update_p4d_range() argument
313 if (hvc->force || p4d_newpage(*p4d)) { in update_p4d_range()
314 ret = add_munmap(addr, next - addr, hvc); in update_p4d_range()
318 ret = update_pud_range(p4d, addr, next, hvc); in update_p4d_range()
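The update_*_range() walkers descend the four page-table levels; at each level an entry flagged by *_newpage() (or hvc->force) becomes one add_munmap() covering the whole span, and only the PTE leaf decides per page between remap, unmap and protection change. A flattened single-level sketch of that leaf logic, with a hypothetical struct pte_sketch in place of the kernel's pte_present()/pte_newpage()/pte_newprot() accessors:

#define SKETCH_PAGE_SIZE 4096UL

/* Hypothetical flattened pte; stands in for the kernel's pte_t bits. */
struct pte_sketch {
        unsigned int present:1, newpage:1, newprot:1, prot:3;
};

/* Leaf of the walk (the update_pte_range() pattern): per page, remap
 * or unmap when the page itself changed, mprotect when only the
 * protection bits did. */
static int update_pte_range_sketch(struct pte_sketch *pte,
                                   unsigned long addr, unsigned long end,
                                   struct vm_change *vc)
{
        int ret = 0;

        for (; addr < end && !ret; addr += SKETCH_PAGE_SIZE, pte++) {
                if (vc->force || pte->newpage) {
                        if (pte->present)
                                ret = queue_op_sketch(vc, OP_MMAP, addr,
                                                      SKETCH_PAGE_SIZE,
                                                      pte->prot);
                        else
                                ret = queue_op_sketch(vc, OP_MUNMAP, addr,
                                                      SKETCH_PAGE_SIZE, 0);
                } else if (pte->newprot) {
                        ret = queue_op_sketch(vc, OP_MPROTECT, addr,
                                              SKETCH_PAGE_SIZE, pte->prot);
                }
        }
        return ret;
}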
327 struct host_vm_change hvc; in fix_range_common() local
331 hvc = INIT_HVC(mm, force, userspace); in fix_range_common()
337 ret = add_munmap(addr, next - addr, &hvc); in fix_range_common()
341 ret = update_p4d_range(pgd, addr, next, &hvc); in fix_range_common()
345 ret = do_ops(&hvc, hvc.index, 1); in fix_range_common()
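fix_range_common() is the driver: it initializes the batch with INIT_HVC(mm, force, userspace), walks the pgd range queueing ops, and finally calls do_ops(&hvc, hvc.index, 1) so the remainder of the batch is flushed with finished set. The same flow against the sketches above, with error handling reduced to returning the code (the real function reports the error and kills the current process):

/* Driver pattern of fix_range_common(): init, walk, final flush. */
static int fix_range_sketch(struct vm_change *vc, struct pte_sketch *ptes,
                            unsigned long start, unsigned long end,
                            int force)
{
        int ret;

        vc->index = 0;          /* INIT_HVC(mm, force, userspace) analogue */
        vc->force = force;
        vc->userspace = 1;

        ret = update_pte_range_sketch(ptes, start, end, vc);
        if (!ret)               /* flush what is still queued, finished=1 */
                ret = do_ops_sketch(vc, vc->index, 1);
        return ret;
}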
368 struct host_vm_change hvc; in flush_tlb_kernel_range_common() local
371 hvc = INIT_HVC(mm, force, userspace); in flush_tlb_kernel_range_common()
380 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
396 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
412 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
428 err = add_munmap(addr, last - addr, &hvc); in flush_tlb_kernel_range_common()
440 err = add_munmap(addr, PAGE_SIZE, &hvc); in flush_tlb_kernel_range_common()
446 PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
450 err = add_mprotect(addr, PAGE_SIZE, 0, &hvc); in flush_tlb_kernel_range_common()
455 err = do_ops(&hvc, hvc.index, 1); in flush_tlb_kernel_range_common()
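flush_tlb_kernel_range_common() follows the same shape for the kernel's own mappings: each hole found at the pgd/p4d/pud/pmd level becomes one add_munmap() covering the span up to the next populated entry, changed leaf pages are unmapped, remapped, or re-protected, and one final do_ops(&hvc, hvc.index, 1) replays the batch. Exercising the sketches the same way (the userspace = 0, force = 1 setup is my reading of this function's INIT_HVC call, not something the hits above show directly):

/* Rough analogue of the flush_tlb_kernel_range_common() flow. */
static int flush_kernel_range_sketch(struct vm_change *vc,
                                     struct pte_sketch *ptes,
                                     unsigned long start, unsigned long end)
{
        int err;

        vc->index = 0;
        vc->userspace = 0;      /* kernel mappings, not a user mm */
        vc->force = 1;          /* assumed INIT_HVC(mm, 1, 0) here */

        err = update_pte_range_sketch(ptes, start, end, vc);
        if (!err)
                err = do_ops_sketch(vc, vc->index, 1);
        return err;
}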