Lines Matching +refs:get +refs:addr +refs:attrs

2442 unsigned long attrs)  in kvm_range_has_memory_attributes()  argument
2451 if (!attrs) { in kvm_range_has_memory_attributes()
2462 if (xas.xa_index != index || xa_to_value(entry) != attrs) { in kvm_range_has_memory_attributes()
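The kvm_range_has_memory_attributes() matches above (source lines 2442-2462) check that every page in [start, end) carries exactly the requested attribute mask: an empty mask is special-cased, and the walk fails if an index is missing (xas.xa_index != index) or its stored value differs from attrs. Below is a minimal userspace model of that check, using a plain sorted array in place of the kernel xarray; the struct and helper names are hypothetical illustration, not KVM code.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the kernel xarray: one entry per gfn that
     * has any attributes set, kept sorted by gfn; absent gfns have none. */
    struct attr_entry {
        unsigned long index;   /* gfn */
        unsigned long value;   /* attribute mask stored for that gfn */
    };

    /* Mirror of the check in kvm_range_has_memory_attributes(): every gfn in
     * [start, end) must be present with a value equal to attrs; if attrs is 0,
     * no gfn in the range may have any attributes at all. */
    static bool range_has_attrs(const struct attr_entry *map, size_t n,
                                unsigned long start, unsigned long end,
                                unsigned long attrs)
    {
        unsigned long index = start;

        for (size_t i = 0; i < n; i++) {
            if (map[i].index < start || map[i].index >= end)
                continue;
            if (!attrs)
                return false;                /* range must be empty */
            if (map[i].index != index || map[i].value != attrs)
                return false;                /* gap or mismatching mask */
            index++;
        }
        return attrs ? index == end : true;
    }

    int main(void)
    {
        struct attr_entry map[] = { {4, 0x8}, {5, 0x8}, {6, 0x8} };

        printf("%d\n", range_has_attrs(map, 3, 4, 7, 0x8));  /* 1: uniform */
        printf("%d\n", range_has_attrs(map, 3, 4, 8, 0x8));  /* 0: gfn 7 missing */
        return 0;
    }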
2602 struct kvm_memory_attributes *attrs) in kvm_vm_ioctl_set_mem_attributes() argument
2607 if (attrs->flags) in kvm_vm_ioctl_set_mem_attributes()
2609 if (attrs->attributes & ~kvm_supported_mem_attributes(kvm)) in kvm_vm_ioctl_set_mem_attributes()
2611 if (attrs->size == 0 || attrs->address + attrs->size < attrs->address) in kvm_vm_ioctl_set_mem_attributes()
2613 if (!PAGE_ALIGNED(attrs->address) || !PAGE_ALIGNED(attrs->size)) in kvm_vm_ioctl_set_mem_attributes()
2616 start = attrs->address >> PAGE_SHIFT; in kvm_vm_ioctl_set_mem_attributes()
2617 end = (attrs->address + attrs->size) >> PAGE_SHIFT; in kvm_vm_ioctl_set_mem_attributes()
2624 BUILD_BUG_ON(sizeof(attrs->attributes) != sizeof(unsigned long)); in kvm_vm_ioctl_set_mem_attributes()
2626 return kvm_vm_set_mem_attributes(kvm, start, end, attrs->attributes); in kvm_vm_ioctl_set_mem_attributes()
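The validation visible above spells out the userspace contract for KVM_SET_MEMORY_ATTRIBUTES: flags must be zero, only supported attribute bits may be set, size must be non-zero with no address wrap, and both address and size must be page-aligned before the range is converted to gfns. A hedged sketch of how a VMM would issue the ioctl, assuming <linux/kvm.h> on a kernel recent enough to provide struct kvm_memory_attributes and KVM_MEMORY_ATTRIBUTE_PRIVATE; the vm_fd is assumed to come from an earlier KVM_CREATE_VM.

    #include <linux/kvm.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Mark a page-aligned GPA range as private, respecting the checks that
     * kvm_vm_ioctl_set_mem_attributes() enforces on the kernel side. */
    int set_range_private(int vm_fd, __u64 gpa, __u64 size)
    {
        struct kvm_memory_attributes attrs;

        memset(&attrs, 0, sizeof(attrs));
        attrs.address = gpa;      /* must be page-aligned */
        attrs.size = size;        /* must be page-aligned and non-zero */
        attrs.attributes = KVM_MEMORY_ATTRIBUTE_PRIVATE;
        attrs.flags = 0;          /* non-zero flags are rejected */

        if (ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attrs) < 0) {
            perror("KVM_SET_MEMORY_ATTRIBUTES");
            return -1;
        }
        return 0;
    }

A range that fails any of the alignment or overflow checks above never reaches kvm_vm_set_mem_attributes(); the ioctl returns -EINVAL before the gfn range is computed.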
2688 unsigned long addr, size; in kvm_host_page_size() local
2692 addr = kvm_vcpu_gfn_to_hva_prot(vcpu, gfn, NULL); in kvm_host_page_size()
2693 if (kvm_is_error_hva(addr)) in kvm_host_page_size()
2697 vma = find_vma(current->mm, addr); in kvm_host_page_size()
2787 static inline int check_user_page_hwpoison(unsigned long addr) in check_user_page_hwpoison() argument
2791 rc = get_user_pages(addr, 1, flags, NULL); in check_user_page_hwpoison()
2800 static bool hva_to_pfn_fast(unsigned long addr, bool write_fault, in hva_to_pfn_fast() argument
2813 if (get_user_page_fast_only(addr, FOLL_WRITE, page)) { in hva_to_pfn_fast()
2828 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, in hva_to_pfn_slow() argument
2858 npages = get_user_pages_unlocked(addr, 1, &page, flags); in hva_to_pfn_slow()
2866 if (get_user_page_fast_only(addr, FOLL_WRITE, &wpage)) { in hva_to_pfn_slow()
2898 unsigned long addr, bool write_fault, in hva_to_pfn_remapped() argument
2907 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); in hva_to_pfn_remapped()
2914 r = fixup_user_fault(current->mm, addr, in hva_to_pfn_remapped()
2922 r = follow_pte(vma->vm_mm, addr, &ptep, &ptl); in hva_to_pfn_remapped()
2980 kvm_pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool interruptible, in hva_to_pfn() argument
2990 if (hva_to_pfn_fast(addr, write_fault, writable, &pfn)) in hva_to_pfn()
2996 npages = hva_to_pfn_slow(addr, async, write_fault, interruptible, in hva_to_pfn()
3005 (!async && check_user_page_hwpoison(addr))) { in hva_to_pfn()
3011 vma = vma_lookup(current->mm, addr); in hva_to_pfn()
3016 r = hva_to_pfn_remapped(vma, addr, write_fault, writable, &pfn); in hva_to_pfn()
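Taken together, the hva_to_pfn*() matches outline a three-stage fallback: a lock-free get_user_page_fast_only() attempt, then get_user_pages_unlocked() (with a hwpoison check when that fails), and finally, for VMAs that GUP cannot pin (VM_IO/VM_PFNMAP), a PTE walk via follow_pte(), retried once after fixup_user_fault(). The stand-alone sketch below only models that control flow; the three stage functions are hypothetical stubs, not the real kernel helpers, and do no pinning.

    #include <stdbool.h>
    #include <stdio.h>

    typedef unsigned long long pfn_t;
    #define PFN_ERR ((pfn_t)-1)

    /* Hypothetical stand-ins for hva_to_pfn_fast(), hva_to_pfn_slow() and
     * hva_to_pfn_remapped(); each returns true and fills *pfn on success. */
    static bool pin_fast(unsigned long hva, pfn_t *pfn)        { (void)hva; (void)pfn; return false; }
    static bool pin_slow(unsigned long hva, pfn_t *pfn)        { (void)hva; (void)pfn; return false; }
    static bool lookup_remapped(unsigned long hva, pfn_t *pfn) { (void)hva; *pfn = 0x1234; return true; }

    /* Mirrors the ordering in hva_to_pfn(): cheap lock-free path first, the
     * sleeping GUP path second, and the PTE walk for VM_IO/VM_PFNMAP last. */
    static pfn_t translate_hva(unsigned long hva)
    {
        pfn_t pfn;

        if (pin_fast(hva, &pfn))
            return pfn;
        if (pin_slow(hva, &pfn))
            return pfn;
        if (lookup_remapped(hva, &pfn))
            return pfn;
        return PFN_ERR;
    }

    int main(void)
    {
        printf("pfn = 0x%llx\n", translate_hva(0x7f0000000000UL));
        return 0;
    }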
3035 unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault); in __gfn_to_pfn_memslot() local
3038 *hva = addr; in __gfn_to_pfn_memslot()
3040 if (addr == KVM_HVA_ERR_RO_BAD) { in __gfn_to_pfn_memslot()
3046 if (kvm_is_error_hva(addr)) { in __gfn_to_pfn_memslot()
3058 return hva_to_pfn(addr, atomic, interruptible, async, write_fault, in __gfn_to_pfn_memslot()
3106 unsigned long addr; in gfn_to_page_many_atomic() local
3109 addr = gfn_to_hva_many(slot, gfn, &entry); in gfn_to_page_many_atomic()
3110 if (kvm_is_error_hva(addr)) in gfn_to_page_many_atomic()
3116 return get_user_pages_fast_only(addr, nr_pages, FOLL_WRITE, pages); in gfn_to_page_many_atomic()
3317 unsigned long addr; in __kvm_read_guest_page() local
3319 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_page()
3320 if (kvm_is_error_hva(addr)) in __kvm_read_guest_page()
3322 r = __copy_from_user(data, (void __user *)addr + offset, len); in __kvm_read_guest_page()
3390 unsigned long addr; in __kvm_read_guest_atomic() local
3392 addr = gfn_to_hva_memslot_prot(slot, gfn, NULL); in __kvm_read_guest_atomic()
3393 if (kvm_is_error_hva(addr)) in __kvm_read_guest_atomic()
3396 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len); in __kvm_read_guest_atomic()
3419 unsigned long addr; in __kvm_write_guest_page() local
3421 addr = gfn_to_hva_memslot(memslot, gfn); in __kvm_write_guest_page()
3422 if (kvm_is_error_hva(addr)) in __kvm_write_guest_page()
3424 r = __copy_to_user((void __user *)addr + offset, data, len); in __kvm_write_guest_page()
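__kvm_read_guest_page(), __kvm_read_guest_atomic() and __kvm_write_guest_page() all follow the same shape: resolve the gfn to a host virtual address through the memslot, bail out on an error hva, then copy to or from the user mapping. A userspace VMM that registered its memslots with KVM_SET_USER_MEMORY_REGION can mirror the same gfn-to-hva arithmetic on its own slot table; the slot structure and helpers below are hypothetical illustration, not KVM API.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define PAGE_SHIFT 12

    /* Hypothetical VMM-side record of what was registered with
     * KVM_SET_USER_MEMORY_REGION: guest physical base, size, host mapping. */
    struct memslot {
        uint64_t guest_phys_addr;
        uint64_t memory_size;
        uint8_t *host_va;
    };

    /* gfn -> host pointer, or NULL if the gfn is outside every slot; the
     * kernel analogue is gfn_to_hva_memslot() plus the error-hva check. */
    static uint8_t *gfn_to_hva(struct memslot *slots, size_t nslots, uint64_t gfn)
    {
        uint64_t gpa = gfn << PAGE_SHIFT;

        for (size_t i = 0; i < nslots; i++) {
            if (gpa >= slots[i].guest_phys_addr &&
                gpa < slots[i].guest_phys_addr + slots[i].memory_size)
                return slots[i].host_va + (gpa - slots[i].guest_phys_addr);
        }
        return NULL;
    }

    /* Same shape as __kvm_read_guest_page(): translate, check, copy. */
    static int read_guest(struct memslot *slots, size_t nslots,
                          uint64_t gfn, unsigned offset, void *data, size_t len)
    {
        uint8_t *hva = gfn_to_hva(slots, nslots, gfn);

        if (!hva)
            return -1;
        memcpy(data, hva + offset, len);
        return 0;
    }

    int main(void)
    {
        uint8_t *backing = calloc(1, 1 << PAGE_SHIFT);
        struct memslot slot = { .guest_phys_addr = 0x100000,
                                .memory_size = 1 << PAGE_SHIFT,
                                .host_va = backing };
        uint8_t buf[1] = {0};

        backing[16] = 0xab;
        if (read_guest(&slot, 1, 0x100000 >> PAGE_SHIFT, 16, buf, 1) == 0)
            printf("read 0x%02x\n", buf[0]);
        free(backing);
        return 0;
    }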
5281 struct kvm_memory_attributes attrs; in kvm_vm_ioctl() local
5284 if (copy_from_user(&attrs, argp, sizeof(attrs))) in kvm_vm_ioctl()
5287 r = kvm_vm_ioctl_set_mem_attributes(kvm, &attrs); in kvm_vm_ioctl()
5735 gpa_t addr1 = r1->addr; in kvm_io_bus_cmp()
5736 gpa_t addr2 = r2->addr; in kvm_io_bus_cmp()
5763 gpa_t addr, int len) in kvm_io_bus_get_first_dev() argument
5769 .addr = addr, in kvm_io_bus_get_first_dev()
5791 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); in __kvm_io_bus_write()
5797 if (!kvm_iodevice_write(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_write()
5807 int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_write() argument
5815 .addr = addr, in kvm_io_bus_write()
5829 gpa_t addr, int len, const void *val, long cookie) in kvm_io_bus_write_cookie() argument
5835 .addr = addr, in kvm_io_bus_write_cookie()
5846 if (!kvm_iodevice_write(vcpu, bus->range[cookie].dev, addr, len, in kvm_io_bus_write_cookie()
5862 idx = kvm_io_bus_get_first_dev(bus, range->addr, range->len); in __kvm_io_bus_read()
5868 if (!kvm_iodevice_read(vcpu, bus->range[idx].dev, range->addr, in __kvm_io_bus_read()
5878 int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_read() argument
5886 .addr = addr, in kvm_io_bus_read()
5897 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr, in kvm_io_bus_register_dev() argument
5920 .addr = addr, in kvm_io_bus_register_dev()
5990 gpa_t addr) in kvm_io_bus_get_dev() argument
6002 dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1); in kvm_io_bus_get_dev()
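The kvm_io_bus_*() fragments show the bus keeping its device ranges sorted (kvm_io_bus_cmp() compares the two addresses) so that kvm_io_bus_get_first_dev() can locate the first range covering an (addr, len) access, after which __kvm_io_bus_write()/__kvm_io_bus_read() walk forward over every matching device. The small model below keeps that sorted-range dispatch idea but replaces the binary search with a linear scan and uses hypothetical range and handler types rather than the kernel's struct kvm_io_range.

    #include <stdio.h>
    #include <stdlib.h>

    typedef unsigned long long gpa_t;

    /* Hypothetical analogue of struct kvm_io_range: a table of device
     * windows, each with a write handler, kept sorted by address. */
    struct io_range {
        gpa_t addr;
        int len;
        int (*write)(gpa_t addr, int len, const void *val);
    };

    static int cmp_range(const void *a, const void *b)
    {
        const struct io_range *r1 = a, *r2 = b;

        if (r1->addr != r2->addr)
            return r1->addr < r2->addr ? -1 : 1;
        return r1->len - r2->len;   /* same tie-break idea as kvm_io_bus_cmp() */
    }

    /* Dispatch like __kvm_io_bus_write(): try every device whose window
     * covers the access; the first one that accepts it wins. */
    static int bus_write(struct io_range *bus, size_t n,
                         gpa_t addr, int len, const void *val)
    {
        for (size_t i = 0; i < n; i++) {
            if (addr >= bus[i].addr && addr + len <= bus[i].addr + bus[i].len)
                if (!bus[i].write(addr, len, val))
                    return 0;
        }
        return -1;                  /* no device claimed the access */
    }

    static int serial_write(gpa_t addr, int len, const void *val)
    {
        (void)val;
        printf("serial: %d byte(s) at 0x%llx\n", len, addr);
        return 0;
    }

    int main(void)
    {
        struct io_range bus[] = { { 0x3f8, 8, serial_write } };
        char byte = 'x';

        qsort(bus, 1, sizeof(bus[0]), cmp_range);   /* registration keeps the table sorted */
        return bus_write(bus, 1, 0x3f8, 1, &byte);
    }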
6016 int (*get)(void *, u64 *), int (*set)(void *, u64), in kvm_debugfs_open()
6030 ret = simple_attr_open(inode, file, get, in kvm_debugfs_open()