Lines matching: slot, -, size (KVM guest_memfd)
// SPDX-License-Identifier: GPL-2.0
#include <linux/backing-dev.h>

/* in kvm_gmem_get_folio(): */
	folio = filemap_grab_folio(inode->i_mapping, index);

	/* Use the up-to-date flag to track whether or not the memory has been ... */
	/* ... storage for the memory, so the folio will remain up-to-date until ... */

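The two comment fragments above describe guest_memfd's zero-on-first-use scheme: the memory has no backing storage, so a folio is cleared exactly once when it is first allocated and then stays up-to-date until it is truncated away. Below is a minimal sketch of how the surrounding code plausibly uses the flag, assembled from the visible fragments; the helper name gmem_get_zeroed_folio and the error handling are assumptions, not the verbatim upstream function.

#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>

/* Hypothetical helper: grab a folio and zero it on first use (sketch). */
static struct folio *gmem_get_zeroed_folio(struct inode *inode, pgoff_t index)
{
	struct folio *folio;

	folio = filemap_grab_folio(inode->i_mapping, index);
	if (IS_ERR_OR_NULL(folio))
		return NULL;

	/*
	 * With no backing storage the folio can never be reclaimed and
	 * re-read, so "up-to-date" doubles as "already zeroed": clear the
	 * pages once and the flag then stays set for the folio's lifetime.
	 */
	if (!folio_test_uptodate(folio)) {
		unsigned long i, nr_pages = folio_nr_pages(folio);

		for (i = 0; i < nr_pages; i++)
			clear_highpage(folio_page(folio, i));

		folio_mark_uptodate(folio);
	}

	return folio;
}
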
/* in kvm_gmem_invalidate_begin(): */
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;

	xa_for_each_range(&gmem->bindings, index, slot, start, end - 1) {
		pgoff_t pgoff = slot->gmem.pgoff;

			.start = slot->base_gfn + max(pgoff, start) - pgoff,
			.end = slot->base_gfn + min(pgoff + slot->npages, end) - pgoff,
			.slot = slot,

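The .start/.end arithmetic above clamps the invalidation range, given in file page offsets, to the part actually covered by a binding and converts it to guest frame numbers. The snippet below restates that conversion as a standalone helper; the helper name and the flattened parameter list are illustrative, not part of the file.

#include <linux/minmax.h>
#include <linux/types.h>

/* Hypothetical helper restating the clamp-and-convert arithmetic above. */
static void gmem_pgoffs_to_gfn_range(u64 base_gfn, u64 pgoff, u64 npages,
				     u64 start, u64 end,
				     u64 *gfn_start, u64 *gfn_end)
{
	/* Clamp [start, end) to the file offsets this slot is bound to. */
	*gfn_start = base_gfn + max(pgoff, start) - pgoff;
	*gfn_end   = base_gfn + min(pgoff + npages, end) - pgoff;
}

For example, a slot with base_gfn 0x100 bound at pgoff 4 with npages 8, hit by an invalidation of file pages [0, 6), yields the gfn range [0x100, 0x102).
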
/* in kvm_gmem_invalidate_end(): */
	struct kvm *kvm = gmem->kvm;

	if (xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {

/* in kvm_gmem_punch_hole(): */
	struct list_head *gmem_list = &inode->i_mapping->i_private_list;

	filemap_invalidate_lock(inode->i_mapping);

	truncate_inode_pages_range(inode->i_mapping, offset, offset + len - 1);

	filemap_invalidate_unlock(inode->i_mapping);

/* in kvm_gmem_allocate(): */
	struct address_space *mapping = inode->i_mapping;

		return -EINVAL;

		r = -EINTR;

		r = -ENOMEM;

	/* 64-bit only, wrapping the index should be impossible. */

/* in kvm_gmem_fallocate(): */
		return -EOPNOTSUPP;

		return -EOPNOTSUPP;

		return -EINVAL;

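kvm_gmem_fallocate() accepts only hole punching with FALLOC_FL_KEEP_SIZE and page-aligned offsets, returning -EOPNOTSUPP or -EINVAL otherwise; punching a hole is how userspace releases guest_memfd pages. A minimal userspace sketch follows, assuming gmem_fd is a guest_memfd file descriptor obtained from KVM (the name and offsets are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

/* Free a page-aligned range of a guest_memfd by punching a hole. */
static int punch_gmem_hole(int gmem_fd, off_t offset, off_t len)
{
	if (fallocate(gmem_fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      offset, len)) {
		perror("fallocate(FALLOC_FL_PUNCH_HOLE)");
		return -1;
	}
	return 0;
}
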
/* in kvm_gmem_release(): */
	struct kvm_gmem *gmem = file->private_data;
	struct kvm_memory_slot *slot;
	struct kvm *kvm = gmem->kvm;

	/* ... dereferencing the slot for existing bindings needs to be protected ... */
	mutex_lock(&kvm->slots_lock);

	filemap_invalidate_lock(inode->i_mapping);

	xa_for_each(&gmem->bindings, index, slot)
		rcu_assign_pointer(slot->gmem.file, NULL);

	/* ... All in-flight operations are gone and new bindings can be created. */
	kvm_gmem_invalidate_begin(gmem, 0, -1ul);
	kvm_gmem_invalidate_end(gmem, 0, -1ul);

	list_del(&gmem->entry);

	filemap_invalidate_unlock(inode->i_mapping);

	mutex_unlock(&kvm->slots_lock);

	xa_destroy(&gmem->bindings);

static inline struct file *kvm_gmem_get_file(struct kvm_memory_slot *slot)
	/* Do not return slot->gmem.file if it has already been closed; ... */
	/* ... kvm_gmem_release() clears slot->gmem.file, and you do not ... */
	return get_file_active(&slot->gmem.file);

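get_file_active() only hands back the file when a reference can still be taken, so callers must treat NULL as "the guest_memfd has gone away" and pair every successful lookup with fput(). A sketch of that caller pattern; do_something_with_gmem() is a hypothetical stand-in for the real work:

/* Hedged sketch of the lookup/use/fput pattern around kvm_gmem_get_file(). */
static int kvm_gmem_with_file(struct kvm_memory_slot *slot)
{
	struct file *file;
	int r;

	file = kvm_gmem_get_file(slot);
	if (!file)
		return -EFAULT;	/* binding already torn down, as in kvm_gmem_get_pfn() */

	r = do_something_with_gmem(file->private_data, slot);

	fput(file);		/* balance the reference taken by get_file_active() */
	return r;
}
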
/* in kvm_gmem_migrate_folio(): */
	return -EINVAL;

/* in kvm_gmem_error_folio(): */
	struct list_head *gmem_list = &mapping->i_private_list;

	start = folio->index;

	/* ... access a poisoned page, kvm_gmem_get_pfn() will return -EHWPOISON, ... */

/* in kvm_gmem_getattr(): */
	struct inode *inode = path->dentry->d_inode;

/* in kvm_gmem_setattr(): */
	return -EINVAL;

static int __kvm_gmem_create(struct kvm *kvm, loff_t size, u64 flags)
	const char *anon_name = "[kvm-gmem]";

	err = -ENOMEM;

	file->f_flags |= O_LARGEFILE;

	inode = file->f_inode;
	WARN_ON(file->f_mapping != inode->i_mapping);

	inode->i_private = (void *)(unsigned long)flags;
	inode->i_op = &kvm_gmem_iops;
	inode->i_mapping->a_ops = &kvm_gmem_aops;
	inode->i_mode |= S_IFREG;
	inode->i_size = size;
	mapping_set_gfp_mask(inode->i_mapping, GFP_HIGHUSER);
	mapping_set_unmovable(inode->i_mapping);

	WARN_ON_ONCE(!mapping_unevictable(inode->i_mapping));

	gmem->kvm = kvm;
	xa_init(&gmem->bindings);
	list_add(&gmem->entry, &inode->i_mapping->i_private_list);

/* in kvm_gmem_create(): */
	loff_t size = args->size;
	u64 flags = args->flags;

		return -EINVAL;

	if (size <= 0 || !PAGE_ALIGNED(size))
		return -EINVAL;

	return __kvm_gmem_create(kvm, size, flags);

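kvm_gmem_create() backs the KVM_CREATE_GUEST_MEMFD vm ioctl: unknown flags are rejected and the size must be positive and page-aligned. A minimal userspace sketch of creating a guest_memfd, assuming <linux/kvm.h> is new enough to define the ioctl and vm_fd is an open VM file descriptor:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Create a guest_memfd on an existing VM; returns the new fd or -1. */
static int create_guest_memfd(int vm_fd, __u64 size)
{
	struct kvm_create_guest_memfd gmem = {
		.size  = size,	/* must be non-zero and page-aligned */
		.flags = 0,
	};
	int fd;

	fd = ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &gmem);
	if (fd < 0)
		perror("KVM_CREATE_GUEST_MEMFD");
	return fd;
}
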
int kvm_gmem_bind(struct kvm *kvm, struct kvm_memory_slot *slot,
	loff_t size = slot->npages << PAGE_SHIFT;
	int r = -EINVAL;

	BUILD_BUG_ON(sizeof(gfn_t) != sizeof(slot->gmem.pgoff));

		return -EBADF;

	if (file->f_op != &kvm_gmem_fops)

	gmem = file->private_data;
	if (gmem->kvm != kvm)

	    offset + size > i_size_read(inode))

	filemap_invalidate_lock(inode->i_mapping);

	end = start + slot->npages;

	if (!xa_empty(&gmem->bindings) &&
	    xa_find(&gmem->bindings, &start, end - 1, XA_PRESENT)) {
		filemap_invalidate_unlock(inode->i_mapping);

	/* No synchronize_rcu() needed, any in-flight readers are guaranteed to ... */
	rcu_assign_pointer(slot->gmem.file, file);
	slot->gmem.pgoff = start;

	xa_store_range(&gmem->bindings, start, end - 1, slot, GFP_KERNEL);
	filemap_invalidate_unlock(inode->i_mapping);

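kvm_gmem_bind() is the kernel side of installing a memslot that references a guest_memfd: the fd must be a guest_memfd belonging to this VM, the bound range must fit inside i_size, and overlapping bindings are refused. A minimal userspace sketch of the corresponding KVM_SET_USER_MEMORY_REGION2 call, assuming headers new enough for KVM_MEM_GUEST_MEMFD; the slot number and addresses are illustrative:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

/* Bind [gmem_offset, gmem_offset + size) of a guest_memfd at guest physical address gpa. */
static int bind_guest_memfd(int vm_fd, int gmem_fd, __u64 gpa, __u64 size,
			    __u64 gmem_offset, __u64 shared_uaddr)
{
	struct kvm_userspace_memory_region2 region = {
		.slot			= 0,
		.flags			= KVM_MEM_GUEST_MEMFD,
		.guest_phys_addr	= gpa,
		.memory_size		= size,
		.userspace_addr		= shared_uaddr,	/* shared view, may be 0 if unused */
		.guest_memfd_offset	= gmem_offset,	/* page-aligned offset into the gmem fd */
		.guest_memfd		= gmem_fd,
	};

	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION2, &region)) {
		perror("KVM_SET_USER_MEMORY_REGION2");
		return -1;
	}
	return 0;
}
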
void kvm_gmem_unbind(struct kvm_memory_slot *slot)
	unsigned long start = slot->gmem.pgoff;
	unsigned long end = start + slot->npages;

	file = kvm_gmem_get_file(slot);

	gmem = file->private_data;

	filemap_invalidate_lock(file->f_mapping);
	xa_store_range(&gmem->bindings, start, end - 1, NULL, GFP_KERNEL);
	rcu_assign_pointer(slot->gmem.file, NULL);
	filemap_invalidate_unlock(file->f_mapping);

int kvm_gmem_get_pfn(struct kvm *kvm, struct kvm_memory_slot *slot,
	pgoff_t index = gfn - slot->base_gfn + slot->gmem.pgoff;

	file = kvm_gmem_get_file(slot);
		return -EFAULT;

	gmem = file->private_data;

	if (WARN_ON_ONCE(xa_load(&gmem->bindings, index) != slot)) {
		r = -EIO;

		r = -ENOMEM;

		r = -EHWPOISON;

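Read together, the fragments show kvm_gmem_get_pfn()'s error contract: -EFAULT when the binding has already been torn down, -EIO when the xarray entry no longer matches the slot, -ENOMEM when the folio cannot be allocated, and -EHWPOISON for a poisoned page. A hedged sketch of a consumer follows; the parameter list beyond what is visible above and the helper name are assumptions:

/* Hypothetical consumer of kvm_gmem_get_pfn(); signature details are assumed. */
static int demo_map_private_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
				gfn_t gfn)
{
	kvm_pfn_t pfn;
	int max_order;
	int r;

	r = kvm_gmem_get_pfn(kvm, slot, gfn, &pfn, &max_order);
	if (r == -EHWPOISON)	/* surface memory poison to the caller */
		return r;
	if (r)			/* -EFAULT, -EIO or -ENOMEM, see above */
		return r;

	/* ... install pfn into the private stage-2 mapping ... */

	return 0;
}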