Lines Matching +full:data +full:-mapping (matches are from drivers/gpu/drm/i915/gem/i915_gem_shmem.c; each line is prefixed with its file line number)

2  * SPDX-License-Identifier: MIT
4 * Copyright © 2014-2016 Intel Corporation
30 struct drm_i915_private *i915 = to_i915(obj->base.dev); in shmem_get_pages()
31 struct intel_memory_region *mem = obj->mm.region; in shmem_get_pages()
32 const unsigned long page_count = obj->base.size / PAGE_SIZE; in shmem_get_pages()
34 struct address_space *mapping; in shmem_get_pages() local
50 GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); in shmem_get_pages()
51 GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); in shmem_get_pages()
57 if (obj->base.size > resource_size(&mem->region)) in shmem_get_pages()
58 return -ENOMEM; in shmem_get_pages()
62 return -ENOMEM; in shmem_get_pages()
67 return -ENOMEM; in shmem_get_pages()
76 mapping = obj->base.filp->f_mapping; in shmem_get_pages()
77 mapping_set_unevictable(mapping); in shmem_get_pages()
78 noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM); in shmem_get_pages()
81 sg = st->sgl; in shmem_get_pages()
82 st->nents = 0; in shmem_get_pages()
93 page = shmem_read_mapping_page_gfp(mapping, i, gfp); in shmem_get_pages()
115 gfp = mapping_gfp_mask(mapping); in shmem_get_pages()
125 * dirty pages -- unless you try over and over in shmem_get_pages()
128 * trigger the out-of-memory killer and for in shmem_get_pages()
136 sg->length >= max_segment || in shmem_get_pages()
139 sg_page_sizes |= sg->length; in shmem_get_pages()
142 st->nents++; in shmem_get_pages()
145 sg->length += PAGE_SIZE; in shmem_get_pages()
153 sg_page_sizes |= sg->length; in shmem_get_pages()
175 dev_warn(&i915->drm.pdev->dev, in shmem_get_pages()
192 mapping_clear_unevictable(mapping); in shmem_get_pages()
193 if (sg != st->sgl) { in shmem_get_pages()
216 if (ret == -ENOSPC) in shmem_get_pages()
217 ret = -ENOMEM; in shmem_get_pages()
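
The shmem_get_pages() matches (file lines 30-217) sketch i915's multi-pass allocation policy: try each page with reclaim masked off (the noreclaim mask on line 78), reap the driver's own buffers via its shrinker, and only on the final attempt fall back to the mapping's full gfp mask, with reclaim allowed but no OOM kill for this allocation, which is what the fragmentary comments around lines 125-128 discuss. Physically contiguous pages are coalesced into one scatterlist entry until max_segment is reached. The loop below is a condensed reconstruction from these matches and the upstream driver of this era, assuming the locals visible above; it is not verbatim, and the error unwinding is elided:

        for (i = 0; i < page_count; i++) {
                const unsigned int shrink[] = {
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND,
                        0,
                }, *s = shrink;
                gfp_t gfp = noreclaim;  /* no reclaim, __GFP_NORETRY | __GFP_NOWARN */

                do {
                        cond_resched();
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                        if (!IS_ERR(page))
                                break;

                        if (!*s) {
                                ret = PTR_ERR(page);
                                goto err_sg;    /* all passes exhausted; unwinding elided */
                        }

                        /* Reap our own buffers before troubling the VM. */
                        i915_gem_shrink(i915, 2 * page_count, NULL, *s++);

                        /* Final attempt: allow reclaim and warn, but no OOM. */
                        if (!*s)
                                gfp = mapping_gfp_mask(mapping);
                } while (1);

                /* Coalesce contiguous pages into one sg entry (lines 136-145). */
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
                        if (i) {
                                sg_page_sizes |= sg->length;
                                sg = sg_next(sg);
                        }
                        st->nents++;
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                } else {
                        sg->length += PAGE_SIZE;
                }
                last_pfn = page_to_pfn(page);
        }
        sg_page_sizes |= sg->length;    /* account the final entry (line 153) */
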
231 shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1); in shmem_truncate()
232 obj->mm.madv = __I915_MADV_PURGED; in shmem_truncate()
233 obj->mm.pages = ERR_PTR(-EFAULT); in shmem_truncate()
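
shmem_truncate() (lines 231-233) is small enough to reconstruct whole. Assuming the upstream body, it returns the backing pages to the system immediately and poisons the object's page state so any later use reports the purge:

        static void shmem_truncate(struct drm_i915_gem_object *obj)
        {
                /*
                 * Instruct shmfs to drop all backing pages now; a later
                 * lookup would fault in fresh, zeroed pages instead.
                 */
                shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
                obj->mm.madv = __I915_MADV_PURGED;
                obj->mm.pages = ERR_PTR(-EFAULT);
        }
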
239 struct address_space *mapping; in shmem_writeback() local
255 mapping = obj->base.filp->f_mapping; in shmem_writeback()
258 for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) { in shmem_writeback()
261 page = find_lock_page(mapping, i); in shmem_writeback()
269 ret = mapping->a_ops->writepage(page, &wbc); in shmem_writeback()
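
shmem_writeback() (lines 239-269) implements a manual writeback pass so an idle object's dirty pages can be aged out to swap. A sketch of the loop around the matched lines; the writeback_control initialiser follows the upstream driver:

        struct writeback_control wbc = {
                .sync_mode = WB_SYNC_NONE,
                .nr_to_write = SWAP_CLUSTER_MAX,
                .range_start = 0,
                .range_end = LLONG_MAX,
                .for_reclaim = 1,
        };
        unsigned long i;

        mapping = obj->base.filp->f_mapping;

        for (i = 0; i < obj->base.size >> PAGE_SHIFT; i++) {
                struct page *page;

                page = find_lock_page(mapping, i);
                if (!page)
                        continue;

                /* Only write back dirty pages with no live CPU mmaps. */
                if (!page_mapped(page) && clear_page_dirty_for_io(page)) {
                        int ret;

                        SetPageReclaim(page);
                        ret = mapping->a_ops->writepage(page, &wbc);
                        if (!PageWriteback(page))
                                ClearPageReclaim(page);
                        if (!ret)
                                goto put;       /* writepage() unlocked the page */
                }
                unlock_page(page);
        put:
                put_page(page);
        }
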
286 GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED); in __i915_gem_object_release_shmem()
288 if (obj->mm.madv == I915_MADV_DONTNEED) in __i915_gem_object_release_shmem()
289 obj->mm.dirty = false; in __i915_gem_object_release_shmem()
292 (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 && in __i915_gem_object_release_shmem()
293 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) in __i915_gem_object_release_shmem()
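
The __i915_gem_object_release_shmem() matches (lines 286-293) encode two cache rules: an object the user marked DONTNEED is treated as clean so its contents are never written back, and the CPU cache is flushed only if the object was outside the CPU read domain and not read-coherent. Reconstructed as a sketch; needs_clflush, drm_clflush_sg() and __start_cpu_write() are taken from the upstream function:

        void __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                             struct sg_table *pages,
                                             bool needs_clflush)
        {
                GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);

                if (obj->mm.madv == I915_MADV_DONTNEED)
                        obj->mm.dirty = false;  /* userspace discarded the contents */

                if (needs_clflush &&
                    (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
                    !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
                        drm_clflush_sg(pages);  /* make GPU writes visible to the CPU */

                __start_cpu_write(obj);
        }
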
313 mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping); in shmem_put_pages()
317 if (obj->mm.dirty) in shmem_put_pages()
320 if (obj->mm.madv == I915_MADV_WILLNEED) in shmem_put_pages()
328 obj->mm.dirty = false; in shmem_put_pages()
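
shmem_put_pages() (lines 313-328) hands the pages back to the page cache: the mapping becomes evictable again, the object's dirty flag is pushed down to each page so reclaim can write them to swap, and WILLNEED objects keep their pages warm on the LRU. A sketch, assuming i915's for_each_sgt_page() iterator:

        mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);

        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);           /* reclaim may swap it out */

                if (obj->mm.madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);       /* keep it near the LRU head */

                put_page(page);
        }
        obj->mm.dirty = false;
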
338 struct address_space *mapping = obj->base.filp->f_mapping; in shmem_pwrite() local
339 char __user *user_data = u64_to_user_ptr(arg->data_ptr); in shmem_pwrite()
344 GEM_BUG_ON(!access_ok(user_data, arg->size)); in shmem_pwrite()
353 * or clearing-before-use) before it is overwritten. in shmem_pwrite()
356 return -ENODEV; in shmem_pwrite()
358 if (obj->mm.madv != I915_MADV_WILLNEED) in shmem_pwrite()
359 return -EFAULT; in shmem_pwrite()
365 * races pwrite with any other operation; corruption will ensue - in shmem_pwrite()
369 remain = arg->size; in shmem_pwrite()
370 offset = arg->offset; in shmem_pwrite()
376 void *data, *vaddr; in shmem_pwrite() local
380 len = PAGE_SIZE - pg; in shmem_pwrite()
389 err = __get_user(c, user_data + len - 1); in shmem_pwrite()
393 err = pagecache_write_begin(obj->base.filp, mapping, in shmem_pwrite()
395 &page, &data); in shmem_pwrite()
405 err = pagecache_write_end(obj->base.filp, mapping, in shmem_pwrite()
406 offset, len, len - unwritten, in shmem_pwrite()
407 page, data); in shmem_pwrite()
411 /* We don't handle -EFAULT, leave it to the caller to check */ in shmem_pwrite()
413 return -ENODEV; in shmem_pwrite()
415 remain -= len; in shmem_pwrite()
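
shmem_pwrite() (lines 338-415) bypasses page pinning and writes through the page cache instead. The matched lines show the shape of one iteration: prefault both ends of the user range so the atomic copy rarely faults, then run a pagecache_write_begin() / __copy_from_user_inatomic() / pagecache_write_end() cycle per page. A condensed sketch under those assumptions, with pg the intra-page offset from line 380 (pagecache_write_begin/end are the pre-5.19 page-cache API):

        do {
                unsigned int len, unwritten;
                struct page *page;
                void *data, *vaddr;
                int err;
                char c;

                len = PAGE_SIZE - pg;
                if (len > remain)
                        len = remain;

                /* Prefault both ends of the user range before the atomic copy. */
                err = __get_user(c, user_data);
                if (err)
                        return err;
                err = __get_user(c, user_data + len - 1);
                if (err)
                        return err;

                err = pagecache_write_begin(obj->base.filp, mapping,
                                            offset, len, 0,
                                            &page, &data);
                if (err < 0)
                        return err;

                vaddr = kmap_atomic(page);
                unwritten = __copy_from_user_inatomic(vaddr + pg, user_data, len);
                kunmap_atomic(vaddr);

                err = pagecache_write_end(obj->base.filp, mapping,
                                          offset, len, len - unwritten,
                                          page, data);
                if (err < 0)
                        return err;

                /* We don't handle -EFAULT, leave it to the caller to check */
                if (unwritten)
                        return -ENODEV;

                remain -= len;
                user_data += len;
                offset += len;
                pg = 0;
        } while (remain);
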
428 fput(obj->base.filp); in shmem_release()
453 drm_gem_private_object_init(&i915->drm, obj, size); in __create_shmem()
455 if (i915->mm.gemfs) in __create_shmem()
456 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size, in __create_shmem()
463 obj->filp = filp; in __create_shmem()
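
__create_shmem() (lines 453-463) attaches a shmem file to a private GEM object. The one decision visible in the matches is the mount: prefer the driver's own gemfs mount (created so huge pages can back objects) and fall back to the kernel's global tmpfs mount. Reconstructed, assuming the upstream body:

        static int __create_shmem(struct drm_i915_private *i915,
                                  struct drm_gem_object *obj,
                                  resource_size_t size)
        {
                unsigned long flags = VM_NORESERVE;
                struct file *filp;

                drm_gem_private_object_init(&i915->drm, obj, size);

                if (i915->mm.gemfs)
                        filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
                                                         flags);
                else
                        filp = shmem_file_setup("i915", size, flags);

                if (IS_ERR(filp))
                        return PTR_ERR(filp);

                obj->filp = filp;
                return 0;
        }
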
473 struct drm_i915_private *i915 = mem->i915; in create_shmem()
475 struct address_space *mapping; in create_shmem() local
482 return ERR_PTR(-ENOMEM); in create_shmem()
484 ret = __create_shmem(i915, &obj->base, size); in create_shmem()
495 mapping = obj->base.filp->f_mapping; in create_shmem()
496 mapping_set_gfp_mask(mapping, mask); in create_shmem()
497 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM)); in create_shmem()
501 obj->write_domain = I915_GEM_DOMAIN_CPU; in create_shmem()
502 obj->read_domains = I915_GEM_DOMAIN_CPU; in create_shmem()
512 * get data visible to the CPU. in create_shmem()
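
The create_shmem() matches (lines 473-512) cover the gfp-mask setup and the initial domain and cache state; file line 512 is the tail of the comment explaining that LLC platforms share the CPU cache with the GPU. A sketch of that middle section, with the i965g/i965gm DMA32 quirk and i915_gem_object_set_cache_coherency() taken from upstream:

        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_I965GM(i915) || IS_I965G(i915)) {
                /* 965gm cannot relocate objects above 4GiB. */
                mask &= ~__GFP_HIGHMEM;
                mask |= __GFP_DMA32;
        }

        mapping = obj->base.filp->f_mapping;
        mapping_set_gfp_mask(mapping, mask);
        /* shmem_get_pages()'s final attempt relies on reclaim being allowed. */
        GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));

        obj->write_domain = I915_GEM_DOMAIN_CPU;
        obj->read_domains = I915_GEM_DOMAIN_CPU;

        /*
         * On LLC platforms the GPU snoops the CPU cache, so the object can
         * be cached: no clflush on the CPU side, and the GPU only flushes
         * internal caches to get data visible to the CPU.
         */
        if (HAS_LLC(i915))
                cache_level = I915_CACHE_LLC;
        else
                cache_level = I915_CACHE_NONE;
        i915_gem_object_set_cache_coherency(obj, cache_level);
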
536 return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_SMEM], in i915_gem_object_create_shmem()
540 /* Allocate a new GEM object and fill it with the supplied data */
543 const void *data, resource_size_t size) in i915_gem_object_create_shmem_from_data() argument
554 GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); in i915_gem_object_create_shmem_from_data()
556 file = obj->base.filp; in i915_gem_object_create_shmem_from_data()
563 err = pagecache_write_begin(file, file->f_mapping, in i915_gem_object_create_shmem_from_data()
570 memcpy(vaddr, data, len); in i915_gem_object_create_shmem_from_data()
573 err = pagecache_write_end(file, file->f_mapping, in i915_gem_object_create_shmem_from_data()
579 size -= len; in i915_gem_object_create_shmem_from_data()
580 data += len; in i915_gem_object_create_shmem_from_data()
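
i915_gem_object_create_shmem_from_data() (lines 540-580) reuses the same write-through technique as shmem_pwrite() to prefill a new object: round the size up to a whole page, then copy one page per pagecache_write_begin()/pagecache_write_end() cycle. A sketch under the same API assumptions as above:

        obj = i915_gem_object_create_shmem(i915, round_up(size, PAGE_SIZE));
        if (IS_ERR(obj))
                return obj;

        GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);

        file = obj->base.filp;
        offset = 0;
        do {
                unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
                struct page *page;
                void *pgdata, *vaddr;

                err = pagecache_write_begin(file, file->f_mapping,
                                            offset, len, 0,
                                            &page, &pgdata);
                if (err < 0)
                        goto fail;

                vaddr = kmap(page);
                memcpy(vaddr, data, len);
                kunmap(page);

                err = pagecache_write_end(file, file->f_mapping,
                                          offset, len, len,
                                          page, pgdata);
                if (err < 0)
                        goto fail;

                size -= len;
                data += len;
                offset += len;
        } while (size);

        return obj;

        fail:
        i915_gem_object_put(obj);
        return ERR_PTR(err);
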
595 err = i915_gemfs_init(mem->i915); in init_shmem()
608 i915_gemfs_fini(mem->i915); in release_shmem()
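
Finally, the memory-region hooks (lines 595-608): init_shmem() sets up the private gemfs mount but deliberately succeeds even when that fails, since every path above already copes with i915->mm.gemfs being NULL; release_shmem() tears it down. A sketch (the exact warning text is an assumption):

        static int init_shmem(struct intel_memory_region *mem)
        {
                int err;

                err = i915_gemfs_init(mem->i915);
                if (err)
                        DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n",
                                 err);

                /* Never fail: we can always fall back to the kernel mnt. */
                return 0;
        }

        static void release_shmem(struct intel_memory_region *mem)
        {
                i915_gemfs_fini(mem->i915);
        }
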