// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_object_frontbuffer.h"
#include "i915_gem_region.h"
#include "i915_gem_tiling.h"
#include "i915_scatterlist.h"

static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	/* Contiguous chunk, with a single scatterlist element */
	if (overflows_type(obj->base.size, sg->length))
		return -E2BIG;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

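	/* Wrap the contiguous allocation in a single-entry sg_table. */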
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

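	/*
	 * There is no struct page backing the coherent allocation, so
	 * stash its kernel vaddr in the page slot; put_pages_phys()
	 * retrieves it via sg_page() when freeing.
	 */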
	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

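	/* Copy the current shmem contents into the contiguous buffer. */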
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_sg;

		memcpy_from_page(dst, page, 0, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);

		put_page(page);
		dst += PAGE_SIZE;
	}

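	/* Make the clflushed contents globally visible to the GPU. */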
	intel_gt_chipset_flush(to_gt(i915));

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st);

	return 0;

err_sg:
	/* The table was successfully allocated, so free its entries too. */
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}

void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

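	/*
	 * Write any CPU modifications back to the shmem pages so the
	 * object's contents survive the release of the phys backing.
	 */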
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page)) {
				/* Keep src in step with i even if a page cannot be written back. */
				src += PAGE_SIZE;
				continue;
			}

			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy_to_page(page, 0, src, PAGE_SIZE);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}

int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

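	/* Flush the CPU write out of the cache so the GPU sees it. */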
	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(to_gt(i915));

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}

int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

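	/* Invalidate stale cachelines before copying back to userspace. */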
	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}

static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

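	/*
	 * Steal the current shmem page array; it is released once the
	 * copy into the phys object succeeds, or restored on error.
	 */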
	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages))
		__i915_gem_object_set_pages(obj, pages);
	return err;
}

int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

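	/*
	 * The backing store is contiguous and rounded up to a power of
	 * two, so any alignment up to the object size comes for free.
	 */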
	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

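	/* Swapping the backing store requires the object to be unbound from all VMA. */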
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	return i915_gem_object_shmem_to_phys(obj);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif