/*
 * Copyright 2013 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alon Levy
 */

#include <linux/io-mapping.h>

#include "qxl_drv.h"
#include "qxl_object.h"
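
/*
 * qxl buffer object (BO) helpers: allocation, TTM placement selection,
 * kernel mapping, and pin/unpin handling of GEM/TTM objects.
 */
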
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
	struct qxl_bo *bo;
	struct qxl_device *qdev;

	bo = to_qxl_bo(tbo);
	qdev = to_qxl(bo->tbo.base.dev);

	qxl_surface_evict(qdev, bo, false);
	WARN_ON_ONCE(bo->map_count > 0);
	mutex_lock(&qdev->gem.mutex);
	list_del_init(&bo->list);
	mutex_unlock(&qdev->gem.mutex);
	drm_gem_object_release(&bo->tbo.base);
	kfree(bo);
}

bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
	return bo->destroy == &qxl_ttm_bo_destroy;
}

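/*
 * Build the TTM placement list for @qbo from the requested GEM domain:
 * VRAM maps to TTM_PL_VRAM, surfaces prefer the private surface region
 * (TTM_PL_PRIV) with VRAM as a fallback, and CPU means system memory.
 * Pinned BOs are marked TTM_PL_FLAG_NO_EVICT; BOs of at most one page
 * are placed top-down.
 */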
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned)
{
	u32 c = 0;
	u32 pflag = 0;
	unsigned int i;

	if (pinned)
		pflag |= TTM_PL_FLAG_NO_EVICT;
	if (qbo->tbo.base.size <= PAGE_SIZE)
		pflag |= TTM_PL_FLAG_TOPDOWN;

	qbo->placement.placement = qbo->placements;
	qbo->placement.busy_placement = qbo->placements;
	if (domain == QXL_GEM_DOMAIN_VRAM) {
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_SURFACE) {
		qbo->placements[c].mem_type = TTM_PL_PRIV;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
		qbo->placements[c].mem_type = TTM_PL_VRAM;
		qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag;
	}
	if (domain == QXL_GEM_DOMAIN_CPU) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag;
	}
	if (!c) {
		qbo->placements[c].mem_type = TTM_PL_SYSTEM;
		qbo->placements[c++].flags = TTM_PL_MASK_CACHING;
	}
	qbo->placement.num_placement = c;
	qbo->placement.num_busy_placement = c;
	for (i = 0; i < c; ++i) {
		qbo->placements[i].fpfn = 0;
		qbo->placements[i].lpfn = 0;
	}
}

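/* GEM object vtable; mmap and print_info use the shared GEM/TTM helpers. */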
static const struct drm_gem_object_funcs qxl_object_funcs = {
	.free = qxl_gem_object_free,
	.open = qxl_gem_object_open,
	.close = qxl_gem_object_close,
	.pin = qxl_gem_prime_pin,
	.unpin = qxl_gem_prime_unpin,
	.get_sg_table = qxl_gem_prime_get_sg_table,
	.vmap = qxl_gem_prime_vmap,
	.vunmap = qxl_gem_prime_vunmap,
	.mmap = drm_gem_ttm_mmap,
	.print_info = drm_gem_ttm_print_info,
};

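/*
 * Create a new BO: round @size up to a whole page, initialize the GEM
 * object and hand it to TTM with placements chosen for @domain.  On
 * success the new BO is returned in *@bo_ptr.
 */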
int qxl_bo_create(struct qxl_device *qdev,
		  unsigned long size, bool kernel, bool pinned, u32 domain,
		  struct qxl_surface *surf,
		  struct qxl_bo **bo_ptr)
{
	struct qxl_bo *bo;
	enum ttm_bo_type type;
	int r;

	if (kernel)
		type = ttm_bo_type_kernel;
	else
		type = ttm_bo_type_device;
	*bo_ptr = NULL;
	bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	size = roundup(size, PAGE_SIZE);
	r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
	if (unlikely(r)) {
		kfree(bo);
		return r;
	}
	bo->tbo.base.funcs = &qxl_object_funcs;
	bo->type = domain;
	bo->pin_count = pinned ? 1 : 0;
	bo->surface_id = 0;
	INIT_LIST_HEAD(&bo->list);

	if (surf)
		bo->surf = *surf;

	qxl_ttm_placement_from_domain(bo, domain, pinned);

	r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, !kernel, size,
			NULL, NULL, &qxl_ttm_bo_destroy);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			dev_err(qdev->ddev.dev,
				"object_init failed for (%lu, 0x%08X)\n",
				size, domain);
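		/*
		 * ttm_bo_init() calls the destroy callback on failure,
		 * so bo has already been freed via qxl_ttm_bo_destroy().
		 */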
		return r;
	}
	*bo_ptr = bo;
	return 0;
}

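/*
 * Map the whole BO into the kernel address space.  Mappings are
 * refcounted via map_count; repeated calls return the cached kptr.
 */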
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		bo->map_count++;
		return 0;
	}
	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;
	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;
	bo->map_count = 1;
	return 0;
}

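/*
 * Map one page of a BO for short-term access.  BOs in VRAM or the
 * surface region go through the device's io_mapping (atomic); other
 * placements fall back to the cached kptr or a full qxl_bo_kmap(),
 * which may sleep.
 */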
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
			      struct qxl_bo *bo, int page_offset)
{
	unsigned long offset;
	void *rptr;
	int ret;
	struct io_mapping *map;

	if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
		map = qdev->vram_mapping;
	else if (bo->tbo.mem.mem_type == TTM_PL_PRIV)
		map = qdev->surface_mapping;
	else
		goto fallback;

	offset = bo->tbo.mem.start << PAGE_SHIFT;
	return io_mapping_map_atomic_wc(map, offset + page_offset);
fallback:
	if (bo->kptr) {
		rptr = bo->kptr + (page_offset * PAGE_SIZE);
		return rptr;
	}

	ret = qxl_bo_kmap(bo, &rptr);
	if (ret)
		return NULL;

	rptr += page_offset * PAGE_SIZE;
	return rptr;
}

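/* Drop one kmap reference; the mapping is torn down when the count hits zero. */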
void qxl_bo_kunmap(struct qxl_bo *bo)
{
	if (bo->kptr == NULL)
		return;
	bo->map_count--;
	if (bo->map_count > 0)
		return;
	bo->kptr = NULL;
	ttm_bo_kunmap(&bo->kmap);
}

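/*
 * Counterpart to qxl_bo_kmap_atomic_page(): unmap the atomic io
 * mapping, or undo the kmap fallback.
 */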
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
			       struct qxl_bo *bo, void *pmap)
{
	if ((bo->tbo.mem.mem_type != TTM_PL_VRAM) &&
	    (bo->tbo.mem.mem_type != TTM_PL_PRIV))
		goto fallback;

	io_mapping_unmap_atomic(pmap);
	return;
fallback:
	qxl_bo_kunmap(bo);
}

void qxl_bo_unref(struct qxl_bo **bo)
{
	if ((*bo) == NULL)
		return;

	drm_gem_object_put(&(*bo)->tbo.base);
	*bo = NULL;
}

struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
	drm_gem_object_get(&bo->tbo.base);
	return bo;
}

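/*
 * Pin helpers: pinning revalidates the BO with TTM_PL_FLAG_NO_EVICT
 * set, and pin_count makes nested pin/unpin calls cheap.  The caller
 * must hold the BO reservation.
 */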
static int __qxl_bo_pin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r;

	if (bo->pin_count) {
		bo->pin_count++;
		return 0;
	}
	qxl_ttm_placement_from_domain(bo, bo->type, true);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (likely(r == 0))
		bo->pin_count = 1;
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p pin failed\n", bo);
	return r;
}

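/* Dropping the last pin clears NO_EVICT from the current placements. */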
static int __qxl_bo_unpin(struct qxl_bo *bo)
{
	struct ttm_operation_ctx ctx = { false, false };
	struct drm_device *ddev = bo->tbo.base.dev;
	int r, i;

	if (!bo->pin_count) {
		dev_warn(ddev->dev, "%p unpin not necessary\n", bo);
		return 0;
	}
	bo->pin_count--;
	if (bo->pin_count)
		return 0;
	for (i = 0; i < bo->placement.num_placement; i++)
		bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	if (unlikely(r != 0))
		dev_err(ddev->dev, "%p validate failed for unpin\n", bo);
	return r;
}

/*
 * Reserve the BO before pinning the object.  If the BO is already
 * reserved, call the internal version __qxl_bo_pin() directly.
 */
int qxl_bo_pin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_pin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

/*
 * Reserve the BO before unpinning the object.  If the BO is already
 * reserved, call the internal version __qxl_bo_unpin() directly.
 */
int qxl_bo_unpin(struct qxl_bo *bo)
{
	int r;

	r = qxl_bo_reserve(bo);
	if (r)
		return r;

	r = __qxl_bo_unpin(bo);
	qxl_bo_unreserve(bo);
	return r;
}

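/*
 * Called at teardown when userspace has leaked objects: log each
 * remaining BO, unlink it from the device list and drop what should be
 * the last reference.
 */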
void qxl_bo_force_delete(struct qxl_device *qdev)
{
	struct qxl_bo *bo, *n;

	if (list_empty(&qdev->gem.objects))
		return;
	dev_err(qdev->ddev.dev, "Userspace still has active objects!\n");
	list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
		dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n",
			&bo->tbo.base, bo, (unsigned long)bo->tbo.base.size,
			*((unsigned long *)&bo->tbo.base.refcount));
		mutex_lock(&qdev->gem.mutex);
		list_del_init(&bo->list);
		mutex_unlock(&qdev->gem.mutex);
		/* this should unref the ttm bo */
		drm_gem_object_put(&bo->tbo.base);
	}
}

int qxl_bo_init(struct qxl_device *qdev)
{
	return qxl_ttm_init(qdev);
}

void qxl_bo_fini(struct qxl_device *qdev)
{
	qxl_ttm_fini(qdev);
}

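/*
 * Make sure a surface BO has a surface id: on first use, allocate an
 * id and create the matching hardware surface.
 */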
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
	int ret;

	if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
		/* allocate a surface id for this surface now */
		ret = qxl_surface_id_alloc(qdev, bo);
		if (ret)
			return ret;

		ret = qxl_hw_surface_alloc(qdev, bo);
		if (ret)
			return ret;
	}
	return 0;
}

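/* Force-evict all BOs from the surface region and VRAM, respectively. */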
int qxl_surf_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV);
}

int qxl_vram_evict(struct qxl_device *qdev)
{
	return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM);
}