Lines matching defs:dobj, declarations and uses of the local variable dobj in drivers/gpu/drm/armada/armada_gem.c (the leading number on each entry is the line number in that file)
43 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
46 DRM_DEBUG_DRIVER("release obj %p\n", dobj);
48 drm_gem_free_mmap_offset(&dobj->obj);
52 if (dobj->page) {
54 unsigned int order = get_order(dobj->obj.size);
55 __free_pages(dobj->page, order);
56 } else if (dobj->linear) {
59 drm_mm_remove_node(dobj->linear);
61 kfree(dobj->linear);
62 if (dobj->addr)
63 iounmap(dobj->addr);
66 if (dobj->obj.import_attach) {
68 if (dobj->sgt)
69 dma_buf_unmap_attachment_unlocked(dobj->obj.import_attach,
70 dobj->sgt, DMA_TO_DEVICE);
71 drm_prime_gem_destroy(&dobj->obj, NULL);
74 drm_gem_object_release(&dobj->obj);
76 kfree(dobj);
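The matches above (lines 43-76) all sit in the GEM object release path. Read together they show the teardown order: drop the mmap offset, free whichever backing store the object has (anonymous pages, a linear/carveout drm_mm node, or an imported dma-buf attachment), then release the GEM core state and the wrapper itself. Below is a minimal sketch reconstructed from the matched lines; the function signature, the "_sketch" name and the locking that normally surrounds drm_mm_remove_node() are assumptions, not part of the matches, and the usual headers (drm/drm_gem.h, drm/drm_prime.h, linux/dma-buf.h) are assumed.

static void armada_gem_free_object_sketch(struct drm_gem_object *obj)
{
        struct armada_gem_object *dobj = drm_to_armada_gem(obj);

        DRM_DEBUG_DRIVER("release obj %p\n", dobj);

        drm_gem_free_mmap_offset(&dobj->obj);

        if (dobj->page) {
                /* page-backed: free the original higher-order allocation */
                __free_pages(dobj->page, get_order(dobj->obj.size));
        } else if (dobj->linear) {
                /* linear/carveout-backed: return the drm_mm node and mapping */
                drm_mm_remove_node(dobj->linear);
                kfree(dobj->linear);
                if (dobj->addr)
                        iounmap(dobj->addr);
        }

        if (dobj->obj.import_attach) {
                /* imported dma-buf: unmap before the attachment is destroyed */
                if (dobj->sgt)
                        dma_buf_unmap_attachment_unlocked(
                                dobj->obj.import_attach, dobj->sgt,
                                DMA_TO_DEVICE);
                drm_prime_gem_destroy(&dobj->obj, NULL);
        }

        drm_gem_object_release(&dobj->obj);
        kfree(dobj);
}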
181 armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
184 if (!dobj->addr && dobj->linear)
185 dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
186 return dobj->addr;
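Lines 181-186 are a lazy-mapping helper: a linear-backed object gets a write-combined kernel mapping the first time it is needed, and the result is cached in dobj->addr (which the release path at line 63 later iounmap()s). A hedged usage sketch follows; the caller context and the -ENOMEM error value are assumptions, not taken from the matched lines.

        /* hypothetical caller: map on demand, bail out if no CPU mapping exists */
        void *addr = armada_gem_map_object(dev, dobj);
        if (!addr)
                return -ENOMEM;         /* assumed error code */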
246 struct armada_gem_object *dobj;
254 dobj = armada_gem_alloc_private_object(dev, size);
255 if (dobj == NULL)
258 ret = armada_gem_linear_back(dev, dobj);
262 ret = drm_gem_handle_create(file, &dobj->obj, &handle);
269 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
271 drm_gem_object_put(&dobj->obj);
280 struct armada_gem_object *dobj;
290 dobj = armada_gem_alloc_object(dev, size);
291 if (dobj == NULL)
294 ret = drm_gem_handle_create(file, &dobj->obj, &handle);
301 DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
303 drm_gem_object_put(&dobj->obj);
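The two creation paths above share one reference pattern: lines 246-271 (the dumb-buffer path) back a private object with linear/carveout memory via armada_gem_linear_back(), while lines 280-303 use armada_gem_alloc_object() for a shmem-backed object; in both, drm_gem_handle_create() takes its own reference on the object, so the creator's reference is dropped unconditionally afterwards. A sketch of that shared pattern, assuming the error label and goto structure (not among the matched lines); size and file come from the enclosing ioctl.

        struct armada_gem_object *dobj;
        u32 handle;
        int ret;

        dobj = armada_gem_alloc_private_object(dev, size);
        if (dobj == NULL)
                return -ENOMEM;

        ret = armada_gem_linear_back(dev, dobj);        /* dumb-buffer path only */
        if (ret)
                goto err;

        /* the handle now holds its own reference on the object */
        ret = drm_gem_handle_create(file, &dobj->obj, &handle);
        if (ret == 0)
                DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n",
                                 dobj, size, handle);
 err:
        /* drop the creator's reference whether or not the handle was created */
        drm_gem_object_put(&dobj->obj);
        return ret;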
312 struct armada_gem_object *dobj;
315 dobj = armada_gem_object_lookup(file, args->handle);
316 if (dobj == NULL)
319 if (!dobj->obj.filp) {
320 drm_gem_object_put(&dobj->obj);
324 addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
326 drm_gem_object_put(&dobj->obj);
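The mmap ioctl at lines 312-326 only works for objects that have a shmem file behind them (dobj->obj.filp); page- and linear-backed objects are rejected right after the handle lookup, and the mapping itself is delegated to that backing file. A sketch of the tail of the path; the MAP_SHARED flag, the use of args->offset as the file offset, args->addr as the returned mapping, and the error codes are assumptions filled in around the matched vm_mmap() call.

        dobj = armada_gem_object_lookup(file, args->handle);
        if (dobj == NULL)
                return -ENOENT;                         /* assumed error code */

        if (!dobj->obj.filp) {
                /* no shmem backing file: cannot be mmapped this way */
                drm_gem_object_put(&dobj->obj);
                return -EINVAL;                         /* assumed error code */
        }

        addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
                       MAP_SHARED, args->offset);       /* flags/offset assumed */
        drm_gem_object_put(&dobj->obj);

        if (IS_ERR_VALUE(addr))
                return addr;

        args->addr = addr;      /* assumed: hand the mapping back to userspace */
        return 0;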
339 struct armada_gem_object *dobj;
357 dobj = armada_gem_object_lookup(file, args->handle);
358 if (dobj == NULL)
362 if (!dobj->addr)
365 if (args->offset > dobj->obj.size ||
366 args->size > dobj->obj.size - args->offset) {
367 DRM_ERROR("invalid size: object size %zu\n", dobj->obj.size);
372 if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
374 } else if (dobj->update) {
375 dobj->update(dobj->update_data);
380 drm_gem_object_put(&dobj->obj);
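The pwrite ioctl at lines 339-380 copies user data straight into the object's kernel mapping, so it only succeeds for objects that already have dobj->addr set (i.e. linear-backed objects mapped by armada_gem_map_object()). Note that the bounds check at lines 365-366 is written as two comparisons against obj.size rather than testing offset + size, which avoids integer overflow in the sum. A compact sketch of the core of the path; ptr is the user pointer taken from the ioctl arguments and the error codes are assumptions.

        if (!dobj->addr) {
                ret = -EINVAL;          /* no kernel mapping to write into */
        } else if (args->offset > dobj->obj.size ||
                   args->size > dobj->obj.size - args->offset) {
                ret = -EINVAL;          /* overflow-safe bounds check */
        } else if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
                ret = -EFAULT;
        } else {
                ret = 0;
                if (dobj->update)
                        dobj->update(dobj->update_data);        /* notify owner */
        }

        drm_gem_object_put(&dobj->obj);
        return ret;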
390 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
399 if (dobj->obj.filp) {
403 count = dobj->obj.size / PAGE_SIZE;
407 mapping = dobj->obj.filp->f_mapping;
421 } else if (dobj->page) {
426 sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);
430 } else if (dobj->linear) {
434 sg_dma_address(sgt->sgl) = dobj->dev_addr;
435 sg_dma_len(sgt->sgl) = dobj->obj.size;
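Lines 390-435 belong to the dma-buf export path (the map_dma_buf callback). The scatter-gather table is built differently per backing type: a shmem-backed object gets one entry per page pulled from its page-cache mapping, a single page-backed object gets a one-entry table pointing at that page, and a linear object has no struct page at all, so the DMA address and length are filled in by hand. A sketch of the last two branches; sgt, attach and dir come from the enclosing callback, and the sg_alloc_table()/dma_map_sgtable() calls and the unwind labels are assumptions around the matched lines.

        } else if (dobj->page) {
                /* one contiguous higher-order allocation: a single sg entry */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;

                sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

                if (dma_map_sgtable(attach->dev, sgt, dir, 0))
                        goto free_table;
        } else if (dobj->linear) {
                /* carveout memory: no struct page, fill the DMA fields directly */
                if (sg_alloc_table(sgt, 1, GFP_KERNEL))
                        goto free_sgt;

                sg_dma_address(sgt->sgl) = dobj->dev_addr;
                sg_dma_len(sgt->sgl) = dobj->obj.size;
        }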
456 struct armada_gem_object *dobj = drm_to_armada_gem(obj);
459 if (!dobj->linear)
462 if (dobj->obj.filp) {
503 struct armada_gem_object *dobj;
521 dobj = armada_gem_alloc_private_object(dev, buf->size);
522 if (!dobj) {
527 dobj->obj.import_attach = attach;
535 return &dobj->obj;
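The import path at lines 503-535 wraps a foreign dma-buf in a private GEM object but deliberately does not map it yet; only the attachment is recorded in obj.import_attach, and the actual DMA mapping is deferred to armada_gem_map_import() below. The dma_buf_attach()/get_dma_buf() calls and the cleanup on allocation failure are not among the matched lines, so they are assumptions in this sketch.

        struct dma_buf_attachment *attach;
        struct armada_gem_object *dobj;

        attach = dma_buf_attach(buf, dev->dev);         /* assumed */
        if (IS_ERR(attach))
                return ERR_CAST(attach);

        dobj = armada_gem_alloc_private_object(dev, buf->size);
        if (!dobj) {
                dma_buf_detach(buf, attach);            /* assumed cleanup */
                return ERR_PTR(-ENOMEM);
        }

        dobj->obj.import_attach = attach;
        get_dma_buf(buf);                               /* assumed: keep the dma-buf alive */

        /* mapping is deferred to armada_gem_map_import() */
        return &dobj->obj;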
538 int armada_gem_map_import(struct armada_gem_object *dobj)
542 dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
544 if (IS_ERR(dobj->sgt)) {
545 ret = PTR_ERR(dobj->sgt);
546 dobj->sgt = NULL;
550 if (dobj->sgt->nents > 1) {
554 if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
558 dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
559 dobj->mapped = true;
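armada_gem_map_import() (lines 538-559) is where that deferred mapping happens: the attachment is mapped for DMA_TO_DEVICE, and the import is only accepted if the resulting table collapses to a single entry at least as large as the object, since the display hardware needs one contiguous region; the address of that entry then becomes the object's dev_addr. A sketch of the whole helper assembled from the matched lines; the "_sketch" name and the -EINVAL return codes for the two rejection cases are assumptions, and linux/dma-buf.h plus linux/scatterlist.h are assumed included.

int armada_gem_map_import_sketch(struct armada_gem_object *dobj)
{
        int ret;

        dobj->sgt = dma_buf_map_attachment_unlocked(dobj->obj.import_attach,
                                                    DMA_TO_DEVICE);
        if (IS_ERR(dobj->sgt)) {
                ret = PTR_ERR(dobj->sgt);
                dobj->sgt = NULL;
                return ret;
        }

        if (dobj->sgt->nents > 1)
                return -EINVAL;         /* assumed: must be one contiguous chunk */

        if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size)
                return -EINVAL;         /* assumed: must cover the whole object */

        dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
        dobj->mapped = true;
        return 0;
}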