Lines Matching +full:compute +full:- +full:cb

1 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
44 * struct vmw_user_surface - User-space visible surface resource
47 * @base: The TTM base object handling user-space visibility.
58 * struct vmw_surface_offset - Backing store mip level offset info
72 * struct vmw_surface_dirty - Surface dirty-tracker
163 * struct vmw_surface_dma - SVGA3D DMA command
168 SVGA3dCopyBox cb; member
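The struct's other members are not among the matching lines; judging from how vmw_surface_dma_encode() dereferences cmd->header, cmd->body, cmd->cb and cmd->suffix below, the full layout is plausibly:

	struct vmw_surface_dma {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA body;
		SVGA3dCopyBox cb;
		SVGA3dCmdSurfaceDMASuffix suffix;
	};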
173 * struct vmw_surface_define - SVGA3D Surface Define command
181 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
190 * vmw_surface_dma_size - Compute fifo size for a dma command.
199 return srf->metadata.num_sizes * sizeof(struct vmw_surface_dma); in vmw_surface_dma_size()
204 * vmw_surface_define_size - Compute fifo size for a surface define command.
213 return sizeof(struct vmw_surface_define) + srf->metadata.num_sizes * sizeof(SVGA3dSize); in vmw_surface_define_size()
219 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
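The body of this helper did not match the query; a minimal sketch of what it plausibly computes, assuming struct vmw_surface_destroy is just a command header plus an SVGA3dCmdSurfaceDestroy body:

	return sizeof(struct vmw_surface_destroy);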
230 * vmw_surface_destroy_encode - Encode a surface_destroy command.
241 cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY; in vmw_surface_destroy_encode()
242 cmd->header.size = sizeof(cmd->body); in vmw_surface_destroy_encode()
243 cmd->body.sid = id; in vmw_surface_destroy_encode()
247 * vmw_surface_define_encode - Encode a surface_define command.
262 cmd_len = sizeof(cmd->body) + srf->metadata.num_sizes * sizeof(SVGA3dSize); in vmw_surface_define_encode()
265 cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE; in vmw_surface_define_encode()
266 cmd->header.size = cmd_len; in vmw_surface_define_encode()
267 cmd->body.sid = srf->res.id; in vmw_surface_define_encode()
269 * Downcast of surfaceFlags, which was upcast when received from user-space, in vmw_surface_define_encode()
273 cmd->body.surfaceFlags = (SVGA3dSurface1Flags)srf->metadata.flags; in vmw_surface_define_encode()
274 cmd->body.format = srf->metadata.format; in vmw_surface_define_encode()
276 cmd->body.face[i].numMipLevels = srf->metadata.mip_levels[i]; in vmw_surface_define_encode()
280 src_size = srf->metadata.sizes; in vmw_surface_define_encode()
282 for (i = 0; i < srf->metadata.num_sizes; ++i, cmd_size++, src_size++) { in vmw_surface_define_encode()
283 cmd_size->width = src_size->width; in vmw_surface_define_encode()
284 cmd_size->height = src_size->height; in vmw_surface_define_encode()
285 cmd_size->depth = src_size->depth; in vmw_surface_define_encode()
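The initialization of the cmd_size cursor is elided; it plausibly points just past the fixed-size command, where the variable-length SVGA3dSize array is written:

	cmd_size = (SVGA3dSize *) &cmd[1];	/* tail of the define command */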
290 * vmw_surface_dma_encode - Encode a surface_dma command.
306 vmw_surface_get_desc(srf->metadata.format); in vmw_surface_dma_encode()
308 for (i = 0; i < srf->metadata.num_sizes; ++i) { in vmw_surface_dma_encode()
309 SVGA3dCmdHeader *header = &cmd->header; in vmw_surface_dma_encode()
310 SVGA3dCmdSurfaceDMA *body = &cmd->body; in vmw_surface_dma_encode()
311 SVGA3dCopyBox *cb = &cmd->cb; in vmw_surface_dma_encode() local
312 SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix; in vmw_surface_dma_encode()
313 const struct vmw_surface_offset *cur_offset = &srf->offsets[i]; in vmw_surface_dma_encode()
314 const struct drm_vmw_size *cur_size = &srf->metadata.sizes[i]; in vmw_surface_dma_encode()
316 header->id = SVGA_3D_CMD_SURFACE_DMA; in vmw_surface_dma_encode()
317 header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix); in vmw_surface_dma_encode()
319 body->guest.ptr = *ptr; in vmw_surface_dma_encode()
320 body->guest.ptr.offset += cur_offset->bo_offset; in vmw_surface_dma_encode()
321 body->guest.pitch = vmw_surface_calculate_pitch(desc, cur_size); in vmw_surface_dma_encode()
322 body->host.sid = srf->res.id; in vmw_surface_dma_encode()
323 body->host.face = cur_offset->face; in vmw_surface_dma_encode()
324 body->host.mipmap = cur_offset->mip; in vmw_surface_dma_encode()
325 body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM : SVGA3D_READ_HOST_VRAM); in vmw_surface_dma_encode()
327 cb->x = 0; in vmw_surface_dma_encode()
328 cb->y = 0; in vmw_surface_dma_encode()
329 cb->z = 0; in vmw_surface_dma_encode()
330 cb->srcx = 0; in vmw_surface_dma_encode()
331 cb->srcy = 0; in vmw_surface_dma_encode()
332 cb->srcz = 0; in vmw_surface_dma_encode()
333 cb->w = cur_size->width; in vmw_surface_dma_encode()
334 cb->h = cur_size->height; in vmw_surface_dma_encode()
335 cb->d = cur_size->depth; in vmw_surface_dma_encode()
337 suffix->suffixSize = sizeof(*suffix); in vmw_surface_dma_encode()
338 suffix->maximumOffset = vmw_surface_get_image_buffer_size(desc, cur_size, in vmw_surface_dma_encode()
340 body->guest.pitch); in vmw_surface_dma_encode()
341 suffix->flags.discard = 0; in vmw_surface_dma_encode()
342 suffix->flags.unsynchronized = 0; in vmw_surface_dma_encode()
343 suffix->flags.reserved = 0; in vmw_surface_dma_encode()
350 * vmw_hw_surface_destroy - destroy a device surface
361 struct vmw_private *dev_priv = res->dev_priv; in vmw_hw_surface_destroy()
364 if (res->func->destroy == vmw_gb_surface_destroy) { in vmw_hw_surface_destroy()
369 if (res->id != -1) { in vmw_hw_surface_destroy()
375 vmw_surface_destroy_encode(res->id, cmd); in vmw_hw_surface_destroy()
384 mutex_lock(&dev_priv->cmdbuf_mutex); in vmw_hw_surface_destroy()
385 dev_priv->used_memory_size -= res->guest_memory_size; in vmw_hw_surface_destroy()
386 mutex_unlock(&dev_priv->cmdbuf_mutex); in vmw_hw_surface_destroy()
391 * vmw_legacy_srf_create - Create a device surface as part of the
398 * Returns -EBUSY if there weren't sufficient device resources to
405 struct vmw_private *dev_priv = res->dev_priv; in vmw_legacy_srf_create()
411 if (likely(res->id != -1)) in vmw_legacy_srf_create()
415 if (unlikely(dev_priv->used_memory_size + res->guest_memory_size >= in vmw_legacy_srf_create()
416 dev_priv->memory_size)) in vmw_legacy_srf_create()
417 return -EBUSY; in vmw_legacy_srf_create()
429 if (unlikely(res->id >= SVGA3D_HB_MAX_SURFACE_IDS)) { in vmw_legacy_srf_create()
430 ret = -EBUSY; in vmw_legacy_srf_create()
435 * Encode surface define commands. in vmw_legacy_srf_create()
441 ret = -ENOMEM; in vmw_legacy_srf_create()
453 dev_priv->used_memory_size += res->guest_memory_size; in vmw_legacy_srf_create()
463 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
487 struct vmw_private *dev_priv = res->dev_priv; in vmw_legacy_srf_dma()
489 BUG_ON(!val_buf->bo); in vmw_legacy_srf_dma()
493 return -ENOMEM; in vmw_legacy_srf_dma()
495 vmw_bo_get_guest_ptr(val_buf->bo, &ptr); in vmw_legacy_srf_dma()
507 vmw_bo_fence_single(val_buf->bo, fence); in vmw_legacy_srf_dma()
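The elided middle of vmw_legacy_srf_dma() presumably follows the driver's usual reserve/encode/commit pattern; a sketch, assuming the VMW_CMD_RESERVE()/vmw_cmd_commit() helpers vmwgfx uses elsewhere and a bool bind direction parameter:

	submit_size = vmw_surface_dma_size(srf);
	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(!cmd))
		return -ENOMEM;

	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);
	vmw_cmd_commit(dev_priv, submit_size);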
516 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
530 if (!res->guest_memory_dirty) in vmw_legacy_srf_bind()
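Only the early-out matches here; the remainder of the bind callback is plausibly a single DMA transfer to the device surface:

	static int vmw_legacy_srf_bind(struct vmw_resource *res,
				       struct ttm_validate_buffer *val_buf)
	{
		if (!res->guest_memory_dirty)
			return 0;

		return vmw_legacy_srf_dma(res, val_buf, true);
	}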
538 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
543 * @readback: Readback - only true if dirty
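The unbind body is fully elided; symmetric to bind above, it plausibly reads the surface contents back to guest memory only when @readback is set:

	static int vmw_legacy_srf_unbind(struct vmw_resource *res,
					 bool readback,
					 struct ttm_validate_buffer *val_buf)
	{
		if (unlikely(readback))
			return vmw_legacy_srf_dma(res, val_buf, false);
		return 0;
	}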
559 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
567 struct vmw_private *dev_priv = res->dev_priv; in vmw_legacy_srf_destroy()
571 BUG_ON(res->id == -1); in vmw_legacy_srf_destroy()
574 * Encode the surface destroy command. in vmw_legacy_srf_destroy()
580 return -ENOMEM; in vmw_legacy_srf_destroy()
582 vmw_surface_destroy_encode(res->id, cmd); in vmw_legacy_srf_destroy()
589 dev_priv->used_memory_size -= res->guest_memory_size; in vmw_legacy_srf_destroy()
603 * vmw_surface_init - initialize a struct vmw_surface
615 struct vmw_resource *res = &srf->res; in vmw_surface_init()
619 (dev_priv->has_mob) ? &vmw_gb_surface_func : in vmw_surface_init()
632 INIT_LIST_HEAD(&srf->view_list); in vmw_surface_init()
633 res->hw_destroy = vmw_hw_surface_destroy; in vmw_surface_init()
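Line 619 is the ternary inside the elided vmw_resource_init() call: one init path serves both guest-backed and legacy surfaces, selected by MOB support. A sketch, assuming vmw_resource_init()'s usual signature:

	ret = vmw_resource_init(dev_priv, res, true, res_free,
				dev_priv->has_mob ? &vmw_gb_surface_func :
						    &vmw_legacy_surface_func);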
638 * vmw_user_surface_base_to_res - TTM base object to resource converter for
644 * for the user-visible object identified by the TTM base object @base.
650 prime.base)->srf.res); in vmw_user_surface_base_to_res()
654 * vmw_user_surface_free - User visible surface resource destructor
664 WARN_ON_ONCE(res->dirty); in vmw_user_surface_free()
665 if (user_srf->master) in vmw_user_surface_free()
666 drm_master_put(&user_srf->master); in vmw_user_surface_free()
667 kfree(srf->offsets); in vmw_user_surface_free()
668 kfree(srf->metadata.sizes); in vmw_user_surface_free()
669 kfree(srf->snooper.image); in vmw_user_surface_free()
674 * vmw_user_surface_base_release - User visible surface TTM base object destructor
687 struct vmw_resource *res = &user_srf->srf.res; in vmw_user_surface_base_release()
694 * vmw_surface_destroy_ioctl - Ioctl function implementing
698 * @data: Pointer to data copied from / to user-space.
705 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_surface_destroy_ioctl()
707 return ttm_ref_object_base_unref(tfile, arg->sid); in vmw_surface_destroy_ioctl()
711 * vmw_surface_define_ioctl - Ioctl function implementing
715 * @data: Pointer to data copied from / to user-space.
729 struct drm_vmw_surface_create_req *req = &arg->req; in vmw_surface_define_ioctl()
730 struct drm_vmw_surface_arg *rep = &arg->rep; in vmw_surface_define_ioctl()
731 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_surface_define_ioctl()
742 if (req->mip_levels[i] > DRM_VMW_MAX_MIP_LEVELS) in vmw_surface_define_ioctl()
743 return -EINVAL; in vmw_surface_define_ioctl()
744 num_sizes += req->mip_levels[i]; in vmw_surface_define_ioctl()
749 return -EINVAL; in vmw_surface_define_ioctl()
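The condition guarding this -EINVAL is elided; it plausibly rejects an empty or oversized total accumulated by the mip-level loop above:

	if (num_sizes == 0 ||
	    num_sizes > DRM_VMW_MAX_SURFACE_FACES * DRM_VMW_MAX_MIP_LEVELS)
		return -EINVAL;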
751 desc = vmw_surface_get_desc(req->format); in vmw_surface_define_ioctl()
752 if (unlikely(desc->blockDesc == SVGA3DBLOCKDESC_NONE)) { in vmw_surface_define_ioctl()
754 req->format); in vmw_surface_define_ioctl()
755 return -EINVAL; in vmw_surface_define_ioctl()
760 ret = -ENOMEM; in vmw_surface_define_ioctl()
764 srf = &user_srf->srf; in vmw_surface_define_ioctl()
765 metadata = &srf->metadata; in vmw_surface_define_ioctl()
766 res = &srf->res; in vmw_surface_define_ioctl()
768 /* Driver internally stores as 64-bit flags */ in vmw_surface_define_ioctl()
769 metadata->flags = (SVGA3dSurfaceAllFlags)req->flags; in vmw_surface_define_ioctl()
770 metadata->format = req->format; in vmw_surface_define_ioctl()
771 metadata->scanout = req->scanout; in vmw_surface_define_ioctl()
773 memcpy(metadata->mip_levels, req->mip_levels, in vmw_surface_define_ioctl()
774 sizeof(metadata->mip_levels)); in vmw_surface_define_ioctl()
775 metadata->num_sizes = num_sizes; in vmw_surface_define_ioctl()
776 metadata->sizes = in vmw_surface_define_ioctl()
778 req->size_addr, in vmw_surface_define_ioctl()
779 metadata->num_sizes, sizeof(*metadata->sizes)); in vmw_surface_define_ioctl()
780 if (IS_ERR(metadata->sizes)) { in vmw_surface_define_ioctl()
781 ret = PTR_ERR(metadata->sizes); in vmw_surface_define_ioctl()
784 srf->offsets = kmalloc_array(metadata->num_sizes, sizeof(*srf->offsets), in vmw_surface_define_ioctl()
786 if (unlikely(!srf->offsets)) { in vmw_surface_define_ioctl()
787 ret = -ENOMEM; in vmw_surface_define_ioctl()
791 metadata->base_size = *srf->metadata.sizes; in vmw_surface_define_ioctl()
792 metadata->autogen_filter = SVGA3D_TEX_FILTER_NONE; in vmw_surface_define_ioctl()
793 metadata->multisample_count = 0; in vmw_surface_define_ioctl()
794 metadata->multisample_pattern = SVGA3D_MS_PATTERN_NONE; in vmw_surface_define_ioctl()
795 metadata->quality_level = SVGA3D_MS_QUALITY_NONE; in vmw_surface_define_ioctl()
798 cur_offset = srf->offsets; in vmw_surface_define_ioctl()
799 cur_size = metadata->sizes; in vmw_surface_define_ioctl()
802 for (j = 0; j < metadata->mip_levels[i]; ++j) { in vmw_surface_define_ioctl()
806 cur_offset->face = i; in vmw_surface_define_ioctl()
807 cur_offset->mip = j; in vmw_surface_define_ioctl()
808 cur_offset->bo_offset = cur_bo_offset; in vmw_surface_define_ioctl()
815 res->guest_memory_size = cur_bo_offset; in vmw_surface_define_ioctl()
816 if (metadata->scanout && in vmw_surface_define_ioctl()
817 metadata->num_sizes == 1 && in vmw_surface_define_ioctl()
818 metadata->sizes[0].width == VMW_CURSOR_SNOOP_WIDTH && in vmw_surface_define_ioctl()
819 metadata->sizes[0].height == VMW_CURSOR_SNOOP_HEIGHT && in vmw_surface_define_ioctl()
820 metadata->format == VMW_CURSOR_SNOOP_FORMAT) { in vmw_surface_define_ioctl()
825 desc->pitchBytesPerBlock; in vmw_surface_define_ioctl()
826 srf->snooper.image = kzalloc(cursor_size_bytes, GFP_KERNEL); in vmw_surface_define_ioctl()
827 if (!srf->snooper.image) { in vmw_surface_define_ioctl()
829 ret = -ENOMEM; in vmw_surface_define_ioctl()
833 srf->snooper.image = NULL; in vmw_surface_define_ioctl()
836 user_srf->prime.base.shareable = false; in vmw_surface_define_ioctl()
837 user_srf->prime.base.tfile = NULL; in vmw_surface_define_ioctl()
839 user_srf->master = drm_file_get_master(file_priv); in vmw_surface_define_ioctl()
851 * A gb-aware client referencing a shared surface will in vmw_surface_define_ioctl()
854 if (dev_priv->has_mob && req->shareable) { in vmw_surface_define_ioctl()
859 .size = res->guest_memory_size, in vmw_surface_define_ioctl()
865 &res->guest_memory_bo); in vmw_surface_define_ioctl()
872 tmp = vmw_resource_reference(&srf->res); in vmw_surface_define_ioctl()
873 ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, in vmw_surface_define_ioctl()
874 req->shareable, VMW_RES_SURFACE, in vmw_surface_define_ioctl()
883 rep->sid = user_srf->prime.base.handle; in vmw_surface_define_ioctl()
888 kfree(srf->offsets); in vmw_surface_define_ioctl()
890 kfree(metadata->sizes); in vmw_surface_define_ioctl()
905 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_surface_handle_reference()
919 ret = -EINVAL; in vmw_surface_handle_reference()
920 base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); in vmw_surface_handle_reference()
938 !file_priv->authenticated) { in vmw_surface_handle_reference()
939 ret = -EACCES; in vmw_surface_handle_reference()
948 user_srf->master != file_priv->master) in vmw_surface_handle_reference()
974 * vmw_surface_reference_ioctl - Ioctl function implementing
978 * @data: Pointer to data copied from / to user-space.
987 struct drm_vmw_surface_arg *req = &arg->req; in vmw_surface_reference_ioctl()
988 struct drm_vmw_surface_create_req *rep = &arg->rep; in vmw_surface_reference_ioctl()
989 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_surface_reference_ioctl()
996 ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, in vmw_surface_reference_ioctl()
997 req->handle_type, &base); in vmw_surface_reference_ioctl()
1002 srf = &user_srf->srf; in vmw_surface_reference_ioctl()
1005 rep->flags = (uint32_t)srf->metadata.flags; in vmw_surface_reference_ioctl()
1006 rep->format = srf->metadata.format; in vmw_surface_reference_ioctl()
1007 memcpy(rep->mip_levels, srf->metadata.mip_levels, in vmw_surface_reference_ioctl()
1008 sizeof(srf->metadata.mip_levels)); in vmw_surface_reference_ioctl()
1010 user_sizes = (struct drm_vmw_size __user *)(unsigned long)rep->size_addr; in vmw_surface_reference_ioctl()
1013 ret = copy_to_user(user_sizes, &srf->metadata.base_size, in vmw_surface_reference_ioctl()
1014 sizeof(srf->metadata.base_size)); in vmw_surface_reference_ioctl()
1017 srf->metadata.num_sizes); in vmw_surface_reference_ioctl()
1018 ttm_ref_object_base_unref(tfile, base->handle); in vmw_surface_reference_ioctl()
1019 ret = -EFAULT; in vmw_surface_reference_ioctl()
1028 * vmw_gb_surface_create - Encode a surface_define command.
1035 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_create()
1037 struct vmw_surface_metadata *metadata = &srf->metadata; in vmw_gb_surface_create()
1057 if (likely(res->id != -1)) in vmw_gb_surface_create()
1067 if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { in vmw_gb_surface_create()
1068 ret = -EBUSY; in vmw_gb_surface_create()
1072 if (has_sm5_context(dev_priv) && metadata->array_size > 0) { in vmw_gb_surface_create()
1074 cmd_len = sizeof(cmd4->body); in vmw_gb_surface_create()
1076 } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) { in vmw_gb_surface_create()
1078 cmd_len = sizeof(cmd3->body); in vmw_gb_surface_create()
1080 } else if (metadata->array_size > 0) { in vmw_gb_surface_create()
1083 cmd_len = sizeof(cmd2->body); in vmw_gb_surface_create()
1087 cmd_len = sizeof(cmd->body); in vmw_gb_surface_create()
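The cmd_id assignments paired with these cmd_len computations did not match; based on the SVGA3D command set, the selection plausibly mirrors the same ladder:

	if (has_sm5_context(dev_priv) && metadata->array_size > 0)
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V4;
	else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0)
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V3;
	else if (metadata->array_size > 0)
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2;
	else
		cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE;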
1096 ret = -ENOMEM; in vmw_gb_surface_create()
1100 if (has_sm5_context(dev_priv) && metadata->array_size > 0) { in vmw_gb_surface_create()
1101 cmd4->header.id = cmd_id; in vmw_gb_surface_create()
1102 cmd4->header.size = cmd_len; in vmw_gb_surface_create()
1103 cmd4->body.sid = srf->res.id; in vmw_gb_surface_create()
1104 cmd4->body.surfaceFlags = metadata->flags; in vmw_gb_surface_create()
1105 cmd4->body.format = metadata->format; in vmw_gb_surface_create()
1106 cmd4->body.numMipLevels = metadata->mip_levels[0]; in vmw_gb_surface_create()
1107 cmd4->body.multisampleCount = metadata->multisample_count; in vmw_gb_surface_create()
1108 cmd4->body.multisamplePattern = metadata->multisample_pattern; in vmw_gb_surface_create()
1109 cmd4->body.qualityLevel = metadata->quality_level; in vmw_gb_surface_create()
1110 cmd4->body.autogenFilter = metadata->autogen_filter; in vmw_gb_surface_create()
1111 cmd4->body.size.width = metadata->base_size.width; in vmw_gb_surface_create()
1112 cmd4->body.size.height = metadata->base_size.height; in vmw_gb_surface_create()
1113 cmd4->body.size.depth = metadata->base_size.depth; in vmw_gb_surface_create()
1114 cmd4->body.arraySize = metadata->array_size; in vmw_gb_surface_create()
1115 cmd4->body.bufferByteStride = metadata->buffer_byte_stride; in vmw_gb_surface_create()
1116 } else if (has_sm4_1_context(dev_priv) && metadata->array_size > 0) { in vmw_gb_surface_create()
1117 cmd3->header.id = cmd_id; in vmw_gb_surface_create()
1118 cmd3->header.size = cmd_len; in vmw_gb_surface_create()
1119 cmd3->body.sid = srf->res.id; in vmw_gb_surface_create()
1120 cmd3->body.surfaceFlags = metadata->flags; in vmw_gb_surface_create()
1121 cmd3->body.format = metadata->format; in vmw_gb_surface_create()
1122 cmd3->body.numMipLevels = metadata->mip_levels[0]; in vmw_gb_surface_create()
1123 cmd3->body.multisampleCount = metadata->multisample_count; in vmw_gb_surface_create()
1124 cmd3->body.multisamplePattern = metadata->multisample_pattern; in vmw_gb_surface_create()
1125 cmd3->body.qualityLevel = metadata->quality_level; in vmw_gb_surface_create()
1126 cmd3->body.autogenFilter = metadata->autogen_filter; in vmw_gb_surface_create()
1127 cmd3->body.size.width = metadata->base_size.width; in vmw_gb_surface_create()
1128 cmd3->body.size.height = metadata->base_size.height; in vmw_gb_surface_create()
1129 cmd3->body.size.depth = metadata->base_size.depth; in vmw_gb_surface_create()
1130 cmd3->body.arraySize = metadata->array_size; in vmw_gb_surface_create()
1131 } else if (metadata->array_size > 0) { in vmw_gb_surface_create()
1132 cmd2->header.id = cmd_id; in vmw_gb_surface_create()
1133 cmd2->header.size = cmd_len; in vmw_gb_surface_create()
1134 cmd2->body.sid = srf->res.id; in vmw_gb_surface_create()
1135 cmd2->body.surfaceFlags = metadata->flags; in vmw_gb_surface_create()
1136 cmd2->body.format = metadata->format; in vmw_gb_surface_create()
1137 cmd2->body.numMipLevels = metadata->mip_levels[0]; in vmw_gb_surface_create()
1138 cmd2->body.multisampleCount = metadata->multisample_count; in vmw_gb_surface_create()
1139 cmd2->body.autogenFilter = metadata->autogen_filter; in vmw_gb_surface_create()
1140 cmd2->body.size.width = metadata->base_size.width; in vmw_gb_surface_create()
1141 cmd2->body.size.height = metadata->base_size.height; in vmw_gb_surface_create()
1142 cmd2->body.size.depth = metadata->base_size.depth; in vmw_gb_surface_create()
1143 cmd2->body.arraySize = metadata->array_size; in vmw_gb_surface_create()
1145 cmd->header.id = cmd_id; in vmw_gb_surface_create()
1146 cmd->header.size = cmd_len; in vmw_gb_surface_create()
1147 cmd->body.sid = srf->res.id; in vmw_gb_surface_create()
1148 cmd->body.surfaceFlags = metadata->flags; in vmw_gb_surface_create()
1149 cmd->body.format = metadata->format; in vmw_gb_surface_create()
1150 cmd->body.numMipLevels = metadata->mip_levels[0]; in vmw_gb_surface_create()
1151 cmd->body.multisampleCount = metadata->multisample_count; in vmw_gb_surface_create()
1152 cmd->body.autogenFilter = metadata->autogen_filter; in vmw_gb_surface_create()
1153 cmd->body.size.width = metadata->base_size.width; in vmw_gb_surface_create()
1154 cmd->body.size.height = metadata->base_size.height; in vmw_gb_surface_create()
1155 cmd->body.size.depth = metadata->base_size.depth; in vmw_gb_surface_create()
1173 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_bind()
1183 struct ttm_buffer_object *bo = val_buf->bo; in vmw_gb_surface_bind()
1185 BUG_ON(bo->resource->mem_type != VMW_PL_MOB); in vmw_gb_surface_bind()
1187 submit_size = sizeof(*cmd1) + (res->guest_memory_dirty ? sizeof(*cmd2) : 0); in vmw_gb_surface_bind()
1191 return -ENOMEM; in vmw_gb_surface_bind()
1193 cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; in vmw_gb_surface_bind()
1194 cmd1->header.size = sizeof(cmd1->body); in vmw_gb_surface_bind()
1195 cmd1->body.sid = res->id; in vmw_gb_surface_bind()
1196 cmd1->body.mobid = bo->resource->start; in vmw_gb_surface_bind()
1197 if (res->guest_memory_dirty) { in vmw_gb_surface_bind()
1199 cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; in vmw_gb_surface_bind()
1200 cmd2->header.size = sizeof(cmd2->body); in vmw_gb_surface_bind()
1201 cmd2->body.sid = res->id; in vmw_gb_surface_bind()
1205 if (res->guest_memory_bo->dirty && res->guest_memory_dirty) { in vmw_gb_surface_bind()
1210 res->guest_memory_dirty = false; in vmw_gb_surface_bind()
1219 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_unbind()
1220 struct ttm_buffer_object *bo = val_buf->bo; in vmw_gb_surface_unbind()
1239 BUG_ON(bo->resource->mem_type != VMW_PL_MOB); in vmw_gb_surface_unbind()
1244 return -ENOMEM; in vmw_gb_surface_unbind()
1248 cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; in vmw_gb_surface_unbind()
1249 cmd1->header.size = sizeof(cmd1->body); in vmw_gb_surface_unbind()
1250 cmd1->body.sid = res->id; in vmw_gb_surface_unbind()
1254 cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; in vmw_gb_surface_unbind()
1255 cmd2->header.size = sizeof(cmd2->body); in vmw_gb_surface_unbind()
1256 cmd2->body.sid = res->id; in vmw_gb_surface_unbind()
1260 cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; in vmw_gb_surface_unbind()
1261 cmd3->header.size = sizeof(cmd3->body); in vmw_gb_surface_unbind()
1262 cmd3->body.sid = res->id; in vmw_gb_surface_unbind()
1263 cmd3->body.mobid = SVGA3D_INVALID_ID; in vmw_gb_surface_unbind()
1274 vmw_bo_fence_single(val_buf->bo, fence); in vmw_gb_surface_unbind()
1284 struct vmw_private *dev_priv = res->dev_priv; in vmw_gb_surface_destroy()
1291 if (likely(res->id == -1)) in vmw_gb_surface_destroy()
1294 mutex_lock(&dev_priv->binding_mutex); in vmw_gb_surface_destroy()
1295 vmw_view_surface_list_destroy(dev_priv, &srf->view_list); in vmw_gb_surface_destroy()
1296 vmw_binding_res_list_scrub(&res->binding_head); in vmw_gb_surface_destroy()
1300 mutex_unlock(&dev_priv->binding_mutex); in vmw_gb_surface_destroy()
1301 return -ENOMEM; in vmw_gb_surface_destroy()
1304 cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; in vmw_gb_surface_destroy()
1305 cmd->header.size = sizeof(cmd->body); in vmw_gb_surface_destroy()
1306 cmd->body.sid = res->id; in vmw_gb_surface_destroy()
1308 mutex_unlock(&dev_priv->binding_mutex); in vmw_gb_surface_destroy()
1316 * vmw_gb_surface_define_ioctl - Ioctl function implementing
1320 * @data: Pointer to data copied from / to user-space.
1328 struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; in vmw_gb_surface_define_ioctl()
1331 req_ext.base = arg->req; in vmw_gb_surface_define_ioctl()
1343 * vmw_gb_surface_reference_ioctl - Ioctl function implementing
1347 * @data: Pointer to data copied from / to user-space.
1355 struct drm_vmw_surface_arg *req = &arg->req; in vmw_gb_surface_reference_ioctl()
1356 struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; in vmw_gb_surface_reference_ioctl()
1365 rep->creq = rep_ext.creq.base; in vmw_gb_surface_reference_ioctl()
1366 rep->crep = rep_ext.crep; in vmw_gb_surface_reference_ioctl()
1372 * vmw_gb_surface_define_ext_ioctl - Ioctl function implementing
1376 * @data: Pointer to data copied from / to user-space.
1384 struct drm_vmw_gb_surface_create_ext_req *req = &arg->req; in vmw_gb_surface_define_ext_ioctl()
1385 struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; in vmw_gb_surface_define_ext_ioctl()
1391 * vmw_gb_surface_reference_ext_ioctl - Ioctl function implementing
1395 * @data: Pointer to data copied from / to user-space.
1403 struct drm_vmw_surface_arg *req = &arg->req; in vmw_gb_surface_reference_ext_ioctl()
1404 struct drm_vmw_gb_surface_ref_ext_rep *rep = &arg->rep; in vmw_gb_surface_reference_ext_ioctl()
1410 * vmw_gb_surface_define_internal - Ioctl function implementing
1414 * @req: Request argument from user-space.
1415 * @rep: Response argument to user-space.
1424 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_gb_surface_define_internal()
1434 SVGA3D_FLAGS_64(req->svga3d_flags_upper_32_bits, in vmw_gb_surface_define_internal()
1435 req->base.svga3d_flags); in vmw_gb_surface_define_internal()
1437 /* array_size must be zero for a non-GL3 host. */ in vmw_gb_surface_define_internal()
1438 if (req->base.array_size > 0 && !has_sm4_context(dev_priv)) { in vmw_gb_surface_define_internal()
1440 return -EINVAL; in vmw_gb_surface_define_internal()
1444 if (req->svga3d_flags_upper_32_bits != 0) in vmw_gb_surface_define_internal()
1445 ret = -EINVAL; in vmw_gb_surface_define_internal()
1447 if (req->base.multisample_count != 0) in vmw_gb_surface_define_internal()
1448 ret = -EINVAL; in vmw_gb_surface_define_internal()
1450 if (req->multisample_pattern != SVGA3D_MS_PATTERN_NONE) in vmw_gb_surface_define_internal()
1451 ret = -EINVAL; in vmw_gb_surface_define_internal()
1453 if (req->quality_level != SVGA3D_MS_QUALITY_NONE) in vmw_gb_surface_define_internal()
1454 ret = -EINVAL; in vmw_gb_surface_define_internal()
1462 if (req->buffer_byte_stride > 0 && !has_sm5_context(dev_priv)) { in vmw_gb_surface_define_internal()
1464 return -EINVAL; in vmw_gb_surface_define_internal()
1468 req->base.multisample_count == 0) { in vmw_gb_surface_define_internal()
1470 return -EINVAL; in vmw_gb_surface_define_internal()
1473 if (req->base.mip_levels > DRM_VMW_MAX_MIP_LEVELS) { in vmw_gb_surface_define_internal()
1475 return -EINVAL; in vmw_gb_surface_define_internal()
1479 metadata.format = req->base.format; in vmw_gb_surface_define_internal()
1480 metadata.mip_levels[0] = req->base.mip_levels; in vmw_gb_surface_define_internal()
1481 metadata.multisample_count = req->base.multisample_count; in vmw_gb_surface_define_internal()
1482 metadata.multisample_pattern = req->multisample_pattern; in vmw_gb_surface_define_internal()
1483 metadata.quality_level = req->quality_level; in vmw_gb_surface_define_internal()
1484 metadata.array_size = req->base.array_size; in vmw_gb_surface_define_internal()
1485 metadata.buffer_byte_stride = req->buffer_byte_stride; in vmw_gb_surface_define_internal()
1487 metadata.base_size = req->base.base_size; in vmw_gb_surface_define_internal()
1488 metadata.scanout = req->base.drm_surface_flags & in vmw_gb_surface_define_internal()
1500 user_srf->master = drm_file_get_master(file_priv); in vmw_gb_surface_define_internal()
1502 res = &user_srf->srf.res; in vmw_gb_surface_define_internal()
1504 if (req->base.buffer_handle != SVGA3D_INVALID_ID) { in vmw_gb_surface_define_internal()
1505 ret = vmw_user_bo_lookup(file_priv, req->base.buffer_handle, in vmw_gb_surface_define_internal()
1506 &res->guest_memory_bo); in vmw_gb_surface_define_internal()
1508 if (res->guest_memory_bo->tbo.base.size < res->guest_memory_size) { in vmw_gb_surface_define_internal()
1510 vmw_user_bo_unref(&res->guest_memory_bo); in vmw_gb_surface_define_internal()
1511 ret = -EINVAL; in vmw_gb_surface_define_internal()
1514 backup_handle = req->base.buffer_handle; in vmw_gb_surface_define_internal()
1517 } else if (req->base.drm_surface_flags & in vmw_gb_surface_define_internal()
1521 res->guest_memory_size, in vmw_gb_surface_define_internal()
1523 &res->guest_memory_bo); in vmw_gb_surface_define_internal()
1531 if (req->base.drm_surface_flags & drm_vmw_surface_flag_coherent) { in vmw_gb_surface_define_internal()
1532 struct vmw_bo *backup = res->guest_memory_bo; in vmw_gb_surface_define_internal()
1534 ttm_bo_reserve(&backup->tbo, false, false, NULL); in vmw_gb_surface_define_internal()
1535 if (!res->func->dirty_alloc) in vmw_gb_surface_define_internal()
1536 ret = -EINVAL; in vmw_gb_surface_define_internal()
1540 res->coherent = true; in vmw_gb_surface_define_internal()
1541 ret = res->func->dirty_alloc(res); in vmw_gb_surface_define_internal()
1543 ttm_bo_unreserve(&backup->tbo); in vmw_gb_surface_define_internal()
1552 ret = ttm_prime_object_init(tfile, res->guest_memory_size, &user_srf->prime, in vmw_gb_surface_define_internal()
1553 req->base.drm_surface_flags & in vmw_gb_surface_define_internal()
1564 rep->handle = user_srf->prime.base.handle; in vmw_gb_surface_define_internal()
1565 rep->backup_size = res->guest_memory_size; in vmw_gb_surface_define_internal()
1566 if (res->guest_memory_bo) { in vmw_gb_surface_define_internal()
1567 rep->buffer_map_handle = in vmw_gb_surface_define_internal()
1568 drm_vma_node_offset_addr(&res->guest_memory_bo->tbo.base.vma_node); in vmw_gb_surface_define_internal()
1569 rep->buffer_size = res->guest_memory_bo->tbo.base.size; in vmw_gb_surface_define_internal()
1570 rep->buffer_handle = backup_handle; in vmw_gb_surface_define_internal()
1572 rep->buffer_map_handle = 0; in vmw_gb_surface_define_internal()
1573 rep->buffer_size = 0; in vmw_gb_surface_define_internal()
1574 rep->buffer_handle = SVGA3D_INVALID_ID; in vmw_gb_surface_define_internal()
1583 * vmw_gb_surface_reference_internal - Ioctl function implementing
1587 * @req: Pointer to user-space request surface arg.
1588 * @rep: Pointer to response to user-space.
1605 ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, in vmw_gb_surface_reference_internal()
1606 req->handle_type, &base); in vmw_gb_surface_reference_internal()
1611 srf = &user_srf->srf; in vmw_gb_surface_reference_internal()
1612 if (!srf->res.guest_memory_bo) { in vmw_gb_surface_reference_internal()
1616 metadata = &srf->metadata; in vmw_gb_surface_reference_internal()
1618 mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ in vmw_gb_surface_reference_internal()
1619 ret = drm_gem_handle_create(file_priv, &srf->res.guest_memory_bo->tbo.base, in vmw_gb_surface_reference_internal()
1621 mutex_unlock(&dev_priv->cmdbuf_mutex); in vmw_gb_surface_reference_internal()
1624 req->sid); in vmw_gb_surface_reference_internal()
1628 rep->creq.base.svga3d_flags = SVGA3D_FLAGS_LOWER_32(metadata->flags); in vmw_gb_surface_reference_internal()
1629 rep->creq.base.format = metadata->format; in vmw_gb_surface_reference_internal()
1630 rep->creq.base.mip_levels = metadata->mip_levels[0]; in vmw_gb_surface_reference_internal()
1631 rep->creq.base.drm_surface_flags = 0; in vmw_gb_surface_reference_internal()
1632 rep->creq.base.multisample_count = metadata->multisample_count; in vmw_gb_surface_reference_internal()
1633 rep->creq.base.autogen_filter = metadata->autogen_filter; in vmw_gb_surface_reference_internal()
1634 rep->creq.base.array_size = metadata->array_size; in vmw_gb_surface_reference_internal()
1635 rep->creq.base.buffer_handle = backup_handle; in vmw_gb_surface_reference_internal()
1636 rep->creq.base.base_size = metadata->base_size; in vmw_gb_surface_reference_internal()
1637 rep->crep.handle = user_srf->prime.base.handle; in vmw_gb_surface_reference_internal()
1638 rep->crep.backup_size = srf->res.guest_memory_size; in vmw_gb_surface_reference_internal()
1639 rep->crep.buffer_handle = backup_handle; in vmw_gb_surface_reference_internal()
1640 rep->crep.buffer_map_handle = in vmw_gb_surface_reference_internal()
1641 drm_vma_node_offset_addr(&srf->res.guest_memory_bo->tbo.base.vma_node); in vmw_gb_surface_reference_internal()
1642 rep->crep.buffer_size = srf->res.guest_memory_bo->tbo.base.size; in vmw_gb_surface_reference_internal()
1644 rep->creq.version = drm_vmw_gb_surface_v1; in vmw_gb_surface_reference_internal()
1645 rep->creq.svga3d_flags_upper_32_bits = in vmw_gb_surface_reference_internal()
1646 SVGA3D_FLAGS_UPPER_32(metadata->flags); in vmw_gb_surface_reference_internal()
1647 rep->creq.multisample_pattern = metadata->multisample_pattern; in vmw_gb_surface_reference_internal()
1648 rep->creq.quality_level = metadata->quality_level; in vmw_gb_surface_reference_internal()
1649 rep->creq.must_be_zero = 0; in vmw_gb_surface_reference_internal()
1658 * vmw_subres_dirty_add - Add a dirty region to a subresource
1673 const struct vmw_surface_cache *cache = &dirty->cache; in vmw_subres_dirty_add()
1674 SVGA3dBox *box = &dirty->boxes[loc_start->sub_resource]; in vmw_subres_dirty_add()
1675 u32 mip = loc_start->sub_resource % cache->num_mip_levels; in vmw_subres_dirty_add()
1676 const struct drm_vmw_size *size = &cache->mip[mip].size; in vmw_subres_dirty_add()
1677 u32 box_c2 = box->z + box->d; in vmw_subres_dirty_add()
1679 if (WARN_ON(loc_start->sub_resource >= dirty->num_subres)) in vmw_subres_dirty_add()
1682 if (box->d == 0 || box->z > loc_start->z) in vmw_subres_dirty_add()
1683 box->z = loc_start->z; in vmw_subres_dirty_add()
1684 if (box_c2 < loc_end->z) in vmw_subres_dirty_add()
1685 box->d = loc_end->z - box->z; in vmw_subres_dirty_add()
1687 if (loc_start->z + 1 == loc_end->z) { in vmw_subres_dirty_add()
1688 box_c2 = box->y + box->h; in vmw_subres_dirty_add()
1689 if (box->h == 0 || box->y > loc_start->y) in vmw_subres_dirty_add()
1690 box->y = loc_start->y; in vmw_subres_dirty_add()
1691 if (box_c2 < loc_end->y) in vmw_subres_dirty_add()
1692 box->h = loc_end->y - box->y; in vmw_subres_dirty_add()
1694 if (loc_start->y + 1 == loc_end->y) { in vmw_subres_dirty_add()
1695 box_c2 = box->x + box->w; in vmw_subres_dirty_add()
1696 if (box->w == 0 || box->x > loc_start->x) in vmw_subres_dirty_add()
1697 box->x = loc_start->x; in vmw_subres_dirty_add()
1698 if (box_c2 < loc_end->x) in vmw_subres_dirty_add()
1699 box->w = loc_end->x - box->x; in vmw_subres_dirty_add()
1701 box->x = 0; in vmw_subres_dirty_add()
1702 box->w = size->width; in vmw_subres_dirty_add()
1705 box->y = 0; in vmw_subres_dirty_add()
1706 box->h = size->height; in vmw_subres_dirty_add()
1707 box->x = 0; in vmw_subres_dirty_add()
1708 box->w = size->width; in vmw_subres_dirty_add()
1713 * vmw_subres_dirty_full - Mark a full subresource as dirty
1719 const struct vmw_surface_cache *cache = &dirty->cache; in vmw_subres_dirty_full()
1720 u32 mip = subres % cache->num_mip_levels; in vmw_subres_dirty_full()
1721 const struct drm_vmw_size *size = &cache->mip[mip].size; in vmw_subres_dirty_full()
1722 SVGA3dBox *box = &dirty->boxes[subres]; in vmw_subres_dirty_full()
1724 box->x = 0; in vmw_subres_dirty_full()
1725 box->y = 0; in vmw_subres_dirty_full()
1726 box->z = 0; in vmw_subres_dirty_full()
1727 box->w = size->width; in vmw_subres_dirty_full()
1728 box->h = size->height; in vmw_subres_dirty_full()
1729 box->d = size->depth; in vmw_subres_dirty_full()
1733 * vmw_surface_tex_dirty_range_add - The dirty_range_add callback for texture
1740 (struct vmw_surface_dirty *) res->dirty; in vmw_surface_tex_dirty_range_add()
1741 size_t backup_end = res->guest_memory_offset + res->guest_memory_size; in vmw_surface_tex_dirty_range_add()
1745 start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset; in vmw_surface_tex_dirty_range_add()
1746 end = min(end, backup_end) - res->guest_memory_offset; in vmw_surface_tex_dirty_range_add()
1747 cache = &dirty->cache; in vmw_surface_tex_dirty_range_add()
1749 vmw_surface_get_loc(cache, &loc2, end - 1); in vmw_surface_tex_dirty_range_add()
1757 * fashion, compute the dirty region for each sheet and the in vmw_surface_tex_dirty_range_add()
1761 for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res) in vmw_surface_tex_dirty_range_add()
1766 /* Dirty range covers a single sub-resource */ in vmw_surface_tex_dirty_range_add()
1769 /* Dirty range covers multiple sub-resources */ in vmw_surface_tex_dirty_range_add()
1775 vmw_surface_min_loc(cache, loc2.sub_resource - 1, &loc_min); in vmw_surface_tex_dirty_range_add()
1778 sub_res < loc2.sub_resource - 1; ++sub_res) in vmw_surface_tex_dirty_range_add()
1784 * vmw_surface_buf_dirty_range_add - The dirty_range_add callback for buffer
1791 (struct vmw_surface_dirty *) res->dirty; in vmw_surface_buf_dirty_range_add()
1792 const struct vmw_surface_cache *cache = &dirty->cache; in vmw_surface_buf_dirty_range_add()
1793 size_t backup_end = res->guest_memory_offset + cache->mip_chain_bytes; in vmw_surface_buf_dirty_range_add()
1794 SVGA3dBox *box = &dirty->boxes[0]; in vmw_surface_buf_dirty_range_add()
1797 box->h = box->d = 1; in vmw_surface_buf_dirty_range_add()
1798 start = max_t(size_t, start, res->guest_memory_offset) - res->guest_memory_offset; in vmw_surface_buf_dirty_range_add()
1799 end = min(end, backup_end) - res->guest_memory_offset; in vmw_surface_buf_dirty_range_add()
1800 box_c2 = box->x + box->w; in vmw_surface_buf_dirty_range_add()
1801 if (box->w == 0 || box->x > start) in vmw_surface_buf_dirty_range_add()
1802 box->x = start; in vmw_surface_buf_dirty_range_add()
1804 box->w = end - box->x; in vmw_surface_buf_dirty_range_add()
1808 * vmw_surface_dirty_range_add - The dirty_range_add callback for surfaces
1815 if (WARN_ON(end <= res->guest_memory_offset || in vmw_surface_dirty_range_add()
1816 start >= res->guest_memory_offset + res->guest_memory_size)) in vmw_surface_dirty_range_add()
1819 if (srf->metadata.format == SVGA3D_BUFFER) in vmw_surface_dirty_range_add()
1826 * vmw_surface_dirty_sync - The surface's dirty_sync callback.
1830 struct vmw_private *dev_priv = res->dev_priv; in vmw_surface_dirty_sync()
1833 (struct vmw_surface_dirty *) res->dirty; in vmw_surface_dirty_sync()
1835 const struct vmw_surface_cache *cache = &dirty->cache; in vmw_surface_dirty_sync()
1847 for (i = 0; i < dirty->num_subres; ++i) { in vmw_surface_dirty_sync()
1848 const SVGA3dBox *box = &dirty->boxes[i]; in vmw_surface_dirty_sync()
1850 if (box->d) in vmw_surface_dirty_sync()
1860 return -ENOMEM; in vmw_surface_dirty_sync()
1865 for (i = 0; i < dirty->num_subres; ++i) { in vmw_surface_dirty_sync()
1866 const SVGA3dBox *box = &dirty->boxes[i]; in vmw_surface_dirty_sync()
1868 if (!box->d) in vmw_surface_dirty_sync()
1876 cmd1->header.id = SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE; in vmw_surface_dirty_sync()
1877 cmd1->header.size = sizeof(cmd1->body); in vmw_surface_dirty_sync()
1878 cmd1->body.sid = res->id; in vmw_surface_dirty_sync()
1879 cmd1->body.subResource = i; in vmw_surface_dirty_sync()
1880 cmd1->body.box = *box; in vmw_surface_dirty_sync()
1883 cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE; in vmw_surface_dirty_sync()
1884 cmd2->header.size = sizeof(cmd2->body); in vmw_surface_dirty_sync()
1885 cmd2->body.image.sid = res->id; in vmw_surface_dirty_sync()
1886 cmd2->body.image.face = i / cache->num_mip_levels; in vmw_surface_dirty_sync()
1887 cmd2->body.image.mipmap = i - in vmw_surface_dirty_sync()
1888 (cache->num_mip_levels * cmd2->body.image.face); in vmw_surface_dirty_sync()
1889 cmd2->body.box = *box; in vmw_surface_dirty_sync()
1896 memset(&dirty->boxes[0], 0, sizeof(dirty->boxes[0]) * in vmw_surface_dirty_sync()
1897 dirty->num_subres); in vmw_surface_dirty_sync()
1903 * vmw_surface_dirty_alloc - The surface's dirty_alloc callback.
1908 const struct vmw_surface_metadata *metadata = &srf->metadata; in vmw_surface_dirty_alloc()
1917 if (metadata->array_size) in vmw_surface_dirty_alloc()
1918 num_layers = metadata->array_size; in vmw_surface_dirty_alloc()
1919 else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP) in vmw_surface_dirty_alloc()
1922 num_mip = metadata->mip_levels[0]; in vmw_surface_dirty_alloc()
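The subresource count and the tracker allocation are elided; with one SVGA3dBox per layer/mip pair, the structure is plausibly sized through a flexible boxes[] array:

	num_subres = num_layers * num_mip;
	dirty = kvzalloc(struct_size(dirty, boxes, num_subres), GFP_KERNEL);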
1931 ret = -ENOMEM; in vmw_surface_dirty_alloc()
1935 num_samples = max_t(u32, 1, metadata->multisample_count); in vmw_surface_dirty_alloc()
1936 ret = vmw_surface_setup_cache(&metadata->base_size, metadata->format, in vmw_surface_dirty_alloc()
1938 &dirty->cache); in vmw_surface_dirty_alloc()
1942 dirty->num_subres = num_subres; in vmw_surface_dirty_alloc()
1943 res->dirty = (struct vmw_resource_dirty *) dirty; in vmw_surface_dirty_alloc()
1954 * vmw_surface_dirty_free - The surface's dirty_free callback
1959 (struct vmw_surface_dirty *) res->dirty; in vmw_surface_dirty_free()
1962 res->dirty = NULL; in vmw_surface_dirty_free()
1966 * vmw_surface_clean - The surface's clean callback
1970 struct vmw_private *dev_priv = res->dev_priv; in vmw_surface_clean()
1980 return -ENOMEM; in vmw_surface_clean()
1982 cmd->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; in vmw_surface_clean()
1983 cmd->header.size = sizeof(cmd->body); in vmw_surface_clean()
1984 cmd->body.sid = res->id; in vmw_surface_clean()
1991 * vmw_gb_surface_define - Define a private GB surface
2015 if (req->scanout) { in vmw_gb_surface_define()
2016 if (!vmw_surface_is_screen_target_format(req->format)) { in vmw_gb_surface_define()
2018 return -EINVAL; in vmw_gb_surface_define()
2021 if (req->base_size.width > dev_priv->texture_max_width || in vmw_gb_surface_define()
2022 req->base_size.height > dev_priv->texture_max_height) { in vmw_gb_surface_define()
2024 req->base_size.width, in vmw_gb_surface_define()
2025 req->base_size.height, in vmw_gb_surface_define()
2026 dev_priv->texture_max_width, in vmw_gb_surface_define()
2027 dev_priv->texture_max_height); in vmw_gb_surface_define()
2028 return -EINVAL; in vmw_gb_surface_define()
2032 vmw_surface_get_desc(req->format); in vmw_gb_surface_define()
2034 if (desc->blockDesc == SVGA3DBLOCKDESC_NONE) { in vmw_gb_surface_define()
2036 return -EINVAL; in vmw_gb_surface_define()
2040 if (req->autogen_filter != SVGA3D_TEX_FILTER_NONE) in vmw_gb_surface_define()
2041 return -EINVAL; in vmw_gb_surface_define()
2043 if (req->num_sizes != 1) in vmw_gb_surface_define()
2044 return -EINVAL; in vmw_gb_surface_define()
2046 if (req->sizes != NULL) in vmw_gb_surface_define()
2047 return -EINVAL; in vmw_gb_surface_define()
2051 ret = -ENOMEM; in vmw_gb_surface_define()
2055 *srf_out = &user_srf->srf; in vmw_gb_surface_define()
2056 user_srf->prime.base.shareable = false; in vmw_gb_surface_define()
2057 user_srf->prime.base.tfile = NULL; in vmw_gb_surface_define()
2059 srf = &user_srf->srf; in vmw_gb_surface_define()
2060 srf->metadata = *req; in vmw_gb_surface_define()
2061 srf->offsets = NULL; in vmw_gb_surface_define()
2063 metadata = &srf->metadata; in vmw_gb_surface_define()
2065 if (metadata->array_size) in vmw_gb_surface_define()
2066 num_layers = req->array_size; in vmw_gb_surface_define()
2067 else if (metadata->flags & SVGA3D_SURFACE_CUBEMAP) in vmw_gb_surface_define()
2070 if (metadata->flags & SVGA3D_SURFACE_MULTISAMPLE) in vmw_gb_surface_define()
2071 sample_count = metadata->multisample_count; in vmw_gb_surface_define()
2073 srf->res.guest_memory_size = in vmw_gb_surface_define()
2075 metadata->format, in vmw_gb_surface_define()
2076 metadata->base_size, in vmw_gb_surface_define()
2077 metadata->mip_levels[0], in vmw_gb_surface_define()
2081 if (metadata->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) in vmw_gb_surface_define()
2082 srf->res.guest_memory_size += sizeof(SVGA3dDXSOState); in vmw_gb_surface_define()
2087 * to support creation of a big framebuffer requested by some user-space in vmw_gb_surface_define()
2092 if (dev_priv->active_display_unit == vmw_du_screen_target && in vmw_gb_surface_define()
2093 metadata->scanout && in vmw_gb_surface_define()
2094 metadata->base_size.width <= dev_priv->stdu_max_width && in vmw_gb_surface_define()
2095 metadata->base_size.height <= dev_priv->stdu_max_height) in vmw_gb_surface_define()
2096 metadata->flags |= SVGA3D_SURFACE_SCREENTARGET; in vmw_gb_surface_define()