// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/sync_file.h>
#include <linux/hashtable.h>
#include <linux/vmalloc.h>

/*
 * Helper macro to get dx_ctx_node if available; otherwise print an error
 * message. This is for use in command verifier functions where, if
 * dx_ctx_node is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context) \
({ \
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node; \
	}); \
})
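/*
 * Illustrative use, mirroring the pattern of the DX command verifiers later
 * in this file: the macro either yields the context node or logs a debug
 * message and yields NULL, which the caller must check.
 *
 *	struct vmw_ctx_validation_info *ctx_node =
 *		VMW_GET_CTX_NODE(sw_context);
 *
 *	if (!ctx_node)
 *		return -EINVAL;
 */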

#define VMW_DECLARE_CMD_VAR(__var, __type) \
	struct { \
		SVGA3dCmdHeader header; \
		__type body; \
	} __var
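/*
 * Illustrative use, as in the verifier functions below: declare a pointer
 * "cmd" whose pointed-to layout matches the command stream entry (a generic
 * header followed by the typed body), then recover it from the header.
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 */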

/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non-ref-counted pointer to the buffer object
 * @mob_loc: Pointer to the location of the mob id to be modified
 * @location: Pointer to the location of the guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_bo *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset, in bytes, into the command buffer where the id that needs
 * fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	u32 offset:29;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled iff guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable) \
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow), \
				       (_gb_disable), (_gb_enable), #_cmd}

static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - Calculate the extra size for a resource validation
 * node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node, typically the data structures
 * associated with the binding manager.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none = 0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: Specifies whether to use the context or not.
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;

	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 on success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR_OR_NULL(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_bo *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob) {
			vmw_bo_placement_set(dx_query_mob,
					     VMW_BO_DOMAIN_MOB,
					     VMW_BO_DOMAIN_MOB);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob);
		}
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

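/*
 * vmw_cmd_invalid - Command verifier for commands that are not allowed in
 * this submission path; unconditionally rejects the command with -EINVAL.
 */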
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

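/*
 * vmw_cmd_ok - Command verifier for commands that need no checking beyond
 * the command-table lookup itself; always accepts the command.
 */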
static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_bo *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_bo *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->tbo.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command
 * submission context's resource cache, and hence must be the last resource
 * of that type to have been processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

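/*
 * vmw_cmd_set_render_target_check - Verifier for the legacy set-render-target
 * command. Checks that the render target type is in range, validates the
 * context and the target surface, and, when guest-backed objects (MOBs) are
 * in use, registers the render-target binding on the context's staged
 * binding state.
 */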
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

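/*
 * vmw_cmd_surface_copy_check - Verifier for surface-copy commands. Validates
 * the source surface without dirtying it, and the destination surface as
 * dirtied.
 */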
static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

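/*
 * vmw_cmd_buffer_copy_check - Verifier for DX buffer-copy commands. Validates
 * the source and destination surfaces, dirtying only the destination.
 */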
static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

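/*
 * vmw_cmd_pred_copy_check - Verifier for DX predicated copy-region commands.
 * Validates the source surface without dirtying it, and the destination
 * surface as dirtied.
 */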
static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

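/*
 * vmw_cmd_stretch_blt_check - Verifier for surface stretch-blit commands.
 * Validates the source surface without dirtying it, and the destination
 * surface as dirtied.
 */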
static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

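/*
 * vmw_cmd_blt_surf_screen_check - Verifier for surface-to-screen blit
 * commands. Only the source image surface needs validation as a resource;
 * the blit destination is a screen rather than a surface.
 */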
static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

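/*
 * vmw_cmd_present_check - Verifier for present commands. Validates the
 * surface to be presented without dirtying it.
 */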
static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_bo *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->tbo.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo, *tmp_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return ret;
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	tmp_bo = vmw_bo;
	vmw_user_bo_unref(&tmp_bo);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	if (IS_ERR_OR_NULL(cotable_res))
		return cotable_res ? PTR_ERR(cotable_res) : -EINVAL;
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_bo *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;
	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

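/*
 * vmw_cmd_dma - Verifier for surface DMA commands. Checks that the DMA
 * suffix is well-formed, translates the guest buffer handle, clamps the
 * transfer so it cannot cross the buffer-object boundary, validates the host
 * surface (dirtying it only for writes to host VRAM), and lets the cursor
 * code snoop the DMA for cursor updates.
 */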
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stay in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->tbo.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->tbo, header);

	return 0;
}

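/*
 * vmw_cmd_draw - Verifier for draw-primitives commands. Checks the context,
 * bounds-checks the vertex declaration and primitive range counts against
 * the command size, and validates every vertex and index surface referenced
 * by the command.
 */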
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

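/*
 * vmw_cmd_tex_state - Verifier for set-texture-state commands. Validates the
 * context, then, for each SVGA3D_TS_BIND_TEXTURE state in the command,
 * checks the texture stage index and the bound surface, registering the
 * texture binding when guest-backed objects (MOBs) are in use.
 */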
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
		((unsigned long) header + header->size + sizeof(*header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

1661 static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
1662 struct vmw_sw_context *sw_context,
1663 void *buf)
1664 {
1665 struct vmw_bo *vmw_bo;
1666
1667 struct {
1668 uint32_t header;
1669 SVGAFifoCmdDefineGMRFB body;
1670 } *cmd = buf;
1671
1672 return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
1673 &vmw_bo);
1674 }
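
/*
 * A minimal sketch of the legacy (non-3D) FIFO command framing assumed
 * by the struct above: unlike 3D commands, which carry a full
 * SVGA3dCmdHeader with an explicit size, a 2D command is a bare 32-bit
 * command id immediately followed by a fixed-size body:
 *
 *	uint32_t id;                 // e.g. SVGA_CMD_DEFINE_GMRFB
 *	SVGAFifoCmdDefineGMRFB body; // fixed-size payload, no size field
 *
 * Since there is no size field, the length must be inferred from the
 * id, which is what vmw_cmd_check_not_3d() does further below.
 */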
1675
1676 /**
1677 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
1678 * switching
1679 *
1680 * @dev_priv: Pointer to a device private struct.
1681 * @sw_context: The software context being used for this batch.
1682 * @res: Pointer to the resource.
1683 * @buf_id: Pointer to the user-space backup buffer handle in the command
1684 * stream.
1685 * @backup_offset: Offset of backup into MOB.
1686 *
1687 * This function prepares for registering a switch of backup buffers in the
1688 * resource metadata just prior to unreserving. The resource must already
1689 * have been added to the validation context, or this function fails.
1690 */
1691 static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
1692 struct vmw_sw_context *sw_context,
1693 struct vmw_resource *res, uint32_t *buf_id,
1694 unsigned long backup_offset)
1695 {
1696 struct vmw_bo *vbo;
1697 void *info;
1698 int ret;
1699
1700 info = vmw_execbuf_info_from_res(sw_context, res);
1701 if (!info)
1702 return -EINVAL;
1703
1704 ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
1705 if (ret)
1706 return ret;
1707
1708 vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
1709 backup_offset);
1710 return 0;
1711 }
1712
1713 /**
1714 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
1715 *
1716 * @dev_priv: Pointer to a device private struct.
1717 * @sw_context: The software context being used for this batch.
1718 * @res_type: The resource type.
1719 * @converter: Information about user-space binding for this resource type.
1720 * @res_id: Pointer to the user-space resource handle in the command stream.
1721 * @buf_id: Pointer to the user-space backup buffer handle in the command
1722 * stream.
1723 * @backup_offset: Offset of backup into MOB.
1724 *
1725 * This function prepares for registering a switch of backup buffers in the
1726 * resource metadata just prior to unreserving. It's basically a wrapper around
1727 * vmw_cmd_res_switch_backup with a different interface.
1728 */
1729 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
1730 struct vmw_sw_context *sw_context,
1731 enum vmw_res_type res_type,
1732 const struct vmw_user_resource_conv
1733 *converter, uint32_t *res_id, uint32_t *buf_id,
1734 unsigned long backup_offset)
1735 {
1736 struct vmw_resource *res;
1737 int ret;
1738
1739 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
1740 VMW_RES_DIRTY_NONE, converter, res_id, &res);
1741 if (ret)
1742 return ret;
1743
1744 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
1745 backup_offset);
1746 }
1747
1748 /**
1749 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
1750 *
1751 * @dev_priv: Pointer to a device private struct.
1752 * @sw_context: The software context being used for this batch.
1753 * @header: Pointer to the command header in the command stream.
1754 */
1755 static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
1756 struct vmw_sw_context *sw_context,
1757 SVGA3dCmdHeader *header)
1758 {
1759 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
1760 container_of(header, typeof(*cmd), header);
1761
1762 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
1763 user_surface_converter, &cmd->body.sid,
1764 &cmd->body.mobid, 0);
1765 }
1766
1767 /**
1768 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
1769 *
1770 * @dev_priv: Pointer to a device private struct.
1771 * @sw_context: The software context being used for this batch.
1772 * @header: Pointer to the command header in the command stream.
1773 */
1774 static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
1775 struct vmw_sw_context *sw_context,
1776 SVGA3dCmdHeader *header)
1777 {
1778 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
1779 container_of(header, typeof(*cmd), header);
1780
1781 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1782 VMW_RES_DIRTY_NONE, user_surface_converter,
1783 &cmd->body.image.sid, NULL);
1784 }
1785
1786 /**
1787 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
1788 *
1789 * @dev_priv: Pointer to a device private struct.
1790 * @sw_context: The software context being used for this batch.
1791 * @header: Pointer to the command header in the command stream.
1792 */
1793 static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
1794 struct vmw_sw_context *sw_context,
1795 SVGA3dCmdHeader *header)
1796 {
1797 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
1798 container_of(header, typeof(*cmd), header);
1799
1800 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1801 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1802 &cmd->body.sid, NULL);
1803 }
1804
1805 /**
1806 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
1807 *
1808 * @dev_priv: Pointer to a device private struct.
1809 * @sw_context: The software context being used for this batch.
1810 * @header: Pointer to the command header in the command stream.
1811 */
1812 static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
1813 struct vmw_sw_context *sw_context,
1814 SVGA3dCmdHeader *header)
1815 {
1816 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
1817 container_of(header, typeof(*cmd), header);
1818
1819 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1820 VMW_RES_DIRTY_NONE, user_surface_converter,
1821 &cmd->body.image.sid, NULL);
1822 }
1823
1824 /**
1825 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
1826 * command
1827 *
1828 * @dev_priv: Pointer to a device private struct.
1829 * @sw_context: The software context being used for this batch.
1830 * @header: Pointer to the command header in the command stream.
1831 */
1832 static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
1833 struct vmw_sw_context *sw_context,
1834 SVGA3dCmdHeader *header)
1835 {
1836 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
1837 container_of(header, typeof(*cmd), header);
1838
1839 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1840 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1841 &cmd->body.sid, NULL);
1842 }
1843
1844 /**
1845 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
1846 * command
1847 *
1848 * @dev_priv: Pointer to a device private struct.
1849 * @sw_context: The software context being used for this batch.
1850 * @header: Pointer to the command header in the command stream.
1851 */
1852 static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
1853 struct vmw_sw_context *sw_context,
1854 SVGA3dCmdHeader *header)
1855 {
1856 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
1857 container_of(header, typeof(*cmd), header);
1858
1859 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1860 VMW_RES_DIRTY_NONE, user_surface_converter,
1861 &cmd->body.image.sid, NULL);
1862 }
1863
1864 /**
1865 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
1866 * command
1867 *
1868 * @dev_priv: Pointer to a device private struct.
1869 * @sw_context: The software context being used for this batch.
1870 * @header: Pointer to the command header in the command stream.
1871 */
1872 static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
1873 struct vmw_sw_context *sw_context,
1874 SVGA3dCmdHeader *header)
1875 {
1876 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
1877 container_of(header, typeof(*cmd), header);
1878
1879 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
1880 VMW_RES_DIRTY_CLEAR, user_surface_converter,
1881 &cmd->body.sid, NULL);
1882 }
1883
1884 /**
1885 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
1886 *
1887 * @dev_priv: Pointer to a device private struct.
1888 * @sw_context: The software context being used for this batch.
1889 * @header: Pointer to the command header in the command stream.
1890 */
1891 static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
1892 struct vmw_sw_context *sw_context,
1893 SVGA3dCmdHeader *header)
1894 {
1895 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
1896 int ret;
1897 size_t size;
1898 struct vmw_resource *ctx;
1899
1900 cmd = container_of(header, typeof(*cmd), header);
1901
1902 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1903 VMW_RES_DIRTY_SET, user_context_converter,
1904 &cmd->body.cid, &ctx);
1905 if (unlikely(ret != 0))
1906 return ret;
1907
1908 if (unlikely(!dev_priv->has_mob))
1909 return 0;
1910
1911 size = cmd->header.size - sizeof(cmd->body);
1912 ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
1913 cmd->body.shid, cmd + 1, cmd->body.type,
1914 size, &sw_context->staged_cmd_res);
1915 if (unlikely(ret != 0))
1916 return ret;
1917
1918 return vmw_resource_relocation_add(sw_context, NULL,
1919 vmw_ptr_diff(sw_context->buf_start,
1920 &cmd->header.id),
1921 vmw_res_rel_nop);
1922 }
1923
1924 /**
1925 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
1926 *
1927 * @dev_priv: Pointer to a device private struct.
1928 * @sw_context: The software context being used for this batch.
1929 * @header: Pointer to the command header in the command stream.
1930 */
1931 static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
1932 struct vmw_sw_context *sw_context,
1933 SVGA3dCmdHeader *header)
1934 {
1935 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
1936 int ret;
1937 struct vmw_resource *ctx;
1938
1939 cmd = container_of(header, typeof(*cmd), header);
1940
1941 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1942 VMW_RES_DIRTY_SET, user_context_converter,
1943 &cmd->body.cid, &ctx);
1944 if (unlikely(ret != 0))
1945 return ret;
1946
1947 if (unlikely(!dev_priv->has_mob))
1948 return 0;
1949
1950 ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
1951 cmd->body.type, &sw_context->staged_cmd_res);
1952 if (unlikely(ret != 0))
1953 return ret;
1954
1955 return vmw_resource_relocation_add(sw_context, NULL,
1956 vmw_ptr_diff(sw_context->buf_start,
1957 &cmd->header.id),
1958 vmw_res_rel_nop);
1959 }
1960
1961 /**
1962 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
1963 *
1964 * @dev_priv: Pointer to a device private struct.
1965 * @sw_context: The software context being used for this batch.
1966 * @header: Pointer to the command header in the command stream.
1967 */
1968 static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
1969 struct vmw_sw_context *sw_context,
1970 SVGA3dCmdHeader *header)
1971 {
1972 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
1973 struct vmw_ctx_bindinfo_shader binding;
1974 struct vmw_resource *ctx, *res = NULL;
1975 struct vmw_ctx_validation_info *ctx_info;
1976 int ret;
1977
1978 cmd = container_of(header, typeof(*cmd), header);
1979
1980 if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
1981 VMW_DEBUG_USER("Illegal shader type %u.\n",
1982 (unsigned int) cmd->body.type);
1983 return -EINVAL;
1984 }
1985
1986 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
1987 VMW_RES_DIRTY_SET, user_context_converter,
1988 &cmd->body.cid, &ctx);
1989 if (unlikely(ret != 0))
1990 return ret;
1991
1992 if (!dev_priv->has_mob)
1993 return 0;
1994
1995 if (cmd->body.shid != SVGA3D_INVALID_ID) {
1996 /*
1997 * This is the compat shader path - per-device guest-backed
1998 * shaders, but user-space thinks they are per-context
1999 * host-backed shaders.
2000 */
2001 res = vmw_shader_lookup(vmw_context_res_man(ctx),
2002 cmd->body.shid, cmd->body.type);
2003 if (!IS_ERR(res)) {
2004 ret = vmw_execbuf_res_val_add(sw_context, res,
2005 VMW_RES_DIRTY_NONE,
2006 vmw_val_add_flag_noctx);
2007 if (unlikely(ret != 0))
2008 return ret;
2009
2010 ret = vmw_resource_relocation_add
2011 (sw_context, res,
2012 vmw_ptr_diff(sw_context->buf_start,
2013 &cmd->body.shid),
2014 vmw_res_rel_normal);
2015 if (unlikely(ret != 0))
2016 return ret;
2017 }
2018 }
2019
2020 if (IS_ERR_OR_NULL(res)) {
2021 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
2022 VMW_RES_DIRTY_NONE,
2023 user_shader_converter, &cmd->body.shid,
2024 &res);
2025 if (unlikely(ret != 0))
2026 return ret;
2027 }
2028
2029 ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
2030 if (!ctx_info)
2031 return -EINVAL;
2032
2033 binding.bi.ctx = ctx;
2034 binding.bi.res = res;
2035 binding.bi.bt = vmw_ctx_binding_shader;
2036 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2037 vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);
2038
2039 return 0;
2040 }
2041
2042 /**
2043 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
2044 *
2045 * @dev_priv: Pointer to a device private struct.
2046 * @sw_context: The software context being used for this batch.
2047 * @header: Pointer to the command header in the command stream.
2048 */
2049 static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
2050 struct vmw_sw_context *sw_context,
2051 SVGA3dCmdHeader *header)
2052 {
2053 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
2054 int ret;
2055
2056 cmd = container_of(header, typeof(*cmd), header);
2057
2058 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2059 VMW_RES_DIRTY_SET, user_context_converter,
2060 &cmd->body.cid, NULL);
2061 if (unlikely(ret != 0))
2062 return ret;
2063
2064 if (dev_priv->has_mob)
2065 header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
2066
2067 return 0;
2068 }
2069
2070 /**
2071 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
2072 *
2073 * @dev_priv: Pointer to a device private struct.
2074 * @sw_context: The software context being used for this batch.
2075 * @header: Pointer to the command header in the command stream.
2076 */
2077 static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
2078 struct vmw_sw_context *sw_context,
2079 SVGA3dCmdHeader *header)
2080 {
2081 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
2082 container_of(header, typeof(*cmd), header);
2083
2084 return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
2085 user_shader_converter, &cmd->body.shid,
2086 &cmd->body.mobid, cmd->body.offsetInBytes);
2087 }
2088
2089 /**
2090 * vmw_cmd_dx_set_single_constant_buffer - Validate
2091 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
2092 *
2093 * @dev_priv: Pointer to a device private struct.
2094 * @sw_context: The software context being used for this batch.
2095 * @header: Pointer to the command header in the command stream.
2096 */
2097 static int
2098 vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
2099 struct vmw_sw_context *sw_context,
2100 SVGA3dCmdHeader *header)
2101 {
2102 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
2103
2104 struct vmw_resource *res = NULL;
2105 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2106 struct vmw_ctx_bindinfo_cb binding;
2107 int ret;
2108
2109 if (!ctx_node)
2110 return -EINVAL;
2111
2112 cmd = container_of(header, typeof(*cmd), header);
2113 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2114 VMW_RES_DIRTY_NONE, user_surface_converter,
2115 &cmd->body.sid, &res);
2116 if (unlikely(ret != 0))
2117 return ret;
2118
2119 if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
2120 cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2121 VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
2122 (unsigned int) cmd->body.type,
2123 (unsigned int) cmd->body.slot);
2124 return -EINVAL;
2125 }
2126
2127 binding.bi.ctx = ctx_node->ctx;
2128 binding.bi.res = res;
2129 binding.bi.bt = vmw_ctx_binding_cb;
2130 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2131 binding.offset = cmd->body.offsetInBytes;
2132 binding.size = cmd->body.sizeInBytes;
2133 binding.slot = cmd->body.slot;
2134
2135 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
2136 binding.slot);
2137
2138 return 0;
2139 }
2140
2141 /**
2142 * vmw_cmd_dx_set_constant_buffer_offset - Validate
2143 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
2144 *
2145 * @dev_priv: Pointer to a device private struct.
2146 * @sw_context: The software context being used for this batch.
2147 * @header: Pointer to the command header in the command stream.
2148 */
2149 static int
2150 vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
2151 struct vmw_sw_context *sw_context,
2152 SVGA3dCmdHeader *header)
2153 {
2154 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);
2155
2156 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2157 u32 shader_slot;
2158
2159 if (!has_sm5_context(dev_priv))
2160 return -EINVAL;
2161
2162 if (!ctx_node)
2163 return -EINVAL;
2164
2165 cmd = container_of(header, typeof(*cmd), header);
2166 if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
2167 VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
2168 (unsigned int) cmd->body.slot);
2169 return -EINVAL;
2170 }
2171
2172 shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
2173 vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
2174 cmd->body.slot, cmd->body.offsetInBytes);
2175
2176 return 0;
2177 }
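
/*
 * Note on the shader_slot computation above: it relies on the
 * SVGA_3D_CMD_DX_SET_*_CONSTANT_BUFFER_OFFSET ids for VS/PS/GS/HS/DS/CS
 * being contiguous in the command enum, so that (sketch, relative
 * values only):
 *
 *	id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET == 0 for VS,
 *	                                                     1 for PS, ...
 *
 * i.e. the subtraction maps each per-stage command directly to its
 * shader slot without a switch statement.
 */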
2178
2179 /**
2180 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
2181 * command
2182 *
2183 * @dev_priv: Pointer to a device private struct.
2184 * @sw_context: The software context being used for this batch.
2185 * @header: Pointer to the command header in the command stream.
2186 */
2187 static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
2188 struct vmw_sw_context *sw_context,
2189 SVGA3dCmdHeader *header)
2190 {
2191 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
2192 container_of(header, typeof(*cmd), header);
2193
2194 u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
2195 sizeof(SVGA3dShaderResourceViewId);
2196
2197 if ((u64) cmd->body.startView + (u64) num_sr_view >
2198 (u64) SVGA3D_DX_MAX_SRVIEWS ||
2199 !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2200 VMW_DEBUG_USER("Invalid shader binding.\n");
2201 return -EINVAL;
2202 }
2203
2204 return vmw_view_bindings_add(sw_context, vmw_view_sr,
2205 vmw_ctx_binding_sr,
2206 cmd->body.type - SVGA3D_SHADERTYPE_MIN,
2207 (void *) &cmd[1], num_sr_view,
2208 cmd->body.startView);
2209 }
2210
2211 /**
2212 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
2213 *
2214 * @dev_priv: Pointer to a device private struct.
2215 * @sw_context: The software context being used for this batch.
2216 * @header: Pointer to the command header in the command stream.
2217 */
2218 static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
2219 struct vmw_sw_context *sw_context,
2220 SVGA3dCmdHeader *header)
2221 {
2222 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
2223 struct vmw_resource *res = NULL;
2224 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2225 struct vmw_ctx_bindinfo_shader binding;
2226 int ret = 0;
2227
2228 if (!ctx_node)
2229 return -EINVAL;
2230
2231 cmd = container_of(header, typeof(*cmd), header);
2232
2233 if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
2234 VMW_DEBUG_USER("Illegal shader type %u.\n",
2235 (unsigned int) cmd->body.type);
2236 return -EINVAL;
2237 }
2238
2239 if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
2240 res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
2241 if (IS_ERR(res)) {
2242 VMW_DEBUG_USER("Could not find shader for binding.\n");
2243 return PTR_ERR(res);
2244 }
2245
2246 ret = vmw_execbuf_res_val_add(sw_context, res,
2247 VMW_RES_DIRTY_NONE,
2248 vmw_val_add_flag_noctx);
2249 if (ret)
2250 return ret;
2251 }
2252
2253 binding.bi.ctx = ctx_node->ctx;
2254 binding.bi.res = res;
2255 binding.bi.bt = vmw_ctx_binding_dx_shader;
2256 binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
2257
2258 vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);
2259
2260 return 0;
2261 }
2262
2263 /**
2264 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
2265 * command
2266 *
2267 * @dev_priv: Pointer to a device private struct.
2268 * @sw_context: The software context being used for this batch.
2269 * @header: Pointer to the command header in the command stream.
2270 */
2271 static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
2272 struct vmw_sw_context *sw_context,
2273 SVGA3dCmdHeader *header)
2274 {
2275 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2276 struct vmw_ctx_bindinfo_vb binding;
2277 struct vmw_resource *res;
2278 struct {
2279 SVGA3dCmdHeader header;
2280 SVGA3dCmdDXSetVertexBuffers body;
2281 SVGA3dVertexBuffer buf[];
2282 } *cmd;
2283 int i, ret, num;
2284
2285 if (!ctx_node)
2286 return -EINVAL;
2287
2288 cmd = container_of(header, typeof(*cmd), header);
2289 num = (cmd->header.size - sizeof(cmd->body)) /
2290 sizeof(SVGA3dVertexBuffer);
2291 if ((u64)num + (u64)cmd->body.startBuffer >
2292 (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
2293 VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
2294 return -EINVAL;
2295 }
2296
2297 for (i = 0; i < num; i++) {
2298 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2299 VMW_RES_DIRTY_NONE,
2300 user_surface_converter,
2301 &cmd->buf[i].sid, &res);
2302 if (unlikely(ret != 0))
2303 return ret;
2304
2305 binding.bi.ctx = ctx_node->ctx;
2306 binding.bi.bt = vmw_ctx_binding_vb;
2307 binding.bi.res = res;
2308 binding.offset = cmd->buf[i].offset;
2309 binding.stride = cmd->buf[i].stride;
2310 binding.slot = i + cmd->body.startBuffer;
2311
2312 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2313 }
2314
2315 return 0;
2316 }
2317
2318 /**
2319 * vmw_cmd_dx_set_index_buffer - Validate
2320 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
2321 *
2322 * @dev_priv: Pointer to a device private struct.
2323 * @sw_context: The software context being used for this batch.
2324 * @header: Pointer to the command header in the command stream.
2325 */
2326 static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
2327 struct vmw_sw_context *sw_context,
2328 SVGA3dCmdHeader *header)
2329 {
2330 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2331 struct vmw_ctx_bindinfo_ib binding;
2332 struct vmw_resource *res;
2333 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
2334 int ret;
2335
2336 if (!ctx_node)
2337 return -EINVAL;
2338
2339 cmd = container_of(header, typeof(*cmd), header);
2340 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2341 VMW_RES_DIRTY_NONE, user_surface_converter,
2342 &cmd->body.sid, &res);
2343 if (unlikely(ret != 0))
2344 return ret;
2345
2346 binding.bi.ctx = ctx_node->ctx;
2347 binding.bi.res = res;
2348 binding.bi.bt = vmw_ctx_binding_ib;
2349 binding.offset = cmd->body.offset;
2350 binding.format = cmd->body.format;
2351
2352 vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);
2353
2354 return 0;
2355 }
2356
2357 /**
2358 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
2359 * command
2360 *
2361 * @dev_priv: Pointer to a device private struct.
2362 * @sw_context: The software context being used for this batch.
2363 * @header: Pointer to the command header in the command stream.
2364 */
2365 static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
2366 struct vmw_sw_context *sw_context,
2367 SVGA3dCmdHeader *header)
2368 {
2369 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
2370 container_of(header, typeof(*cmd), header);
2371 u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
2372 sizeof(SVGA3dRenderTargetViewId);
2373 int ret;
2374
2375 if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
2376 VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
2377 return -EINVAL;
2378 }
2379
2380 ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
2381 0, &cmd->body.depthStencilViewId, 1, 0);
2382 if (ret)
2383 return ret;
2384
2385 return vmw_view_bindings_add(sw_context, vmw_view_rt,
2386 vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
2387 num_rt_view, 0);
2388 }
2389
2390 /**
2391 * vmw_cmd_dx_clear_rendertarget_view - Validate
2392 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
2393 *
2394 * @dev_priv: Pointer to a device private struct.
2395 * @sw_context: The software context being used for this batch.
2396 * @header: Pointer to the command header in the command stream.
2397 */
2398 static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
2399 struct vmw_sw_context *sw_context,
2400 SVGA3dCmdHeader *header)
2401 {
2402 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
2403 container_of(header, typeof(*cmd), header);
2404 struct vmw_resource *ret;
2405
2406 ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
2407 cmd->body.renderTargetViewId);
2408
2409 return PTR_ERR_OR_ZERO(ret);
2410 }
2411
2412 /**
2413 * vmw_cmd_dx_clear_depthstencil_view - Validate
2414 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
2415 *
2416 * @dev_priv: Pointer to a device private struct.
2417 * @sw_context: The software context being used for this batch.
2418 * @header: Pointer to the command header in the command stream.
2419 */
2420 static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
2421 struct vmw_sw_context *sw_context,
2422 SVGA3dCmdHeader *header)
2423 {
2424 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
2425 container_of(header, typeof(*cmd), header);
2426 struct vmw_resource *ret;
2427
2428 ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
2429 cmd->body.depthStencilViewId);
2430
2431 return PTR_ERR_OR_ZERO(ret);
2432 }
2433
2434 static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
2435 struct vmw_sw_context *sw_context,
2436 SVGA3dCmdHeader *header)
2437 {
2438 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2439 struct vmw_resource *srf;
2440 struct vmw_resource *res;
2441 enum vmw_view_type view_type;
2442 int ret;
2443 /*
2444 * This is based on the fact that all affected define commands have the
2445 * same initial command body layout.
2446 */
2447 struct {
2448 SVGA3dCmdHeader header;
2449 uint32 defined_id;
2450 uint32 sid;
2451 } *cmd;
2452
2453 if (!ctx_node)
2454 return -EINVAL;
2455
2456 view_type = vmw_view_cmd_to_type(header->id);
2457 if (view_type == vmw_view_max)
2458 return -EINVAL;
2459
2460 cmd = container_of(header, typeof(*cmd), header);
2461 if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
2462 VMW_DEBUG_USER("Invalid surface id.\n");
2463 return -EINVAL;
2464 }
2465 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2466 VMW_RES_DIRTY_NONE, user_surface_converter,
2467 &cmd->sid, &srf);
2468 if (unlikely(ret != 0))
2469 return ret;
2470
2471 res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
2472 if (IS_ERR_OR_NULL(res))
2473 return res ? PTR_ERR(res) : -EINVAL;
2474 ret = vmw_cotable_notify(res, cmd->defined_id);
2475 if (unlikely(ret != 0))
2476 return ret;
2477
2478 return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
2479 cmd->defined_id, header,
2480 header->size + sizeof(*header),
2481 &sw_context->staged_cmd_res);
2482 }
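
/*
 * Illustrative example of the shared prefix exploited above (struct
 * names from the SVGA3D headers; field names abbreviated, trailing
 * fields elided): every view-define command begins with the id being
 * defined followed by the backing surface id, so one generic overlay
 * covers them all:
 *
 *	SVGA3dCmdDXDefineShaderResourceView { viewId, sid, ... }
 *	SVGA3dCmdDXDefineRenderTargetView   { viewId, sid, ... }
 *	SVGA3dCmdDXDefineDepthStencilView   { viewId, sid, ... }
 *
 * Only the common { defined_id, sid } prefix is read here; the
 * type-specific tail is handed to vmw_view_add() as an opaque blob.
 */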
2483
2484 /**
2485 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
2486 *
2487 * @dev_priv: Pointer to a device private struct.
2488 * @sw_context: The software context being used for this batch.
2489 * @header: Pointer to the command header in the command stream.
2490 */
2491 static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
2492 struct vmw_sw_context *sw_context,
2493 SVGA3dCmdHeader *header)
2494 {
2495 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2496 struct vmw_ctx_bindinfo_so_target binding;
2497 struct vmw_resource *res;
2498 struct {
2499 SVGA3dCmdHeader header;
2500 SVGA3dCmdDXSetSOTargets body;
2501 SVGA3dSoTarget targets[];
2502 } *cmd;
2503 int i, ret, num;
2504
2505 if (!ctx_node)
2506 return -EINVAL;
2507
2508 cmd = container_of(header, typeof(*cmd), header);
2509 num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);
2510
2511 if (num > SVGA3D_DX_MAX_SOTARGETS) {
2512 VMW_DEBUG_USER("Invalid DX SO binding.\n");
2513 return -EINVAL;
2514 }
2515
2516 for (i = 0; i < num; i++) {
2517 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2518 VMW_RES_DIRTY_SET,
2519 user_surface_converter,
2520 &cmd->targets[i].sid, &res);
2521 if (unlikely(ret != 0))
2522 return ret;
2523
2524 binding.bi.ctx = ctx_node->ctx;
2525 binding.bi.res = res;
2526 binding.bi.bt = vmw_ctx_binding_so_target;
2527 binding.offset = cmd->targets[i].offset;
2528 binding.size = cmd->targets[i].sizeInBytes;
2529 binding.slot = i;
2530
2531 vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
2532 }
2533
2534 return 0;
2535 }
2536
2537 static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
2538 struct vmw_sw_context *sw_context,
2539 SVGA3dCmdHeader *header)
2540 {
2541 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2542 struct vmw_resource *res;
2543 /*
2544 * This is based on the fact that all affected define commands have
2545 * the same initial command body layout.
2546 */
2547 struct {
2548 SVGA3dCmdHeader header;
2549 uint32 defined_id;
2550 } *cmd;
2551 enum vmw_so_type so_type;
2552 int ret;
2553
2554 if (!ctx_node)
2555 return -EINVAL;
2556
2557 so_type = vmw_so_cmd_to_type(header->id);
2558 res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
2559 if (IS_ERR_OR_NULL(res))
2560 return res ? PTR_ERR(res) : -EINVAL;
2561 cmd = container_of(header, typeof(*cmd), header);
2562 ret = vmw_cotable_notify(res, cmd->defined_id);
2563
2564 return ret;
2565 }
2566
2567 /**
2568 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
2569 * command
2570 *
2571 * @dev_priv: Pointer to a device private struct.
2572 * @sw_context: The software context being used for this batch.
2573 * @header: Pointer to the command header in the command stream.
2574 */
2575 static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
2576 struct vmw_sw_context *sw_context,
2577 SVGA3dCmdHeader *header)
2578 {
2579 struct {
2580 SVGA3dCmdHeader header;
2581 union {
2582 SVGA3dCmdDXReadbackSubResource r_body;
2583 SVGA3dCmdDXInvalidateSubResource i_body;
2584 SVGA3dCmdDXUpdateSubResource u_body;
2585 SVGA3dSurfaceId sid;
2586 };
2587 } *cmd;
2588
2589 BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
2590 offsetof(typeof(*cmd), sid));
2591 BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
2592 offsetof(typeof(*cmd), sid));
2593 BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
2594 offsetof(typeof(*cmd), sid));
2595
2596 cmd = container_of(header, typeof(*cmd), header);
2597 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2598 VMW_RES_DIRTY_NONE, user_surface_converter,
2599 &cmd->sid, NULL);
2600 }
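
/*
 * Illustrative note: the union above works because each sub-resource
 * command body starts with its surface id (sketch only, not the full
 * definitions):
 *
 *	SVGA3dCmdDXReadbackSubResource   { SVGA3dSurfaceId sid; ... }
 *	SVGA3dCmdDXInvalidateSubResource { SVGA3dSurfaceId sid; ... }
 *	SVGA3dCmdDXUpdateSubResource     { SVGA3dSurfaceId sid; ... }
 *
 * The BUILD_BUG_ON()s turn that layout assumption into a compile-time
 * check, so a header change cannot silently break the shared lookup.
 */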
2601
2602 static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
2603 struct vmw_sw_context *sw_context,
2604 SVGA3dCmdHeader *header)
2605 {
2606 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2607
2608 if (!ctx_node)
2609 return -EINVAL;
2610
2611 return 0;
2612 }
2613
2614 /**
2615 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
2616 * resource for removal.
2617 *
2618 * @dev_priv: Pointer to a device private struct.
2619 * @sw_context: The software context being used for this batch.
2620 * @header: Pointer to the command header in the command stream.
2621 *
2622 * Check that the view exists, and if it was not created using this command
2623 * batch, conditionally make this command a NOP.
2624 */
2625 static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
2626 struct vmw_sw_context *sw_context,
2627 SVGA3dCmdHeader *header)
2628 {
2629 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2630 struct {
2631 SVGA3dCmdHeader header;
2632 union vmw_view_destroy body;
2633 } *cmd = container_of(header, typeof(*cmd), header);
2634 enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
2635 struct vmw_resource *view;
2636 int ret;
2637
2638 if (!ctx_node)
2639 return -EINVAL;
2640
2641 ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
2642 &sw_context->staged_cmd_res, &view);
2643 if (ret || !view)
2644 return ret;
2645
2646 /*
2647 * If the view wasn't created during this command batch, it might
2648 * have been removed due to a context swapout, so add a
2649 * relocation to conditionally make this command a NOP to avoid
2650 * device errors.
2651 */
2652 return vmw_resource_relocation_add(sw_context, view,
2653 vmw_ptr_diff(sw_context->buf_start,
2654 &cmd->header.id),
2655 vmw_res_rel_cond_nop);
2656 }
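
/*
 * Simplified sketch of what the conditional relocation registered
 * above does when relocations are applied at submission time (the
 * actual fixup lives in the relocation-apply path, not here):
 *
 *	if (res->id == -1)            // view was evicted in the meantime
 *		cmd->header.id = SVGA_3D_CMD_NOP;
 *
 * so a destroy of an already-evicted view never reaches the device.
 */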
2657
2658 /**
2659 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
2660 *
2661 * @dev_priv: Pointer to a device private struct.
2662 * @sw_context: The software context being used for this batch.
2663 * @header: Pointer to the command header in the command stream.
2664 */
2665 static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
2666 struct vmw_sw_context *sw_context,
2667 SVGA3dCmdHeader *header)
2668 {
2669 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2670 struct vmw_resource *res;
2671 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
2672 container_of(header, typeof(*cmd), header);
2673 int ret;
2674
2675 if (!ctx_node)
2676 return -EINVAL;
2677
2678 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
2679 if (IS_ERR_OR_NULL(res))
2680 return res ? PTR_ERR(res) : -EINVAL;
2681 ret = vmw_cotable_notify(res, cmd->body.shaderId);
2682 if (ret)
2683 return ret;
2684
2685 return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
2686 cmd->body.shaderId, cmd->body.type,
2687 &sw_context->staged_cmd_res);
2688 }
2689
2690 /**
2691 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
2692 *
2693 * @dev_priv: Pointer to a device private struct.
2694 * @sw_context: The software context being used for this batch.
2695 * @header: Pointer to the command header in the command stream.
2696 */
2697 static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
2698 struct vmw_sw_context *sw_context,
2699 SVGA3dCmdHeader *header)
2700 {
2701 struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
2702 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
2703 container_of(header, typeof(*cmd), header);
2704 int ret;
2705
2706 if (!ctx_node)
2707 return -EINVAL;
2708
2709 ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
2710 &sw_context->staged_cmd_res);
2711
2712 return ret;
2713 }
2714
2715 /**
2716 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
2717 *
2718 * @dev_priv: Pointer to a device private struct.
2719 * @sw_context: The software context being used for this batch.
2720 * @header: Pointer to the command header in the command stream.
2721 */
2722 static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
2723 struct vmw_sw_context *sw_context,
2724 SVGA3dCmdHeader *header)
2725 {
2726 struct vmw_resource *ctx;
2727 struct vmw_resource *res;
2728 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
2729 container_of(header, typeof(*cmd), header);
2730 int ret;
2731
2732 if (cmd->body.cid != SVGA3D_INVALID_ID) {
2733 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
2734 VMW_RES_DIRTY_SET,
2735 user_context_converter, &cmd->body.cid,
2736 &ctx);
2737 if (ret)
2738 return ret;
2739 } else {
2740 struct vmw_ctx_validation_info *ctx_node =
2741 VMW_GET_CTX_NODE(sw_context);
2742
2743 if (!ctx_node)
2744 return -EINVAL;
2745
2746 ctx = ctx_node->ctx;
2747 }
2748
2749 res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
2750 if (IS_ERR(res)) {
2751 VMW_DEBUG_USER("Could not find shader to bind.\n");
2752 return PTR_ERR(res);
2753 }
2754
2755 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
2756 vmw_val_add_flag_noctx);
2757 if (ret) {
2758 VMW_DEBUG_USER("Error creating resource validation node.\n");
2759 return ret;
2760 }
2761
2762 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
2763 &cmd->body.mobid,
2764 cmd->body.offsetInBytes);
2765 }
2766
2767 /**
2768 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
2769 *
2770 * @dev_priv: Pointer to a device private struct.
2771 * @sw_context: The software context being used for this batch.
2772 * @header: Pointer to the command header in the command stream.
2773 */
2774 static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
2775 struct vmw_sw_context *sw_context,
2776 SVGA3dCmdHeader *header)
2777 {
2778 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
2779 container_of(header, typeof(*cmd), header);
2780 struct vmw_resource *view;
2781 struct vmw_res_cache_entry *rcache;
2782
2783 view = vmw_view_id_val_add(sw_context, vmw_view_sr,
2784 cmd->body.shaderResourceViewId);
2785 if (IS_ERR(view))
2786 return PTR_ERR(view);
2787
2788 /*
2789 * Normally the shader-resource view is not gpu-dirtying, but for
2790 * this particular command it is...
2791 * So mark the last looked-up surface, which is the surface
2792 * the view points to, gpu-dirty.
2793 */
2794 rcache = &sw_context->res_cache[vmw_res_surface];
2795 vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
2796 VMW_RES_DIRTY_SET);
2797 return 0;
2798 }
2799
2800 /**
2801 * vmw_cmd_dx_transfer_from_buffer - Validate
2802 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
2803 *
2804 * @dev_priv: Pointer to a device private struct.
2805 * @sw_context: The software context being used for this batch.
2806 * @header: Pointer to the command header in the command stream.
2807 */
2808 static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
2809 struct vmw_sw_context *sw_context,
2810 SVGA3dCmdHeader *header)
2811 {
2812 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
2813 container_of(header, typeof(*cmd), header);
2814 int ret;
2815
2816 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2817 VMW_RES_DIRTY_NONE, user_surface_converter,
2818 &cmd->body.srcSid, NULL);
2819 if (ret != 0)
2820 return ret;
2821
2822 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2823 VMW_RES_DIRTY_SET, user_surface_converter,
2824 &cmd->body.destSid, NULL);
2825 }
2826
2827 /**
2828 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
2829 *
2830 * @dev_priv: Pointer to a device private struct.
2831 * @sw_context: The software context being used for this batch.
2832 * @header: Pointer to the command header in the command stream.
2833 */
2834 static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
2835 struct vmw_sw_context *sw_context,
2836 SVGA3dCmdHeader *header)
2837 {
2838 VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
2839 container_of(header, typeof(*cmd), header);
2840
2841 if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
2842 return -EINVAL;
2843
2844 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
2845 VMW_RES_DIRTY_SET, user_surface_converter,
2846 &cmd->body.surface.sid, NULL);
2847 }
2848
2849 static int vmw_cmd_sm5(struct vmw_private *dev_priv,
2850 struct vmw_sw_context *sw_context,
2851 SVGA3dCmdHeader *header)
2852 {
2853 if (!has_sm5_context(dev_priv))
2854 return -EINVAL;
2855
2856 return 0;
2857 }
2858
2859 static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
2860 struct vmw_sw_context *sw_context,
2861 SVGA3dCmdHeader *header)
2862 {
2863 if (!has_sm5_context(dev_priv))
2864 return -EINVAL;
2865
2866 return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
2867 }
2868
2869 static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
2870 struct vmw_sw_context *sw_context,
2871 SVGA3dCmdHeader *header)
2872 {
2873 if (!has_sm5_context(dev_priv))
2874 return -EINVAL;
2875
2876 return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
2877 }
2878
2879 static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
2880 struct vmw_sw_context *sw_context,
2881 SVGA3dCmdHeader *header)
2882 {
2883 struct {
2884 SVGA3dCmdHeader header;
2885 SVGA3dCmdDXClearUAViewUint body;
2886 } *cmd = container_of(header, typeof(*cmd), header);
2887 struct vmw_resource *ret;
2888
2889 if (!has_sm5_context(dev_priv))
2890 return -EINVAL;
2891
2892 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2893 cmd->body.uaViewId);
2894
2895 return PTR_ERR_OR_ZERO(ret);
2896 }
2897
2898 static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
2899 struct vmw_sw_context *sw_context,
2900 SVGA3dCmdHeader *header)
2901 {
2902 struct {
2903 SVGA3dCmdHeader header;
2904 SVGA3dCmdDXClearUAViewFloat body;
2905 } *cmd = container_of(header, typeof(*cmd), header);
2906 struct vmw_resource *ret;
2907
2908 if (!has_sm5_context(dev_priv))
2909 return -EINVAL;
2910
2911 ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
2912 cmd->body.uaViewId);
2913
2914 return PTR_ERR_OR_ZERO(ret);
2915 }
2916
2917 static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
2918 struct vmw_sw_context *sw_context,
2919 SVGA3dCmdHeader *header)
2920 {
2921 struct {
2922 SVGA3dCmdHeader header;
2923 SVGA3dCmdDXSetUAViews body;
2924 } *cmd = container_of(header, typeof(*cmd), header);
2925 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2926 sizeof(SVGA3dUAViewId);
2927 int ret;
2928
2929 if (!has_sm5_context(dev_priv))
2930 return -EINVAL;
2931
2932 if (num_uav > vmw_max_num_uavs(dev_priv)) {
2933 VMW_DEBUG_USER("Invalid UAV binding.\n");
2934 return -EINVAL;
2935 }
2936
2937 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2938 vmw_ctx_binding_uav, 0, (void *)&cmd[1],
2939 num_uav, 0);
2940 if (ret)
2941 return ret;
2942
2943 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
2944 cmd->body.uavSpliceIndex);
2945
2946 return ret;
2947 }
2948
2949 static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
2950 struct vmw_sw_context *sw_context,
2951 SVGA3dCmdHeader *header)
2952 {
2953 struct {
2954 SVGA3dCmdHeader header;
2955 SVGA3dCmdDXSetCSUAViews body;
2956 } *cmd = container_of(header, typeof(*cmd), header);
2957 u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
2958 sizeof(SVGA3dUAViewId);
2959 int ret;
2960
2961 if (!has_sm5_context(dev_priv))
2962 return -EINVAL;
2963
2964 if (num_uav > vmw_max_num_uavs(dev_priv)) {
2965 VMW_DEBUG_USER("Invalid UAV binding.\n");
2966 return -EINVAL;
2967 }
2968
2969 ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
2970 vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
2971 num_uav, 0);
2972 if (ret)
2973 return ret;
2974
2975 vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
2976 cmd->body.startIndex);
2977
2978 return ret;
2979 }
2980
2981 static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
2982 struct vmw_sw_context *sw_context,
2983 SVGA3dCmdHeader *header)
2984 {
2985 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
2986 struct vmw_resource *res;
2987 struct {
2988 SVGA3dCmdHeader header;
2989 SVGA3dCmdDXDefineStreamOutputWithMob body;
2990 } *cmd = container_of(header, typeof(*cmd), header);
2991 int ret;
2992
2993 if (!has_sm5_context(dev_priv))
2994 return -EINVAL;
2995
2996 if (!ctx_node) {
2997 DRM_ERROR("DX Context not set.\n");
2998 return -EINVAL;
2999 }
3000
3001 res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
3002 if (IS_ERR_OR_NULL(res))
3003 return res ? PTR_ERR(res) : -EINVAL;
3004 ret = vmw_cotable_notify(res, cmd->body.soid);
3005 if (ret)
3006 return ret;
3007
3008 return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
3009 cmd->body.soid,
3010 &sw_context->staged_cmd_res);
3011 }
3012
3013 static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
3014 struct vmw_sw_context *sw_context,
3015 SVGA3dCmdHeader *header)
3016 {
3017 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3018 struct vmw_resource *res;
3019 struct {
3020 SVGA3dCmdHeader header;
3021 SVGA3dCmdDXDestroyStreamOutput body;
3022 } *cmd = container_of(header, typeof(*cmd), header);
3023
3024 if (!ctx_node) {
3025 DRM_ERROR("DX Context not set.\n");
3026 return -EINVAL;
3027 }
3028
3029 /*
3030 * When the device does not support SM5, the streamoutput-with-mob
3031 * commands are not available to user-space. Simply return in this case.
3032 */
3033 if (!has_sm5_context(dev_priv))
3034 return 0;
3035
3036 /*
3037 * On an SM5-capable device, a failed lookup means user-space probably
3038 * used the old streamoutput define command. Return without an error.
3039 */
3040 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3041 cmd->body.soid);
3042 if (IS_ERR(res))
3043 return 0;
3044
3045 return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
3046 &sw_context->staged_cmd_res);
3047 }
3048
3049 static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
3050 struct vmw_sw_context *sw_context,
3051 SVGA3dCmdHeader *header)
3052 {
3053 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3054 struct vmw_resource *res;
3055 struct {
3056 SVGA3dCmdHeader header;
3057 SVGA3dCmdDXBindStreamOutput body;
3058 } *cmd = container_of(header, typeof(*cmd), header);
3059 int ret;
3060
3061 if (!has_sm5_context(dev_priv))
3062 return -EINVAL;
3063
3064 if (!ctx_node) {
3065 DRM_ERROR("DX Context not set.\n");
3066 return -EINVAL;
3067 }
3068
3069 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3070 cmd->body.soid);
3071 if (IS_ERR(res)) {
3072 DRM_ERROR("Could not find streamoutput to bind.\n");
3073 return PTR_ERR(res);
3074 }
3075
3076 vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);
3077
3078 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3079 vmw_val_add_flag_noctx);
3080 if (ret) {
3081 DRM_ERROR("Error creating resource validation node.\n");
3082 return ret;
3083 }
3084
3085 return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
3086 &cmd->body.mobid,
3087 cmd->body.offsetInBytes);
3088 }
3089
3090 static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
3091 struct vmw_sw_context *sw_context,
3092 SVGA3dCmdHeader *header)
3093 {
3094 struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
3095 struct vmw_resource *res;
3096 struct vmw_ctx_bindinfo_so binding;
3097 struct {
3098 SVGA3dCmdHeader header;
3099 SVGA3dCmdDXSetStreamOutput body;
3100 } *cmd = container_of(header, typeof(*cmd), header);
3101 int ret;
3102
3103 if (!ctx_node) {
3104 DRM_ERROR("DX Context not set.\n");
3105 return -EINVAL;
3106 }
3107
3108 if (cmd->body.soid == SVGA3D_INVALID_ID)
3109 return 0;
3110
3111 /*
3112 * When the device does not support SM5, the streamoutput-with-mob
3113 * commands are not available to user-space. Simply return in this case.
3114 */
3115 if (!has_sm5_context(dev_priv))
3116 return 0;
3117
3118 /*
3119 * On an SM5-capable device, a failed lookup means user-space probably
3120 * used the old streamoutput define command. Return without an error.
3121 */
3122 res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
3123 cmd->body.soid);
3124 if (IS_ERR(res))
3125 return 0;
3127
3128 ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
3129 vmw_val_add_flag_noctx);
3130 if (ret) {
3131 DRM_ERROR("Error creating resource validation node.\n");
3132 return ret;
3133 }
3134
3135 binding.bi.ctx = ctx_node->ctx;
3136 binding.bi.res = res;
3137 binding.bi.bt = vmw_ctx_binding_so;
3138 binding.slot = 0; /* Only one SO set to context at a time. */
3139
3140 vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
3141 binding.slot);
3142
3143 return ret;
3144 }
3145
3146 static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
3147 struct vmw_sw_context *sw_context,
3148 SVGA3dCmdHeader *header)
3149 {
3150 struct vmw_draw_indexed_instanced_indirect_cmd {
3151 SVGA3dCmdHeader header;
3152 SVGA3dCmdDXDrawIndexedInstancedIndirect body;
3153 } *cmd = container_of(header, typeof(*cmd), header);
3154
3155 if (!has_sm5_context(dev_priv))
3156 return -EINVAL;
3157
3158 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3159 VMW_RES_DIRTY_NONE, user_surface_converter,
3160 &cmd->body.argsBufferSid, NULL);
3161 }
3162
3163 static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
3164 struct vmw_sw_context *sw_context,
3165 SVGA3dCmdHeader *header)
3166 {
3167 struct vmw_draw_instanced_indirect_cmd {
3168 SVGA3dCmdHeader header;
3169 SVGA3dCmdDXDrawInstancedIndirect body;
3170 } *cmd = container_of(header, typeof(*cmd), header);
3171
3172 if (!has_sm5_context(dev_priv))
3173 return -EINVAL;
3174
3175 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3176 VMW_RES_DIRTY_NONE, user_surface_converter,
3177 &cmd->body.argsBufferSid, NULL);
3178 }
3179
3180 static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
3181 struct vmw_sw_context *sw_context,
3182 SVGA3dCmdHeader *header)
3183 {
3184 struct vmw_dispatch_indirect_cmd {
3185 SVGA3dCmdHeader header;
3186 SVGA3dCmdDXDispatchIndirect body;
3187 } *cmd = container_of(header, typeof(*cmd), header);
3188
3189 if (!has_sm5_context(dev_priv))
3190 return -EINVAL;
3191
3192 return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
3193 VMW_RES_DIRTY_NONE, user_surface_converter,
3194 &cmd->body.argsBufferSid, NULL);
3195 }
3196
3197 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
3198 struct vmw_sw_context *sw_context,
3199 void *buf, uint32_t *size)
3200 {
3201 uint32_t size_remaining = *size;
3202 uint32_t cmd_id;
3203
3204 cmd_id = ((uint32_t *)buf)[0];
3205 switch (cmd_id) {
3206 case SVGA_CMD_UPDATE:
3207 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
3208 break;
3209 case SVGA_CMD_DEFINE_GMRFB:
3210 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
3211 break;
3212 case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
3213 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3214 break;
3215 case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
3216 *size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
3217 break;
3218 default:
3219 VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
3220 return -EINVAL;
3221 }
3222
3223 if (*size > size_remaining) {
3224 VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
3225 cmd_id);
3226 return -EINVAL;
3227 }
3228
3229 if (unlikely(!sw_context->kernel)) {
3230 VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
3231 return -EPERM;
3232 }
3233
3234 if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
3235 return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
3236
3237 return 0;
3238 }
3239
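/*
 * Per-command dispatch table for the verifier. Each VMW_CMD_DEF entry pairs
 * an SVGA3D command id with its check function and the three flags consumed
 * by vmw_cmd_check(): user_allow (accepted from the execbuf ioctl),
 * gb_disable (rejected when guest-backed objects are enabled) and gb_enable
 * (rejected when they are not).
 */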
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
		    true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V4,
		    &vmw_cmd_invalid, false, false, true),
};

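/**
 * vmw_cmd_describe - Look up the name and size of a command for debugging.
 *
 * @buf: Pointer to the command in the command stream.
 * @size: Out parameter returning the command size in bytes.
 * @cmd: Out parameter returning the command name.
 *
 * Return: true if the command was recognized, false otherwise.
 */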
bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

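/**
 * vmw_cmd_check - Validate a single command in the command stream.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the command in the command stream.
 * @size: In: bytes remaining in the stream. Out: size of this command.
 *
 * Dispatches 3D commands to the corresponding entry in vmw_cmd_entries[]
 * after checking size, privilege and guest-backed-object constraints.
 * Non-3D commands are handed off to vmw_cmd_check_not_3d().
 */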
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

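/**
 * vmw_cmd_check_all - Validate all commands in a command batch.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @buf: Pointer to the start of the command batch.
 * @size: Size of the command batch in bytes.
 *
 * Walks the batch one command at a time, letting vmw_cmd_check() determine
 * each command's size, and fails if the batch does not end on an exact
 * command boundary.
 */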
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

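/**
 * vmw_apply_relocations - Patch buffer object locations into the command
 * batch.
 *
 * @sw_context: The software context holding the relocation list.
 *
 * For each relocation, writes the now-known GMR id, MOB id or VRAM offset of
 * the validated buffer object into the command stream.
 */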
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->tbo;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

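/**
 * vmw_resize_cmd_bounce - Ensure the command bounce buffer is large enough.
 *
 * @sw_context: The software context owning the bounce buffer.
 * @size: Required size in bytes.
 *
 * Grows the buffer geometrically (by roughly 1.5x, page-aligned) to limit
 * the number of reallocations across submissions.
 */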
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL, @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */

int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
 *
 * @dev_priv: Pointer to a vmw_private struct.
 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
 * @ret: Return value from fence object creation.
 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
 * the information should be copied.
 * @fence: Pointer to the fence object.
 * @fence_handle: User-space fence handle.
 * @out_fence_fd: Exported file descriptor for the fence. -1 if not used.
 *
 * This function copies fence information to user-space. If copying fails, the
 * user-space struct drm_vmw_fence_rep::error member is hopefully left
 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
 * will hopefully be detected.
 *
 * Also if copying fails, user-space will be unable to signal the fence object
 * so we wait for it immediately, and then unreference the user-space reference.
 */
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		fence_rep.passed_seqno = vmw_fences_update(dev_priv->fman);
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference the
	 * handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}


/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer represented
 * by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Pointer to a kernel copy of the commands, which may be
 * NULL. If command buffers could not be used, the function returns this
 * value unchanged and sets *@header to NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If an error is encountered, the function will return a pointer error value.
 * If the function is interrupted by a signal while sleeping, it will return
 * -ERESTARTSYS cast to a pointer error value.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}

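/**
 * vmw_execbuf_tie_context - Look up and validate a DX context for the batch.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @handle: User-space handle of the DX context, or SVGA3D_INVALID_ID.
 *
 * On success, sets up @sw_context->dx_ctx_node and @sw_context->man for the
 * remainder of command validation.
 */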
static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	ret = vmw_user_resource_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter, &res);
	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return ret;
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
				      vmw_val_add_flag_none);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		return ret;
	}

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	vmw_resource_unreference(&res);
	return 0;
}

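/**
 * vmw_execbuf_process - Validate and submit a command batch.
 *
 * @file_priv: Pointer to the drm file-private of the caller.
 * @dev_priv: Pointer to a device private struct.
 * @user_commands: User-space pointer to the command batch, or NULL.
 * @kernel_commands: Kernel pointer to the command batch, or NULL.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Ignored; throttling is no longer supported.
 * @dx_context_handle: Handle of the DX context to bind, or SVGA3D_INVALID_ID.
 * @user_fence_rep: User-space address to receive fence information, or NULL.
 * @out_fence: If non-NULL, returns a reference to the created fence object.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * This is the core of command submission: the batch is copied in if needed,
 * verified and patched, its resources and buffer objects reserved and
 * validated, and the result committed to the device followed by a fence.
 */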
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}

	if (throttle_us)
		VMW_DEBUG_USER("Throttling is no longer supported.\n");

	kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
					     kernel_commands, command_size,
					     &header);
	if (IS_ERR(kernel_commands)) {
		ret = PTR_ERR(kernel_commands);
		goto out_free_fence_fd;
	}

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (ret) {
		ret = -ERESTARTSYS;
		goto out_free_header;
	}

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->filp = file_priv;
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_cmd_send_fence will sync. The error will be propagated to
	 * user-space in @user_fence_rep.
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed to
 * by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	vmw_bo_placement_set(dev_priv->pinned_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
	if (ret)
		goto out_no_reserve;

	vmw_bo_placement_set(dev_priv->dummy_query_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

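/**
 * vmw_execbuf_ioctl - Implementation of the DRM_VMW_EXECBUF ioctl.
 *
 * @dev: Pointer to the drm device.
 * @data: The ioctl argument, a struct drm_vmw_execbuf_arg.
 * @file_priv: Pointer to the drm file-private of the caller.
 *
 * Checks the argument version, waits on any imported fence fd, and hands the
 * user-space command batch to vmw_execbuf_process().
 */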
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1, core DRM has extended + zeropadded the data */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later, core DRM would have correctly copied it */
		break;
	}

	/* If a fence FD was imported from elsewhere, wait on it */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = dma_fence_wait(in_fence, true);
		if (ret)
			goto out;
	}

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long)arg->fence_rep,
				  NULL, arg->flags);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}