xref: /linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2009-2025 Broadcom. All Rights Reserved. The term
 * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"

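/*
 * Fence sequence numbers are compared with 32-bit wrap-around arithmetic:
 * a fence is considered passed when the device seqno minus the fence seqno
 * is less than VMW_FENCE_WRAP, i.e. within half of the 32-bit range.
 */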
#define VMW_FENCE_WRAP (1 << 31)

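/**
 * struct vmw_fence_manager - tracks the fence objects of a device.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lock: Spinlock protecting @fence_list; also used as the dma_fence lock.
 * @fence_list: List of fence objects that have not yet signaled.
 * @fifo_down: True while the command FIFO is down; no new fences may be
 * created in this state.
 * @ctx: The dma_fence context used for all fences from this manager.
 */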
struct vmw_fence_manager {
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	bool fifo_down;
	u64 ctx;
};

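/**
 * struct vmw_user_fence - a fence object visible to user-space.
 *
 * @base: The ttm base object used to expose the fence to user-space.
 * @fence: The embedded fence object.
 */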
struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence callback that delivers a DRM event.
 *
 * @base:  For use with dma_fence_add_callback(...)
 * @event: A pointer to the pending event.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct dma_fence_cb base;

	struct drm_pending_event *event;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (!list_empty(&fence->head)) {
		/* The fence manager still has an implicit reference to this
		 * fence via the fence list if head is set. Because the lock is
		 * required to be held when the fence manager updates the fence
		 * list either the fence will have been removed after we get
		 * the lock below or we can safely remove it and the fence
		 * manager will never see it. This implies the fence is being
		 * deleted without being signaled which is dubious but valid
		 * if there are no callbacks. The dma_fence code that calls
		 * this hook will warn about deleted unsignaled with callbacks
		 * so no need to warn again here.
		 */
		spin_lock(&fman->lock);
		list_del_init(&fence->head);
		if (fence->waiter_added)
			vmw_seqno_waiter_remove(fman->dev_priv);
		spin_unlock(&fman->lock);
	}
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
	return "svga";
}

/* When we toggle signaling for the SVGA device there is a race period from
 * the time we first read the fence seqno to the time we enable interrupts.
 * If we miss the interrupt for a fence during this period it's likely the
 * driver will stall. As a result we need to re-read the seqno after
 * interrupts are enabled. If interrupts were already enabled we just
 * increment the number of seqno waiters.
 */
static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
	u32 seqno;
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
check_for_race:
	seqno = vmw_fence_read(dev_priv);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
		if (fence->waiter_added) {
			vmw_seqno_waiter_remove(dev_priv);
			fence->waiter_added = false;
		}
		return false;
	} else if (!fence->waiter_added) {
		fence->waiter_added = true;
		if (vmw_seqno_waiter_add(dev_priv))
			goto check_for_race;
	}
	return true;
}

static u32 __vmw_fences_update(struct vmw_fence_manager *fman);

static const struct dma_fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.release = vmw_fence_obj_destroy,
};

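/**
 * vmw_fence_manager_init - allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns the new fence manager, or NULL on allocation failure. The
 * manager starts with the fifo marked as down, so fence creation is
 * refused until vmw_fence_fifo_up() is called.
 */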
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(!fman))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	fman->fifo_down = true;
	fman->ctx = dma_fence_context_alloc(1);

	return fman;
}

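/**
 * vmw_fence_manager_takedown - free a fence manager.
 *
 * @fman: Pointer to the fence manager.
 *
 * All fences must have signaled (the fence list must be empty) before
 * this is called.
 */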
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	bool lists_empty;

	spin_lock(&fman->lock);
	lists_empty = list_empty(&fman->fence_list);
	spin_unlock(&fman->lock);

	BUG_ON(!lists_empty);
	kfree(fman);
}

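/**
 * vmw_fence_obj_init - initialize a fence object and add it to the manager.
 *
 * @fman: Pointer to the fence manager.
 * @fence: The fence object to initialize.
 * @seqno: The device sequence number this fence waits for.
 * @destroy: Destructor called when the last fence reference is dropped.
 *
 * Returns 0 on success or -EBUSY if the fifo is down and no new fences
 * may be created.
 */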
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	int ret = 0;

	dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		       fman->ctx, seqno);
	fence->destroy = destroy;

	spin_lock(&fman->lock);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	/* This creates an implicit reference to the fence from the fence
	 * manager. It will be dropped when the fence is signaled which is
	 * expected to happen before deletion. The dtor has code to catch
	 * the rare deletion before signaling case.
	 */
	list_add_tail(&fence->head, &fman->fence_list);

out_unlock:
	spin_unlock(&fman->lock);
	return ret;
}

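/*
 * __vmw_fences_update - signal all fences whose seqno has passed.
 *
 * Must be called with the fence manager lock held. Reads the current
 * device seqno, signals and unlinks every fence on the list that it has
 * passed, publishes the seqno read as dev_priv->last_read_seqno and
 * returns it.
 */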
static u32 __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	const bool cookie = dma_fence_begin_signalling();
	const u32 seqno = vmw_fence_read(fman->dev_priv);

	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			if (fence->waiter_added) {
				vmw_seqno_waiter_remove(fman->dev_priv);
				fence->waiter_added = false;
			}
			dma_fence_signal_locked(&fence->base);
		} else
			break;
	}
	dma_fence_end_signalling(cookie);
	atomic_set_release(&fman->dev_priv->last_read_seqno, seqno);
	return seqno;
}

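/**
 * vmw_fences_update - take the fence manager lock and update fences.
 *
 * @fman: Pointer to the fence manager.
 *
 * Returns the device seqno that was read while updating.
 */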
u32 vmw_fences_update(struct vmw_fence_manager *fman)
{
	u32 seqno;

	spin_lock(&fman->lock);
	seqno = __vmw_fences_update(fman);
	spin_unlock(&fman->lock);
	return seqno;
}

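/**
 * vmw_fence_obj_signaled - check whether a fence object has signaled.
 *
 * @fence: The fence object to check.
 *
 * Updates the fence manager state from the device seqno if the fence has
 * not already been marked signaled. Returns true if the fence has signaled.
 */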
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return dma_fence_is_signaled(&fence->base);
}

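/**
 * vmw_fence_obj_wait - wait for a fence object to signal.
 *
 * @fence: The fence object to wait on.
 * @lazy: Unused by this implementation.
 * @interruptible: Whether the wait may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Returns 0 if the fence signaled, -EBUSY if the wait timed out, or a
 * negative error code from dma_fence_wait_timeout().
 */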
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	dma_fence_free(&fence->base);
}

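/**
 * vmw_fence_create - allocate a kernel fence object.
 *
 * @fman: Pointer to the fence manager.
 * @seqno: The device sequence number the fence waits for.
 * @p_fence: Assigned the new fence object on success.
 *
 * Returns 0 on success, -ENOMEM on allocation failure or -EBUSY if the
 * fifo is down.
 */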
int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(!fence))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno, vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);

	ttm_base_object_kfree(ufence, base);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

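/**
 * vmw_user_fence_create - create a fence object visible to user-space.
 *
 * @file_priv: The file the fence object is created for.
 * @fman: Pointer to the fence manager.
 * @seqno: The device sequence number the fence waits for.
 * @p_fence: Assigned the new fence object on success.
 * @p_handle: Assigned the user-space handle of the new fence on success.
 *
 * In addition to the fence itself, a ttm base object is set up so that
 * user-space can look the fence up by handle. Returns 0 on success or a
 * negative error code on failure.
 */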
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	int ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(!ufence)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);

	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release);

	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.handle;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	return ret;
}

/*
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		dma_fence_get(&fence->base);
		spin_unlock(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			dma_fence_signal(&fence->base);
		}

		BUG_ON(!list_empty(&fence->head));
		dma_fence_put(&fence->base);
		spin_lock(&fman->lock);
	}
	spin_unlock(&fman->lock);
}

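/*
 * vmw_fence_fifo_up - allow fence objects to be created again.
 */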
void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	spin_lock(&fman->lock);
	fman->fifo_down = false;
	spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
	struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

	if (!base) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return ERR_PTR(-EINVAL);
	}

	if (base->refcount_release != vmw_user_fence_base_release) {
		pr_err("Invalid fence object handle 0x%08lx.\n",
		       (unsigned long)handle);
		ttm_base_object_unref(&base);
		return ERR_PTR(-EINVAL);
	}

	return base;
}


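/**
 * vmw_fence_obj_wait_ioctl - ioctl to wait for a user-space fence object.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_wait_arg argument.
 * @file_priv: The calling file.
 *
 * Converts the user-supplied timeout in microseconds to jiffies, waits
 * for the fence to signal, and optionally drops the user-space reference
 * to the fence when DRM_VMW_WAIT_OPTION_UNREF is set and the wait
 * succeeded.
 */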
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation of a division by 1000000 using shifts:
	 * 1/2^20 + 1/2^24 - 1/2^26 = 67/2^26 ~= 1/1001625,
	 * i.e. within about 0.2% of 1/1000000.
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	  (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle);
	return ret;
}

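/**
 * vmw_fence_obj_signaled_ioctl - ioctl to query a fence object's state.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_signaled_arg argument.
 * @file_priv: The calling file.
 *
 * Reports back to user-space whether the fence has signaled and the last
 * device seqno the driver has seen pass.
 */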
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = vmw_fence_obj_lookup(tfile, arg->handle);
	if (IS_ERR(base))
		return PTR_ERR(base);

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	arg->passed_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);

	ttm_base_object_unref(&base);

	return 0;
}


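/**
 * vmw_fence_obj_unref_ioctl - ioctl to drop a user-space fence reference.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_arg argument.
 * @file_priv: The calling file.
 */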
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle);
}

/**
 * vmw_event_fence_action_seq_passed - deliver the pending DRM event.
 *
 * @f: The fence whose seqno has passed.
 * @cb: The struct dma_fence_cb embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @cb is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct dma_fence *f,
					      struct dma_fence_cb *cb)
{
	struct vmw_event_fence_action *eaction =
		container_of(cb, struct vmw_event_fence_action, base);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;

	if (unlikely(event == NULL))
		return;

	spin_lock_irq(&dev->event_lock);

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ts = ktime_to_timespec64(f->timestamp);
		/* monotonic time, so no y2038 overflow */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}

	drm_send_event_locked(dev, eaction->event);
	eaction->event = NULL;
	spin_unlock_irq(&dev->event_lock);
	dma_fence_put(f);
	kfree(eaction);
}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(!eaction))
		return -ENOMEM;

	eaction->event = event;
	eaction->dev = &fman->dev_priv->drm;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	vmw_fence_obj_reference(fence); // Dropped in CB
	if (dma_fence_add_callback(&fence->base, &eaction->base,
				   vmw_event_fence_action_seq_passed) < 0)
		vmw_event_fence_action_seq_passed(&fence->base, &eaction->base);
	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

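/**
 * vmw_event_fence_action_create - allocate a DRM event and attach it to a
 * fence.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object the event is attached to.
 * @flags: If DRM_VMW_FE_FLAG_REQ_TIME is set, the event carries the time
 * at which the fence signaled.
 * @user_data: Opaque user data returned with the event.
 * @interruptible: Interruptible waits if possible.
 *
 * Returns 0 on success or a negative error code on failure, in which case
 * the event has already been freed.
 */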
static int vmw_event_fence_action_create(struct drm_file *file_priv,
				  struct vmw_fence_obj *fence,
				  uint32_t flags,
				  uint64_t user_data,
				  bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = &fman->dev_priv->drm;
	int ret;

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(!event)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_space;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(event->event);
	event->event.user_data = user_data;

	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		kfree(event);
		goto out_no_space;
	}

	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	drm_event_cancel_free(dev, &event->base);
out_no_space:
	return ret;
}

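/**
 * vmw_fence_event_ioctl - ioctl to attach a DRM event to a fence.
 *
 * @dev: The drm device.
 * @data: The struct drm_vmw_fence_event_arg argument.
 * @file_priv: The calling file.
 *
 * Looks up the fence identified by @data->handle, or creates a new fence
 * via vmw_execbuf_fence_commands() if no handle is given, and queues a
 * DRM_VMW_EVENT_FENCE_SIGNALED event to be sent when the fence signals.
 * Optionally copies fence information back to user-space.
 */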
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct ttm_object_file *tfile = vmw_fp->tfile;
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object, and if user-space wants a
	 * new reference, add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			vmw_fence_obj_lookup(tfile, arg->handle);

		if (IS_ERR(base))
			return PTR_ERR(base);

		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 NULL, false);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->handle;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle, -1);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(tfile, handle);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}