Lines Matching +full:wakeup +full:- +full:event +full:- +full:action

1 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 * Copyright 2011-2014 VMware, Inc., Palo Alto, CA., USA
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
59 * struct vmw_event_fence_action - fence action that delivers a drm event.
61 * @event: A struct drm_pending_event that controls the event delivery.
62 * @action: A struct vmw_fence_action to hook up to a fence.
63 * @fence: A referenced pointer to the fence to keep it alive while @action
65 * @dev: Pointer to a struct drm_device so we can access the event stuff.
66 * @kref: Both @event and @action have destructors, so we need to refcount.
68 * @tv_sec: If non-null, the variable pointed to will be assigned
74 struct vmw_fence_action action; member
76 struct drm_pending_event *event; member
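For orientation, a sketch of how the documented fields and the two member lines above fit together. This is reconstructed from the fragments, so the field order and any members not shown here (for example the kref the comment mentions) are assumptions:

	struct vmw_event_fence_action {
		struct vmw_fence_action action;		/* hooked up to the fence */

		struct drm_pending_event *event;	/* the drm event to deliver */
		struct vmw_fence_obj *fence;		/* referenced to stay alive with @action */
		struct drm_device *dev;			/* for the drm event machinery */

		uint32_t *tv_sec;			/* optional timestamp destinations */
		uint32_t *tv_usec;
	};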
87 return container_of(fence->base.lock, struct vmw_fence_manager, lock); in fman_from_fence()
95 * b) On-demand when we have waiters. Sleeping waiters will switch on the
105 * which has an action attached, and each time vmw_fences_update is called,
119 spin_lock(&fman->lock); in vmw_fence_obj_destroy()
120 list_del_init(&fence->head); in vmw_fence_obj_destroy()
121 --fman->num_fence_objects; in vmw_fence_obj_destroy()
122 spin_unlock(&fman->lock); in vmw_fence_obj_destroy()
123 fence->destroy(fence); in vmw_fence_obj_destroy()
142 struct vmw_private *dev_priv = fman->dev_priv; in vmw_fence_enable_signaling()
144 u32 *fifo_mem = dev_priv->mmio_virt; in vmw_fence_enable_signaling()
146 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) in vmw_fence_enable_signaling()
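The comparison above is the driver's wrap-safe "has this seqno passed?" test. A minimal standalone demonstration of the same unsigned arithmetic (the VMW_FENCE_WRAP value below is assumed purely for illustration):

	#include <stdint.h>
	#include <stdio.h>

	#define VMW_FENCE_WRAP (1u << 24)	/* assumed value, for illustration only */

	/* True when fence_seqno is at or behind current_seqno, even across a wrap. */
	static int seqno_passed(uint32_t current_seqno, uint32_t fence_seqno)
	{
		return current_seqno - fence_seqno < VMW_FENCE_WRAP;
	}

	int main(void)
	{
		printf("%d\n", seqno_passed(10, 5));		/* 1: already passed */
		printf("%d\n", seqno_passed(5, 10));		/* 0: still pending */
		printf("%d\n", seqno_passed(3, 0xfffffff0u));	/* 1: passed across the 32-bit wrap */
		return 0;
	}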
165 wake_up_process(wait->task); in vmwgfx_wait_cb()
176 struct vmw_private *dev_priv = fman->dev_priv; in vmw_fence_wait()
186 spin_lock(f->lock); in vmw_fence_wait()
188 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) in vmw_fence_wait()
192 ret = -ERESTARTSYS; in vmw_fence_wait()
198 list_add(&cb.base.node, &f->cb_list); in vmw_fence_wait()
205 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the in vmw_fence_wait()
213 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) { in vmw_fence_wait()
220 ret = -ERESTARTSYS; in vmw_fence_wait()
227 spin_unlock(f->lock); in vmw_fence_wait()
231 spin_lock(f->lock); in vmw_fence_wait()
238 spin_unlock(f->lock); in vmw_fence_wait()
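A sketch of the callback used by the custom wait above, reconstructed from the fragments: the dma_fence callback records which task is sleeping and simply wakes it, while the waiter re-checks DMA_FENCE_FLAG_SIGNALED_BIT after each wakeup. The struct layout here is assumed, not copied verbatim from the driver:

	struct vmwgfx_wait_cb {
		struct dma_fence_cb base;	/* base.node is what goes on f->cb_list */
		struct task_struct *task;	/* the task sleeping in vmw_fence_wait() */
	};

	static void vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
	{
		struct vmwgfx_wait_cb *wait =
			container_of(cb, struct vmwgfx_wait_cb, base);

		wake_up_process(wait->task);
	}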
265 struct vmw_fence_action *action, *next_action; in vmw_fence_work_func() local
270 mutex_lock(&fman->goal_irq_mutex); in vmw_fence_work_func()
272 spin_lock(&fman->lock); in vmw_fence_work_func()
273 list_splice_init(&fman->cleanup_list, &list); in vmw_fence_work_func()
274 seqno_valid = fman->seqno_valid; in vmw_fence_work_func()
275 spin_unlock(&fman->lock); in vmw_fence_work_func()
277 if (!seqno_valid && fman->goal_irq_on) { in vmw_fence_work_func()
278 fman->goal_irq_on = false; in vmw_fence_work_func()
279 vmw_goal_waiter_remove(fman->dev_priv); in vmw_fence_work_func()
281 mutex_unlock(&fman->goal_irq_mutex); in vmw_fence_work_func()
292 list_for_each_entry_safe(action, next_action, &list, head) { in vmw_fence_work_func()
293 list_del_init(&action->head); in vmw_fence_work_func()
294 if (action->cleanup) in vmw_fence_work_func()
295 action->cleanup(action); in vmw_fence_work_func()
307 fman->dev_priv = dev_priv; in vmw_fence_manager_init()
308 spin_lock_init(&fman->lock); in vmw_fence_manager_init()
309 INIT_LIST_HEAD(&fman->fence_list); in vmw_fence_manager_init()
310 INIT_LIST_HEAD(&fman->cleanup_list); in vmw_fence_manager_init()
311 INIT_WORK(&fman->work, &vmw_fence_work_func); in vmw_fence_manager_init()
312 fman->fifo_down = true; in vmw_fence_manager_init()
313 fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence)) + in vmw_fence_manager_init()
315 fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj)); in vmw_fence_manager_init()
316 fman->event_fence_action_size = in vmw_fence_manager_init()
318 mutex_init(&fman->goal_irq_mutex); in vmw_fence_manager_init()
319 fman->ctx = dma_fence_context_alloc(1); in vmw_fence_manager_init()
328 (void) cancel_work_sync(&fman->work); in vmw_fence_manager_takedown()
330 spin_lock(&fman->lock); in vmw_fence_manager_takedown()
331 lists_empty = list_empty(&fman->fence_list) && in vmw_fence_manager_takedown()
332 list_empty(&fman->cleanup_list); in vmw_fence_manager_takedown()
333 spin_unlock(&fman->lock); in vmw_fence_manager_takedown()
345 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, in vmw_fence_obj_init()
346 fman->ctx, seqno); in vmw_fence_obj_init()
347 INIT_LIST_HEAD(&fence->seq_passed_actions); in vmw_fence_obj_init()
348 fence->destroy = destroy; in vmw_fence_obj_init()
350 spin_lock(&fman->lock); in vmw_fence_obj_init()
351 if (unlikely(fman->fifo_down)) { in vmw_fence_obj_init()
352 ret = -EBUSY; in vmw_fence_obj_init()
355 list_add_tail(&fence->head, &fman->fence_list); in vmw_fence_obj_init()
356 ++fman->num_fence_objects; in vmw_fence_obj_init()
359 spin_unlock(&fman->lock); in vmw_fence_obj_init()
367 struct vmw_fence_action *action, *next_action; in vmw_fences_perform_actions() local
369 list_for_each_entry_safe(action, next_action, list, head) { in vmw_fences_perform_actions()
370 list_del_init(&action->head); in vmw_fences_perform_actions()
371 fman->pending_actions[action->type]--; in vmw_fences_perform_actions()
372 if (action->seq_passed != NULL) in vmw_fences_perform_actions()
373 action->seq_passed(action); in vmw_fences_perform_actions()
376 * Add the cleanup action to the cleanup list so that in vmw_fences_perform_actions()
380 list_add_tail(&action->head, &fman->cleanup_list); in vmw_fences_perform_actions()
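The loop above drives a small callback interface. A sketch of what struct vmw_fence_action must provide, inferred from the calls in these fragments (the exact definition lives in the driver's header and is assumed here):

	struct vmw_fence_action {
		struct list_head head;		/* on fence->seq_passed_actions, then fman->cleanup_list */
		enum vmw_action_type type;	/* indexes fman->pending_actions[] */
		void (*seq_passed)(struct vmw_fence_action *action);	/* called once the seqno passes */
		void (*cleanup)(struct vmw_fence_action *action);	/* deferred to the cleanup worker */
	};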
385 * vmw_fence_goal_new_locked - Figure out a new device fence goal
396 * action attached, and sets the seqno of that fence as a new fence goal.
407 if (likely(!fman->seqno_valid)) in vmw_fence_goal_new_locked()
410 fifo_mem = fman->dev_priv->mmio_virt; in vmw_fence_goal_new_locked()
412 if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP)) in vmw_fence_goal_new_locked()
415 fman->seqno_valid = false; in vmw_fence_goal_new_locked()
416 list_for_each_entry(fence, &fman->fence_list, head) { in vmw_fence_goal_new_locked()
417 if (!list_empty(&fence->seq_passed_actions)) { in vmw_fence_goal_new_locked()
418 fman->seqno_valid = true; in vmw_fence_goal_new_locked()
419 vmw_mmio_write(fence->base.seqno, in vmw_fence_goal_new_locked()
430 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
437 * It is typically called when an action has been attached to a fence to
450 if (dma_fence_is_signaled_locked(&fence->base)) in vmw_fence_goal_check_locked()
453 fifo_mem = fman->dev_priv->mmio_virt; in vmw_fence_goal_check_locked()
455 if (likely(fman->seqno_valid && in vmw_fence_goal_check_locked()
456 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP)) in vmw_fence_goal_check_locked()
459 vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL); in vmw_fence_goal_check_locked()
460 fman->seqno_valid = true; in vmw_fence_goal_check_locked()
471 u32 *fifo_mem = fman->dev_priv->mmio_virt; in __vmw_fences_update()
475 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { in __vmw_fences_update()
476 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { in __vmw_fences_update()
477 list_del_init(&fence->head); in __vmw_fences_update()
478 dma_fence_signal_locked(&fence->base); in __vmw_fences_update()
480 list_splice_init(&fence->seq_passed_actions, in __vmw_fences_update()
502 if (!list_empty(&fman->cleanup_list)) in __vmw_fences_update()
503 (void) schedule_work(&fman->work); in __vmw_fences_update()
508 spin_lock(&fman->lock); in vmw_fences_update()
510 spin_unlock(&fman->lock); in vmw_fences_update()
517 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) in vmw_fence_obj_signaled()
522 return dma_fence_is_signaled(&fence->base); in vmw_fence_obj_signaled()
528 long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); in vmw_fence_obj_wait()
533 return -EBUSY; in vmw_fence_obj_wait()
540 struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv; in vmw_fence_obj_flush()
547 dma_fence_free(&fence->base); in vmw_fence_destroy()
559 return -ENOMEM; in vmw_fence_create()
585 ttm_mem_global_free(vmw_mem_glob(fman->dev_priv), in vmw_user_fence_destroy()
586 fman->user_fence_size); in vmw_user_fence_destroy()
594 struct vmw_fence_obj *fence = &ufence->fence; in vmw_user_fence_base_release()
606 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_user_fence_create()
609 struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv); in vmw_user_fence_create()
618 * be created by a user-space request. in vmw_user_fence_create()
621 ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size, in vmw_user_fence_create()
628 ret = -ENOMEM; in vmw_user_fence_create()
632 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno, in vmw_user_fence_create()
643 tmp = vmw_fence_obj_reference(&ufence->fence); in vmw_user_fence_create()
644 ret = ttm_base_object_init(tfile, &ufence->base, false, in vmw_user_fence_create()
657 *p_fence = &ufence->fence; in vmw_user_fence_create()
658 *p_handle = ufence->base.handle; in vmw_user_fence_create()
662 tmp = &ufence->fence; in vmw_user_fence_create()
665 ttm_mem_global_free(mem_glob, fman->user_fence_size); in vmw_user_fence_create()
671 * vmw_wait_dma_fence - Wait for a dma fence
693 /* From i915: Note that if the fence-array was created in in vmw_wait_dma_fence()
694 * signal-on-any mode, we should *not* decompose it into its individual in vmw_wait_dma_fence()
695 * fences. However, we don't currently store which mode the fence-array in vmw_wait_dma_fence()
696 * is operating in. Fortunately, the only user of signal-on-any is in vmw_wait_dma_fence()
697 * private to amdgpu and we should not see any incoming fence-array in vmw_wait_dma_fence()
698 * from sync-file being in signal-on-any mode. in vmw_wait_dma_fence()
702 for (i = 0; i < fence_array->num_fences; i++) { in vmw_wait_dma_fence()
703 struct dma_fence *child = fence_array->fences[i]; in vmw_wait_dma_fence()
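Condensed from the loop these fragments belong to: every child of the dma_fence_array is waited on in turn, with the remaining timeout carried from one child to the next. The helper name below is invented; error handling and the signal-on-any caveat from the comment above still apply:

	static signed long wait_fence_array_children(struct dma_fence_array *array,
						     bool intr, signed long timeout)
	{
		unsigned int i;

		for (i = 0; i < array->num_fences; i++) {
			timeout = dma_fence_wait_timeout(array->fences[i], intr, timeout);
			if (timeout <= 0)
				break;	/* timed out or got an error for this child */
		}
		return timeout;
	}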
716 * vmw_fence_fifo_down - signal all unsignaled fence objects.
726 * restart when we've released the fman->lock. in vmw_fence_fifo_down()
729 spin_lock(&fman->lock); in vmw_fence_fifo_down()
730 fman->fifo_down = true; in vmw_fence_fifo_down()
731 while (!list_empty(&fman->fence_list)) { in vmw_fence_fifo_down()
733 list_entry(fman->fence_list.prev, struct vmw_fence_obj, in vmw_fence_fifo_down()
735 dma_fence_get(&fence->base); in vmw_fence_fifo_down()
736 spin_unlock(&fman->lock); in vmw_fence_fifo_down()
742 list_del_init(&fence->head); in vmw_fence_fifo_down()
743 dma_fence_signal(&fence->base); in vmw_fence_fifo_down()
745 list_splice_init(&fence->seq_passed_actions, in vmw_fence_fifo_down()
750 BUG_ON(!list_empty(&fence->head)); in vmw_fence_fifo_down()
751 dma_fence_put(&fence->base); in vmw_fence_fifo_down()
752 spin_lock(&fman->lock); in vmw_fence_fifo_down()
754 spin_unlock(&fman->lock); in vmw_fence_fifo_down()
759 spin_lock(&fman->lock); in vmw_fence_fifo_up()
760 fman->fifo_down = false; in vmw_fence_fifo_up()
761 spin_unlock(&fman->lock); in vmw_fence_fifo_up()
766 * vmw_fence_obj_lookup - Look up a user-space fence object
773 * The fence object is looked up and type-checked. The caller needs
786 return ERR_PTR(-EINVAL); in vmw_fence_obj_lookup()
789 if (base->refcount_release != vmw_user_fence_base_release) { in vmw_fence_obj_lookup()
793 return ERR_PTR(-EINVAL); in vmw_fence_obj_lookup()
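The two -EINVAL returns above bracket the lookup's type check. A condensed sketch of that sequence (the real function also logs an error on each failure path):

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return ERR_PTR(-EINVAL);	/* no object with that handle */

	if (base->refcount_release != vmw_user_fence_base_release) {
		ttm_base_object_unref(&base);	/* right handle, wrong object type */
		return ERR_PTR(-EINVAL);
	}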
808 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_fence_obj_wait_ioctl()
810 uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ); in vmw_fence_obj_wait_ioctl()
813 * 64-bit division not present on 32-bit systems, so do an in vmw_fence_obj_wait_ioctl()
817 wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) - in vmw_fence_obj_wait_ioctl()
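The shifts above approximate a divide by 1000000 (microseconds times HZ converted to jiffies) without a 64-bit division; the expression continues with a subtracted correction term on the next source line, which is not part of this match. A standalone demonstration of the idea, since 1/2^20 + 1/2^24 ≈ 1.013e-6:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t timeout_us = 2000000;	/* 2 seconds worth of microseconds */
		uint64_t hz = 250;		/* example HZ value, assumed */
		uint64_t x = timeout_us * hz;

		uint64_t approx = (x >> 20) + (x >> 24);	/* two of the driver's terms */
		uint64_t exact = x / 1000000;

		printf("exact=%llu jiffies, approx=%llu jiffies\n",
		       (unsigned long long)exact, (unsigned long long)approx);
		return 0;
	}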
820 if (!arg->cookie_valid) { in vmw_fence_obj_wait_ioctl()
821 arg->cookie_valid = 1; in vmw_fence_obj_wait_ioctl()
822 arg->kernel_cookie = jiffies + wait_timeout; in vmw_fence_obj_wait_ioctl()
825 base = vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_obj_wait_ioctl()
829 fence = &(container_of(base, struct vmw_user_fence, base)->fence); in vmw_fence_obj_wait_ioctl()
832 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) { in vmw_fence_obj_wait_ioctl()
834 0 : -EBUSY); in vmw_fence_obj_wait_ioctl()
838 timeout = (unsigned long)arg->kernel_cookie - timeout; in vmw_fence_obj_wait_ioctl()
840 ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout); in vmw_fence_obj_wait_ioctl()
849 if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) in vmw_fence_obj_wait_ioctl()
850 return ttm_ref_object_base_unref(tfile, arg->handle, in vmw_fence_obj_wait_ioctl()
863 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_fence_obj_signaled_ioctl()
866 base = vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_obj_signaled_ioctl()
870 fence = &(container_of(base, struct vmw_user_fence, base)->fence); in vmw_fence_obj_signaled_ioctl()
873 arg->signaled = vmw_fence_obj_signaled(fence); in vmw_fence_obj_signaled_ioctl()
875 arg->signaled_flags = arg->flags; in vmw_fence_obj_signaled_ioctl()
876 spin_lock(&fman->lock); in vmw_fence_obj_signaled_ioctl()
877 arg->passed_seqno = dev_priv->last_read_seqno; in vmw_fence_obj_signaled_ioctl()
878 spin_unlock(&fman->lock); in vmw_fence_obj_signaled_ioctl()
892 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, in vmw_fence_obj_unref_ioctl()
893 arg->handle, in vmw_fence_obj_unref_ioctl()
900 * @action: The struct vmw_fence_action embedded in a struct
903 * This function is called when the seqno of the fence to which @action is
904 * attached has passed. It queues the event on the submitter's event list.
907 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) in vmw_event_fence_action_seq_passed() argument
910 container_of(action, struct vmw_event_fence_action, action); in vmw_event_fence_action_seq_passed()
911 struct drm_device *dev = eaction->dev; in vmw_event_fence_action_seq_passed()
912 struct drm_pending_event *event = eaction->event; in vmw_event_fence_action_seq_passed() local
914 if (unlikely(event == NULL)) in vmw_event_fence_action_seq_passed()
917 spin_lock_irq(&dev->event_lock); in vmw_event_fence_action_seq_passed()
919 if (likely(eaction->tv_sec != NULL)) { in vmw_event_fence_action_seq_passed()
924 *eaction->tv_sec = ts.tv_sec; in vmw_event_fence_action_seq_passed()
925 *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC; in vmw_event_fence_action_seq_passed()
928 drm_send_event_locked(dev, eaction->event); in vmw_event_fence_action_seq_passed()
929 eaction->event = NULL; in vmw_event_fence_action_seq_passed()
930 spin_unlock_irq(&dev->event_lock); in vmw_event_fence_action_seq_passed()
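The timestamp lines above have their setup elided by the match. A sketch of the block they come from: a monotonic timestamp is taken under dev->event_lock and copied into the locations the submitter asked for (the exact ktime helper the driver uses is assumed here):

	if (likely(eaction->tv_sec != NULL)) {
		struct timespec64 ts;

		ktime_get_ts64(&ts);		/* monotonic time, assumed helper */
		*eaction->tv_sec = ts.tv_sec;
		*eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	}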
936 * @action: The struct vmw_fence_action embedded in a struct
942 static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) in vmw_event_fence_action_cleanup() argument
945 container_of(action, struct vmw_event_fence_action, action); in vmw_event_fence_action_cleanup()
947 vmw_fence_obj_unreference(&eaction->fence); in vmw_event_fence_action_cleanup()
953 * vmw_fence_obj_add_action - Add an action to a fence object.
955 * @fence: The fence object.
956 * @action: The action to add.
958 * Note that the action callbacks may be executed before this function
962 struct vmw_fence_action *action) in vmw_fence_obj_add_action() argument
967 mutex_lock(&fman->goal_irq_mutex); in vmw_fence_obj_add_action()
968 spin_lock(&fman->lock); in vmw_fence_obj_add_action()
970 fman->pending_actions[action->type]++; in vmw_fence_obj_add_action()
971 if (dma_fence_is_signaled_locked(&fence->base)) { in vmw_fence_obj_add_action()
975 list_add_tail(&action->head, &action_list); in vmw_fence_obj_add_action()
978 list_add_tail(&action->head, &fence->seq_passed_actions); in vmw_fence_obj_add_action()
987 spin_unlock(&fman->lock); in vmw_fence_obj_add_action()
990 if (!fman->goal_irq_on) { in vmw_fence_obj_add_action()
991 fman->goal_irq_on = true; in vmw_fence_obj_add_action()
992 vmw_goal_waiter_add(fman->dev_priv); in vmw_fence_obj_add_action()
996 mutex_unlock(&fman->goal_irq_mutex); in vmw_fence_obj_add_action()
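The note above ("action callbacks may be executed before this function...") matters to callers: if the fence is already signaled, seq_passed runs synchronously from vmw_fence_obj_add_action() while fman->lock is held. A purely hypothetical action (names invented for illustration; a real one also needs a type and cleanup hook) showing why its state must be fully initialized before it is added:

	struct my_wait_action {
		struct vmw_fence_action action;
		struct completion done;		/* init_completion() before add_action */
	};

	static void my_wait_seq_passed(struct vmw_fence_action *action)
	{
		struct my_wait_action *wa =
			container_of(action, struct my_wait_action, action);

		complete(&wa->done);		/* can fire before add_action returns */
	}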
1001 * vmw_event_fence_action_queue - Post an event for sending when a fence
1004 * @file_priv: The file connection on which the event should be posted.
1005 * @fence: The fence object on which to post the event.
1006 * @event: Event to be posted. This event should have been allocated
1010 * As a side effect, the object pointed to by @event may have been
1017 struct drm_pending_event *event, in vmw_event_fence_action_queue() argument
1027 return -ENOMEM; in vmw_event_fence_action_queue()
1029 eaction->event = event; in vmw_event_fence_action_queue()
1031 eaction->action.seq_passed = vmw_event_fence_action_seq_passed; in vmw_event_fence_action_queue()
1032 eaction->action.cleanup = vmw_event_fence_action_cleanup; in vmw_event_fence_action_queue()
1033 eaction->action.type = VMW_ACTION_EVENT; in vmw_event_fence_action_queue()
1035 eaction->fence = vmw_fence_obj_reference(fence); in vmw_event_fence_action_queue()
1036 eaction->dev = fman->dev_priv->dev; in vmw_event_fence_action_queue()
1037 eaction->tv_sec = tv_sec; in vmw_event_fence_action_queue()
1038 eaction->tv_usec = tv_usec; in vmw_event_fence_action_queue()
1040 vmw_fence_obj_add_action(fence, &eaction->action); in vmw_event_fence_action_queue()
1047 struct drm_vmw_event_fence event; member
1056 struct vmw_event_fence_pending *event; in vmw_event_fence_action_create() local
1058 struct drm_device *dev = fman->dev_priv->dev; in vmw_event_fence_action_create()
1061 event = kzalloc(sizeof(*event), GFP_KERNEL); in vmw_event_fence_action_create()
1062 if (unlikely(!event)) { in vmw_event_fence_action_create()
1063 DRM_ERROR("Failed to allocate an event.\n"); in vmw_event_fence_action_create()
1064 ret = -ENOMEM; in vmw_event_fence_action_create()
1068 event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; in vmw_event_fence_action_create()
1069 event->event.base.length = sizeof(*event); in vmw_event_fence_action_create()
1070 event->event.user_data = user_data; in vmw_event_fence_action_create()
1072 ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); in vmw_event_fence_action_create()
1075 DRM_ERROR("Failed to allocate event space for this file.\n"); in vmw_event_fence_action_create()
1076 kfree(event); in vmw_event_fence_action_create()
1082 &event->base, in vmw_event_fence_action_create()
1083 &event->event.tv_sec, in vmw_event_fence_action_create()
1084 &event->event.tv_usec, in vmw_event_fence_action_create()
1088 &event->base, in vmw_event_fence_action_create()
1098 drm_event_cancel_free(dev, &event->base); in vmw_event_fence_action_create()
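Taken together, these fragments follow the usual drm_pending_event lifecycle. A condensed recap using the same DRM core calls visible above (surrounding context such as the kfree on the error path is implied by the fragments):

	/* 1. Reserve space on the file's event queue; fails if it is exhausted. */
	ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);
	if (unlikely(ret != 0))
		goto out_free;			/* kfree(event) and bail out */

	/* 2. Deliver later, from the fence action's seq_passed callback,
	 *    under dev->event_lock (see vmw_event_fence_action_seq_passed above). */
	drm_send_event_locked(dev, eaction->event);

	/* 3. If attaching the action fails, give the reservation back. */
	drm_event_cancel_free(dev, &event->base);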
1111 struct ttm_object_file *tfile = vmw_fp->tfile; in vmw_fence_event_ioctl()
1114 arg->fence_rep; in vmw_fence_event_ioctl()
1120 * and if user-space wants a new reference, in vmw_fence_event_ioctl()
1123 if (arg->handle) { in vmw_fence_event_ioctl()
1125 vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_event_ioctl()
1131 base)->fence); in vmw_fence_event_ioctl()
1135 ret = ttm_ref_object_add(vmw_fp->tfile, base, in vmw_fence_event_ioctl()
1142 handle = base->handle; in vmw_fence_event_ioctl()
1156 DRM_ERROR("Fence event failed to create fence.\n"); in vmw_fence_event_ioctl()
1164 arg->flags, in vmw_fence_event_ioctl()
1165 arg->user_data, in vmw_fence_event_ioctl()
1168 if (ret != -ERESTARTSYS) in vmw_fence_event_ioctl()
1169 DRM_ERROR("Failed to attach event to fence.\n"); in vmw_fence_event_ioctl()
1174 handle, -1, NULL); in vmw_fence_event_ioctl()