Lines Matching +full:wakeup +full:- +full:event +full:- +full:action

1 // SPDX-License-Identifier: GPL-2.0 OR MIT
4 * Copyright 2011-2023 VMware, Inc., Palo Alto, CA., USA
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
56 * struct vmw_event_fence_action - fence action that delivers a drm event.
58 * @action: A struct vmw_fence_action to hook up to a fence.
59 * @event: A pointer to the pending event.
60 * @fence: A referenced pointer to the fence to keep it alive while @action
62 * @dev: Pointer to a struct drm_device so we can access the event stuff.
63 * @tv_sec: If non-null, the variable pointed to will be assigned
69 struct vmw_fence_action action; member
71 struct drm_pending_event *event; member
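The kernel-doc fragment above describes the drm-event delivery machinery: a struct vmw_event_fence_action embeds the generic struct vmw_fence_action and is recovered from it with container_of() when the action fires (the same recovery appears verbatim in vmw_event_fence_action_seq_passed() further down in this listing). A minimal sketch of that embed-and-recover pattern; the field layout is inferred from the doc comment and member lines above, and example_seq_passed() is an illustrative name rather than the driver's:

/* Sketch only: layout inferred from the kernel-doc above, not copied from the file. */
struct vmw_event_fence_action {
        struct vmw_fence_action action;   /* hooked up to the fence */
        struct drm_pending_event *event;  /* the pending drm event to deliver */
        struct vmw_fence_obj *fence;      /* keeps the fence alive while the action is queued */
        struct drm_device *dev;           /* needed for the drm event machinery */
        uint32_t *tv_sec;                 /* if non-NULL, receives the signal time */
        uint32_t *tv_usec;
};

/* The seq_passed callback only receives the embedded action and recovers the
 * containing object before delivering the event. */
static void example_seq_passed(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);

        /* ... send eaction->event on eaction->dev's event list ... */
}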
82 return container_of(fence->base.lock, struct vmw_fence_manager, lock); in fman_from_fence()
87 if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0) in vmw_fence_goal_read()
95 if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0) in vmw_fence_goal_write()
106 * b) On-demand when we have waiters. Sleeping waiters will switch on the
116 * which has an action attached, and each time vmw_fences_update is called,
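These two comment fragments come from the block that explains when the device's "fence goal" interrupt is enabled: permanently while some fence has an action attached, and on demand while there are sleeping waiters. The actual on/off toggle is scattered across the matched lines below (vmw_fence_obj_add_action() switches it on, the deferred work function switches it off once no valid goal remains); a condensed, hedged sketch of that toggle, where the example_* wrappers are illustrative only:

/* Condensed from the add_action / work_func fragments elsewhere in this
 * listing; the wrapping functions are not the driver's. */
static void example_goal_irq_on(struct vmw_fence_manager *fman)
{
        mutex_lock(&fman->goal_irq_mutex);
        if (!fman->goal_irq_on) {
                fman->goal_irq_on = true;
                vmw_goal_waiter_add(fman->dev_priv);     /* enable the goal irq */
        }
        mutex_unlock(&fman->goal_irq_mutex);
}

static void example_goal_irq_off_if_idle(struct vmw_fence_manager *fman,
                                         bool seqno_valid)
{
        mutex_lock(&fman->goal_irq_mutex);
        if (!seqno_valid && fman->goal_irq_on) {
                fman->goal_irq_on = false;
                vmw_goal_waiter_remove(fman->dev_priv);  /* disable the goal irq */
        }
        mutex_unlock(&fman->goal_irq_mutex);
}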
130 spin_lock(&fman->lock); in vmw_fence_obj_destroy()
131 list_del_init(&fence->head); in vmw_fence_obj_destroy()
132 --fman->num_fence_objects; in vmw_fence_obj_destroy()
133 spin_unlock(&fman->lock); in vmw_fence_obj_destroy()
134 fence->destroy(fence); in vmw_fence_obj_destroy()
153 struct vmw_private *dev_priv = fman->dev_priv; in vmw_fence_enable_signaling()
156 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) in vmw_fence_enable_signaling()
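The comparison above is the wrap-safe seqno test used throughout this file (it reappears in the fence-goal helpers and in __vmw_fences_update()): with 32-bit unsigned sequence numbers, the unsigned difference stays meaningful across the 2^32 rollover. A hedged illustration; example_seqno_passed() is not a driver function and the value of VMW_FENCE_WRAP is not among the matched lines:

static bool example_seqno_passed(u32 current_seqno, u32 fence_seqno)
{
        /*
         * E.g. current_seqno = 0x00000005 (the counter has wrapped past 0)
         * and fence_seqno = 0xfffffffe (emitted just before the wrap):
         * the unsigned difference is 0x00000007, a small positive distance,
         * so the fence correctly counts as passed even though a plain
         * "current >= fence" comparison would claim it has not.
         */
        return current_seqno - fence_seqno < VMW_FENCE_WRAP;
}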
173 wake_up_process(wait->task); in vmwgfx_wait_cb()
184 struct vmw_private *dev_priv = fman->dev_priv; in vmw_fence_wait()
193 spin_lock(f->lock); in vmw_fence_wait()
195 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) in vmw_fence_wait()
199 ret = -ERESTARTSYS; in vmw_fence_wait()
205 list_add(&cb.base.node, &f->cb_list); in vmw_fence_wait()
212 * DMA_FENCE_FLAG_SIGNALED_BIT + wakeup is protected by the in vmw_fence_wait()
220 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags)) { in vmw_fence_wait()
227 ret = -ERESTARTSYS; in vmw_fence_wait()
234 spin_unlock(f->lock); in vmw_fence_wait()
238 spin_lock(f->lock); in vmw_fence_wait()
245 spin_unlock(f->lock); in vmw_fence_wait()
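The vmw_fence_wait() fragments above implement the driver's custom dma_fence wait: a dma_fence_cb whose only job is to wake the sleeping task is queued on f->cb_list, and the task then sleeps in a schedule_timeout() loop under the usual set_current_state() protocol, dropping f->lock around each sleep. A condensed, simplified sketch of that shape, not the verbatim function; the callback-struct layout is inferred from the wake_up_process(wait->task) fragment, and the example_* names are illustrative:

struct example_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void example_wake_waiter(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct example_wait_cb *wait =
                container_of(cb, struct example_wait_cb, base);

        wake_up_process(wait->task);        /* matches the fragment above */
}

static long example_wait_loop(struct dma_fence *f, bool intr, long timeout)
{
        struct example_wait_cb cb;
        long ret = timeout;

        cb.base.func = example_wake_waiter;
        cb.task = current;

        spin_lock(f->lock);
        list_add(&cb.base.node, &f->cb_list);

        for (;;) {
                /* SIGNALED_BIT and the wakeup are both protected by f->lock,
                 * so the barrier-free __set_current_state() is sufficient. */
                __set_current_state(intr ? TASK_INTERRUPTIBLE
                                         : TASK_UNINTERRUPTIBLE);

                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                        break;

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }

                if (ret == 0)               /* timeout exhausted */
                        break;

                spin_unlock(f->lock);
                ret = schedule_timeout(ret);
                spin_lock(f->lock);
        }
        __set_current_state(TASK_RUNNING);

        if (!list_empty(&cb.base.node))     /* signaling removes it otherwise */
                list_del(&cb.base.node);
        spin_unlock(f->lock);

        return ret;
}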
272 struct vmw_fence_action *action, *next_action; in vmw_fence_work_func() local
277 mutex_lock(&fman->goal_irq_mutex); in vmw_fence_work_func()
279 spin_lock(&fman->lock); in vmw_fence_work_func()
280 list_splice_init(&fman->cleanup_list, &list); in vmw_fence_work_func()
281 seqno_valid = fman->seqno_valid; in vmw_fence_work_func()
282 spin_unlock(&fman->lock); in vmw_fence_work_func()
284 if (!seqno_valid && fman->goal_irq_on) { in vmw_fence_work_func()
285 fman->goal_irq_on = false; in vmw_fence_work_func()
286 vmw_goal_waiter_remove(fman->dev_priv); in vmw_fence_work_func()
288 mutex_unlock(&fman->goal_irq_mutex); in vmw_fence_work_func()
299 list_for_each_entry_safe(action, next_action, &list, head) { in vmw_fence_work_func()
300 list_del_init(&action->head); in vmw_fence_work_func()
301 if (action->cleanup) in vmw_fence_work_func()
302 action->cleanup(action); in vmw_fence_work_func()
314 fman->dev_priv = dev_priv; in vmw_fence_manager_init()
315 spin_lock_init(&fman->lock); in vmw_fence_manager_init()
316 INIT_LIST_HEAD(&fman->fence_list); in vmw_fence_manager_init()
317 INIT_LIST_HEAD(&fman->cleanup_list); in vmw_fence_manager_init()
318 INIT_WORK(&fman->work, &vmw_fence_work_func); in vmw_fence_manager_init()
319 fman->fifo_down = true; in vmw_fence_manager_init()
320 mutex_init(&fman->goal_irq_mutex); in vmw_fence_manager_init()
321 fman->ctx = dma_fence_context_alloc(1); in vmw_fence_manager_init()
330 (void) cancel_work_sync(&fman->work); in vmw_fence_manager_takedown()
332 spin_lock(&fman->lock); in vmw_fence_manager_takedown()
333 lists_empty = list_empty(&fman->fence_list) && in vmw_fence_manager_takedown()
334 list_empty(&fman->cleanup_list); in vmw_fence_manager_takedown()
335 spin_unlock(&fman->lock); in vmw_fence_manager_takedown()
347 dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock, in vmw_fence_obj_init()
348 fman->ctx, seqno); in vmw_fence_obj_init()
349 INIT_LIST_HEAD(&fence->seq_passed_actions); in vmw_fence_obj_init()
350 fence->destroy = destroy; in vmw_fence_obj_init()
352 spin_lock(&fman->lock); in vmw_fence_obj_init()
353 if (unlikely(fman->fifo_down)) { in vmw_fence_obj_init()
354 ret = -EBUSY; in vmw_fence_obj_init()
357 list_add_tail(&fence->head, &fman->fence_list); in vmw_fence_obj_init()
358 ++fman->num_fence_objects; in vmw_fence_obj_init()
361 spin_unlock(&fman->lock); in vmw_fence_obj_init()
369 struct vmw_fence_action *action, *next_action; in vmw_fences_perform_actions() local
371 list_for_each_entry_safe(action, next_action, list, head) { in vmw_fences_perform_actions()
372 list_del_init(&action->head); in vmw_fences_perform_actions()
373 fman->pending_actions[action->type]--; in vmw_fences_perform_actions()
374 if (action->seq_passed != NULL) in vmw_fences_perform_actions()
375 action->seq_passed(action); in vmw_fences_perform_actions()
378 * Add the cleanup action to the cleanup list so that in vmw_fences_perform_actions()
382 list_add_tail(&action->head, &fman->cleanup_list); in vmw_fences_perform_actions()
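The comment fragment above explains why actions are not destroyed on the spot: seq_passed runs under fman->lock, so any cleanup that might sleep is parked on fman->cleanup_list and executed later from the fence manager's work item (the vmw_fence_work_func() fragments earlier in this listing). A hedged sketch of that deferred side; example_cleanup_work() is an illustrative wrapper, not the driver's function:

static void example_cleanup_work(struct vmw_fence_manager *fman)
{
        struct list_head list;
        struct vmw_fence_action *action, *next_action;

        INIT_LIST_HEAD(&list);

        spin_lock(&fman->lock);
        list_splice_init(&fman->cleanup_list, &list);   /* grab pending cleanups */
        spin_unlock(&fman->lock);

        /* Process context: the cleanup callbacks are free to sleep here. */
        list_for_each_entry_safe(action, next_action, &list, head) {
                list_del_init(&action->head);
                if (action->cleanup)
                        action->cleanup(action);
        }
}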
387 * vmw_fence_goal_new_locked - Figure out a new device fence goal
398 * action attached, and sets the seqno of that fence as a new fence goal.
408 if (likely(!fman->seqno_valid)) in vmw_fence_goal_new_locked()
411 goal_seqno = vmw_fence_goal_read(fman->dev_priv); in vmw_fence_goal_new_locked()
412 if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP)) in vmw_fence_goal_new_locked()
415 fman->seqno_valid = false; in vmw_fence_goal_new_locked()
416 list_for_each_entry(fence, &fman->fence_list, head) { in vmw_fence_goal_new_locked()
417 if (!list_empty(&fence->seq_passed_actions)) { in vmw_fence_goal_new_locked()
418 fman->seqno_valid = true; in vmw_fence_goal_new_locked()
419 vmw_fence_goal_write(fman->dev_priv, in vmw_fence_goal_new_locked()
420 fence->base.seqno); in vmw_fence_goal_new_locked()
430 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
437 * It is typically called when an action has been attached to a fence to
449 if (dma_fence_is_signaled_locked(&fence->base)) in vmw_fence_goal_check_locked()
452 goal_seqno = vmw_fence_goal_read(fman->dev_priv); in vmw_fence_goal_check_locked()
453 if (likely(fman->seqno_valid && in vmw_fence_goal_check_locked()
454 goal_seqno - fence->base.seqno < VMW_FENCE_WRAP)) in vmw_fence_goal_check_locked()
457 vmw_fence_goal_write(fman->dev_priv, fence->base.seqno); in vmw_fence_goal_check_locked()
458 fman->seqno_valid = true; in vmw_fence_goal_check_locked()
470 seqno = vmw_fence_read(fman->dev_priv); in __vmw_fences_update()
472 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { in __vmw_fences_update()
473 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { in __vmw_fences_update()
474 list_del_init(&fence->head); in __vmw_fences_update()
475 dma_fence_signal_locked(&fence->base); in __vmw_fences_update()
477 list_splice_init(&fence->seq_passed_actions, in __vmw_fences_update()
492 new_seqno = vmw_fence_read(fman->dev_priv); in __vmw_fences_update()
499 if (!list_empty(&fman->cleanup_list)) in __vmw_fences_update()
500 (void) schedule_work(&fman->work); in __vmw_fences_update()
505 spin_lock(&fman->lock); in vmw_fences_update()
507 spin_unlock(&fman->lock); in vmw_fences_update()
514 if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) in vmw_fence_obj_signaled()
519 return dma_fence_is_signaled(&fence->base); in vmw_fence_obj_signaled()
525 long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout); in vmw_fence_obj_wait()
530 return -EBUSY; in vmw_fence_obj_wait()
537 dma_fence_free(&fence->base); in vmw_fence_destroy()
549 return -ENOMEM; in vmw_fence_create()
578 struct vmw_fence_obj *fence = &ufence->fence; in vmw_user_fence_base_release()
590 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_user_fence_create()
597 ret = -ENOMEM; in vmw_user_fence_create()
601 ret = vmw_fence_obj_init(fman, &ufence->fence, seqno, in vmw_user_fence_create()
612 tmp = vmw_fence_obj_reference(&ufence->fence); in vmw_user_fence_create()
614 ret = ttm_base_object_init(tfile, &ufence->base, false, in vmw_user_fence_create()
627 *p_fence = &ufence->fence; in vmw_user_fence_create()
628 *p_handle = ufence->base.handle; in vmw_user_fence_create()
632 tmp = &ufence->fence; in vmw_user_fence_create()
639 * vmw_fence_fifo_down - signal all unsignaled fence objects.
649 * restart when we've released the fman->lock. in vmw_fence_fifo_down()
652 spin_lock(&fman->lock); in vmw_fence_fifo_down()
653 fman->fifo_down = true; in vmw_fence_fifo_down()
654 while (!list_empty(&fman->fence_list)) { in vmw_fence_fifo_down()
656 list_entry(fman->fence_list.prev, struct vmw_fence_obj, in vmw_fence_fifo_down()
658 dma_fence_get(&fence->base); in vmw_fence_fifo_down()
659 spin_unlock(&fman->lock); in vmw_fence_fifo_down()
665 list_del_init(&fence->head); in vmw_fence_fifo_down()
666 dma_fence_signal(&fence->base); in vmw_fence_fifo_down()
668 list_splice_init(&fence->seq_passed_actions, in vmw_fence_fifo_down()
673 BUG_ON(!list_empty(&fence->head)); in vmw_fence_fifo_down()
674 dma_fence_put(&fence->base); in vmw_fence_fifo_down()
675 spin_lock(&fman->lock); in vmw_fence_fifo_down()
677 spin_unlock(&fman->lock); in vmw_fence_fifo_down()
682 spin_lock(&fman->lock); in vmw_fence_fifo_up()
683 fman->fifo_down = false; in vmw_fence_fifo_up()
684 spin_unlock(&fman->lock); in vmw_fence_fifo_up()
689 * vmw_fence_obj_lookup - Look up a user-space fence object
696 * The fence object is looked up and type-checked. The caller needs
709 return ERR_PTR(-EINVAL); in vmw_fence_obj_lookup()
712 if (base->refcount_release != vmw_user_fence_base_release) { in vmw_fence_obj_lookup()
716 return ERR_PTR(-EINVAL); in vmw_fence_obj_lookup()
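vmw_fence_obj_lookup() above type-checks the looked-up ttm base object by comparing its release function pointer. The ioctl fragments below all consume the result the same way; a hedged sketch of that caller-side pattern, where example_use_fence_handle() is illustrative and the closing ttm_base_object_unref() is assumed (a standard TTM call, but not among the matched lines):

static int example_use_fence_handle(struct ttm_object_file *tfile, u32 handle)
{
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;

        base = vmw_fence_obj_lookup(tfile, handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        /* The user fence embeds the base object; container_of() recovers it. */
        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        /* ... wait on or query the fence ... */

        ttm_base_object_unref(&base);       /* assumed drop of the lookup reference */
        return 0;
}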
731 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_fence_obj_wait_ioctl()
733 uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ); in vmw_fence_obj_wait_ioctl()
736 * 64-bit division not present on 32-bit systems, so do an in vmw_fence_obj_wait_ioctl()
740 wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) - in vmw_fence_obj_wait_ioctl()
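The truncated shift expression above implements the approximation the comment announces: it divides the 64-bit microseconds-times-HZ value by 1,000,000 (to get jiffies) without a 64-bit division. As a rough check of the two visible terms: 1/2^20 + 1/2^24 = 17/2^24 ≈ 1.0133e-6, slightly above the exact 1e-6, and the subtracted term that the listing cuts off pulls the approximation back down toward 1e-6.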
743 if (!arg->cookie_valid) { in vmw_fence_obj_wait_ioctl()
744 arg->cookie_valid = 1; in vmw_fence_obj_wait_ioctl()
745 arg->kernel_cookie = jiffies + wait_timeout; in vmw_fence_obj_wait_ioctl()
748 base = vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_obj_wait_ioctl()
752 fence = &(container_of(base, struct vmw_user_fence, base)->fence); in vmw_fence_obj_wait_ioctl()
755 if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) { in vmw_fence_obj_wait_ioctl()
757 0 : -EBUSY); in vmw_fence_obj_wait_ioctl()
761 timeout = (unsigned long)arg->kernel_cookie - timeout; in vmw_fence_obj_wait_ioctl()
763 ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout); in vmw_fence_obj_wait_ioctl()
772 if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF)) in vmw_fence_obj_wait_ioctl()
773 return ttm_ref_object_base_unref(tfile, arg->handle); in vmw_fence_obj_wait_ioctl()
785 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; in vmw_fence_obj_signaled_ioctl()
788 base = vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_obj_signaled_ioctl()
792 fence = &(container_of(base, struct vmw_user_fence, base)->fence); in vmw_fence_obj_signaled_ioctl()
795 arg->signaled = vmw_fence_obj_signaled(fence); in vmw_fence_obj_signaled_ioctl()
797 arg->signaled_flags = arg->flags; in vmw_fence_obj_signaled_ioctl()
798 spin_lock(&fman->lock); in vmw_fence_obj_signaled_ioctl()
799 arg->passed_seqno = dev_priv->last_read_seqno; in vmw_fence_obj_signaled_ioctl()
800 spin_unlock(&fman->lock); in vmw_fence_obj_signaled_ioctl()
814 return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile, in vmw_fence_obj_unref_ioctl()
815 arg->handle); in vmw_fence_obj_unref_ioctl()
821 * @action: The struct vmw_fence_action embedded in a struct
824 * This function is called when the seqno of the fence where @action is
825 * attached has passed. It queues the event on the submitter's event list.
828 static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action) in vmw_event_fence_action_seq_passed() argument
831 container_of(action, struct vmw_event_fence_action, action); in vmw_event_fence_action_seq_passed()
832 struct drm_device *dev = eaction->dev; in vmw_event_fence_action_seq_passed()
833 struct drm_pending_event *event = eaction->event; in vmw_event_fence_action_seq_passed() local
835 if (unlikely(event == NULL)) in vmw_event_fence_action_seq_passed()
838 spin_lock_irq(&dev->event_lock); in vmw_event_fence_action_seq_passed()
840 if (likely(eaction->tv_sec != NULL)) { in vmw_event_fence_action_seq_passed()
845 *eaction->tv_sec = ts.tv_sec; in vmw_event_fence_action_seq_passed()
846 *eaction->tv_usec = ts.tv_nsec / NSEC_PER_USEC; in vmw_event_fence_action_seq_passed()
849 drm_send_event_locked(dev, eaction->event); in vmw_event_fence_action_seq_passed()
850 eaction->event = NULL; in vmw_event_fence_action_seq_passed()
851 spin_unlock_irq(&dev->event_lock); in vmw_event_fence_action_seq_passed()
857 * @action: The struct vmw_fence_action embedded in a struct
863 static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action) in vmw_event_fence_action_cleanup() argument
866 container_of(action, struct vmw_event_fence_action, action); in vmw_event_fence_action_cleanup()
868 vmw_fence_obj_unreference(&eaction->fence); in vmw_event_fence_action_cleanup()
874 * vmw_fence_obj_add_action - Add an action to a fence object.
877 * @action: The action to add.
879 * Note that the action callbacks may be executed before this function
883 struct vmw_fence_action *action) in vmw_fence_obj_add_action() argument
888 mutex_lock(&fman->goal_irq_mutex); in vmw_fence_obj_add_action()
889 spin_lock(&fman->lock); in vmw_fence_obj_add_action()
891 fman->pending_actions[action->type]++; in vmw_fence_obj_add_action()
892 if (dma_fence_is_signaled_locked(&fence->base)) { in vmw_fence_obj_add_action()
896 list_add_tail(&action->head, &action_list); in vmw_fence_obj_add_action()
899 list_add_tail(&action->head, &fence->seq_passed_actions); in vmw_fence_obj_add_action()
908 spin_unlock(&fman->lock); in vmw_fence_obj_add_action()
911 if (!fman->goal_irq_on) { in vmw_fence_obj_add_action()
912 fman->goal_irq_on = true; in vmw_fence_obj_add_action()
913 vmw_goal_waiter_add(fman->dev_priv); in vmw_fence_obj_add_action()
917 mutex_unlock(&fman->goal_irq_mutex); in vmw_fence_obj_add_action()
922 * vmw_event_fence_action_queue - Post an event for sending when a fence
925 * @file_priv: The file connection on which the event should be posted.
926 * @fence: The fence object on which to post the event.
927 * @event: Event to be posted. This event should've been alloced
929 * @tv_sec: If non-null, the variable pointed to will be assigned
935 * As a side effect, the object pointed to by @event may have been
942 struct drm_pending_event *event, in vmw_event_fence_action_queue() argument
952 return -ENOMEM; in vmw_event_fence_action_queue()
954 eaction->event = event; in vmw_event_fence_action_queue()
956 eaction->action.seq_passed = vmw_event_fence_action_seq_passed; in vmw_event_fence_action_queue()
957 eaction->action.cleanup = vmw_event_fence_action_cleanup; in vmw_event_fence_action_queue()
958 eaction->action.type = VMW_ACTION_EVENT; in vmw_event_fence_action_queue()
960 eaction->fence = vmw_fence_obj_reference(fence); in vmw_event_fence_action_queue()
961 eaction->dev = &fman->dev_priv->drm; in vmw_event_fence_action_queue()
962 eaction->tv_sec = tv_sec; in vmw_event_fence_action_queue()
963 eaction->tv_usec = tv_usec; in vmw_event_fence_action_queue()
965 vmw_fence_obj_add_action(fence, &eaction->action); in vmw_event_fence_action_queue()
972 struct drm_vmw_event_fence event; member
981 struct vmw_event_fence_pending *event; in vmw_event_fence_action_create() local
983 struct drm_device *dev = &fman->dev_priv->drm; in vmw_event_fence_action_create()
986 event = kzalloc(sizeof(*event), GFP_KERNEL); in vmw_event_fence_action_create()
987 if (unlikely(!event)) { in vmw_event_fence_action_create()
988 DRM_ERROR("Failed to allocate an event.\n"); in vmw_event_fence_action_create()
989 ret = -ENOMEM; in vmw_event_fence_action_create()
993 event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED; in vmw_event_fence_action_create()
994 event->event.base.length = sizeof(*event); in vmw_event_fence_action_create()
995 event->event.user_data = user_data; in vmw_event_fence_action_create()
997 ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base); in vmw_event_fence_action_create()
1000 DRM_ERROR("Failed to allocate event space for this file.\n"); in vmw_event_fence_action_create()
1001 kfree(event); in vmw_event_fence_action_create()
1007 &event->base, in vmw_event_fence_action_create()
1008 &event->event.tv_sec, in vmw_event_fence_action_create()
1009 &event->event.tv_usec, in vmw_event_fence_action_create()
1013 &event->base, in vmw_event_fence_action_create()
1023 drm_event_cancel_free(dev, &event->base); in vmw_event_fence_action_create()
1036 struct ttm_object_file *tfile = vmw_fp->tfile; in vmw_fence_event_ioctl()
1039 arg->fence_rep; in vmw_fence_event_ioctl()
1045 * and if user-space wants a new reference, in vmw_fence_event_ioctl()
1048 if (arg->handle) { in vmw_fence_event_ioctl()
1050 vmw_fence_obj_lookup(tfile, arg->handle); in vmw_fence_event_ioctl()
1056 base)->fence); in vmw_fence_event_ioctl()
1060 ret = ttm_ref_object_add(vmw_fp->tfile, base, in vmw_fence_event_ioctl()
1067 handle = base->handle; in vmw_fence_event_ioctl()
1081 DRM_ERROR("Fence event failed to create fence.\n"); in vmw_fence_event_ioctl()
1089 arg->flags, in vmw_fence_event_ioctl()
1090 arg->user_data, in vmw_fence_event_ioctl()
1093 if (ret != -ERESTARTSYS) in vmw_fence_event_ioctl()
1094 DRM_ERROR("Failed to attach event to fence.\n"); in vmw_fence_event_ioctl()
1099 handle, -1); in vmw_fence_event_ioctl()