Lines matching "no", "-", "read", "rollover" in drivers/gpu/drm/msm/msm_fence.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2013-2016 Red Hat
7 #include <linux/dma-fence.h>
15 struct msm_drm_private *priv = fctx->dev->dev_private; in fctx2gpu()
16 return priv->gpu; in fctx2gpu()
24 kthread_queue_work(fctx2gpu(fctx)->worker, &fctx->deadline_work); in deadline_timer()
35 if (msm_fence_completed(fctx, fctx->next_deadline_fence)) in deadline_work()
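The two fragments above are the deadline-boost machinery: the hrtimer callback only queues work on the GPU's kthread worker (timers fire in a context where we cannot do the heavy lifting directly), and the work item first checks whether the fence the deadline was set for has already signaled. In the full file the handler then boosts GPU frequency; a minimal sketch of that handler, assuming the driver's msm_devfreq_boost() helper:

    static void deadline_work(struct kthread_work *work)
    {
            struct msm_fence_context *fctx = container_of(work,
                            struct msm_fence_context, deadline_work);

            /* Nothing to do if the deadline fence has already signaled: */
            if (msm_fence_completed(fctx, fctx->next_deadline_fence))
                    return;

            /* Otherwise boost GPU frequency ahead of the deadline (assumed helper): */
            msm_devfreq_boost(fctx2gpu(fctx), 2);
    }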
51 return ERR_PTR(-ENOMEM); in msm_fence_context_alloc()
53 fctx->dev = dev; in msm_fence_context_alloc()
54 strscpy(fctx->name, name, sizeof(fctx->name)); in msm_fence_context_alloc()
55 fctx->context = dma_fence_context_alloc(1); in msm_fence_context_alloc()
56 fctx->index = index++; in msm_fence_context_alloc()
57 fctx->fenceptr = fenceptr; in msm_fence_context_alloc()
58 spin_lock_init(&fctx->spinlock); in msm_fence_context_alloc()
61 * Start out close to the 32b fence rollover point, so we can in msm_fence_context_alloc()
62 * catch bugs with fence comparisons. in msm_fence_context_alloc()
64 fctx->last_fence = 0xffffff00; in msm_fence_context_alloc()
65 fctx->completed_fence = fctx->last_fence; in msm_fence_context_alloc()
66 *fctx->fenceptr = fctx->last_fence; in msm_fence_context_alloc()
68 hrtimer_setup(&fctx->deadline_timer, deadline_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in msm_fence_context_alloc()
70 kthread_init_work(&fctx->deadline_work, deadline_work); in msm_fence_context_alloc()
72 fctx->next_deadline = ktime_get(); in msm_fence_context_alloc()
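Taken together, the constructor lines above allocate a fence context, record a pointer into GPU-shared memory where the completed seqno gets written, seed the counters near the rollover point, and set up the deadline timer and work item. A hedged sketch of a call site; the ring and memptrs names are illustrative, not taken from this listing:

    /* Hypothetical caller, e.g. during ringbuffer setup: */
    ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, "gpu-ring0");
    if (IS_ERR(ring->fctx))
            return PTR_ERR(ring->fctx);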
85 * Note: Check completed_fence first, as fenceptr is in a write-combine in msm_fence_completed()
86 * mapping, so it will be more expensive to read. in msm_fence_completed()
88 return (int32_t)(fctx->completed_fence - fence) >= 0 || in msm_fence_completed()
89 (int32_t)(*fctx->fenceptr - fence) >= 0; in msm_fence_completed()
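msm_fence_completed() is why last_fence is seeded at 0xffffff00: comparisons are done on the signed difference of two 32-bit seqnos, which stays correct across rollover, while a naive unsigned compare would break almost immediately after driver load with that starting value. A standalone sketch of the idiom (values are illustrative):

    /* Rollover-safe "a is at or after b" check for 32-bit seqnos: */
    static inline bool seqno_passed(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) >= 0;
    }

    /* e.g. seqno_passed(0x00000001, 0xffffff00) is true: seqno 1 was issued
     * after 0xffffff00 once the counter wrapped, even though 1 < 0xffffff00
     * as plain unsigned values. */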
97 spin_lock_irqsave(&fctx->spinlock, flags); in msm_update_fence()
98 if (fence_after(fence, fctx->completed_fence)) in msm_update_fence()
99 fctx->completed_fence = fence; in msm_update_fence()
100 if (msm_fence_completed(fctx, fctx->next_deadline_fence)) in msm_update_fence()
101 hrtimer_cancel(&fctx->deadline_timer); in msm_update_fence()
102 spin_unlock_irqrestore(&fctx->spinlock, flags); in msm_update_fence()
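msm_update_fence() is the producer side: the IRQ (or recovery) path reports the latest seqno the GPU has retired, completed_fence is advanced under the context spinlock, and the pending boost timer is cancelled once the deadline fence has signaled. A hedged sketch of a caller; reading the seqno out of shared memptrs is an assumption about the surrounding driver, not shown in this listing:

    /* Hypothetical IRQ-path caller: */
    static void ring_retire_irq(struct msm_ringbuffer *ring)
    {
            uint32_t completed = ring->memptrs->fence;  /* seqno written back by the GPU */

            msm_update_fence(ring->fctx, completed);
    }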
123 return f->fctx->name; in msm_fence_get_timeline_name()
129 return msm_fence_completed(f->fctx, f->base.seqno); in msm_fence_signaled()
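These callbacks (and msm_fence_set_deadline() below) are hooked into dma-fence through the msm_fence_ops table that msm_fence_init() passes to dma_fence_init() at the end of this listing. Based only on the callbacks visible here, plus the mandatory get_driver_name hook, the table plausibly looks like:

    static const struct dma_fence_ops msm_fence_ops = {
            .get_driver_name = msm_fence_get_driver_name,
            .get_timeline_name = msm_fence_get_timeline_name,
            .signaled = msm_fence_signaled,
            .set_deadline = msm_fence_set_deadline,
    };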
135 struct msm_fence_context *fctx = f->fctx; in msm_fence_set_deadline()
139 spin_lock_irqsave(&fctx->spinlock, flags); in msm_fence_set_deadline()
142 if (ktime_after(now, fctx->next_deadline) || in msm_fence_set_deadline()
143 ktime_before(deadline, fctx->next_deadline)) { in msm_fence_set_deadline()
144 fctx->next_deadline = deadline; in msm_fence_set_deadline()
145 fctx->next_deadline_fence = in msm_fence_set_deadline()
146 max(fctx->next_deadline_fence, (uint32_t)fence->seqno); in msm_fence_set_deadline()
156 kthread_queue_work(fctx2gpu(fctx)->worker, in msm_fence_set_deadline()
157 &fctx->deadline_work); in msm_fence_set_deadline()
159 hrtimer_start(&fctx->deadline_timer, deadline, in msm_fence_set_deadline()
160 HRTIMER_MODE_ABS); in msm_fence_set_deadline()
164 spin_unlock_irqrestore(&fctx->spinlock, flags); in msm_fence_set_deadline()
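msm_fence_set_deadline() only ever tightens the context-wide deadline: it records the earlier requested deadline, remembers the highest seqno a deadline was asked for, and either queues the boost work right away or arms the hrtimer to fire shortly before the deadline. It is not called directly; it is reached through the generic dma_fence_set_deadline() helper, e.g.:

    /* e.g. a compositor/KMS path hinting that the fence should signal in time for vblank: */
    ktime_t deadline = ktime_add_ms(ktime_get(), 16);

    dma_fence_set_deadline(fence, deadline);  /* dispatches to msm_fence_set_deadline() */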
181 return ERR_PTR(-ENOMEM); in msm_fence_alloc()
183 return &f->base; in msm_fence_alloc()
191 f->fctx = fctx; in msm_fence_init()
194 * Until this point, the fence was just some pre-allocated memory, in msm_fence_init()
195 * no-one should have taken a reference to it yet. in msm_fence_init()
197 WARN_ON(kref_read(&fence->refcount)); in msm_fence_init()
199 dma_fence_init(&f->base, &msm_fence_ops, &fctx->spinlock, in msm_fence_init()
200 fctx->context, ++fctx->last_fence); in msm_fence_init()
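msm_fence_alloc() and msm_fence_init() deliberately split allocation from initialization: memory is grabbed early, where an -ENOMEM is still easy to unwind, while the seqno is only assigned (++fctx->last_fence) once the job is actually committed to a timeline, and the WARN_ON(kref_read()) catches anyone taking a reference before that point. A hedged usage sketch; the submit structure and error handling are illustrative:

    /* Hypothetical submit path: allocate the fence early ... */
    submit->hw_fence = msm_fence_alloc();
    if (IS_ERR(submit->hw_fence))
            return PTR_ERR(submit->hw_fence);

    /* ... and bind it to the ring's timeline (assigning its seqno) only when queued: */
    msm_fence_init(submit->hw_fence, ring->fctx);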