// SPDX-License-Identifier: MIT

#include <uapi/linux/sched/types.h>

#include <linux/export.h>

#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
#include <drm/drm_vblank_work.h>
#include <drm/drm_crtc.h>

#include "drm_internal.h"

/**
 * DOC: vblank works
 *
 * Many DRM drivers need to program hardware in a time-sensitive manner, often
 * with a deadline of starting and finishing within a certain region of the
 * scanout. Most of the time the safest way to accomplish this is to simply do
 * said time-sensitive programming in the driver's IRQ handler, which allows
 * drivers to avoid being preempted during these critical regions. Or better
 * yet, the hardware may even handle applying such time-critical programming
 * independently of the CPU.
 *
 * While there's a decent amount of hardware that's designed so that the CPU
 * doesn't need to be concerned with extremely time-sensitive programming,
 * there are a few situations where it can't be helped. Some unforgiving
 * hardware may require that certain time-sensitive programming be handled
 * completely by the CPU, and said programming may even take too long to
 * handle in an IRQ handler. Another such situation would be where the driver
 * needs to perform a task that must complete within a specific scanout
 * period, but might possibly block and thus cannot be handled in an IRQ
 * context. Neither of these situations can be solved perfectly in Linux since
 * we're not a realtime kernel, and thus the scheduler may cause us to miss
 * our deadline if it decides to preempt us. But for some drivers, it's good
 * enough if we can lower our chance of being preempted to an absolute
 * minimum.
 *
 * This is where &drm_vblank_work comes in. &drm_vblank_work provides a simple
 * generic delayed work implementation which delays work execution until a
 * particular vblank has passed, and then executes the work at realtime
 * priority. This provides the best possible chance at performing
 * time-sensitive hardware programming on time, even when the system is under
 * heavy load. &drm_vblank_work also supports rescheduling, so that self
 * re-arming work items can be easily implemented.
 */

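/*
 * Example usage (illustrative sketch: the example_* names and the driver
 * structure below are placeholders, not a real driver):
 *
 *	struct example_state {
 *		struct drm_vblank_work flip_work;
 *	};
 *
 *	static void example_flip_work(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work = to_drm_vblank_work(base);
 *
 *		// Runs on the CRTC's realtime worker thread once the target
 *		// vblank has passed; unlike an IRQ handler, it may block.
 *	}
 *
 *	// Once, during driver initialization:
 *	drm_vblank_work_init(&state->flip_work, crtc, example_flip_work);
 *
 *	// Schedule against the next vblank, deferring one further vblank
 *	// if that target has already been missed:
 *	drm_vblank_work_schedule(&state->flip_work,
 *				 drm_crtc_vblank_count(crtc) + 1, true);
 */
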
/*
 * Runs with dev->event_lock held (asserted below): queue every pending work
 * item whose target vblank count has passed onto the CRTC's worker, and drop
 * the vblank reference that was taken when the work was scheduled.
 */
void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;
	u64 count = atomic64_read(&vblank->count);
	bool wake = false;

	assert_spin_locked(&vblank->dev->event_lock);

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		if (!drm_vblank_passed(count, work->count))
			continue;

		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		kthread_queue_work(vblank->worker, &work->base);
		wake = true;
	}
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
}

/* Handle cancelling any pending vblank work items and drop respective vblank
 * references in response to vblank interrupts being disabled.
 */
void drm_vblank_cancel_pending_works(struct drm_vblank_crtc *vblank)
{
	struct drm_vblank_work *work, *next;

	assert_spin_locked(&vblank->dev->event_lock);

	drm_WARN_ONCE(vblank->dev, !list_empty(&vblank->pending_work),
		      "Cancelling pending vblank works!\n");

	list_for_each_entry_safe(work, next, &vblank->pending_work, node) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
	}

	wake_up_all(&vblank->work_wait_queue);
}

/**
 * drm_vblank_work_schedule - schedule a vblank work
 * @work: vblank work to schedule
 * @count: target vblank count
 * @nextonmiss: defer until the next vblank if target vblank was missed
 *
 * Schedule @work for execution once the crtc vblank count reaches @count.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %false the work starts to execute immediately.
 *
 * If the crtc vblank count has already reached @count and @nextonmiss is
 * %true the work is deferred until the next vblank (as if @count had been
 * specified as crtc vblank count + 1).
 *
 * If @work is already scheduled, this function will reschedule said work
 * using the new @count. This can be used for self-rearming work items.
 *
 * Returns:
 * %1 if @work was successfully (re)scheduled, %0 if it was either already
 * scheduled or cancelled, or a negative error code on failure.
 */
int drm_vblank_work_schedule(struct drm_vblank_work *work,
			     u64 count, bool nextonmiss)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	u64 cur_vbl;
	unsigned long irqflags;
	bool passed, inmodeset, rescheduling = false, wake = false;
	int ret = 0;

	spin_lock_irqsave(&dev->event_lock, irqflags);
	if (work->cancelling)
		goto out;

	spin_lock(&dev->vbl_lock);
	inmodeset = vblank->inmodeset;
	spin_unlock(&dev->vbl_lock);
	if (inmodeset)
		goto out;

	if (list_empty(&work->node)) {
		ret = drm_vblank_get(dev, vblank->pipe);
		if (ret < 0)
			goto out;
	} else if (work->count == count) {
		/* Already scheduled w/ same vbl count */
		goto out;
	} else {
		rescheduling = true;
	}

	work->count = count;
	cur_vbl = drm_vblank_count(dev, vblank->pipe);
	passed = drm_vblank_passed(cur_vbl, count);
	if (passed)
		drm_dbg_core(dev,
			     "crtc %d vblank %llu already passed (current %llu)\n",
			     vblank->pipe, count, cur_vbl);

	if (!nextonmiss && passed) {
		drm_vblank_put(dev, vblank->pipe);
		ret = kthread_queue_work(vblank->worker, &work->base);

		if (rescheduling) {
			list_del_init(&work->node);
			wake = true;
		}
	} else {
		if (!rescheduling)
			list_add_tail(&work->node, &vblank->pending_work);
		ret = true;
	}

out:
	spin_unlock_irqrestore(&dev->event_lock, irqflags);
	if (wake)
		wake_up_all(&vblank->work_wait_queue);
	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_schedule);

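/*
 * Example of a self-rearming work item (illustrative sketch; example_done()
 * is a placeholder for a driver-specific condition):
 *
 *	static void example_rearming_work(struct kthread_work *base)
 *	{
 *		struct drm_vblank_work *work = to_drm_vblank_work(base);
 *
 *		// ... time-sensitive hardware programming ...
 *
 *		// Re-arm for the following vblank until we're done.
 *		if (!example_done())
 *			drm_vblank_work_schedule(work, work->count + 1, true);
 *	}
 */
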
/**
 * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to
 * finish executing
 * @work: vblank work to cancel
 *
 * Cancel an already scheduled vblank work and wait for its
 * execution to finish.
 *
 * On return, @work is guaranteed to no longer be scheduled or running, even
 * if it's self-arming.
 *
 * Returns:
 * %true if the work was cancelled before it started to execute, %false
 * otherwise.
 */
bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;
	bool ret = false;

	spin_lock_irq(&dev->event_lock);
	if (!list_empty(&work->node)) {
		list_del_init(&work->node);
		drm_vblank_put(vblank->dev, vblank->pipe);
		ret = true;
	}

	work->cancelling++;
	spin_unlock_irq(&dev->event_lock);

	wake_up_all(&vblank->work_wait_queue);

	if (kthread_cancel_work_sync(&work->base))
		ret = true;

	spin_lock_irq(&dev->event_lock);
	work->cancelling--;
	spin_unlock_irq(&dev->event_lock);

	return ret;
}
EXPORT_SYMBOL(drm_vblank_work_cancel_sync);

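/*
 * Example teardown (illustrative sketch, continuing the example above): a
 * driver would typically cancel outstanding work in its CRTC disable path,
 * before the resources the work touches go away.
 *
 *	// e.g. in a hypothetical example_crtc_disable() callback:
 *	cancelled = drm_vblank_work_cancel_sync(&state->flip_work);
 *	// On return the work is neither scheduled nor running, even if it
 *	// re-arms itself; cancelled is false if it had started executing.
 */
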
/**
 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
 * executing
 * @work: vblank work to flush
 *
 * Wait until @work has finished executing once.
 */
void drm_vblank_work_flush(struct drm_vblank_work *work)
{
	struct drm_vblank_crtc *vblank = work->vblank;
	struct drm_device *dev = vblank->dev;

	spin_lock_irq(&dev->event_lock);
	wait_event_lock_irq(vblank->work_wait_queue, list_empty(&work->node),
			    dev->event_lock);
	spin_unlock_irq(&dev->event_lock);

	kthread_flush_work(&work->base);
}
EXPORT_SYMBOL(drm_vblank_work_flush);

/**
 * drm_vblank_work_flush_all - flush all currently pending vblank work on crtc.
 * @crtc: crtc whose pending vblank work is to be flushed
 *
 * Wait until all currently queued vblank work on @crtc
 * has finished executing once.
 */
void drm_vblank_work_flush_all(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc)];

	spin_lock_irq(&dev->event_lock);
	wait_event_lock_irq(vblank->work_wait_queue,
			    list_empty(&vblank->pending_work),
			    dev->event_lock);
	spin_unlock_irq(&dev->event_lock);

	kthread_flush_worker(vblank->worker);
}
EXPORT_SYMBOL(drm_vblank_work_flush_all);

/**
 * drm_vblank_work_init - initialize a vblank work item
 * @work: vblank work item
 * @crtc: CRTC whose vblank will trigger the work execution
 * @func: work function to be executed
 *
 * Initialize a vblank work item for a specific crtc.
 */
void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
			  void (*func)(struct kthread_work *work))
{
	kthread_init_work(&work->base, func);
	INIT_LIST_HEAD(&work->node);
	work->vblank = drm_crtc_vblank_crtc(crtc);
}
EXPORT_SYMBOL(drm_vblank_work_init);

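/*
 * Create the per-CRTC vblank worker: a dedicated kthread_worker whose task
 * runs with SCHED_FIFO priority (via sched_set_fifo()), giving queued vblank
 * work the realtime scheduling behaviour described in the DOC section above.
 */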
int drm_vblank_worker_init(struct drm_vblank_crtc *vblank)
{
	struct kthread_worker *worker;

	INIT_LIST_HEAD(&vblank->pending_work);
	init_waitqueue_head(&vblank->work_wait_queue);
	worker = kthread_run_worker(0, "card%d-crtc%d",
				    vblank->dev->primary->index,
				    vblank->pipe);
	if (IS_ERR(worker))
		return PTR_ERR(worker);

	vblank->worker = worker;

	sched_set_fifo(worker->task);
	return 0;
}
293