/*
 * Copyright (C) 2013 Red Hat
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/export.h>
#include <linux/slab.h>

#include <drm/drm_flip_work.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>

struct drm_flip_task {
	struct list_head node;
	void *data;
};

static struct drm_flip_task *drm_flip_work_allocate_task(void *data, gfp_t flags)
{
	struct drm_flip_task *task;

	task = kzalloc(sizeof(*task), flags);
	if (task)
		task->data = data;

	return task;
}

static void drm_flip_work_queue_task(struct drm_flip_work *work, struct drm_flip_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&work->lock, flags);
	list_add_tail(&task->node, &work->queued);
	spin_unlock_irqrestore(&work->lock, flags);
}

/**
 * drm_flip_work_queue - queue work
 * @work: the flip-work
 * @val: the value to queue
 *
 * Queues work that will later be run (passed back to the drm_flip_func_t
 * func) on a workqueue after drm_flip_work_commit() is called.
 */
void drm_flip_work_queue(struct drm_flip_work *work, void *val)
{
	struct drm_flip_task *task;

	task = drm_flip_work_allocate_task(val,
				drm_can_sleep() ? GFP_KERNEL : GFP_ATOMIC);
	if (task) {
		drm_flip_work_queue_task(work, task);
	} else {
		DRM_ERROR("%s could not allocate task!\n", work->name);
		work->func(work, val);
	}
}
EXPORT_SYMBOL(drm_flip_work_queue);
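
/*
 * Usage sketch (not part of this file): a driver might queue the outgoing
 * framebuffer for a deferred unref once a page flip has been programmed.
 * The struct foo_crtc, its unref_fb_work member and old_fb below are
 * hypothetical driver-side names used only for illustration:
 *
 *	static void foo_program_flip(struct foo_crtc *foo_crtc,
 *				     struct drm_framebuffer *old_fb)
 *	{
 *		// safe from atomic context: allocation falls back to GFP_ATOMIC
 *		drm_flip_work_queue(&foo_crtc->unref_fb_work, old_fb);
 *	}
 */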

/**
 * drm_flip_work_commit - commit queued work
 * @work: the flip-work
 * @wq: the workqueue to run the queued work on
 *
 * Triggers work previously queued by drm_flip_work_queue() to run
 * on a workqueue.  Typical usage is to queue work (via
 * drm_flip_work_queue()) at any point (from the vblank irq or
 * earlier), and then commit the queued work from the vblank irq.
 */
void drm_flip_work_commit(struct drm_flip_work *work,
		struct workqueue_struct *wq)
{
	unsigned long flags;

	spin_lock_irqsave(&work->lock, flags);
	list_splice_tail(&work->queued, &work->commited);
	INIT_LIST_HEAD(&work->queued);
	spin_unlock_irqrestore(&work->lock, flags);
	queue_work(wq, &work->worker);
}
EXPORT_SYMBOL(drm_flip_work_commit);
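
/*
 * Usage sketch: committing from the vblank interrupt hands everything queued
 * so far to the workqueue.  foo_crtc, its unref_fb_work member and priv->wq
 * are hypothetical driver-side names used only for illustration:
 *
 *	static void foo_vblank_irq(struct foo_crtc *foo_crtc)
 *	{
 *		struct foo_private *priv = foo_crtc->priv;
 *
 *		// run the func callback for every task queued before this vblank
 *		drm_flip_work_commit(&foo_crtc->unref_fb_work, priv->wq);
 *	}
 */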

/*
 * Workqueue callback: repeatedly splice the commited list onto a private list
 * under the lock, then run work->func on each task outside the lock, until no
 * further tasks have been committed.
 */
static void flip_worker(struct work_struct *w)
{
	struct drm_flip_work *work = container_of(w, struct drm_flip_work, worker);
	struct list_head tasks;
	unsigned long flags;

	while (1) {
		struct drm_flip_task *task, *tmp;

		INIT_LIST_HEAD(&tasks);
		spin_lock_irqsave(&work->lock, flags);
		list_splice_tail(&work->commited, &tasks);
		INIT_LIST_HEAD(&work->commited);
		spin_unlock_irqrestore(&work->lock, flags);

		if (list_empty(&tasks))
			break;

		list_for_each_entry_safe(task, tmp, &tasks, node) {
			work->func(work, task->data);
			kfree(task);
		}
	}
}

/**
 * drm_flip_work_init - initialize flip-work
 * @work: the flip-work to initialize
 * @name: debug name
 * @func: the callback work function
 *
 * Initializes/allocates resources for the flip-work
 */
void drm_flip_work_init(struct drm_flip_work *work,
		const char *name, drm_flip_func_t func)
{
	work->name = name;
	INIT_LIST_HEAD(&work->queued);
	INIT_LIST_HEAD(&work->commited);
	spin_lock_init(&work->lock);
	work->func = func;

	INIT_WORK(&work->worker, flip_worker);
}
EXPORT_SYMBOL(drm_flip_work_init);
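
/*
 * Usage sketch: a driver typically embeds a struct drm_flip_work and pairs it
 * with a drm_flip_func_t callback.  unref_fb_worker and foo_crtc below are
 * hypothetical names used only for illustration:
 *
 *	static void unref_fb_worker(struct drm_flip_work *work, void *val)
 *	{
 *		// val is whatever pointer was passed to drm_flip_work_queue()
 *		drm_framebuffer_put(val);
 *	}
 *
 *	drm_flip_work_init(&foo_crtc->unref_fb_work, "unref fb", unref_fb_worker);
 */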

/**
 * drm_flip_work_cleanup - cleans up flip-work
 * @work: the flip-work to cleanup
 *
 * Destroy resources allocated for the flip-work
 */
void drm_flip_work_cleanup(struct drm_flip_work *work)
{
	WARN_ON(!list_empty(&work->queued) || !list_empty(&work->commited));
}
EXPORT_SYMBOL(drm_flip_work_cleanup);
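
/*
 * Usage sketch: before cleanup, ensure all committed work has actually run,
 * for example by flushing the workqueue it was committed to.  priv->wq and
 * foo_crtc are hypothetical driver-side names used only for illustration:
 *
 *	flush_workqueue(priv->wq);
 *	drm_flip_work_cleanup(&foo_crtc->unref_fb_work);
 */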