// SPDX-License-Identifier: GPL-2.0+

#include <linux/dma-fence.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_managed.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "vkms_drv.h"

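/*
 * Hrtimer callback standing in for a hardware vblank interrupt: it rolls
 * the timer forward by one frame period, reports the vblank to the DRM
 * core and, when CRC generation is enabled, records the frame interval
 * and queues the composer worker. The body is wrapped in
 * dma_fence_begin/end_signalling() so lockdep can catch illegal waits in
 * this fence-signalling critical section.
 */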
static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
{
	struct vkms_output *output = container_of(timer, struct vkms_output,
						  vblank_hrtimer);
	struct drm_crtc *crtc = &output->crtc;
	struct vkms_crtc_state *state;
	u64 ret_overrun;
	bool ret, fence_cookie;

	fence_cookie = dma_fence_begin_signalling();

	ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
					  output->period_ns);
	if (ret_overrun != 1)
		pr_warn("%s: vblank timer overrun\n", __func__);

	spin_lock(&output->lock);
	ret = drm_crtc_handle_vblank(crtc);
	if (!ret)
		DRM_ERROR("vkms failure on handling vblank\n");

	state = output->composer_state;
	spin_unlock(&output->lock);

	if (state && output->composer_enabled) {
		u64 frame = drm_crtc_accurate_vblank_count(crtc);

		/* update frame_start only if a queued vkms_composer_worker()
		 * has read the data
		 */
		spin_lock(&output->composer_lock);
		if (!state->crc_pending)
			state->frame_start = frame;
		else
			DRM_DEBUG_DRIVER("crc worker falling behind, frame_start: %llu, frame_end: %llu\n",
					 state->frame_start, frame);
		state->frame_end = frame;
		state->crc_pending = true;
		spin_unlock(&output->composer_lock);

		ret = queue_work(output->composer_workq, &state->composer_work);
		if (!ret)
			DRM_DEBUG_DRIVER("Composer worker already queued\n");
	}

	dma_fence_end_signalling(fence_cookie);

	return HRTIMER_RESTART;
}

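/*
 * Arm the emulated vblank source. The timer period comes from the frame
 * duration the vblank core computed for the current mode
 * (vblank->framedur_ns).
 */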
static int vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	hrtimer_setup(&out->vblank_hrtimer, &vkms_vblank_simulate, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);
	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&out->vblank_hrtimer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

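/*
 * Stop the emulated vblank source. hrtimer_cancel() also waits for a
 * concurrently running vkms_vblank_simulate() to finish.
 */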
static void vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct vkms_output *out = drm_crtc_to_vkms_output(crtc);

	hrtimer_cancel(&out->vblank_hrtimer);
}

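/*
 * Report the timestamp of the most recent vblank. With vblanks disabled
 * there is no timer to read, so the current time is returned; otherwise
 * the timer's next expiry is read and corrected back by one frame (see
 * the comment in the body).
 */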
static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
				      int *max_error, ktime_t *vblank_time,
				      bool in_vblank_irq)
{
	struct vkms_output *output = drm_crtc_to_vkms_output(crtc);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);

	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

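/*
 * Duplicate the current CRTC state for an atomic commit. The work item
 * embedded in the subclassed vkms_crtc_state must be initialized for each
 * new state, since the composer work is queued per state.
 */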
static struct drm_crtc_state *
vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state;

	if (WARN_ON(!crtc->state))
		return NULL;

	vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
	if (!vkms_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);

	INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);

	return &vkms_state->base;
}

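/*
 * Free a CRTC state. By the time a state is destroyed its composer work
 * must no longer be pending; the WARN_ON below catches that case.
 */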
static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
					   struct drm_crtc_state *state)
{
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);

	__drm_atomic_helper_crtc_destroy_state(state);

	WARN_ON(work_pending(&vkms_state->composer_work));
	kfree(vkms_state->active_planes);
	kfree(vkms_state);
}

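/*
 * Reset the CRTC to a freshly allocated state. If the allocation fails,
 * &vkms_state->base evaluates to NULL (base is assumed to be the first
 * member of struct vkms_crtc_state) and __drm_atomic_helper_crtc_reset()
 * tolerates a NULL state, so only the work-item init below is skipped.
 */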
static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
{
	struct vkms_crtc_state *vkms_state =
		kzalloc(sizeof(*vkms_state), GFP_KERNEL);

	if (crtc->state)
		vkms_atomic_crtc_destroy_state(crtc, crtc->state);

	__drm_atomic_helper_crtc_reset(crtc, &vkms_state->base);
	if (vkms_state)
		INIT_WORK(&vkms_state->composer_work, vkms_composer_worker);
}

static const struct drm_crtc_funcs vkms_crtc_funcs = {
	.set_config             = drm_atomic_helper_set_config,
	.page_flip              = drm_atomic_helper_page_flip,
	.reset                  = vkms_atomic_crtc_reset,
	.atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
	.atomic_destroy_state   = vkms_atomic_crtc_destroy_state,
	.enable_vblank		= vkms_enable_vblank,
	.disable_vblank		= vkms_disable_vblank,
	.get_vblank_timestamp	= vkms_get_vblank_timestamp,
	.get_crc_sources	= vkms_get_crc_sources,
	.set_crc_source		= vkms_set_crc_source,
	.verify_crc_source	= vkms_verify_crc_source,
};

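/*
 * Cache the visible planes for the composer. The affected planes are
 * walked twice: a first pass counts the visible ones so a right-sized
 * array can be allocated, a second pass fills it with the matching
 * vkms_plane_state pointers.
 */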
static int vkms_crtc_atomic_check(struct drm_crtc *crtc,
				  struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state,
									  crtc);
	struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(crtc_state);
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	int i = 0, ret;

	if (vkms_state->active_planes)
		return 0;

	ret = drm_atomic_add_affected_planes(crtc_state->state, crtc);
	if (ret < 0)
		return ret;

	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);
		WARN_ON(!plane_state);

		if (!plane_state->visible)
			continue;

		i++;
	}

	vkms_state->active_planes = kcalloc(i, sizeof(plane), GFP_KERNEL);
	if (!vkms_state->active_planes)
		return -ENOMEM;
	vkms_state->num_active_planes = i;

	i = 0;
	drm_for_each_plane_mask(plane, crtc->dev, crtc_state->plane_mask) {
		plane_state = drm_atomic_get_existing_plane_state(crtc_state->state, plane);

		if (!plane_state->visible)
			continue;

		vkms_state->active_planes[i++] =
			to_vkms_plane_state(plane_state);
	}

	return 0;
}

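/*
 * Enabling/disabling the CRTC maps directly onto switching the emulated
 * vblank on and off; being a virtual device, there is no other hardware
 * state to program.
 */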
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
	__acquires(&vkms_output->lock)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	/* This lock is held across the atomic commit to block the vblank
	 * timer from scheduling vkms_composer_worker until the composer
	 * state is updated
	 */
	spin_lock_irq(&vkms_output->lock);
}

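/*
 * Complete the commit started in vkms_crtc_atomic_begin(): send or arm
 * the pending pageflip event, publish the new state to the vblank timer
 * through composer_state, and drop the lock taken in atomic_begin. If no
 * vblank reference can be obtained the event is sent immediately instead
 * of being armed on the next vblank.
 */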
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
	__releases(&vkms_output->lock)
{
	struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);

	if (crtc->state->event) {
		spin_lock(&crtc->dev->event_lock);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock(&crtc->dev->event_lock);

		crtc->state->event = NULL;
	}

	vkms_output->composer_state = to_vkms_crtc_state(crtc->state);

	spin_unlock_irq(&vkms_output->lock);
}

static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
	.atomic_check	= vkms_crtc_atomic_check,
	.atomic_begin	= vkms_crtc_atomic_begin,
	.atomic_flush	= vkms_crtc_atomic_flush,
	.atomic_enable	= vkms_crtc_atomic_enable,
	.atomic_disable	= vkms_crtc_atomic_disable,
};

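/*
 * Allocate and initialize the vkms output and its CRTC. Both the CRTC
 * allocation and the ordered composer workqueue are DRM-managed (drmm_*),
 * so they are released automatically with the drm_device and need no
 * explicit cleanup path here.
 */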
struct vkms_output *vkms_crtc_init(struct drm_device *dev, struct drm_plane *primary,
				   struct drm_plane *cursor)
{
	struct vkms_output *vkms_out;
	struct drm_crtc *crtc;
	int ret;

	vkms_out = drmm_crtc_alloc_with_planes(dev, struct vkms_output, crtc,
					       primary, cursor,
					       &vkms_crtc_funcs, NULL);
	if (IS_ERR(vkms_out)) {
		DRM_DEV_ERROR(dev->dev, "Failed to init CRTC\n");
		return vkms_out;
	}

	crtc = &vkms_out->crtc;

	drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);

	ret = drm_mode_crtc_set_gamma_size(crtc, VKMS_LUT_SIZE);
	if (ret) {
		DRM_ERROR("Failed to set gamma size\n");
		return ERR_PTR(ret);
	}

	drm_crtc_enable_color_mgmt(crtc, 0, false, VKMS_LUT_SIZE);

	spin_lock_init(&vkms_out->lock);
	spin_lock_init(&vkms_out->composer_lock);

	vkms_out->composer_workq = drmm_alloc_ordered_workqueue(dev, "vkms_composer", 0);
	if (IS_ERR(vkms_out->composer_workq))
		return ERR_CAST(vkms_out->composer_workq);
	if (!vkms_out->composer_workq)
		return ERR_PTR(-ENOMEM);

	return vkms_out;
}