xref: /linux/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c (revision 260f6f4fda93c8485c8037865c941b42b9cba5d2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <drm/drm_crtc.h>
#include <drm/drm_flip_work.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "mdp4_kms.h"
#include "msm_gem.h"

struct mdp4_crtc {
	struct drm_crtc base;
	char name[8];
	int ovlp;
	enum mdp4_dma dma;
	bool enabled;

	/* which mixer/encoder we route output to: */
	int mixer;

	struct {
		spinlock_t lock;
		bool stale;
		uint32_t width, height;
		uint32_t x, y;

		/* next cursor to scan-out: */
		uint32_t next_iova;
		struct drm_gem_object *next_bo;

		/* current cursor being scanned out: */
		struct drm_gem_object *scanout_bo;
	} cursor;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;

	/* Bits that were flushed at the last commit; used to decide
	 * whether a vsync has happened since the last commit.
	 */
	u32 flushed_mask;

#define PENDING_CURSOR 0x1
#define PENDING_FLIP   0x2
	atomic_t pending;

	/* for unref'ing cursor bo's after scanout completes: */
	struct drm_flip_work unref_cursor_work;

	struct mdp_irq vblank;
	struct mdp_irq err;
};
#define to_mdp4_crtc(x) container_of(x, struct mdp4_crtc, base)

static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
{
	struct msm_drm_private *priv = crtc->dev->dev_private;
	return to_mdp4_kms(to_mdp_kms(priv->kms));
}

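/* Record pending work (flip and/or cursor update) and register for the
 * vblank irq, so the work gets picked up in mdp4_crtc_vblank_irq() at
 * the next vblank.
 */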
static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);

	atomic_or(pending, &mdp4_crtc->pending);
	mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
}

75 
crtc_flush(struct drm_crtc * crtc)76 static void crtc_flush(struct drm_crtc *crtc)
77 {
78 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
79 	struct mdp4_kms *mdp4_kms = get_kms(crtc);
80 	struct drm_plane *plane;
81 	uint32_t flush = 0;
82 
83 	drm_atomic_crtc_for_each_plane(plane, crtc) {
84 		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
85 		flush |= pipe2flush(pipe_id);
86 	}
87 
88 	flush |= ovlp2flush(mdp4_crtc->ovlp);
89 
90 	DBG("%s: flush=%08x", mdp4_crtc->name, flush);
91 
92 	mdp4_crtc->flushed_mask = flush;
93 
94 	mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
95 }
96 
97 /* if file!=NULL, this is preclose potential cancel-flip path */
complete_flip(struct drm_crtc * crtc,struct drm_file * file)98 static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
99 {
100 	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
101 	struct drm_device *dev = crtc->dev;
102 	struct drm_pending_vblank_event *event;
103 	unsigned long flags;
104 
105 	spin_lock_irqsave(&dev->event_lock, flags);
106 	event = mdp4_crtc->event;
107 	if (event) {
108 		mdp4_crtc->event = NULL;
109 		DBG("%s: send event: %p", mdp4_crtc->name, event);
110 		drm_crtc_send_vblank_event(crtc, event);
111 	}
112 	spin_unlock_irqrestore(&dev->event_lock, flags);
113 }
114 
unref_cursor_worker(struct drm_flip_work * work,void * val)115 static void unref_cursor_worker(struct drm_flip_work *work, void *val)
116 {
117 	struct mdp4_crtc *mdp4_crtc =
118 		container_of(work, struct mdp4_crtc, unref_cursor_work);
119 	struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
120 	struct msm_kms *kms = &mdp4_kms->base.base;
121 
122 	msm_gem_unpin_iova(val, kms->vm);
123 	drm_gem_object_put(val);
124 }
125 
126 /* statically (for now) map planes to mixer stage (z-order): */
127 static const int idxs[] = {
128 		[VG1]  = 1,
129 		[VG2]  = 2,
130 		[RGB1] = 0,
131 		[RGB2] = 0,
132 		[RGB3] = 0,
133 		[VG3]  = 3,
134 		[VG4]  = 4,
135 
136 };
137 
138 /* setup mixer config, for which we need to consider all crtc's and
139  * the planes attached to them
140  *
141  * TODO may possibly need some extra locking here
142  */
setup_mixer(struct mdp4_kms * mdp4_kms)143 static void setup_mixer(struct mdp4_kms *mdp4_kms)
144 {
145 	struct drm_mode_config *config = &mdp4_kms->dev->mode_config;
146 	struct drm_crtc *crtc;
147 	uint32_t mixer_cfg = 0;
148 	static const enum mdp_mixer_stage_id stages[] = {
149 			STAGE_BASE, STAGE0, STAGE1, STAGE2, STAGE3,
150 	};
151 
152 	list_for_each_entry(crtc, &config->crtc_list, head) {
153 		struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
154 		struct drm_plane *plane;
155 
156 		drm_atomic_crtc_for_each_plane(plane, crtc) {
157 			enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
158 			int idx = idxs[pipe_id];
159 			mixer_cfg = mixercfg(mixer_cfg, mdp4_crtc->mixer,
160 					pipe_id, stages[idx]);
161 		}
162 	}
163 
164 	mdp4_write(mdp4_kms, REG_MDP4_LAYERMIXER_IN_CFG, mixer_cfg);
165 }
166 
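/* Program the overlay (OVLP) blend stages for this crtc: a stage gets
 * per-pixel alpha if the plane staged there has an alpha-capable format,
 * constant alpha otherwise, and then the global layer-mixer config is
 * rewritten via setup_mixer().
 */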
static void blend_setup(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct drm_plane *plane;
	int i, ovlp = mdp4_crtc->ovlp;
	bool alpha[4] = { false, false, false, false };

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_LOW1(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH0(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_TRANSP_HIGH1(ovlp), 0);

	drm_atomic_crtc_for_each_plane(plane, crtc) {
		enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
		int idx = idxs[pipe_id];
		if (idx > 0) {
			const struct msm_format *format =
					msm_framebuffer_format(plane->state->fb);
			alpha[idx-1] = format->alpha_enable;
		}
	}

	for (i = 0; i < 4; i++) {
		uint32_t op;

		if (alpha[i]) {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(FG_PIXEL) |
					MDP4_OVLP_STAGE_OP_BG_INV_ALPHA;
		} else {
			op = MDP4_OVLP_STAGE_OP_FG_ALPHA(FG_CONST) |
					MDP4_OVLP_STAGE_OP_BG_ALPHA(BG_CONST);
		}

		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_FG_ALPHA(ovlp, i), 0xff);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_BG_ALPHA(ovlp, i), 0x00);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_OP(ovlp, i), op);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_CO3(ovlp, i), 1);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_LOW1(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH0(ovlp, i), 0);
		mdp4_write(mdp4_kms, REG_MDP4_OVLP_STAGE_TRANSP_HIGH1(ovlp, i), 0);
	}

	setup_mixer(mdp4_kms);
}

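/* Program the DMA source/destination and OVLP dimensions from the crtc's
 * adjusted mode.  Data is taken from the pipe (SRC_BASE/STRIDE stay zero),
 * and DMA_E additionally gets its QUANT registers initialized.
 */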
static void mdp4_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	enum mdp4_dma dma = mdp4_crtc->dma;
	int ovlp = mdp4_crtc->ovlp;
	struct drm_display_mode *mode;

	if (WARN_ON(!crtc->state))
		return;

	mode = &crtc->state->adjusted_mode;

	DBG("%s: set mode: " DRM_MODE_FMT,
			mdp4_crtc->name, DRM_MODE_ARG(mode));

	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
			MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
			MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));

	/* take data from pipe: */
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_BASE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_STRIDE(dma), 0);
	mdp4_write(mdp4_kms, REG_MDP4_DMA_DST_SIZE(dma),
			MDP4_DMA_DST_SIZE_WIDTH(0) |
			MDP4_DMA_DST_SIZE_HEIGHT(0));

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_BASE(ovlp), 0);
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_SIZE(ovlp),
			MDP4_OVLP_SIZE_WIDTH(mode->hdisplay) |
			MDP4_OVLP_SIZE_HEIGHT(mode->vdisplay));
	mdp4_write(mdp4_kms, REG_MDP4_OVLP_STRIDE(ovlp), 0);

	mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);

	if (dma == DMA_E) {
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
		mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
	}
}

static void mdp4_crtc_atomic_disable(struct drm_crtc *crtc,
				     struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	unsigned long flags;

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(!mdp4_crtc->enabled))
		return;

	/* Disable/save vblank irq handling before power is disabled */
	drm_crtc_vblank_off(crtc);

	mdp_irq_unregister(&mdp4_kms->base, &mdp4_crtc->err);
	mdp4_disable(mdp4_kms);

	if (crtc->state->event && !crtc->state->active) {
		WARN_ON(mdp4_crtc->event);
		spin_lock_irqsave(&mdp4_kms->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, crtc->state->event);
		crtc->state->event = NULL;
		spin_unlock_irqrestore(&mdp4_kms->dev->event_lock, flags);
	}

	mdp4_crtc->enabled = false;
}

static void mdp4_crtc_atomic_enable(struct drm_crtc *crtc,
				    struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	DBG("%s", mdp4_crtc->name);

	if (WARN_ON(mdp4_crtc->enabled))
		return;

	mdp4_enable(mdp4_kms);

	/* Restore vblank irq handling after power is enabled */
	drm_crtc_vblank_on(crtc);

	mdp_irq_register(&mdp4_kms->base, &mdp4_crtc->err);

	crtc_flush(crtc);

	mdp4_crtc->enabled = true;
}

static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
		struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: check", mdp4_crtc->name);
	// TODO anything else to check?
	return 0;
}

static void mdp4_crtc_atomic_begin(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	DBG("%s: begin", mdp4_crtc->name);
}

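/* Stash the pageflip event under event_lock, program blending, write the
 * flush bits, and mark a PENDING_FLIP so the event is sent from the next
 * vblank irq via complete_flip().
 */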
static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc,
				   struct drm_atomic_state *state)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	unsigned long flags;

	DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

	WARN_ON(mdp4_crtc->event);

	spin_lock_irqsave(&dev->event_lock, flags);
	mdp4_crtc->event = crtc->state->event;
	crtc->state->event = NULL;
	spin_unlock_irqrestore(&dev->event_lock, flags);

	blend_setup(crtc);
	crtc_flush(crtc);
	request_pending(crtc, PENDING_FLIP);
}

#define CURSOR_WIDTH 64
#define CURSOR_HEIGHT 64

/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	enum mdp4_dma dma = mdp4_crtc->dma;
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	if (mdp4_crtc->cursor.stale) {
		struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
		struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
		uint64_t iova = mdp4_crtc->cursor.next_iova;

		if (next_bo) {
			/* take an obj ref + iova ref when we start scanning out: */
			drm_gem_object_get(next_bo);
			msm_gem_get_and_pin_iova(next_bo, kms->vm, &iova);

			/* enable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
					MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
					MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
					MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
					MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
		} else {
			/* disable cursor: */
			mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
					mdp4_kms->blank_cursor_iova);
		}

		/* and drop the iova ref + obj ref when done scanning out: */
		if (prev_bo)
			drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

		mdp4_crtc->cursor.scanout_bo = next_bo;
		mdp4_crtc->cursor.stale = false;
	}

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
			MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
			MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}

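/* Legacy cursor_set: look up and pin the new cursor bo, record it as
 * cursor.next_bo under the cursor lock, and defer the actual register
 * programming to update_cursor() in the vblank irq (see the comment
 * above update_cursor() for why).  Any previously queued "next" bo is
 * dropped via the unref_cursor_work flip-work.
 */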
static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
		struct drm_file *file_priv, uint32_t handle,
		uint32_t width, uint32_t height)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	struct msm_kms *kms = &mdp4_kms->base.base;
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *cursor_bo, *old_bo;
	unsigned long flags;
	uint64_t iova;
	int ret;

	if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
		DRM_DEV_ERROR(dev->dev, "bad cursor size: %dx%d\n", width, height);
		return -EINVAL;
	}

	if (handle) {
		cursor_bo = drm_gem_object_lookup(file_priv, handle);
		if (!cursor_bo)
			return -ENOENT;
	} else {
		cursor_bo = NULL;
	}

	if (cursor_bo) {
		ret = msm_gem_get_and_pin_iova(cursor_bo, kms->vm, &iova);
		if (ret)
			goto fail;
	} else {
		iova = 0;
	}

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	old_bo = mdp4_crtc->cursor.next_bo;
	mdp4_crtc->cursor.next_bo   = cursor_bo;
	mdp4_crtc->cursor.next_iova = iova;
	mdp4_crtc->cursor.width     = width;
	mdp4_crtc->cursor.height    = height;
	mdp4_crtc->cursor.stale     = true;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	if (old_bo) {
		/* drop our previous reference: */
		drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, old_bo);
	}

	request_pending(crtc, PENDING_CURSOR);

	return 0;

fail:
	drm_gem_object_put(cursor_bo);
	return ret;
}

static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	unsigned long flags;

	spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
	mdp4_crtc->cursor.x = x;
	mdp4_crtc->cursor.y = y;
	spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);

	crtc_flush(crtc);
	request_pending(crtc, PENDING_CURSOR);

	return 0;
}

static const struct drm_crtc_funcs mdp4_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.page_flip = drm_atomic_helper_page_flip,
	.cursor_set = mdp4_crtc_cursor_set,
	.cursor_move = mdp4_crtc_cursor_move,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank  = msm_crtc_enable_vblank,
	.disable_vblank = msm_crtc_disable_vblank,
};

static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
	.mode_set_nofb = mdp4_crtc_mode_set_nofb,
	.atomic_check = mdp4_crtc_atomic_check,
	.atomic_begin = mdp4_crtc_atomic_begin,
	.atomic_flush = mdp4_crtc_atomic_flush,
	.atomic_enable = mdp4_crtc_atomic_enable,
	.atomic_disable = mdp4_crtc_atomic_disable,
};

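/* vblank irq handler: one-shot (it unregisters itself), consumes the
 * pending bits set by request_pending(), completes any pending pageflip
 * and/or programs a stale cursor, then commits queued cursor-bo unrefs.
 */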
static void mdp4_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, vblank);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	struct msm_drm_private *priv = crtc->dev->dev_private;
	unsigned pending;

	mdp_irq_unregister(&get_kms(crtc)->base, &mdp4_crtc->vblank);

	pending = atomic_xchg(&mdp4_crtc->pending, 0);

	if (pending & PENDING_FLIP) {
		complete_flip(crtc, NULL);
	}

	if (pending & PENDING_CURSOR) {
		update_cursor(crtc);
		drm_flip_work_commit(&mdp4_crtc->unref_cursor_work, priv->kms->wq);
	}
}

static void mdp4_crtc_err_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp4_crtc *mdp4_crtc = container_of(irq, struct mdp4_crtc, err);
	struct drm_crtc *crtc = &mdp4_crtc->base;
	DBG("%s: error: %08x", mdp4_crtc->name, irqstatus);
	crtc_flush(crtc);
}

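/* Wait (up to 50ms, on the crtc's vblank queue) for the hardware to clear
 * the flush bits written by crtc_flush(), i.e. for the last commit to
 * actually take effect on a vsync.
 */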
static void mdp4_crtc_wait_for_flush_done(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	int ret;

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		return;

	ret = wait_event_timeout(dev->vblank[drm_crtc_index(crtc)].queue,
		!(mdp4_read(mdp4_kms, REG_MDP4_OVERLAY_FLUSH) &
			mdp4_crtc->flushed_mask),
		msecs_to_jiffies(50));
	if (ret <= 0)
		dev_warn(dev->dev, "vblank timeout, crtc=%s\n", mdp4_crtc->base.name);

	mdp4_crtc->flushed_mask = 0;

	drm_crtc_vblank_put(crtc);
}

uint32_t mdp4_crtc_vblank(struct drm_crtc *crtc)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	return mdp4_crtc->vblank.irqmask;
}

/* set dma config, i.e. the format the encoder wants. */
void mdp4_crtc_set_config(struct drm_crtc *crtc, uint32_t config)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);

	mdp4_write(mdp4_kms, REG_MDP4_DMA_CONFIG(mdp4_crtc->dma), config);
}

/* set interface for routing crtc->encoder: */
void mdp4_crtc_set_intf(struct drm_crtc *crtc, enum mdp4_intf intf, int mixer)
{
	struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
	struct mdp4_kms *mdp4_kms = get_kms(crtc);
	uint32_t intf_sel;

	intf_sel = mdp4_read(mdp4_kms, REG_MDP4_DISP_INTF_SEL);

	switch (mdp4_crtc->dma) {
	case DMA_P:
		intf_sel &= ~MDP4_DISP_INTF_SEL_PRIM__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_PRIM(intf);
		break;
	case DMA_S:
		intf_sel &= ~MDP4_DISP_INTF_SEL_SEC__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_SEC(intf);
		break;
	case DMA_E:
		intf_sel &= ~MDP4_DISP_INTF_SEL_EXT__MASK;
		intf_sel |= MDP4_DISP_INTF_SEL_EXT(intf);
		break;
	}

	if (intf == INTF_DSI_VIDEO) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_CMD;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_VIDEO;
	} else if (intf == INTF_DSI_CMD) {
		intf_sel &= ~MDP4_DISP_INTF_SEL_DSI_VIDEO;
		intf_sel |= MDP4_DISP_INTF_SEL_DSI_CMD;
	}

	mdp4_crtc->mixer = mixer;

	blend_setup(crtc);

	DBG("%s: intf_sel=%08x", mdp4_crtc->name, intf_sel);

	mdp4_write(mdp4_kms, REG_MDP4_DISP_INTF_SEL, intf_sel);
}

void mdp4_crtc_wait_for_commit_done(struct drm_crtc *crtc)
{
	/* wait_for_flush_done is the only case for now.
	 * Later we will have command-mode CRTCs that wait for
	 * other events.
	 */
	mdp4_crtc_wait_for_flush_done(crtc);
}

static const char *dma_names[] = {
		"DMA_P", "DMA_S", "DMA_E",
};

static void mdp4_crtc_flip_cleanup(struct drm_device *dev, void *ptr)
{
	struct mdp4_crtc *mdp4_crtc = ptr;

	drm_flip_work_cleanup(&mdp4_crtc->unref_cursor_work);
}

/* initialize crtc */
struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
		struct drm_plane *plane, int ovlp_id,
		enum mdp4_dma dma_id)
{
	struct drm_crtc *crtc = NULL;
	struct mdp4_crtc *mdp4_crtc;
	int ret;

	mdp4_crtc = drmm_crtc_alloc_with_planes(dev, struct mdp4_crtc, base,
						plane, NULL,
						&mdp4_crtc_funcs, NULL);
	if (IS_ERR(mdp4_crtc))
		return ERR_CAST(mdp4_crtc);

	crtc = &mdp4_crtc->base;

	mdp4_crtc->ovlp = ovlp_id;
	mdp4_crtc->dma = dma_id;

	mdp4_crtc->vblank.irqmask = dma2irq(mdp4_crtc->dma);
	mdp4_crtc->vblank.irq = mdp4_crtc_vblank_irq;

	mdp4_crtc->err.irqmask = dma2err(mdp4_crtc->dma);
	mdp4_crtc->err.irq = mdp4_crtc_err_irq;

	snprintf(mdp4_crtc->name, sizeof(mdp4_crtc->name), "%s:%d",
			dma_names[dma_id], ovlp_id);

	spin_lock_init(&mdp4_crtc->cursor.lock);

	drm_flip_work_init(&mdp4_crtc->unref_cursor_work,
			"unref cursor", unref_cursor_worker);
	ret = drmm_add_action_or_reset(dev, mdp4_crtc_flip_cleanup, mdp4_crtc);
	if (ret)
		return ERR_PTR(ret);

	drm_crtc_helper_add(crtc, &mdp4_crtc_helper_funcs);

	return crtc;
}