// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "dpu_hwio.h"
#include "dpu_hw_ctl.h"
#include "dpu_kms.h"
#include "dpu_trace.h"

#define   CTL_LAYER(lm)                 \
	(((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT(lm)             \
	(0x40 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT2(lm)            \
	(0x70 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT3(lm)            \
	(0xA0 + (((lm) - LM_0) * 0x004))
#define   CTL_LAYER_EXT4(lm)            \
	(0xB8 + (((lm) - LM_0) * 0x004))
#define   CTL_TOP                       0x014
#define   CTL_FLUSH                     0x018
#define   CTL_START                     0x01C
#define   CTL_PREPARE                   0x0d0
#define   CTL_SW_RESET                  0x030
#define   CTL_LAYER_EXTN_OFFSET         0x40
#define   CTL_MERGE_3D_ACTIVE           0x0E4
#define   CTL_DSC_ACTIVE                0x0E8
#define   CTL_WB_ACTIVE                 0x0EC
#define   CTL_CWB_ACTIVE                0x0F0
#define   CTL_INTF_ACTIVE               0x0F4
#define   CTL_CDM_ACTIVE                0x0F8
#define   CTL_FETCH_PIPE_ACTIVE         0x0FC
#define   CTL_MERGE_3D_FLUSH            0x100
#define   CTL_DSC_FLUSH                 0x104
#define   CTL_WB_FLUSH                  0x108
#define   CTL_CWB_FLUSH                 0x10C
#define   CTL_INTF_FLUSH                0x110
#define   CTL_CDM_FLUSH                 0x114
#define   CTL_PERIPH_FLUSH              0x128
#define   CTL_INTF_MASTER               0x134
#define   CTL_DSPP_n_FLUSH(n)           ((0x13C) + ((n) * 4))

#define CTL_MIXER_BORDER_OUT            BIT(24)
#define CTL_FLUSH_MASK_CTL              BIT(17)

#define DPU_REG_RESET_TIMEOUT_US        2000
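
/*
 * Bit positions in CTL_FLUSH that gate the dedicated *_FLUSH registers
 * on hardware with the DPU_CTL_ACTIVE_CFG feature (see
 * dpu_hw_ctl_trigger_flush_v1()).
 */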
#define  MERGE_3D_IDX   23
#define  DSC_IDX        22
#define  CDM_IDX        26
#define  PERIPH_IDX     30
#define  INTF_IDX       31
#define  WB_IDX         16
#define  CWB_IDX        28
#define  DSPP_IDX       29  /* From DPU hw rev 7.x.x */
#define CTL_INVALID_BIT                 0xffff
#define CTL_DEFAULT_GROUP_ID		0xf

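/*
 * Map from enum dpu_sspp to the pipe's bit position in
 * CTL_FETCH_PIPE_ACTIVE; entries set to CTL_INVALID_BIT have no fetch
 * bit and are skipped by dpu_hw_ctl_set_fetch_pipe_active().
 */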
static const u32 fetch_tbl[SSPP_MAX] = {CTL_INVALID_BIT, 16, 17, 18, 19,
	CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, CTL_INVALID_BIT, 0,
	1, 2, 3, 4, 5};

static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
		enum dpu_lm lm)
{
	int i;
	int stages = -EINVAL;

	for (i = 0; i < count; i++) {
		if (lm == mixer[i].id) {
			stages = mixer[i].sblk->maxblendstages;
			break;
		}
	}

	return stages;
}

static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	return DPU_REG_READ(c, CTL_FLUSH);
}

static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_start(ctx->pending_flush_mask,
				       dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
}

static inline bool dpu_hw_ctl_is_started(struct dpu_hw_ctl *ctx)
{
	return !!(DPU_REG_READ(&ctx->hw, CTL_START) & BIT(0));
}

static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_prepare(ctx->pending_flush_mask,
					 dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
}

static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_clear_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	ctx->pending_flush_mask = 0x0;
	ctx->pending_intf_flush_mask = 0;
	ctx->pending_wb_flush_mask = 0;
	ctx->pending_cwb_flush_mask = 0;
	ctx->pending_merge_3d_flush_mask = 0;
	ctx->pending_dsc_flush_mask = 0;
	ctx->pending_cdm_flush_mask = 0;

	memset(ctx->pending_dspp_flush_mask, 0,
		sizeof(ctx->pending_dspp_flush_mask));
}

static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
		u32 flushbits)
{
	trace_dpu_hw_ctl_update_pending_flush(flushbits,
					      ctx->pending_flush_mask);
	ctx->pending_flush_mask |= flushbits;
}

static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
{
	return ctx->pending_flush_mask;
}

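/*
 * On DPU_CTL_ACTIVE_CFG hardware the per-block pending masks are written
 * to their dedicated *_FLUSH registers before the main CTL_FLUSH kick.
 */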
static inline void dpu_hw_ctl_trigger_flush_v1(struct dpu_hw_ctl *ctx)
{
	int dspp;

	if (ctx->pending_flush_mask & BIT(MERGE_3D_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_MERGE_3D_FLUSH,
				ctx->pending_merge_3d_flush_mask);
	if (ctx->pending_flush_mask & BIT(INTF_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_INTF_FLUSH,
				ctx->pending_intf_flush_mask);
	if (ctx->pending_flush_mask & BIT(WB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_WB_FLUSH,
				ctx->pending_wb_flush_mask);
	if (ctx->pending_flush_mask & BIT(CWB_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CWB_FLUSH,
				ctx->pending_cwb_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSPP_IDX))
		for (dspp = DSPP_0; dspp < DSPP_MAX; dspp++) {
			if (ctx->pending_dspp_flush_mask[dspp - DSPP_0])
				DPU_REG_WRITE(&ctx->hw,
				CTL_DSPP_n_FLUSH(dspp - DSPP_0),
				ctx->pending_dspp_flush_mask[dspp - DSPP_0]);
		}

	if (ctx->pending_flush_mask & BIT(PERIPH_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_PERIPH_FLUSH,
			      ctx->pending_periph_flush_mask);

	if (ctx->pending_flush_mask & BIT(DSC_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_DSC_FLUSH,
			      ctx->pending_dsc_flush_mask);

	if (ctx->pending_flush_mask & BIT(CDM_IDX))
		DPU_REG_WRITE(&ctx->hw, CTL_CDM_FLUSH,
			      ctx->pending_cdm_flush_mask);

	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
{
	trace_dpu_hw_ctl_trigger_pending_flush(ctx->pending_flush_mask,
				     dpu_hw_ctl_get_flush_register(ctx));
	DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
}

static void dpu_hw_ctl_update_pending_flush_sspp(struct dpu_hw_ctl *ctx,
	enum dpu_sspp sspp)
{
	switch (sspp) {
	case SSPP_VIG0:
		ctx->pending_flush_mask |= BIT(0);
		break;
	case SSPP_VIG1:
		ctx->pending_flush_mask |= BIT(1);
		break;
	case SSPP_VIG2:
		ctx->pending_flush_mask |= BIT(2);
		break;
	case SSPP_VIG3:
		ctx->pending_flush_mask |= BIT(18);
		break;
	case SSPP_RGB0:
		ctx->pending_flush_mask |= BIT(3);
		break;
	case SSPP_RGB1:
		ctx->pending_flush_mask |= BIT(4);
		break;
	case SSPP_RGB2:
		ctx->pending_flush_mask |= BIT(5);
		break;
	case SSPP_RGB3:
		ctx->pending_flush_mask |= BIT(19);
		break;
	case SSPP_DMA0:
		ctx->pending_flush_mask |= BIT(11);
		break;
	case SSPP_DMA1:
		ctx->pending_flush_mask |= BIT(12);
		break;
	case SSPP_DMA2:
		ctx->pending_flush_mask |= BIT(24);
		break;
	case SSPP_DMA3:
		ctx->pending_flush_mask |= BIT(25);
		break;
	case SSPP_DMA4:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case SSPP_DMA5:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case SSPP_CURSOR0:
		ctx->pending_flush_mask |= BIT(22);
		break;
	case SSPP_CURSOR1:
		ctx->pending_flush_mask |= BIT(23);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_mixer(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm)
{
	switch (lm) {
	case LM_0:
		ctx->pending_flush_mask |= BIT(6);
		break;
	case LM_1:
		ctx->pending_flush_mask |= BIT(7);
		break;
	case LM_2:
		ctx->pending_flush_mask |= BIT(8);
		break;
	case LM_3:
		ctx->pending_flush_mask |= BIT(9);
		break;
	case LM_4:
		ctx->pending_flush_mask |= BIT(10);
		break;
	case LM_5:
		ctx->pending_flush_mask |= BIT(20);
		break;
	default:
		break;
	}

	ctx->pending_flush_mask |= CTL_FLUSH_MASK_CTL;
}

static void dpu_hw_ctl_update_pending_flush_intf(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	switch (intf) {
	case INTF_0:
		ctx->pending_flush_mask |= BIT(31);
		break;
	case INTF_1:
		ctx->pending_flush_mask |= BIT(30);
		break;
	case INTF_2:
		ctx->pending_flush_mask |= BIT(29);
		break;
	case INTF_3:
		ctx->pending_flush_mask |= BIT(28);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_wb(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	switch (wb) {
	case WB_0:
	case WB_1:
	case WB_2:
		ctx->pending_flush_mask |= BIT(WB_IDX);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_cdm(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	/* update pending flush only if CDM_0 is flushed */
	if (cdm_num == CDM_0)
		ctx->pending_flush_mask |= BIT(CDM_IDX);
}

static void dpu_hw_ctl_update_pending_flush_wb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_wb wb)
{
	ctx->pending_wb_flush_mask |= BIT(wb - WB_0);
	ctx->pending_flush_mask |= BIT(WB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cwb_v1(struct dpu_hw_ctl *ctx,
		enum dpu_cwb cwb)
{
	ctx->pending_cwb_flush_mask |= BIT(cwb - CWB_0);
	ctx->pending_flush_mask |= BIT(CWB_IDX);
}

static void dpu_hw_ctl_update_pending_flush_intf_v1(struct dpu_hw_ctl *ctx,
		enum dpu_intf intf)
{
	ctx->pending_intf_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(INTF_IDX);
}

static void dpu_hw_ctl_update_pending_flush_periph_v1(struct dpu_hw_ctl *ctx,
						      enum dpu_intf intf)
{
	ctx->pending_periph_flush_mask |= BIT(intf - INTF_0);
	ctx->pending_flush_mask |= BIT(PERIPH_IDX);
}

static void dpu_hw_ctl_update_pending_flush_merge_3d_v1(struct dpu_hw_ctl *ctx,
		enum dpu_merge_3d merge_3d)
{
	ctx->pending_merge_3d_flush_mask |= BIT(merge_3d - MERGE_3D_0);
	ctx->pending_flush_mask |= BIT(MERGE_3D_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dsc_v1(struct dpu_hw_ctl *ctx,
						   enum dpu_dsc dsc_num)
{
	ctx->pending_dsc_flush_mask |= BIT(dsc_num - DSC_0);
	ctx->pending_flush_mask |= BIT(DSC_IDX);
}

static void dpu_hw_ctl_update_pending_flush_cdm_v1(struct dpu_hw_ctl *ctx, enum dpu_cdm cdm_num)
{
	ctx->pending_cdm_flush_mask |= BIT(cdm_num - CDM_0);
	ctx->pending_flush_mask |= BIT(CDM_IDX);
}

static void dpu_hw_ctl_update_pending_flush_dspp(struct dpu_hw_ctl *ctx,
	enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	switch (dspp) {
	case DSPP_0:
		ctx->pending_flush_mask |= BIT(13);
		break;
	case DSPP_1:
		ctx->pending_flush_mask |= BIT(14);
		break;
	case DSPP_2:
		ctx->pending_flush_mask |= BIT(15);
		break;
	case DSPP_3:
		ctx->pending_flush_mask |= BIT(21);
		break;
	default:
		break;
	}
}

static void dpu_hw_ctl_update_pending_flush_dspp_sub_blocks(
	struct dpu_hw_ctl *ctx, enum dpu_dspp dspp, u32 dspp_sub_blk)
{
	if (dspp >= DSPP_MAX)
		return;

	switch (dspp_sub_blk) {
	case DPU_DSPP_PCC:
		ctx->pending_dspp_flush_mask[dspp - DSPP_0] |= BIT(4);
		break;
	default:
		return;
	}

	ctx->pending_flush_mask |= BIT(DSPP_IDX);
}

static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	ktime_t timeout;
	u32 status;

	timeout = ktime_add_us(ktime_get(), timeout_us);

	/*
	 * It takes around 30us for the MDP to finish resetting its CTL path,
	 * so poll every 20-50us so that the reset is normally complete by
	 * the first poll.
	 */
	do {
		status = DPU_REG_READ(c, CTL_SW_RESET);
		status &= 0x1;
		if (status)
			usleep_range(20, 50);
	} while (status && ktime_compare_safe(ktime_get(), timeout) < 0);

	return status;
}

static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;

	pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
	DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
		return -EINVAL;

	return 0;
}

static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 status;

	status = DPU_REG_READ(c, CTL_SW_RESET);
	status &= 0x01;
	if (!status)
		return 0;

	pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
	if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
		pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
		return -EINVAL;
	}

	return 0;
}

static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	int i;

	for (i = 0; i < ctx->mixer_count; i++) {
		enum dpu_lm mixer_id = ctx->mixer_hw_caps[i].id;

		DPU_REG_WRITE(c, CTL_LAYER(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
		DPU_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
	}

	DPU_REG_WRITE(c, CTL_FETCH_PIPE_ACTIVE, 0);
}

struct ctl_blend_config {
	int idx, shift, ext_shift;
};

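/*
 * Per-SSPP blend-stage field placement: entry [pipe][0] is used when the
 * pipe is staged solo or as RECT_0 of a multirect pair, [pipe][1] when it
 * is staged as RECT_1.  'idx' picks the mixercfg word (0 = CTL_LAYER plus
 * an overflow bit at 'ext_shift' in CTL_LAYER_EXT, 1..4 = CTL_LAYER_EXT
 * through CTL_LAYER_EXT4) and 'shift' is the field's bit offset;
 * idx == -1 marks a combination that cannot be staged.
 */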
static const struct ctl_blend_config ctl_blend_config[][2] = {
	[SSPP_NONE] = { { -1 }, { -1 } },
	[SSPP_MAX] =  { { -1 }, { -1 } },
	[SSPP_VIG0] = { { 0, 0,  0  }, { 3, 0 } },
	[SSPP_VIG1] = { { 0, 3,  2  }, { 3, 4 } },
	[SSPP_VIG2] = { { 0, 6,  4  }, { 3, 8 } },
	[SSPP_VIG3] = { { 0, 26, 6  }, { 3, 12 } },
	[SSPP_RGB0] = { { 0, 9,  8  }, { -1 } },
	[SSPP_RGB1] = { { 0, 12, 10 }, { -1 } },
	[SSPP_RGB2] = { { 0, 15, 12 }, { -1 } },
	[SSPP_RGB3] = { { 0, 29, 14 }, { -1 } },
	[SSPP_DMA0] = { { 0, 18, 16 }, { 2, 8 } },
	[SSPP_DMA1] = { { 0, 21, 18 }, { 2, 12 } },
	[SSPP_DMA2] = { { 2, 0      }, { 2, 16 } },
	[SSPP_DMA3] = { { 2, 4      }, { 2, 20 } },
	[SSPP_DMA4] = { { 4, 0      }, { 4, 8 } },
	[SSPP_DMA5] = { { 4, 4      }, { 4, 12 } },
	[SSPP_CURSOR0] =  { { 1, 20 }, { -1 } },
	[SSPP_CURSOR1] =  { { 1, 26 }, { -1 } },
};

static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
	enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 mix, ext, mix_ext;
	u32 mixercfg[5] = { 0 };
	int i, j;
	int stages;
	int pipes_per_stage;

	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
	if (stages < 0)
		return;

	if (test_bit(DPU_MIXER_SOURCESPLIT,
		&ctx->mixer_hw_caps->features))
		pipes_per_stage = PIPES_PER_STAGE;
	else
		pipes_per_stage = 1;

	mixercfg[0] = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */

	if (!stage_cfg)
		goto exit;

	for (i = 0; i <= stages; i++) {
		/* overflow to ext register if 'i + 1 > 7' */
		mix = (i + 1) & 0x7;
		ext = i >= 7;
		mix_ext = (i + 1) & 0xf;
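		/*
		 * e.g. stage index i == 7: i + 1 == 8, so mix == 0 and
		 * ext == 1 -- the low three bits land in CTL_LAYER and the
		 * fourth bit in CTL_LAYER_EXT, while mix_ext == 8 carries
		 * the full value for the 4-bit fields in the EXT registers.
		 */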

		for (j = 0; j < pipes_per_stage; j++) {
			enum dpu_sspp_multirect_index rect_index =
				stage_cfg->multirect_index[i][j];
			enum dpu_sspp pipe = stage_cfg->stage[i][j];
			const struct ctl_blend_config *cfg =
				&ctl_blend_config[pipe][rect_index == DPU_SSPP_RECT_1];

			/*
			 * CTL_LAYER has a 3-bit field per pipe (plus extra
			 * bits in the EXT register); the EXT registers all
			 * have 4-bit fields.
			 */
			if (cfg->idx == -1) {
				continue;
			} else if (cfg->idx == 0) {
				mixercfg[0] |= mix << cfg->shift;
				mixercfg[1] |= ext << cfg->ext_shift;
			} else {
				mixercfg[cfg->idx] |= mix_ext << cfg->shift;
			}
		}
	}

exit:
	DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg[0]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg[1]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg[2]);
	DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg[3]);
	if (test_bit(DPU_CTL_HAS_LAYER_EXT4, &ctx->caps->features))
		DPU_REG_WRITE(c, CTL_LAYER_EXT4(lm), mixercfg[4]);
}

static void dpu_hw_ctl_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 dsc_active = 0;
	u32 wb_active = 0;
	u32 cwb_active = 0;
	u32 mode_sel = 0;

	/* CTL_TOP[31:28] carries the group_id used to collate CTL paths
	 * per VM. Explicitly disable it until VM support is added in SW,
	 * since the power-on reset value does not disable it.
	 */
	if (test_bit(DPU_CTL_VM_CFG, &ctx->caps->features))
		mode_sel = CTL_DEFAULT_GROUP_ID << 28;

	if (cfg->intf_mode_sel == DPU_CTL_MODE_SEL_CMD)
		mode_sel |= BIT(17);

	intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
	wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
	cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
	dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);

	if (cfg->intf)
		intf_active |= BIT(cfg->intf - INTF_0);

	if (cfg->wb)
		wb_active |= BIT(cfg->wb - WB_0);

	if (cfg->cwb)
		cwb_active |= cfg->cwb;

	if (cfg->dsc)
		dsc_active |= cfg->dsc;

	DPU_REG_WRITE(c, CTL_TOP, mode_sel);
	DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
	DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);

	if (cfg->merge_3d)
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
			      BIT(cfg->merge_3d - MERGE_3D_0));

	if (cfg->cdm)
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cfg->cdm);
}

static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_cfg = 0;

	intf_cfg |= (cfg->intf & 0xF) << 4;

	if (cfg->mode_3d) {
		intf_cfg |= BIT(19);
		intf_cfg |= (cfg->mode_3d - 0x1) << 20;
	}

	if (cfg->wb)
		intf_cfg |= (cfg->wb & 0x3) + 2;

	switch (cfg->intf_mode_sel) {
	case DPU_CTL_MODE_SEL_VID:
		intf_cfg &= ~BIT(17);
		intf_cfg &= ~(0x3 << 15);
		break;
	case DPU_CTL_MODE_SEL_CMD:
		intf_cfg |= BIT(17);
		intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
		break;
	default:
		pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
		return;
	}

	DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
}

static void dpu_hw_ctl_reset_intf_cfg_v1(struct dpu_hw_ctl *ctx,
		struct dpu_hw_intf_cfg *cfg)
{
	struct dpu_hw_blk_reg_map *c = &ctx->hw;
	u32 intf_active = 0;
	u32 wb_active = 0;
	u32 cwb_active = 0;
	u32 merge3d_active = 0;
	u32 dsc_active;
	u32 cdm_active;

	/*
	 * This function resets each portion of the CTL path, namely clearing
	 * the sspps staged on the lm, the merge_3d block, interfaces,
	 * writeback etc., to ensure a clean teardown of the pipeline. It is
	 * used for writeback to begin with, to get a proper teardown of the
	 * writeback session, but upon further validation it can be extended
	 * to all interfaces.
	 */
	if (cfg->merge_3d) {
		merge3d_active = DPU_REG_READ(c, CTL_MERGE_3D_ACTIVE);
		merge3d_active &= ~BIT(cfg->merge_3d - MERGE_3D_0);
		DPU_REG_WRITE(c, CTL_MERGE_3D_ACTIVE,
				merge3d_active);
	}

	dpu_hw_ctl_clear_all_blendstages(ctx);

	if (cfg->intf) {
		intf_active = DPU_REG_READ(c, CTL_INTF_ACTIVE);
		intf_active &= ~BIT(cfg->intf - INTF_0);
		DPU_REG_WRITE(c, CTL_INTF_ACTIVE, intf_active);
	}

	if (cfg->cwb) {
		cwb_active = DPU_REG_READ(c, CTL_CWB_ACTIVE);
		cwb_active &= ~cfg->cwb;
		DPU_REG_WRITE(c, CTL_CWB_ACTIVE, cwb_active);
	}

	if (cfg->wb) {
		wb_active = DPU_REG_READ(c, CTL_WB_ACTIVE);
		wb_active &= ~BIT(cfg->wb - WB_0);
		DPU_REG_WRITE(c, CTL_WB_ACTIVE, wb_active);
	}

	if (cfg->dsc) {
		dsc_active = DPU_REG_READ(c, CTL_DSC_ACTIVE);
		dsc_active &= ~cfg->dsc;
		DPU_REG_WRITE(c, CTL_DSC_ACTIVE, dsc_active);
	}

	if (cfg->cdm) {
		cdm_active = DPU_REG_READ(c, CTL_CDM_ACTIVE);
		cdm_active &= ~cfg->cdm;
		DPU_REG_WRITE(c, CTL_CDM_ACTIVE, cdm_active);
	}
}

static void dpu_hw_ctl_set_fetch_pipe_active(struct dpu_hw_ctl *ctx,
	unsigned long *fetch_active)
{
	int i;
	u32 val = 0;

	if (fetch_active) {
		for (i = 0; i < SSPP_MAX; i++) {
			if (test_bit(i, fetch_active) &&
				fetch_tbl[i] != CTL_INVALID_BIT)
				val |= BIT(fetch_tbl[i]);
		}
	}

	DPU_REG_WRITE(&ctx->hw, CTL_FETCH_PIPE_ACTIVE, val);
}

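/*
 * Populate the ops table: DPU_CTL_ACTIVE_CFG selects the v1 ("active CTL")
 * callbacks that track per-block pending flush masks, while the legacy
 * callbacks encode everything in CTL_FLUSH/CTL_TOP directly.
 */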
static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
		unsigned long cap)
{
	if (cap & BIT(DPU_CTL_ACTIVE_CFG)) {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush_v1;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg_v1;
		ops->reset_intf_cfg = dpu_hw_ctl_reset_intf_cfg_v1;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf_v1;

		ops->update_pending_flush_periph =
			dpu_hw_ctl_update_pending_flush_periph_v1;

		ops->update_pending_flush_merge_3d =
			dpu_hw_ctl_update_pending_flush_merge_3d_v1;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb_v1;
		ops->update_pending_flush_cwb = dpu_hw_ctl_update_pending_flush_cwb_v1;
		ops->update_pending_flush_dsc =
			dpu_hw_ctl_update_pending_flush_dsc_v1;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm_v1;
	} else {
		ops->trigger_flush = dpu_hw_ctl_trigger_flush;
		ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
		ops->update_pending_flush_intf =
			dpu_hw_ctl_update_pending_flush_intf;
		ops->update_pending_flush_wb = dpu_hw_ctl_update_pending_flush_wb;
		ops->update_pending_flush_cdm = dpu_hw_ctl_update_pending_flush_cdm;
	}
	ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
	ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
	ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
	ops->get_flush_register = dpu_hw_ctl_get_flush_register;
	ops->trigger_start = dpu_hw_ctl_trigger_start;
	ops->is_started = dpu_hw_ctl_is_started;
	ops->trigger_pending = dpu_hw_ctl_trigger_pending;
	ops->reset = dpu_hw_ctl_reset_control;
	ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
	ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
	ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
	ops->update_pending_flush_sspp = dpu_hw_ctl_update_pending_flush_sspp;
	ops->update_pending_flush_mixer = dpu_hw_ctl_update_pending_flush_mixer;
	if (cap & BIT(DPU_CTL_DSPP_SUB_BLOCK_FLUSH))
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp_sub_blocks;
	else
		ops->update_pending_flush_dspp = dpu_hw_ctl_update_pending_flush_dspp;

	if (cap & BIT(DPU_CTL_FETCH_ACTIVE))
		ops->set_active_pipes = dpu_hw_ctl_set_fetch_pipe_active;
}

/**
 * dpu_hw_ctl_init() - Initializes the ctl_path hw driver object.
 * Should be called before accessing any ctl_path register.
 * @dev:  Corresponding device for devres management
 * @cfg:  ctl_path catalog entry for which driver object is required
 * @addr: mapped register io address of MDP
 * @mixer_count: Number of mixers in @mixer
 * @mixer: Pointer to an array of Layer Mixers defined in the catalog
 *
 * Return: Pointer to the allocated dpu_hw_ctl or an ERR_PTR on failure.
 */
struct dpu_hw_ctl *dpu_hw_ctl_init(struct drm_device *dev,
				   const struct dpu_ctl_cfg *cfg,
				   void __iomem *addr,
				   u32 mixer_count,
				   const struct dpu_lm_cfg *mixer)
{
	struct dpu_hw_ctl *c;

	c = drmm_kzalloc(dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return ERR_PTR(-ENOMEM);

	c->hw.blk_addr = addr + cfg->base;
	c->hw.log_mask = DPU_DBG_MASK_CTL;

	c->caps = cfg;
	_setup_ctl_ops(&c->ops, c->caps->features);
	c->idx = cfg->id;
	c->mixer_count = mixer_count;
	c->mixer_hw_caps = mixer;

	return c;
}