// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
 */

#include <linux/string_choices.h>
#include "mdp5_kms.h"
#include "mdp5_ctl.h"

/*
 * CTL - MDP Control Pool Manager
 *
 * Controls are shared between all display interfaces.
 *
 * They are intended to be used for data path configuration.
 * The top level register programming describes the complete data path for
 * a specific data path ID - REG_MDP5_CTL_*(<id>, ...)
 *
 * Hardware capabilities determine the number of concurrent data paths
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */
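
/*
 * Typical usage by a display path (simplified sketch; the exact sequence
 * lives in the CRTC/encoder code, not here):
 *
 *	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
 *	mdp5_ctl_set_pipeline(ctl, &pipeline);
 *	mdp5_ctl_set_encoder_state(ctl, &pipeline, true);
 *	mdp5_ctl_blend(ctl, &pipeline, stage, r_stage, stage_cnt, flags);
 *	mdp5_ctl_commit(ctl, &pipeline, flush_mask, true);
 */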

#define CTL_STAT_BUSY	0x1
#define CTL_STAT_BOOKED	0x2

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;

	/* CTL status bitmask */
	u32 status;

	bool encoder_enabled;

	/* pending flush_mask bits */
	u32 flush_mask;

	/* REG_MDP5_CTL_*(<id>) registers access info + lock: */
	spinlock_t hw_lock;
	u32 reg_offset;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;

	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
};

struct mdp5_ctl_manager {
	struct drm_device *dev;

	/* number of CTL / Layer Mixers in this hw config: */
	u32 nlm;
	u32 nctl;

	/* to filter out non-present bits in the current hardware config */
	u32 flush_hw_mask;

	/* status for single FLUSH */
	bool single_flush_supported;
	u32 single_flush_pending_mask;

	/* pool of CTLs + lock to protect resource allocation (ctls[i].busy) */
	spinlock_t pool_lock;
	struct mdp5_ctl ctls[MAX_CTL];
};

static inline
struct mdp5_kms *get_kms(struct mdp5_ctl_manager *ctl_mgr)
{
	struct msm_drm_private *priv = ctl_mgr->dev->dev_private;

	return to_mdp5_kms(to_mdp_kms(priv->kms));
}

static inline
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline
u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}

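/*
 * Select the interface type driven by physical INTF @intf->num in the
 * DISP_INTF_SEL register. The register is shared by all interfaces, hence
 * the resource_lock.
 */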
static void set_display_intf(struct mdp5_kms *mdp5_kms,
			     struct mdp5_interface *intf)
{
	unsigned long flags;
	u32 intf_sel;

	spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
	intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);

	switch (intf->num) {
	case 0:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF0__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF0(intf->type);
		break;
	case 1:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF1__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF1(intf->type);
		break;
	case 2:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF2__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF2(intf->type);
		break;
	case 3:
		intf_sel &= ~MDP5_DISP_INTF_SEL_INTF3__MASK;
		intf_sel |= MDP5_DISP_INTF_SEL_INTF3(intf->type);
		break;
	default:
		BUG();
		break;
	}

	mdp5_write(mdp5_kms, REG_MDP5_DISP_INTF_SEL, intf_sel);
	spin_unlock_irqrestore(&mdp5_kms->resource_lock, flags);
}

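/*
 * Program the CTL OP register: physical interface number, command/line mode
 * where applicable, and 3D pack when a right mixer (dual pipe) is in use.
 */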
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	struct mdp5_interface *intf = pipeline->intf;
	u32 ctl_op = 0;

	if (!mdp5_cfg_intf_is_virtual(intf->type))
		ctl_op |= MDP5_CTL_OP_INTF_NUM(INTF0 + intf->num);

	switch (intf->type) {
	case INTF_DSI:
		if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
			ctl_op |= MDP5_CTL_OP_CMD_MODE;
		break;

	case INTF_WB:
		if (intf->mode == MDP5_INTF_WB_MODE_LINE)
			ctl_op |= MDP5_CTL_OP_MODE(MODE_WB_2_LINE);
		break;

	default:
		break;
	}

	if (pipeline->r_mixer)
		ctl_op |= MDP5_CTL_OP_PACK_3D_ENABLE |
			  MDP5_CTL_OP_PACK_3D(1);

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
	struct mdp5_interface *intf = pipeline->intf;

	/* Virtual interfaces need not set a display intf (e.g.: Writeback) */
	if (!mdp5_cfg_intf_is_virtual(intf->type))
		set_display_intf(mdp5_kms, intf);

	set_ctl_op(ctl, pipeline);

	return 0;
}

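/*
 * Only writeback and command-mode DSI need an explicit START to kick off a
 * frame, and only once the encoder has been enabled.
 */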
static bool start_signal_needed(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (!ctl->encoder_enabled)
		return false;

	switch (intf->type) {
	case INTF_WB:
		return true;
	case INTF_DSI:
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:
		return false;
	}
}

/*
 * send_start_signal() - Overlay Processor Start Signal
 *
 * For a given control operation (display pipeline), a START signal needs to be
 * executed in order to kick off operation and activate all layers.
 * e.g.: DSI command mode, Writeback
 */
static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

/**
 * mdp5_ctl_set_encoder_state() - set the encoder state
 *
 * @ctl: the CTL instance
 * @pipeline: the encoder's INTF + MIXER configuration
 * @enabled: true when the encoder is ready for data streaming; false otherwise
 *
 * Note:
 * This encoder state is needed to trigger START signal (data path kickoff).
 */
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;
	DBG("intf_%d: %s", intf->num, str_on_off(enabled));

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return 0;
}

/*
 * Note:
 * CTL registers need to be flushed after calling this function
 * (call mdp5_ctl_commit() with mdp_ctl_flush_mask_ctl() mask)
 */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 blend_cfg;
	struct mdp5_hw_mixer *mixer = pipeline->mixer;

	if (WARN_ON(!mixer)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
			      ctl->id);
		return -EINVAL;
	}

	if (pipeline->r_mixer) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "unsupported configuration");
		return -EINVAL;
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}

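/*
 * Return the LAYER register stage field for @pipe. Cursor pipes have no bits
 * in the base LAYER register; they are handled via the LAYER_EXT register.
 */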
static u32 mdp_ctl_blend_mask(enum mdp5_pipe pipe,
			      enum mdp_mixer_stage_id stage)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_REG_VIG0(stage);
	case SSPP_VIG1: return MDP5_CTL_LAYER_REG_VIG1(stage);
	case SSPP_VIG2: return MDP5_CTL_LAYER_REG_VIG2(stage);
	case SSPP_RGB0: return MDP5_CTL_LAYER_REG_RGB0(stage);
	case SSPP_RGB1: return MDP5_CTL_LAYER_REG_RGB1(stage);
	case SSPP_RGB2: return MDP5_CTL_LAYER_REG_RGB2(stage);
	case SSPP_DMA0: return MDP5_CTL_LAYER_REG_DMA0(stage);
	case SSPP_DMA1: return MDP5_CTL_LAYER_REG_DMA1(stage);
	case SSPP_VIG3: return MDP5_CTL_LAYER_REG_VIG3(stage);
	case SSPP_RGB3: return MDP5_CTL_LAYER_REG_RGB3(stage);
	case SSPP_CURSOR0:
	case SSPP_CURSOR1:
	default: return 0;
	}
}

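/*
 * Return the LAYER_EXT register bits for @pipe: the extra stage bit (BIT3)
 * for pipes staged at STAGE6 or above, and the full stage field for the
 * cursor pipes, which only exist in LAYER_EXT.
 */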
static u32 mdp_ctl_blend_ext_mask(enum mdp5_pipe pipe,
				  enum mdp_mixer_stage_id stage)
{
	if (stage < STAGE6 && (pipe != SSPP_CURSOR0 && pipe != SSPP_CURSOR1))
		return 0;

	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_LAYER_EXT_REG_VIG0_BIT3;
	case SSPP_VIG1: return MDP5_CTL_LAYER_EXT_REG_VIG1_BIT3;
	case SSPP_VIG2: return MDP5_CTL_LAYER_EXT_REG_VIG2_BIT3;
	case SSPP_RGB0: return MDP5_CTL_LAYER_EXT_REG_RGB0_BIT3;
	case SSPP_RGB1: return MDP5_CTL_LAYER_EXT_REG_RGB1_BIT3;
	case SSPP_RGB2: return MDP5_CTL_LAYER_EXT_REG_RGB2_BIT3;
	case SSPP_DMA0: return MDP5_CTL_LAYER_EXT_REG_DMA0_BIT3;
	case SSPP_DMA1: return MDP5_CTL_LAYER_EXT_REG_DMA1_BIT3;
	case SSPP_VIG3: return MDP5_CTL_LAYER_EXT_REG_VIG3_BIT3;
	case SSPP_RGB3: return MDP5_CTL_LAYER_EXT_REG_RGB3_BIT3;
	case SSPP_CURSOR0: return MDP5_CTL_LAYER_EXT_REG_CURSOR0(stage);
	case SSPP_CURSOR1: return MDP5_CTL_LAYER_EXT_REG_CURSOR1(stage);
	default: return 0;
	}
}

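/* Clear the LAYER and LAYER_EXT registers of every mixer before restaging. */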
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

#define PIPE_LEFT	0
#define PIPE_RIGHT	1
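/*
 * Program the blend (staging) configuration: for each blend stage, merge the
 * left/right pipe masks into the LAYER/LAYER_EXT registers of the mixer (and
 * of the right mixer in dual-pipe setups), then record which LM flush bits
 * must accompany the next commit.
 */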
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		   enum mdp5_pipe stage[][MAX_PIPE_STAGE],
		   enum mdp5_pipe r_stage[][MAX_PIPE_STAGE],
		   u32 stage_cnt, u32 ctl_blend_op_flags)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	struct mdp5_hw_mixer *r_mixer = pipeline->r_mixer;
	unsigned long flags;
	u32 blend_cfg = 0, blend_ext_cfg = 0;
	u32 r_blend_cfg = 0, r_blend_ext_cfg = 0;
	int i, start_stage;

	mdp5_ctl_reset_blend_regs(ctl);

	if (ctl_blend_op_flags & MDP5_CTL_BLEND_OP_FLAG_BORDER_OUT) {
		start_stage = STAGE0;
		blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
		if (r_mixer)
			r_blend_cfg |= MDP5_CTL_LAYER_REG_BORDER_COLOR;
	} else {
		start_stage = STAGE_BASE;
	}

	for (i = start_stage; stage_cnt && i <= STAGE_MAX; i++) {
		blend_cfg |=
			mdp_ctl_blend_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_mask(stage[i][PIPE_RIGHT], i);
		blend_ext_cfg |=
			mdp_ctl_blend_ext_mask(stage[i][PIPE_LEFT], i) |
			mdp_ctl_blend_ext_mask(stage[i][PIPE_RIGHT], i);
		if (r_mixer) {
			r_blend_cfg |=
				mdp_ctl_blend_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_mask(r_stage[i][PIPE_RIGHT], i);
			r_blend_ext_cfg |=
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_LEFT], i) |
				mdp_ctl_blend_ext_mask(r_stage[i][PIPE_RIGHT], i);
		}
	}

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  r_blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  r_blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);

	DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x", mixer->lm,
	    blend_cfg, blend_ext_cfg);
	if (r_mixer)
		DBG("lm%d: blend config = 0x%08x. ext_cfg = 0x%08x",
		    r_mixer->lm, r_blend_cfg, r_blend_ext_cfg);

	return 0;
}

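/*
 * The helpers below translate a hardware block (interface, cursor, pipe or
 * layer mixer) into its bit in the CTL FLUSH register.
 */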
u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf)
{
	if (intf->type == INTF_WB)
		return MDP5_CTL_FLUSH_WB;

	switch (intf->num) {
	case 0: return MDP5_CTL_FLUSH_TIMING_0;
	case 1: return MDP5_CTL_FLUSH_TIMING_1;
	case 2: return MDP5_CTL_FLUSH_TIMING_2;
	case 3: return MDP5_CTL_FLUSH_TIMING_3;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_cursor(int cursor_id)
{
	switch (cursor_id) {
	case 0: return MDP5_CTL_FLUSH_CURSOR_0;
	case 1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_pipe(enum mdp5_pipe pipe)
{
	switch (pipe) {
	case SSPP_VIG0: return MDP5_CTL_FLUSH_VIG0;
	case SSPP_VIG1: return MDP5_CTL_FLUSH_VIG1;
	case SSPP_VIG2: return MDP5_CTL_FLUSH_VIG2;
	case SSPP_RGB0: return MDP5_CTL_FLUSH_RGB0;
	case SSPP_RGB1: return MDP5_CTL_FLUSH_RGB1;
	case SSPP_RGB2: return MDP5_CTL_FLUSH_RGB2;
	case SSPP_DMA0: return MDP5_CTL_FLUSH_DMA0;
	case SSPP_DMA1: return MDP5_CTL_FLUSH_DMA1;
	case SSPP_VIG3: return MDP5_CTL_FLUSH_VIG3;
	case SSPP_RGB3: return MDP5_CTL_FLUSH_RGB3;
	case SSPP_CURSOR0: return MDP5_CTL_FLUSH_CURSOR_0;
	case SSPP_CURSOR1: return MDP5_CTL_FLUSH_CURSOR_1;
	default: return 0;
	}
}

u32 mdp_ctl_flush_mask_lm(int lm)
{
	switch (lm) {
	case 0: return MDP5_CTL_FLUSH_LM0;
	case 1: return MDP5_CTL_FLUSH_LM1;
	case 2: return MDP5_CTL_FLUSH_LM2;
	case 3: return MDP5_CTL_FLUSH_LM3;
	case 4: return MDP5_CTL_FLUSH_LM4;
	case 5: return MDP5_CTL_FLUSH_LM5;
	default: return 0;
	}
}

static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			u32 flush_mask)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, cursor bit is the same as LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

	return sw_mask;
}

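/*
 * When this CTL is paired (single FLUSH), don't write the FLUSH register yet:
 * accumulate the bits in the manager until both CTLs of the pair have
 * committed, then release the combined mask through the lower CTL ID.
 */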
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
				 u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;

			DBG("Single FLUSH mask %x,ID %d", *flush_mask,
			    *flush_id);
		}
	}
}

/**
 * mdp5_ctl_commit() - Register Flush
 *
 * @ctl: the CTL instance
 * @pipeline: the encoder's INTF + MIXER configuration
 * @flush_mask: bitmask of display controller hw blocks to flush
 * @start: if true, immediately update flush registers and set START
 *         bit, otherwise accumulate flush_mask bits until we are
 *         ready to START
 *
 * The flush register is used to indicate several registers are all
 * programmed, and are safe to update to the back copy of the double
 * buffered registers.
 *
 * Some registers' FLUSH bits are shared when the hardware does not have
 * dedicated bits for them; handling these is the job of fix_sw_flush().
 *
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both flush mask and
 * ctl->pending_ctl_trigger.
 *
 * Return: the H/W flushed bit mask.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
		    struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	unsigned long flags;
	u32 flush_id = ctl->id;
	u32 curr_ctl_flush_mask;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	flush_mask &= ctl_mgr->flush_hw_mask;

	curr_ctl_flush_mask = flush_mask;

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		ctl->flush_mask |= flush_mask;
		return curr_ctl_flush_mask;
	} else {
		flush_mask |= ctl->flush_mask;
		ctl->flush_mask = 0;
	}

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline)) {
		send_start_signal(ctl);
	}

	return curr_ctl_flush_mask;
}

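/*
 * Read back the FLUSH register: bits still set mean the hardware has not yet
 * latched the corresponding double-buffered registers.
 */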
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
{
	return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));
}

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
{
	return WARN_ON(!ctl) ? -EINVAL : ctl->id;
}

/*
 * mdp5_ctl_pair() - Associate 2 booked CTLs for single FLUSH
 */
int mdp5_ctl_pair(struct mdp5_ctl *ctlx, struct mdp5_ctl *ctly, bool enable)
{
	struct mdp5_ctl_manager *ctl_mgr = ctlx->ctlm;
	struct mdp5_kms *mdp5_kms = get_kms(ctl_mgr);

	/* do nothing, silently, if the hw doesn't support single FLUSH */
	if (!ctl_mgr->single_flush_supported)
		return 0;

	if (!enable) {
		ctlx->pair = NULL;
		ctly->pair = NULL;
		mdp5_write(mdp5_kms, REG_MDP5_SPARE_0, 0);
		return 0;
	} else if ((ctlx->pair != NULL) || (ctly->pair != NULL)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTLs already paired\n");
		return -EINVAL;
	} else if (!(ctlx->status & ctly->status & CTL_STAT_BOOKED)) {
		DRM_DEV_ERROR(ctl_mgr->dev->dev, "Only pair booked CTLs\n");
		return -EINVAL;
	}

	ctlx->pair = ctly;
	ctly->pair = ctlx;

	mdp5_write(mdp5_kms, REG_MDP5_SPARE_0,
		   MDP5_SPARE_0_SPLIT_DPL_SINGLE_FLUSH_EN);

	return 0;
}

/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * Return NULL if no CTL is available at all.
 */
struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
				   int intf_num)
{
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	unsigned long flags;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category first */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		 "fall back to the other CTL category for INTF %d!\n", intf_num);

	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}

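/* Clear the OP (operation mode) register of every CTL in the pool. */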
void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}

struct mdp5_ctl_manager *mdp5_ctlm_init(struct drm_device *dev,
		void __iomem *mmio_base, struct mdp5_cfg_handler *cfg_hnd)
{
	struct mdp5_ctl_manager *ctl_mgr;
	const struct mdp5_cfg_hw *hw_cfg = mdp5_cfg_get_hw_config(cfg_hnd);
	int rev = mdp5_cfg_get_hw_rev(cfg_hnd);
	unsigned dsi_cnt = 0;
	const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;
	unsigned long flags;
	int c, ret;

	ctl_mgr = devm_kzalloc(dev->dev, sizeof(*ctl_mgr), GFP_KERNEL);
	if (!ctl_mgr) {
		DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON(ctl_cfg->count > MAX_CTL)) {
		DRM_DEV_ERROR(dev->dev, "Increase static pool size to at least %d\n",
			      ctl_cfg->count);
		return ERR_PTR(-ENOSPC);
	}

	/* initialize the CTL manager: */
	ctl_mgr->dev = dev;
	ctl_mgr->nlm = hw_cfg->lm.count;
	ctl_mgr->nctl = ctl_cfg->count;
	ctl_mgr->flush_hw_mask = ctl_cfg->flush_hw_mask;
	spin_lock_init(&ctl_mgr->pool_lock);

	/* initialize each CTL of the pool: */
	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		if (WARN_ON(!ctl_cfg->base[c])) {
			DRM_DEV_ERROR(dev->dev, "CTL_%d: base is null!\n", c);
			ret = -EINVAL;
			spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
			return ERR_PTR(ret);
		}
		ctl->ctlm = ctl_mgr;
		ctl->id = c;
		ctl->reg_offset = ctl_cfg->base[c];
		ctl->status = 0;
		spin_lock_init(&ctl->hw_lock);
	}

	/*
	 * In the bonded DSI case, CTL0 and CTL1 are always assigned to the two
	 * DSI interfaces to support the single FLUSH feature (a single write
	 * into CTL0's FLUSH register flushes both CTL0 and CTL1), which keeps
	 * the two DSI pipes in sync.
	 * Single FLUSH is supported from hw rev v3.0.
	 */
	for (c = 0; c < ARRAY_SIZE(hw_cfg->intf.connect); c++)
		if (hw_cfg->intf.connect[c] == INTF_DSI)
			dsi_cnt++;
	if ((rev >= 3) && (dsi_cnt > 1)) {
		ctl_mgr->single_flush_supported = true;
		/* Reserve CTL0/1 for INTF1/2 */
		ctl_mgr->ctls[0].status |= CTL_STAT_BOOKED;
		ctl_mgr->ctls[1].status |= CTL_STAT_BOOKED;
	}
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	DBG("Pool of %d CTLs created.", ctl_mgr->nctl);

	return ctl_mgr;
}