Lines Matching full:ctl
/*
 * CTL - MDP Control Pool Manager
 *
 * In certain use cases (high-resolution dual pipe), one single CTL can be
 * shared across multiple CRTCs.
 */
/* CTL status bitmask */
/* when do CTL registers need to be flushed? (mask of trigger bits) */
/* True if the current CTL has FLUSH bits pending for single FLUSH. */
struct mdp5_ctl *pair;          /* Paired CTL to be flushed together */
/* number of CTL / Layer Mixers in this hw config: */
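Read together, these comments suggest per-CTL bookkeeping roughly like the
sketch below. Only the pair field and the comments are verbatim from the
matches above; every other field name is an assumption made for illustration.

        struct mdp5_ctl {
                struct mdp5_ctl_manager *ctlm;  /* owning pool manager */

                u32 id;                         /* index of this CTL in the pool */
                u32 reg_offset;                 /* register base of this CTL block */
                u32 status;                     /* CTL status bitmask */

                bool encoder_enabled;           /* attached encoder is running */
                bool cursor_on;                 /* cursor staged on the mixer */

                /* when do CTL registers need to be flushed? (mask of trigger bits) */
                u32 pending_ctl_trigger;

                /* True if the current CTL has FLUSH bits pending for single FLUSH. */
                bool flush_pending;
                u32 flush_mask;                 /* bits held back while a flush is deferred */

                struct mdp5_ctl *pair;          /* Paired CTL to be flushed together */

                spinlock_t hw_lock;             /* serializes CTL register access */
        };

        struct mdp5_ctl_manager {
                /* number of CTL / Layer Mixers in this hw config: */
                u32 nlm;
                u32 nctl;
        };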
void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
        (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */

u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
        (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
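Filled in, the two accessors presumably just forward to the device-wide
register helpers until the reg_offset TODO is resolved; a minimal sketch,
assuming mdp5_write()/mdp5_read() are the global MDP5 MMIO accessors:

        static inline void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
        {
                struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

                (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
                mdp5_write(mdp5_kms, reg, data);        /* device-wide write */
        }

        static inline u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
        {
                struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

                (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
                return mdp5_read(mdp5_kms, reg);        /* device-wide read */
        }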
static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
        struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);
        set_ctl_op(ctl, pipeline);
static bool start_signal_needed(struct mdp5_ctl *ctl,
                                struct mdp5_pipeline *pipeline)
        if (!ctl->encoder_enabled)
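Only the encoder gate is visible here; plausibly the rest of the predicate
keys off the interface, since only data paths without a free-running pixel
stream need an explicit START kick. A sketch under that assumption (the
INTF_* and mode names are taken on faith from the wider driver):

        static bool start_signal_needed(struct mdp5_ctl *ctl,
                                        struct mdp5_pipeline *pipeline)
        {
                struct mdp5_interface *intf = pipeline->intf;

                if (!ctl->encoder_enabled)
                        return false;

                switch (intf->type) {
                case INTF_WB:
                        return true;    /* writeback: every frame is sw-kicked */
                case INTF_DSI:
                        /* DSI command-mode frames are also software-triggered */
                        return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
                default:
                        return false;   /* video-mode paths free-run */
                }
        }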
static void send_start_signal(struct mdp5_ctl *ctl)
        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
                               struct mdp5_pipeline *pipeline,
                               bool enabled)
        if (WARN_ON(!ctl))
                return -EINVAL;

        ctl->encoder_enabled = enabled;

        if (start_signal_needed(ctl, pipeline)) {
                send_start_signal(ctl);
/* CTL registers need to be flushed after calling this function */
int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
                        int cursor_id, bool enable)
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

        DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM",
                      ctl->id);

        spin_lock_irqsave(&ctl->hw_lock, flags);
        blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));
        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
        ctl->cursor_on = enable;
        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);
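Note that this only stages the trigger bits; per the comment above it, the
caller must still flush. A hypothetical caller sequence (the commit
parameters beyond ctl/pipeline are assumed from their use further down):

        /* stage the cursor, then flush; cursor_id 0 and start=true are
         * illustrative values only */
        mdp5_ctl_set_cursor(ctl, pipeline, 0, true);
        mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_cursor(0), true);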
static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
        ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
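The index i, together with the "number of CTL / Layer Mixers" count noted
earlier, implies a loop over every mixer; a sketch, assuming the manager
exposes that count as ctl_mgr->nlm:

        static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
        {
                unsigned long flags;
                struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
                int i;

                spin_lock_irqsave(&ctl->hw_lock, flags);

                /* zero the LAYER and LAYER_EXT blend config of every mixer */
                for (i = 0; i < ctl_mgr->nlm; i++) {
                        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
                        ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
                }

                spin_unlock_irqrestore(&ctl->hw_lock, flags);
        }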
int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
        mdp5_ctl_reset_blend_regs(ctl);

        spin_lock_irqsave(&ctl->hw_lock, flags);
        if (ctl->cursor_on)

        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
        ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
        ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
        ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
        ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);
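mdp_ctl_flush_mask_lm() maps a mixer index to its bit in the FLUSH
register; a plausible shape for such a helper (the MDP5_CTL_FLUSH_LM*
bit names are assumptions in the style of the register headers):

        u32 mdp_ctl_flush_mask_lm(int lm)
        {
                switch (lm) {
                case 0: return MDP5_CTL_FLUSH_LM0;
                case 1: return MDP5_CTL_FLUSH_LM1;
                case 2: return MDP5_CTL_FLUSH_LM2;
                default: return 0;      /* unknown mixer: nothing to flush */
                }
        }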
static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
                        u32 flush_mask)
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
                                 u32 *flush_id)
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

        if (ctl->pair) {
                DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
                ctl->flush_pending = true;

                if (ctl->pair->flush_pending) {
                        *flush_id = min_t(u32, ctl->id, ctl->pair->id);

                        ctl->flush_pending = false;
                        ctl->pair->flush_pending = false;
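The intent behind the pairing: in dual-pipe ("single FLUSH") mode, both
paired CTLs must hit the hardware in one write. A sketch of how the
fragments likely complete; the manager-level single_flush_pending_mask
accumulator is an assumed name:

        if (ctl->pair) {
                DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
                ctl->flush_pending = true;

                /* park this half's bits until the partner commits too */
                ctl_mgr->single_flush_pending_mask |= *flush_mask;
                *flush_mask = 0;

                if (ctl->pair->flush_pending) {
                        /* both halves ready: one combined write through the
                         * lower-numbered CTL's FLUSH register */
                        *flush_id = min_t(u32, ctl->id, ctl->pair->id);
                        *flush_mask = ctl_mgr->single_flush_pending_mask;

                        ctl->flush_pending = false;
                        ctl->pair->flush_pending = false;
                        ctl_mgr->single_flush_pending_mask = 0;
                }
        }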
/*
 * CTL registers need to be flushed in some circumstances; if that is the
 * case, some trigger bits will be present in both the flush mask and
 * ctl->pending_ctl_trigger.
 */
u32 mdp5_ctl_commit(struct mdp5_ctl *ctl,
        struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
        u32 flush_id = ctl->id;

        VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

        if (ctl->pending_ctl_trigger & flush_mask) {
                ctl->pending_ctl_trigger = 0;

        flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

        fix_for_single_flush(ctl, &flush_mask, &flush_id);

        ctl->flush_mask |= flush_mask;

        flush_mask |= ctl->flush_mask;
        ctl->flush_mask = 0;

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);

        if (start_signal_needed(ctl, pipeline)) {
                send_start_signal(ctl);
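The two adjacent flush_mask fragments above presumably sit on opposite
arms of a "flush now or defer?" branch; a sketch, with the start
parameter name assumed from the elided signature:

        if (!start) {
                /* not allowed to kick the hardware yet: remember these bits */
                ctl->flush_mask |= flush_mask;
        } else {
                /* flushing now: fold in everything deferred earlier */
                flush_mask |= ctl->flush_mask;
                ctl->flush_mask = 0;
        }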
u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl)
        return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id));

int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl)
        return WARN_ON(!ctl) ? -EINVAL : ctl->id;
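Reading FLUSH back reports the bits the hardware has not yet consumed, so
a caller can wait for a commit to land; a hypothetical poll:

        /* hypothetical wait: spin until our flush bits have been consumed */
        while (mdp5_ctl_get_commit_status(ctl) & flush_mask)
                cpu_relax();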
/*
 * mdp5_ctlm_request() - CTL allocation
 *
 * Try to return a booked CTL if @intf_num is 1 or 2, an unbooked one for
 * other INTFs. If no CTL is available in the preferred category, allocate
 * from the other one.
 *
 * @return NULL if no CTL is available.
 */
In mdp5_ctlm_request():
        struct mdp5_ctl *ctl = NULL;

                 "fall back to the other CTL category for INTF %d!\n", intf_num);

        DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");

        ctl = &ctl_mgr->ctls[c];
        ctl->status |= CTL_STAT_BUSY;
        ctl->pending_ctl_trigger = 0;
        DBG("CTL %d allocated", ctl->id);

        return ctl;
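The warning and error fragments imply a two-pass search: first the
preferred category (booked vs. unbooked), then the other. A sketch of that
selection; find_free_ctl() is a hypothetical helper, and CTL_STAT_BOOKED
and nctl are assumed names:

        static int find_free_ctl(struct mdp5_ctl_manager *ctl_mgr, u32 match)
        {
                const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
                int c;

                for (c = 0; c < ctl_mgr->nctl; c++)
                        if ((ctl_mgr->ctls[c].status & checkm) == match)
                                return c;
                return -1;
        }

        /* in mdp5_ctlm_request(), under the pool lock: booked CTLs are
         * preferred for INTF 1/2, unbooked for everything else */
        u32 match = (intf_num == 1 || intf_num == 2) ? CTL_STAT_BOOKED : 0;
        int c = find_free_ctl(ctl_mgr, match);

        if (c < 0) {
                dev_warn(ctl_mgr->dev->dev,
                         "fall back to the other CTL category for INTF %d!\n",
                         intf_num);
                c = find_free_ctl(ctl_mgr, match ^ CTL_STAT_BOOKED);
        }
        if (c < 0)
                DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");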
In mdp5_ctlm_hw_reset():
        struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

        spin_lock_irqsave(&ctl->hw_lock, flags);
        ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
        spin_unlock_irqrestore(&ctl->hw_lock, flags);
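Presumably the reset walks the whole pool; a sketch assuming the nctl
count from the manager:

        for (c = 0; c < ctl_mgr->nctl; c++) {
                struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

                spin_lock_irqsave(&ctl->hw_lock, flags);
                ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);    /* clear OP mode */
                spin_unlock_irqrestore(&ctl->hw_lock, flags);
        }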
In mdp5_ctlm_init():
        const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl;

        DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n");

        /* initialize the CTL manager: */

        /* initialize each CTL of the pool: */
        struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

        ctl->ctlm = ctl_mgr;
        ctl->id = c;
        ctl->reg_offset = ctl_cfg->base[c];
        ctl->status = 0;
        spin_lock_init(&ctl->hw_lock);
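Putting the per-CTL fragments together, the pool setup plausibly looks
like this (the loop bound ctl_mgr->nctl is an assumed name):

        /* initialize each CTL of the pool: */
        for (c = 0; c < ctl_mgr->nctl; c++) {
                struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

                ctl->ctlm = ctl_mgr;                    /* back-pointer to manager */
                ctl->id = c;                            /* pool index doubles as CTL id */
                ctl->reg_offset = ctl_cfg->base[c];     /* per-CTL register base */
                ctl->status = 0;                        /* free: not BUSY or BOOKED */
                spin_lock_init(&ctl->hw_lock);
        }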