Lines matching "ctl" in mdp5_ctl.c (MSM DRM, MDP5 CTL pool manager)

11  * CTL - MDP Control Pool Manager
21 * In certain use cases (high-resolution dual pipe), one single CTL can be
22 * shared across multiple CRTCs.
33 /* CTL status bitmask */
45 /* when do CTL registers need to be flushed? (mask of trigger bits) */
50 /* True if the current CTL has FLUSH bits pending for single FLUSH. */
53 struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
59 /* number of CTL / Layer Mixers in this hw config: */
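Pieced together from the fragments above, the per-CTL state is roughly the following (only the fields visible in this listing are certain; ordering and any omitted members are assumptions):

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;

	u32 id;
	u32 reg_offset;

	/* CTL status bitmask */
	u32 status;

	bool encoder_enabled;

	/* pending flush_mask bits */
	u32 flush_mask;
	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	bool cursor_on;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;
	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */

	spinlock_t hw_lock;
};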
84 void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data) in ctl_write() argument
86 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in ctl_write()
88 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ in ctl_write()
93 u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) in ctl_read() argument
95 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in ctl_read()
97 (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */ in ctl_read()
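For reference, both accessors are thin wrappers over the driver-wide register helpers; a reconstruction of the full bodies (close to upstream, but treat the exact details as approximate):

static inline void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}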
136 static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) in set_ctl_op() argument
164 spin_lock_irqsave(&ctl->hw_lock, flags); in set_ctl_op()
165 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); in set_ctl_op()
166 spin_unlock_irqrestore(&ctl->hw_lock, flags); in set_ctl_op()
169 int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) in mdp5_ctl_set_pipeline() argument
171 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in mdp5_ctl_set_pipeline()
178 set_ctl_op(ctl, pipeline); in mdp5_ctl_set_pipeline()
183 static bool start_signal_needed(struct mdp5_ctl *ctl, in start_signal_needed() argument
188 if (!ctl->encoder_enabled) in start_signal_needed()
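The !ctl->encoder_enabled test above is only the first bail-out: a START signal is needed only for data paths that do not free-run, i.e. writeback and command-mode DSI. A sketch of the full predicate (reconstructed; the switch details are an assumption here):

static bool start_signal_needed(struct mdp5_ctl *ctl,
				struct mdp5_pipeline *pipeline)
{
	struct mdp5_interface *intf = pipeline->intf;

	if (!ctl->encoder_enabled)
		return false;

	switch (intf->type) {
	case INTF_WB:		/* writeback only runs when kicked */
		return true;
	case INTF_DSI:		/* command-mode panels need a kick per frame */
		return intf->mode == MDP5_INTF_DSI_MODE_COMMAND;
	default:		/* video-mode interfaces free-run */
		return false;
	}
}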
208 static void send_start_signal(struct mdp5_ctl *ctl) in send_start_signal() argument
212 spin_lock_irqsave(&ctl->hw_lock, flags); in send_start_signal()
213 ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1); in send_start_signal()
214 spin_unlock_irqrestore(&ctl->hw_lock, flags); in send_start_signal()
220 * @ctl: the CTL instance
227 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, in mdp5_ctl_set_encoder_state() argument
233 if (WARN_ON(!ctl)) in mdp5_ctl_set_encoder_state()
236 ctl->encoder_enabled = enabled; in mdp5_ctl_set_encoder_state()
239 if (start_signal_needed(ctl, pipeline)) { in mdp5_ctl_set_encoder_state()
240 send_start_signal(ctl); in mdp5_ctl_set_encoder_state()
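A hypothetical caller sketch: the encoder enable path flips this state, which on a start-needed interface immediately kicks the first frame:

	/* hypothetical encoder ->enable() usage */
	int ret = mdp5_ctl_set_encoder_state(ctl, pipeline, true);
	if (ret)
		return ret;
	/* on a WB/command-mode interface, the first START has now been sent */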
248 * CTL registers need to be flushed after calling this function
251 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, in mdp5_ctl_set_cursor() argument
254 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_set_cursor()
260 DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM", in mdp5_ctl_set_cursor()
261 ctl->id); in mdp5_ctl_set_cursor()
270 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_set_cursor()
272 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm)); in mdp5_ctl_set_cursor()
279 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); in mdp5_ctl_set_cursor()
280 ctl->cursor_on = enable; in mdp5_ctl_set_cursor()
282 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_set_cursor()
284 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); in mdp5_ctl_set_cursor()
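Per the comment above ("CTL registers need to be flushed after calling this function"), callers are expected to follow up with a commit; a hypothetical sequence for hardware cursor 0 (the trailing bool of mdp5_ctl_commit() is the start flag from the upstream signature):

	/* hypothetical caller sketch: enable HW cursor 0, then flush it */
	mdp5_ctl_set_cursor(ctl, pipeline, 0, true);
	mdp5_ctl_commit(ctl, pipeline, mdp_ctl_flush_mask_cursor(0), true);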
332 static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl) in mdp5_ctl_reset_blend_regs() argument
335 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_reset_blend_regs()
338 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_reset_blend_regs()
341 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0); in mdp5_ctl_reset_blend_regs()
342 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0); in mdp5_ctl_reset_blend_regs()
345 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_reset_blend_regs()
350 int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, in mdp5_ctl_blend() argument
362 mdp5_ctl_reset_blend_regs(ctl); in mdp5_ctl_blend()
390 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_blend()
391 if (ctl->cursor_on) in mdp5_ctl_blend()
394 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); in mdp5_ctl_blend()
395 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm), in mdp5_ctl_blend()
398 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm), in mdp5_ctl_blend()
400 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm), in mdp5_ctl_blend()
403 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_blend()
405 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm); in mdp5_ctl_blend()
407 ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm); in mdp5_ctl_blend()
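For orientation, blend_cfg is a per-LM word assigning each source pipe to a blend stage, plus a dedicated cursor-out bit; a hypothetical composition using the generated register macros (pipe and stage choices are purely illustrative):

	/* illustrative only: VIG0 at the base stage, RGB0 one stage above */
	u32 blend_cfg = MDP5_CTL_LAYER_REG_VIG0(STAGE_BASE) |
			MDP5_CTL_LAYER_REG_RGB0(STAGE0);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;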
473 static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, in fix_sw_flush() argument
476 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_sw_flush()
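fix_sw_flush() exists because some targets lack a dedicated cursor FLUSH bit and reuse the Layer Mixer bit instead; a sketch of the body (reconstructed, so treat the exact bit handling as an assumption):

	u32 sw_mask = 0;
#define BIT_NEEDS_SW_FIX(bit) \
	(!(ctl_mgr->flush_hw_mask & bit) && (flush_mask & bit))

	/* for some targets, the cursor bit is the same as the LM bit */
	if (BIT_NEEDS_SW_FIX(MDP5_CTL_FLUSH_CURSOR_0))
		sw_mask |= mdp_ctl_flush_mask_lm(pipeline->mixer->lm);

	return sw_mask;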
488 static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, in fix_for_single_flush() argument
491 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_for_single_flush()
493 if (ctl->pair) { in fix_for_single_flush()
494 DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); in fix_for_single_flush()
495 ctl->flush_pending = true; in fix_for_single_flush()
499 if (ctl->pair->flush_pending) { in fix_for_single_flush()
500 *flush_id = min_t(u32, ctl->id, ctl->pair->id); in fix_for_single_flush()
503 ctl->flush_pending = false; in fix_for_single_flush()
504 ctl->pair->flush_pending = false; in fix_for_single_flush()
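The fragments above hide the accumulation step: while waiting for its partner, each paired CTL parks its bits in a manager-wide pending mask and flushes nothing; once both halves are pending, the union is flushed through the lower-numbered CTL. Approximately (reconstructed; single_flush_pending_mask is assumed to live in the manager struct):

	if (ctl->pair) {
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;	/* defer: partner not committed yet */

		if (ctl->pair->flush_pending) {
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;
		}
	}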
516 * @ctl: the CTL instance
530 * CTL registers need to be flushed in some circumstances; if that is the
531 * case, some trigger bits will be present in both flush mask and
532 * ctl->pending_ctl_trigger.
536 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, in mdp5_ctl_commit() argument
540 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_commit()
542 u32 flush_id = ctl->id; in mdp5_ctl_commit()
545 VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger); in mdp5_ctl_commit()
547 if (ctl->pending_ctl_trigger & flush_mask) { in mdp5_ctl_commit()
549 ctl->pending_ctl_trigger = 0; in mdp5_ctl_commit()
552 flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask); in mdp5_ctl_commit()
558 fix_for_single_flush(ctl, &flush_mask, &flush_id); in mdp5_ctl_commit()
561 ctl->flush_mask |= flush_mask; in mdp5_ctl_commit()
564 flush_mask |= ctl->flush_mask; in mdp5_ctl_commit()
565 ctl->flush_mask = 0; in mdp5_ctl_commit()
569 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_commit()
570 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); in mdp5_ctl_commit()
571 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_commit()
574 if (start_signal_needed(ctl, pipeline)) { in mdp5_ctl_commit()
575 send_start_signal(ctl); in mdp5_ctl_commit()
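A hypothetical commit from a CRTC flush path, combining pipe and LM flush bits and requesting an immediate START (the trailing bool is the start flag from the upstream signature; an assumption here):

	/* hypothetical caller sketch */
	u32 flush_mask = mdp_ctl_flush_mask_pipe(SSPP_VIG0) |
			 mdp_ctl_flush_mask_lm(mixer->lm);
	u32 curr = mdp5_ctl_commit(ctl, pipeline, flush_mask, true);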
581 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) in mdp5_ctl_get_commit_status() argument
583 return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); in mdp5_ctl_get_commit_status()
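FLUSH bits read back as set until the hardware latches the double-buffered registers, so this can be polled for completion; a hypothetical wait loop:

	/* hypothetical poll: bits self-clear once the hw consumes the flush */
	while (mdp5_ctl_get_commit_status(ctl) & flush_mask)
		cpu_relax();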
586 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) in mdp5_ctl_get_ctl_id() argument
588 return WARN_ON(!ctl) ? -EINVAL : ctl->id; in mdp5_ctl_get_ctl_id()
626 * mdp5_ctlm_request() - CTL allocation
628 * Try to return a booked CTL if @intf_num is 1 or 2, and an unbooked one for other INTFs.
629 * If no CTL is available in the preferred category, allocate from the other one.
631 * @return NULL if no CTL is available.
636 struct mdp5_ctl *ctl = NULL; in mdp5_ctlm_request() local
650 "fall back to the other CTL category for INTF %d!\n", intf_num); in mdp5_ctlm_request()
657 DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!"); in mdp5_ctlm_request()
661 ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_request()
662 ctl->status |= CTL_STAT_BUSY; in mdp5_ctlm_request()
663 ctl->pending_ctl_trigger = 0; in mdp5_ctlm_request()
664 DBG("CTL %d allocated", ctl->id); in mdp5_ctlm_request()
668 return ctl; in mdp5_ctlm_request()
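A hypothetical allocation sequence at modeset time; both calls appear earlier in this listing (intf->num is assumed to carry the interface number):

	/* hypothetical caller sketch */
	ctl = mdp5_ctlm_request(ctl_mgr, intf->num);
	if (!ctl)
		return -ENODEV;

	mdp5_ctl_set_pipeline(ctl, pipeline);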
677 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_hw_reset() local
679 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctlm_hw_reset()
680 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); in mdp5_ctlm_hw_reset()
681 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctlm_hw_reset()
692 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; in mdp5_ctlm_init()
698 DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n"); in mdp5_ctlm_init()
708 /* initialize the CTL manager: */ in mdp5_ctlm_init()
715 /* initialize each CTL of the pool: */ in mdp5_ctlm_init()
718 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_init() local
726 ctl->ctlm = ctl_mgr; in mdp5_ctlm_init()
727 ctl->id = c; in mdp5_ctlm_init()
728 ctl->reg_offset = ctl_cfg->base[c]; in mdp5_ctlm_init()
729 ctl->status = 0; in mdp5_ctlm_init()
730 spin_lock_init(&ctl->hw_lock); in mdp5_ctlm_init()