Lines Matching full:ctl
10 * CTL - MDP Control Pool Manager
20 * In certain use cases (high-resolution dual pipe), one single CTL can be shared across multiple CRTCs.
32 /* CTL status bitmask */
44 /* when do CTL registers need to be flushed? (mask of trigger bits) */
49 /* True if the current CTL has FLUSH bits pending for single FLUSH. */
52 struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
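
Taken together, the fields above carry all of the per-CTL flush bookkeeping. A minimal sketch of the relevant part of struct mdp5_ctl, assembled only from the members visible in this listing (field order and any omitted members are assumptions):

struct mdp5_ctl {
	struct mdp5_ctl_manager *ctlm;	/* back-pointer to the pool manager */

	u32 id;
	u32 reg_offset;

	/* CTL status bitmask */
	u32 status;

	bool encoder_enabled;
	bool cursor_on;

	/* when do CTL registers need to be flushed? (mask of trigger bits) */
	u32 pending_ctl_trigger;

	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
	bool flush_pending;
	u32 flush_mask;			/* flush bits accumulated across commits */

	struct mdp5_ctl *pair;		/* Paired CTL to be flushed together */

	spinlock_t hw_lock;		/* protects the REG_MDP5_CTL_* registers */
};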
58 /* number of CTL / Layer Mixers in this hw config: */
83 void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data) in ctl_write() argument
85 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in ctl_write()
87 (void)ctl->reg_offset; /* TODO use this instead of mdp5_write */ in ctl_write()
92 u32 ctl_read(struct mdp5_ctl *ctl, u32 reg) in ctl_read() argument
94 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in ctl_read()
96 (void)ctl->reg_offset; /* TODO use this instead of mdp5_read */ in ctl_read()
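
Both accessors resolve the KMS handle through the manager and, for now, ignore the per-CTL reg_offset (hence the TODOs). A sketch of the likely full bodies, assuming the driver-wide mdp5_write()/mdp5_read() helpers take the KMS pointer plus an absolute register offset:

static inline void ctl_write(struct mdp5_ctl *ctl, u32 reg, u32 data)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_write */
	mdp5_write(mdp5_kms, reg, data);
}

static inline u32 ctl_read(struct mdp5_ctl *ctl, u32 reg)
{
	struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm);

	(void)ctl->reg_offset; /* TODO use this instead of mdp5_read */
	return mdp5_read(mdp5_kms, reg);
}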
135 static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) in set_ctl_op() argument
163 spin_lock_irqsave(&ctl->hw_lock, flags); in set_ctl_op()
164 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op); in set_ctl_op()
165 spin_unlock_irqrestore(&ctl->hw_lock, flags); in set_ctl_op()
168 int mdp5_ctl_set_pipeline(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline) in mdp5_ctl_set_pipeline() argument
170 struct mdp5_kms *mdp5_kms = get_kms(ctl->ctlm); in mdp5_ctl_set_pipeline()
177 set_ctl_op(ctl, pipeline); in mdp5_ctl_set_pipeline()
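
set_ctl_op() derives an operation-mode word from the pipeline's interface and programs it under the per-CTL hardware lock; mdp5_ctl_set_pipeline() then only has to program the interface itself and call it. A sketch of that locking pattern, with the derivation of ctl_op from pipeline->intf elided:

static void set_ctl_op(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline)
{
	unsigned long flags;
	u32 ctl_op = 0;

	/* ... build ctl_op from pipeline->intf type and mode (elided) ... */

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), ctl_op);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}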
182 static bool start_signal_needed(struct mdp5_ctl *ctl, in start_signal_needed() argument
187 if (!ctl->encoder_enabled) in start_signal_needed()
207 static void send_start_signal(struct mdp5_ctl *ctl) in send_start_signal() argument
211 spin_lock_irqsave(&ctl->hw_lock, flags); in send_start_signal()
212 ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1); in send_start_signal()
213 spin_unlock_irqrestore(&ctl->hw_lock, flags); in send_start_signal()
219 * @ctl: the CTL instance
226 int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl, in mdp5_ctl_set_encoder_state() argument
232 if (WARN_ON(!ctl)) in mdp5_ctl_set_encoder_state()
235 ctl->encoder_enabled = enabled; in mdp5_ctl_set_encoder_state()
238 if (start_signal_needed(ctl, pipeline)) { in mdp5_ctl_set_encoder_state()
239 send_start_signal(ctl); in mdp5_ctl_set_encoder_state()
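
Enabling the encoder records the new state and, when the interface needs a software kick (the condition checked by start_signal_needed(), of which only the encoder_enabled test is visible in the matches), writes 1 to the CTL's START register. A sketch:

static void send_start_signal(struct mdp5_ctl *ctl)
{
	unsigned long flags;

	spin_lock_irqsave(&ctl->hw_lock, flags);
	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}

int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
			       struct mdp5_pipeline *pipeline,
			       bool enabled)
{
	if (WARN_ON(!ctl))
		return -EINVAL;

	ctl->encoder_enabled = enabled;

	if (start_signal_needed(ctl, pipeline))
		send_start_signal(ctl);

	return 0;
}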
247 * CTL registers need to be flushed after calling this function
250 int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, in mdp5_ctl_set_cursor() argument
253 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_set_cursor()
259 DRM_DEV_ERROR(ctl_mgr->dev->dev, "CTL %d cannot find LM", in mdp5_ctl_set_cursor()
260 ctl->id); in mdp5_ctl_set_cursor()
269 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_set_cursor()
271 blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm)); in mdp5_ctl_set_cursor()
278 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); in mdp5_ctl_set_cursor()
279 ctl->cursor_on = enable; in mdp5_ctl_set_cursor()
281 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_set_cursor()
283 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id); in mdp5_ctl_set_cursor()
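
The cursor toggle is a read-modify-write of the owning mixer's LAYER register under hw_lock, followed by recording the cursor flush bits as the pending trigger. A sketch of that core, assuming the CURSOR_OUT bit (named here MDP5_CTL_LAYER_REG_CURSOR_OUT) is what gets set and cleared:

int mdp5_ctl_set_cursor(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
			int cursor_id, bool enable)
{
	struct mdp5_hw_mixer *mixer = pipeline->mixer;
	unsigned long flags;
	u32 blend_cfg;

	/* ... error out if the pipeline has no mixer (elided) ... */

	spin_lock_irqsave(&ctl->hw_lock, flags);

	blend_cfg = ctl_read(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm));

	if (enable)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;
	else
		blend_cfg &= ~MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl->cursor_on = enable;

	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_cursor(cursor_id);

	return 0;
}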
331 static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl) in mdp5_ctl_reset_blend_regs() argument
334 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_reset_blend_regs()
337 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_reset_blend_regs()
340 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0); in mdp5_ctl_reset_blend_regs()
341 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0); in mdp5_ctl_reset_blend_regs()
344 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_reset_blend_regs()
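
Nearly all of mdp5_ctl_reset_blend_regs() is visible above; a sketch of the full function, assuming ctl_mgr->nlm holds the layer-mixer count mentioned near the top of the file:

static void mdp5_ctl_reset_blend_regs(struct mdp5_ctl *ctl)
{
	unsigned long flags;
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;
	int i;

	spin_lock_irqsave(&ctl->hw_lock, flags);

	/* clear the blend configuration of every LM driven by this CTL */
	for (i = 0; i < ctl_mgr->nlm; i++) {
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, i), 0x0);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, i), 0x0);
	}

	spin_unlock_irqrestore(&ctl->hw_lock, flags);
}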
349 int mdp5_ctl_blend(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, in mdp5_ctl_blend() argument
361 mdp5_ctl_reset_blend_regs(ctl); in mdp5_ctl_blend()
389 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_blend()
390 if (ctl->cursor_on) in mdp5_ctl_blend()
393 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg); in mdp5_ctl_blend()
394 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm), in mdp5_ctl_blend()
397 ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm), in mdp5_ctl_blend()
399 ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm), in mdp5_ctl_blend()
402 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_blend()
404 ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm); in mdp5_ctl_blend()
406 ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm); in mdp5_ctl_blend()
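
The tail of mdp5_ctl_blend() writes the computed stage masks to the main mixer and mirrors them to the right mixer in dual-pipe setups, all inside one hw_lock section, then records which LM flush bits the next commit must raise. A sketch of that tail, with the computation of blend_cfg/blend_ext_cfg from the stage array elided:

	spin_lock_irqsave(&ctl->hw_lock, flags);
	if (ctl->cursor_on)
		blend_cfg |= MDP5_CTL_LAYER_REG_CURSOR_OUT;

	ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, mixer->lm), blend_cfg);
	ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, mixer->lm),
		  blend_ext_cfg);
	if (r_mixer) {
		/* dual pipe: mirror the configuration to the right LM */
		ctl_write(ctl, REG_MDP5_CTL_LAYER_REG(ctl->id, r_mixer->lm),
			  blend_cfg);
		ctl_write(ctl, REG_MDP5_CTL_LAYER_EXT_REG(ctl->id, r_mixer->lm),
			  blend_ext_cfg);
	}
	spin_unlock_irqrestore(&ctl->hw_lock, flags);

	ctl->pending_ctl_trigger = mdp_ctl_flush_mask_lm(mixer->lm);
	if (r_mixer)
		ctl->pending_ctl_trigger |= mdp_ctl_flush_mask_lm(r_mixer->lm);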
472 static u32 fix_sw_flush(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline, in fix_sw_flush() argument
475 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_sw_flush()
487 static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask, in fix_for_single_flush() argument
490 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in fix_for_single_flush()
492 if (ctl->pair) { in fix_for_single_flush()
493 DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask); in fix_for_single_flush()
494 ctl->flush_pending = true; in fix_for_single_flush()
498 if (ctl->pair->flush_pending) { in fix_for_single_flush()
499 *flush_id = min_t(u32, ctl->id, ctl->pair->id); in fix_for_single_flush()
502 ctl->flush_pending = false; in fix_for_single_flush()
503 ctl->pair->flush_pending = false; in fix_for_single_flush()
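
The paired-CTL logic defers each CTL's flush until both halves of the pair have one pending, then issues a single combined flush through the lower-numbered CTL. A sketch, assuming the manager accumulates the combined bits in a field such as single_flush_pending_mask:

static void fix_for_single_flush(struct mdp5_ctl *ctl, u32 *flush_mask,
				 u32 *flush_id)
{
	struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm;

	if (ctl->pair) {
		DBG("CTL %d FLUSH pending mask %x", ctl->id, *flush_mask);
		ctl->flush_pending = true;
		ctl_mgr->single_flush_pending_mask |= (*flush_mask);
		*flush_mask = 0;	/* park the bits; don't flush yet */

		if (ctl->pair->flush_pending) {
			/* both CTLs ready: flush once via the lower ID */
			*flush_id = min_t(u32, ctl->id, ctl->pair->id);
			*flush_mask = ctl_mgr->single_flush_pending_mask;

			ctl->flush_pending = false;
			ctl->pair->flush_pending = false;
			ctl_mgr->single_flush_pending_mask = 0;
		}
	}
}

The effect is that neither CRTC flushes alone: the first arrival parks its bits and returns an empty mask, and the second releases the combined mask through a single FLUSH register write.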
515 * @ctl: the CTL instance
529 * CTL registers need to be flushed in some circumstances; if that is the case, some trigger bits will be present in both the flush mask and ctl->pending_ctl_trigger.
535 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, in mdp5_ctl_commit() argument
539 struct mdp5_ctl_manager *ctl_mgr = ctl->ctlm; in mdp5_ctl_commit()
541 u32 flush_id = ctl->id; in mdp5_ctl_commit()
544 VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger); in mdp5_ctl_commit()
546 if (ctl->pending_ctl_trigger & flush_mask) { in mdp5_ctl_commit()
548 ctl->pending_ctl_trigger = 0; in mdp5_ctl_commit()
551 flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask); in mdp5_ctl_commit()
557 fix_for_single_flush(ctl, &flush_mask, &flush_id); in mdp5_ctl_commit()
560 ctl->flush_mask |= flush_mask; in mdp5_ctl_commit()
563 flush_mask |= ctl->flush_mask; in mdp5_ctl_commit()
564 ctl->flush_mask = 0; in mdp5_ctl_commit()
568 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctl_commit()
569 ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask); in mdp5_ctl_commit()
570 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctl_commit()
573 if (start_signal_needed(ctl, pipeline)) { in mdp5_ctl_commit()
574 send_start_signal(ctl); in mdp5_ctl_commit()
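
Putting the commit path together: pending trigger bits force a flush of the CTL registers themselves, software-flush quirks are folded in via fix_sw_flush(), paired CTLs are coalesced, and when the caller does not yet want to start, the mask is only accumulated. A sketch, where the CTL-flush bit name (MDP5_CTL_FLUSH_CTL), the hardware-mask filtering, and the exact return value are assumptions:

u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
		    u32 flush_mask, bool start)
{
	unsigned long flags;
	u32 flush_id = ctl->id;

	VERB("flush_mask=%x, trigger=%x", flush_mask, ctl->pending_ctl_trigger);

	if (ctl->pending_ctl_trigger & flush_mask) {
		/* the CTL registers themselves must be flushed too */
		flush_mask |= MDP5_CTL_FLUSH_CTL;
		ctl->pending_ctl_trigger = 0;
	}

	flush_mask |= fix_sw_flush(ctl, pipeline, flush_mask);

	fix_for_single_flush(ctl, &flush_mask, &flush_id);

	if (!start) {
		/* accumulate; the actual flush happens on a later commit */
		ctl->flush_mask |= flush_mask;
		return flush_mask;
	}

	flush_mask |= ctl->flush_mask;
	ctl->flush_mask = 0;

	if (flush_mask) {
		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_FLUSH(flush_id), flush_mask);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}

	if (start_signal_needed(ctl, pipeline))
		send_start_signal(ctl);

	return flush_mask;
}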
580 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl) in mdp5_ctl_get_commit_status() argument
582 return ctl_read(ctl, REG_MDP5_CTL_FLUSH(ctl->id)); in mdp5_ctl_get_commit_status()
585 int mdp5_ctl_get_ctl_id(struct mdp5_ctl *ctl) in mdp5_ctl_get_ctl_id() argument
587 return WARN_ON(!ctl) ? -EINVAL : ctl->id; in mdp5_ctl_get_ctl_id()
625 * mdp5_ctlm_request() - CTL allocation
627 * Try to return a booked CTL if @intf_num is 1 or 2, or an unbooked one for other INTFs.
628 * If no CTL is available in the preferred category, allocate from the other one.
630 * @return NULL if no CTL is available.
635 struct mdp5_ctl *ctl = NULL; in mdp5_ctlm_request() local
649 "fall back to the other CTL category for INTF %d!\n", intf_num); in mdp5_ctlm_request()
656 DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!"); in mdp5_ctlm_request()
660 ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_request()
661 ctl->status |= CTL_STAT_BUSY; in mdp5_ctlm_request()
662 ctl->pending_ctl_trigger = 0; in mdp5_ctlm_request()
663 DBG("CTL %d allocated", ctl->id); in mdp5_ctlm_request()
667 return ctl; in mdp5_ctlm_request()
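
The allocation path scans the pool under a manager-level lock, preferring the booked category for INTF 1 and 2 and falling back to the other one with a warning. A sketch, assuming a pool_lock spinlock, a CTL_STAT_BOOKED status bit marking the reserved CTLs, and an nctl pool-size field:

struct mdp5_ctl *mdp5_ctlm_request(struct mdp5_ctl_manager *ctl_mgr,
				   int intf_num)
{
	unsigned long flags;
	struct mdp5_ctl *ctl = NULL;
	const u32 checkm = CTL_STAT_BUSY | CTL_STAT_BOOKED;
	u32 match = ((intf_num == 1) || (intf_num == 2)) ? CTL_STAT_BOOKED : 0;
	int c;

	spin_lock_irqsave(&ctl_mgr->pool_lock, flags);

	/* search the preferred category first... */
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	dev_warn(ctl_mgr->dev->dev,
		 "fall back to the other CTL category for INTF %d!\n", intf_num);

	/* ...then the other one */
	match ^= CTL_STAT_BOOKED;
	for (c = 0; c < ctl_mgr->nctl; c++)
		if ((ctl_mgr->ctls[c].status & checkm) == match)
			goto found;

	DRM_DEV_ERROR(ctl_mgr->dev->dev, "No more CTL available!");
	goto unlock;

found:
	ctl = &ctl_mgr->ctls[c];
	ctl->status |= CTL_STAT_BUSY;
	ctl->pending_ctl_trigger = 0;
	DBG("CTL %d allocated", ctl->id);

unlock:
	spin_unlock_irqrestore(&ctl_mgr->pool_lock, flags);
	return ctl;
}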
676 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_hw_reset() local
678 spin_lock_irqsave(&ctl->hw_lock, flags); in mdp5_ctlm_hw_reset()
679 ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0); in mdp5_ctlm_hw_reset()
680 spin_unlock_irqrestore(&ctl->hw_lock, flags); in mdp5_ctlm_hw_reset()
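
mdp5_ctlm_hw_reset() walks the whole pool and zeroes each CTL's OP register, returning every data path to a known state; a sketch:

void mdp5_ctlm_hw_reset(struct mdp5_ctl_manager *ctl_mgr)
{
	unsigned long flags;
	int c;

	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		spin_lock_irqsave(&ctl->hw_lock, flags);
		ctl_write(ctl, REG_MDP5_CTL_OP(ctl->id), 0);
		spin_unlock_irqrestore(&ctl->hw_lock, flags);
	}
}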
691 const struct mdp5_ctl_block *ctl_cfg = &hw_cfg->ctl; in mdp5_ctlm_init()
697 DRM_DEV_ERROR(dev->dev, "failed to allocate CTL manager\n"); in mdp5_ctlm_init()
707 /* initialize the CTL manager: */ in mdp5_ctlm_init()
714 /* initialize each CTL of the pool: */ in mdp5_ctlm_init()
717 struct mdp5_ctl *ctl = &ctl_mgr->ctls[c]; in mdp5_ctlm_init() local
725 ctl->ctlm = ctl_mgr; in mdp5_ctlm_init()
726 ctl->id = c; in mdp5_ctlm_init()
727 ctl->reg_offset = ctl_cfg->base[c]; in mdp5_ctlm_init()
728 ctl->status = 0; in mdp5_ctlm_init()
729 spin_lock_init(&ctl->hw_lock); in mdp5_ctlm_init()
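
Initialization wires each pool entry to the manager and to its register block from the hardware config. A sketch of the per-CTL loop body, assuming the pool size comes from the CTL block of hw_cfg and that error handling for a missing base offset is elided:

	/* initialize each CTL of the pool: */
	for (c = 0; c < ctl_mgr->nctl; c++) {
		struct mdp5_ctl *ctl = &ctl_mgr->ctls[c];

		ctl->ctlm = ctl_mgr;		    /* back-pointer to the manager */
		ctl->id = c;			    /* pool index doubles as hw CTL ID */
		ctl->reg_offset = ctl_cfg->base[c]; /* per-CTL register base */
		ctl->status = 0;		    /* neither BUSY nor BOOKED yet */
		spin_lock_init(&ctl->hw_lock);
	}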