drivers/gpu/drm/msm/sde/sde_crtc.c +171 −4

@@ -478,6 +478,111 @@ static void sde_crtc_vblank_cb(void *data)
 	SDE_EVT32_IRQ(DRMID(crtc));
 }
 
+static void sde_crtc_frame_event_work(struct kthread_work *work)
+{
+	struct sde_crtc_frame_event *fevent;
+	struct drm_crtc *crtc;
+	struct sde_crtc *sde_crtc;
+	struct sde_kms *sde_kms;
+	unsigned long flags;
+
+	if (!work) {
+		SDE_ERROR("invalid work handle\n");
+		return;
+	}
+
+	fevent = container_of(work, struct sde_crtc_frame_event, work);
+	if (!fevent->crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	crtc = fevent->crtc;
+	sde_crtc = to_sde_crtc(crtc);
+
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+
+	SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+			ktime_to_ns(fevent->ts));
+
+	if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
+			fevent->event == SDE_ENCODER_FRAME_EVENT_ERROR) {
+		if (atomic_read(&sde_crtc->frame_pending) < 1) {
+			/* this should not happen */
+			SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n",
+					crtc->base.id,
+					ktime_to_ns(fevent->ts),
+					atomic_read(&sde_crtc->frame_pending));
+			SDE_EVT32(DRMID(crtc), fevent->event, 0);
+		} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
+			/* release bandwidth and other resources */
+			SDE_DEBUG("crtc%d ts:%lld last pending\n",
+					crtc->base.id, ktime_to_ns(fevent->ts));
+			SDE_EVT32(DRMID(crtc), fevent->event, 1);
+		} else {
+			SDE_EVT32(DRMID(crtc), fevent->event, 2);
+		}
+	} else {
+		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
+				ktime_to_ns(fevent->ts), fevent->event);
+		SDE_EVT32(DRMID(crtc), fevent->event, 3);
+	}
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+}
+
+static void sde_crtc_frame_event_cb(void *data, u32 event)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_crtc_frame_event *fevent = NULL;
+	unsigned long flags;
+	int pipe_id;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	pipe_id = drm_crtc_index(crtc);
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+	SDE_EVT32(DRMID(crtc), event);
+
+	/*
+	 * Pull the next free event from the pool; the pool is exhausted
+	 * when the list is empty.
+	 */
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	if (!list_empty(&sde_crtc->frame_event_list)) {
+		fevent = list_first_entry(&sde_crtc->frame_event_list,
+				struct sde_crtc_frame_event, list);
+		list_del_init(&fevent->list);
+	}
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	if (!fevent) {
+		SDE_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+		SDE_EVT32(DRMID(crtc), event);
+		return;
+	}
+
+	fevent->event = event;
+	fevent->crtc = crtc;
+	fevent->ts = ktime_get();
+	kthread_queue_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
+}
+
 void sde_crtc_complete_commit(struct drm_crtc *crtc,
 		struct drm_crtc_state *old_state)
 {

@@ -839,12 +944,14 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
 {
 	struct drm_encoder *encoder;
 	struct drm_device *dev;
+	struct sde_crtc *sde_crtc;
 
 	if (!crtc) {
 		SDE_ERROR("invalid argument\n");
 		return;
 	}
 	dev = crtc->dev;
+	sde_crtc = to_sde_crtc(crtc);
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		if (encoder->crtc != crtc)

@@ -854,7 +961,29 @@ void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
 		 * Encoder will flush/start now, unless it has a tx pending.
 		 * If so, it may delay and flush at an irq event (e.g. ppdone)
 		 */
-		sde_encoder_schedule_kickoff(encoder);
+		sde_encoder_prepare_for_kickoff(encoder);
+	}
+
+	if (atomic_read(&sde_crtc->frame_pending) > 2) {
+		/* framework allows only 1 outstanding + current */
+		SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 0);
+		return;
+	} else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
+		/* acquire bandwidth and other resources */
+		SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 1);
+	} else {
+		SDE_DEBUG("crtc%d commit\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 2);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		sde_encoder_kickoff(encoder);
 	}
 }

@@ -945,10 +1074,12 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
 	mutex_lock(&sde_crtc->crtc_lock);
+	SDE_EVT32(DRMID(crtc));
+
 	if (atomic_read(&sde_crtc->vblank_refcount)) {
-		SDE_ERROR("crtc%d invalid vblank refcount %d\n",
-				crtc->base.id,
-				atomic_read(&sde_crtc->vblank_refcount));
+		SDE_ERROR("crtc%d invalid vblank refcount\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc));
 		drm_for_each_encoder(encoder, crtc->dev) {
 			if (encoder->crtc != crtc)
 				continue;

@@ -958,6 +1089,20 @@ static void sde_crtc_disable(struct drm_crtc *crtc)
 		atomic_set(&sde_crtc->vblank_refcount, 0);
 	}
 
+	if (atomic_read(&sde_crtc->frame_pending)) {
+		/* release bandwidth and other resources */
+		SDE_ERROR("crtc%d invalid frame pending\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc));
+		atomic_set(&sde_crtc->frame_pending, 0);
+	}
+
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
+	}
+
 	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
 	sde_crtc->num_mixers = 0;
 	mutex_unlock(&sde_crtc->crtc_lock);

@@ -970,6 +1115,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
 	struct sde_hw_mixer *lm;
 	struct drm_display_mode *mode;
 	struct sde_hw_mixer_cfg cfg;
+	struct drm_encoder *encoder;
 	int i;
 
 	if (!crtc) {

@@ -978,6 +1124,7 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
 	}
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
+	SDE_EVT32(DRMID(crtc));
 
 	sde_crtc = to_sde_crtc(crtc);
 	mixer = sde_crtc->mixers;

@@ -989,6 +1136,13 @@ static void sde_crtc_enable(struct drm_crtc *crtc)
 	drm_mode_debug_printmodeline(mode);
 
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		sde_encoder_register_frame_event_callback(encoder,
+				sde_crtc_frame_event_cb, (void *)crtc);
+	}
+
 	for (i = 0; i < sde_crtc->num_mixers; i++) {
 		lm = mixer[i].hw_lm;
 		cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);

@@ -1557,6 +1711,7 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 	struct sde_crtc *sde_crtc = NULL;
 	struct msm_drm_private *priv = NULL;
 	struct sde_kms *kms = NULL;
+	int i;
 
 	priv = dev->dev_private;
 	kms = to_sde_kms(priv->kms);

@@ -1569,6 +1724,18 @@ struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane)
 	crtc->dev = dev;
 	atomic_set(&sde_crtc->vblank_refcount, 0);
 
+	spin_lock_init(&sde_crtc->spin_lock);
+	atomic_set(&sde_crtc->frame_pending, 0);
+
+	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
+	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
+		INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
+		list_add(&sde_crtc->frame_events[i].list,
+				&sde_crtc->frame_event_list);
+		kthread_init_work(&sde_crtc->frame_events[i].work,
+				sde_crtc_frame_event_work);
+	}
+
 	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs,
 			NULL);
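Taken together, sde_crtc_frame_event_cb() (called from the frame-done path) and sde_crtc_frame_event_work() (run on the display kthread worker) implement a fixed-size, preallocated event pool: the callback pulls a free slot under the spinlock and queues it to the worker, and the worker returns the slot to the free list once processed, so nothing is allocated in the event path. Below is a minimal userspace sketch of the same recycling pattern, with a pthread mutex standing in for the spinlock and BSD sys/queue.h lists in place of the kernel list API; all names here are illustrative, not part of the driver.

/* Fixed-pool event recycling, userspace analog of the driver pattern above. */
#include <pthread.h>
#include <stdio.h>
#include <sys/queue.h>

#define POOL_SIZE 2	/* mirrors SDE_CRTC_FRAME_EVENT_SIZE */

struct frame_event {
	unsigned int event;
	LIST_ENTRY(frame_event) link;
};

static LIST_HEAD(, frame_event) free_list = LIST_HEAD_INITIALIZER(free_list);
static struct frame_event pool[POOL_SIZE];
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* callback-context analog: grab a preallocated slot, never allocate */
static struct frame_event *event_get(unsigned int ev)
{
	struct frame_event *fe;

	pthread_mutex_lock(&lock);
	fe = LIST_FIRST(&free_list);
	if (fe)
		LIST_REMOVE(fe, link);
	pthread_mutex_unlock(&lock);

	if (!fe) {
		fprintf(stderr, "event %u overflow\n", ev); /* pool empty */
		return NULL;
	}
	fe->event = ev;
	return fe;
}

/* worker-context analog: process, then return the slot to the pool */
static void event_put(struct frame_event *fe)
{
	pthread_mutex_lock(&lock);
	LIST_INSERT_HEAD(&free_list, fe, link);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	struct frame_event *fe;
	int i;

	for (i = 0; i < POOL_SIZE; i++)
		LIST_INSERT_HEAD(&free_list, &pool[i], link);

	fe = event_get(1);
	if (fe) {
		printf("handling event %u\n", fe->event);
		event_put(fe);
	}
	return 0;
}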
drivers/gpu/drm/msm/sde/sde_crtc.h +28 −1

@@ -26,6 +26,9 @@
 
 #define SDE_CRTC_NAME_SIZE	12
 
+/* define the maximum number of in-flight frame events */
+#define SDE_CRTC_FRAME_EVENT_SIZE	2
+
 /**
  * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
  * @hw_lm:	LM HW Driver context

@@ -44,6 +47,22 @@ struct sde_crtc_mixer {
 	u32 flush_mask;
 };
 
+/**
+ * struct sde_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work:	base work structure
+ * @crtc:	Pointer to crtc handling this event
+ * @list:	event list
+ * @ts:		timestamp at queue entry
+ * @event:	event identifier
+ */
+struct sde_crtc_frame_event {
+	struct kthread_work work;
+	struct drm_crtc *crtc;
+	struct list_head list;
+	ktime_t ts;
+	u32 event;
+};
+
 /**
  * struct sde_crtc - virtualized CRTC data structure
  * @base          : Base drm crtc structure

@@ -53,7 +72,6 @@
  * @mixer         : List of active mixers
  * @event         : Pointer to last received drm vblank event. If there is a
  *                  pending vblank event, this will be non-null.
- * @pending       : Whether or not an update is pending
  * @vsync_count   : Running count of received vsync events
  * @drm_requested_vblank : Whether vblanks have been enabled in the encoder
  * @property_info : Opaque structure for generic property support

@@ -67,6 +85,10 @@
  * @active_list   : list of color processing features are active
  * @dirty_list    : list of color processing features are dirty
  * @crtc_lock     : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events  : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock     : spin lock for frame event, transaction status, etc...
  */
 struct sde_crtc {
 	struct drm_crtc base;

@@ -99,6 +121,11 @@ struct sde_crtc {
 	struct list_head dirty_list;
 	struct mutex crtc_lock;
 
+	atomic_t frame_pending;
+	struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
+	struct list_head frame_event_list;
+	spinlock_t spin_lock;
 };
 
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
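The frame_pending counter added here is the contract between commit and completion: sde_crtc_commit_kickoff() increments it (erroring out above 2, i.e. one outstanding frame plus the current one), and the frame-event worker decrements it, acquiring bandwidth on the first commit and releasing it when the count returns to zero. A compilable sketch of that handshake follows, using C11 atomics in place of the kernel's atomic_t; the helper names are hypothetical.

/* frame_pending handshake, mirroring the driver's commit/done protocol */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int frame_pending;

static int commit_kickoff(void)	/* cf. sde_crtc_commit_kickoff() */
{
	if (atomic_load(&frame_pending) > 2) {
		fprintf(stderr, "invalid frame pending\n");
		return -1;
	}
	if (atomic_fetch_add(&frame_pending, 1) + 1 == 1)
		printf("first commit: acquire bandwidth/resources\n");
	return 0;
}

static void frame_done(void)	/* cf. sde_crtc_frame_event_work() */
{
	if (atomic_load(&frame_pending) < 1)
		fprintf(stderr, "unexpected frame done\n");
	else if (atomic_fetch_sub(&frame_pending, 1) - 1 == 0)
		printf("last pending: release bandwidth/resources\n");
}

int main(void)
{
	commit_kickoff();	/* frame 1 in flight */
	commit_kickoff();	/* frame 2 queued behind it */
	frame_done();
	frame_done();		/* count back to 0: resources released */
	return 0;
}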
drivers/gpu/drm/msm/sde/sde_encoder.c +132 −2

@@ -39,6 +39,9 @@
 #define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
 		(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
 
+/* timeout in frames waiting for frame done */
+#define SDE_ENCODER_FRAME_DONE_TIMEOUT	60
+
 /*
  * Two to anticipate panels that can do cmd/vid dynamic switching
  * plan is to create all possible physical encoder types, and switch between

@@ -75,6 +78,14 @@
  * @debugfs_root:	Debug file system root file node
  * @enc_lock:		Lock around physical encoder create/destroy and access.
+ * @frame_busy_mask:	Bitmask tracking which phys_encs are still busy
+ *			processing the current command; Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb:	callback handler for frame event
+ * @crtc_frame_event_cb_data:	callback handler private data
+ * @crtc_frame_event:	callback event
+ * @frame_done_timeout:	frame done timeout in ms
+ * @frame_done_timer:	watchdog timer for frame done event
  */
 struct sde_encoder_virt {
 	struct drm_encoder base;

@@ -93,6 +104,13 @@ struct sde_encoder_virt {
 	struct dentry *debugfs_root;
 	struct mutex enc_lock;
+	DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+	void (*crtc_frame_event_cb)(void *, u32 event);
+	void *crtc_frame_event_cb_data;
+	u32 crtc_frame_event;
+
+	atomic_t frame_done_timeout;
+	struct timer_list frame_done_timer;
 };
 
 #define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)

@@ -511,6 +529,11 @@ static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
 	SDE_EVT32(DRMID(drm_enc));
 
+	if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+		SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+		del_timer_sync(&sde_enc->frame_done_timer);
+	}
+
 	for (i = 0; i < sde_enc->num_phys_encs; i++) {
 		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];

@@ -627,6 +650,56 @@ void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
 	}
 }
 
+void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+		void (*frame_event_cb)(void *, u32 event),
+		void *frame_event_cb_data)
+{
+	struct sde_encoder_virt *sde_enc;
+	unsigned long lock_flags;
+	bool enable;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	enable = frame_event_cb ? true : false;
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_EVT32(DRMID(drm_enc), enable, 0);
+
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+	sde_enc->crtc_frame_event_cb = frame_event_cb;
+	sde_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+static void sde_encoder_frame_done_callback(
+		struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *ready_phys, u32 event)
+{
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+	unsigned int i;
+
+	/* One of the physical encoders has become idle */
+	for (i = 0; i < sde_enc->num_phys_encs; i++)
+		if (sde_enc->phys_encs[i] == ready_phys) {
+			clear_bit(i, sde_enc->frame_busy_mask);
+			sde_enc->crtc_frame_event |= event;
+			SDE_EVT32(DRMID(drm_enc), i,
+					sde_enc->frame_busy_mask[0]);
+		}
+
+	if (!sde_enc->frame_busy_mask[0]) {
+		atomic_set(&sde_enc->frame_done_timeout, 0);
+		del_timer(&sde_enc->frame_done_timer);
+
+		if (sde_enc->crtc_frame_event_cb)
+			sde_enc->crtc_frame_event_cb(
+					sde_enc->crtc_frame_event_cb_data,
+					sde_enc->crtc_frame_event);
+	}
+}
+
 /**
  * _sde_encoder_trigger_flush - trigger flush for a physical encoder
  * drm_enc: Pointer to drm encoder structure
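sde_encoder_frame_done_callback() above fans per-physical-encoder completions back into a single CRTC notification: each phys encoder owns one bit in frame_busy_mask, events are OR-accumulated into crtc_frame_event, and the CRTC callback fires only once every busy bit has cleared. A standalone sketch of that aggregation, with a plain unsigned long standing in for the kernel bitmap and illustrative names throughout:

/* busy-mask aggregation: notify only when all phys encoders are idle */
#include <stdio.h>

#define NUM_PHYS	2
#define EVENT_DONE	(1u << 0)
#define EVENT_ERROR	(1u << 1)

static unsigned long busy_mask;
static unsigned int accum_event;

static void kickoff(void)
{
	int i;

	accum_event = 0;
	for (i = 0; i < NUM_PHYS; i++)
		busy_mask |= 1ul << i;	/* cf. set_bit() per phys enc */
}

static void phys_frame_done(int idx, unsigned int event)
{
	busy_mask &= ~(1ul << idx);	/* cf. clear_bit() */
	accum_event |= event;

	if (!busy_mask)			/* all phys encs idle */
		printf("notify crtc, event mask 0x%x\n", accum_event);
}

int main(void)
{
	kickoff();
	phys_frame_done(0, EVENT_DONE);	/* still waiting on phys 1 */
	phys_frame_done(1, EVENT_DONE);	/* fires the crtc callback */
	return 0;
}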
@@ -742,6 +815,7 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
 	}
 
 	pending_flush = 0x0;
+	sde_enc->crtc_frame_event = 0;
 
 	/* update pending counts and trigger kickoff ctl flush atomically */
 	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);

@@ -757,6 +831,8 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
 		if (!ctl)
 			continue;
 
+		set_bit(i, sde_enc->frame_busy_mask);
+
 		if (!phys->ops.needs_single_flush ||
 				!phys->ops.needs_single_flush(phys))
 			_sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);

@@ -777,7 +853,7 @@ static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
 	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
 }
 
-void sde_encoder_schedule_kickoff(struct drm_encoder *drm_enc)
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
 {
 	struct sde_encoder_virt *sde_enc;
 	struct sde_encoder_phys *phys;

@@ -798,8 +874,29 @@ void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
 		if (phys && phys->ops.prepare_for_kickoff)
 			phys->ops.prepare_for_kickoff(phys);
 	}
+}
+
+void sde_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	unsigned int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	atomic_set(&sde_enc->frame_done_timeout,
+			SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 /
+			drm_enc->crtc->state->adjusted_mode.vrefresh);
+	mod_timer(&sde_enc->frame_done_timer, jiffies +
+			((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
 
-	/* all phys encs are ready to go, trigger the kickoff */
+	/* All phys encs are ready to go, trigger the kickoff */
 	_sde_encoder_kickoff_phys(sde_enc);
 
 	/* allow phys encs to handle any post-kickoff business */

@@ -1094,6 +1191,7 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
 	struct sde_encoder_virt_ops parent_ops = {
 		sde_encoder_vblank_callback,
 		sde_encoder_underrun_callback,
+		sde_encoder_frame_done_callback,
 	};
 	struct sde_enc_phys_init_params phys_params;

@@ -1196,6 +1294,34 @@ static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
 	return ret;
 }
 
+static void sde_encoder_frame_done_timeout(unsigned long data)
+{
+	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
+	struct sde_encoder_virt *sde_enc;
+	struct msm_drm_private *priv;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	priv = drm_enc->dev->dev_private;
+
+	if (!sde_enc->frame_busy_mask[0] || !sde_enc->crtc_frame_event_cb) {
+		SDE_DEBUG("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc), sde_enc->frame_busy_mask[0], 0);
+		return;
+	} else if (!atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+		SDE_ERROR("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc), 0, 1);
+		return;
+	}
+
+	SDE_EVT32(DRMID(drm_enc), 0, 2);
+	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data,
+			SDE_ENCODER_FRAME_EVENT_ERROR);
+}
+
 struct drm_encoder *sde_encoder_init(
 		struct drm_device *dev,
 		struct msm_display_info *disp_info)

@@ -1226,6 +1352,10 @@ struct drm_encoder *sde_encoder_init(
 	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
 	bs_init(sde_enc);
 
+	atomic_set(&sde_enc->frame_done_timeout, 0);
+	setup_timer(&sde_enc->frame_done_timer,
+			sde_encoder_frame_done_timeout, (unsigned long) sde_enc);
+
 	_sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms);
 
 	SDE_DEBUG_ENC(sde_enc, "created\n");
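The watchdog armed in sde_encoder_kickoff() is sized in frame periods rather than wall-clock time: SDE_ENCODER_FRAME_DONE_TIMEOUT (60) frames are converted to milliseconds via the mode's vrefresh, then to jiffies for mod_timer(), so a 60 Hz panel gets a 1000 ms timeout. A worked example of the arithmetic; the HZ value here is illustrative, since it is config-dependent in a real kernel:

/* watchdog arithmetic from sde_encoder_kickoff(), as standalone C */
#include <stdio.h>

#define SDE_ENCODER_FRAME_DONE_TIMEOUT	60	/* frames */
#define HZ				100	/* jiffies per second, illustrative */

int main(void)
{
	unsigned int vrefresh = 60;	/* panel refresh rate, Hz */

	/* 60 frame periods in milliseconds: 60 * 1000 / 60 = 1000 ms */
	unsigned int timeout_ms =
		SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 / vrefresh;

	/* ms -> jiffies, as done for mod_timer(): 1000 * 100 / 1000 = 100 */
	unsigned long timeout_jiffies = (unsigned long)timeout_ms * HZ / 1000;

	printf("%u ms = %lu jiffies\n", timeout_ms, timeout_jiffies);
	return 0;
}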
drivers/gpu/drm/msm/sde/sde_encoder.h +24 −7

@@ -24,6 +24,9 @@
 #include "msm_prop.h"
 #include "sde_hw_mdss.h"
 
+#define SDE_ENCODER_FRAME_EVENT_DONE	BIT(0)
+#define SDE_ENCODER_FRAME_EVENT_ERROR	BIT(1)
+
 /**
  * Encoder functions and data types
  * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused

@@ -59,16 +62,30 @@
 void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
 		void (*cb)(void *), void *data);
 
 /**
- * sde_encoder_schedule_kickoff - Register a callback with the encoder to
- *	trigger a double buffer flip of the ctl path (i.e. ctl flush and start)
- *	at the appropriate time.
+ * sde_encoder_register_frame_event_callback - provide callback to encoder that
+ *	will be called after the request is complete, or other events.
+ * @encoder:	encoder pointer
+ * @cb:		callback pointer, provide NULL to deregister
+ * @data:	user data provided to callback
+ */
+void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+		void (*cb)(void *, u32), void *data);
+
+/**
+ * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ *	path (i.e. ctl flush and start) at next appropriate time.
 *	Immediately: if no previous commit is outstanding.
- *	Delayed: Save the callback, and return. Does not block. Callback will
- *	be triggered later. E.g. cmd encoder will trigger at pp_done
- *	irq if it outstanding.
+ *	Delayed: Block until next trigger can be issued.
 * @encoder:	encoder pointer
 */
-void sde_encoder_schedule_kickoff(struct drm_encoder *encoder);
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
+ *	(i.e. ctl flush and start) immediately.
+ * @encoder:	encoder pointer
+ */
+void sde_encoder_kickoff(struct drm_encoder *drm_enc);
 
 /**
  * sde_encoder_wait_nxt_committed - Wait for hardware to have flushed the

drivers/gpu/drm/msm/sde/sde_encoder_phys.h +4 −0

@@ -56,12 +56,16 @@ struct sde_encoder_phys;
  *			Note: This is called from IRQ handler context.
  * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
  *			Note: This is called from IRQ handler context.
+ * @handle_frame_done:	Notify virtual encoder that this phys encoder has
+ *			completed the last requested frame.
  */
 struct sde_encoder_virt_ops {
 	void (*handle_vblank_virt)(struct drm_encoder *,
 			struct sde_encoder_phys *phys);
 	void (*handle_underrun_virt)(struct drm_encoder *,
 			struct sde_encoder_phys *phys);
+	void (*handle_frame_done)(struct drm_encoder *,
+			struct sde_encoder_phys *phys, u32 event);
 };
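The headers above imply a simple lifecycle for a CRTC driving this API: register the frame-event callback at enable, split each commit into prepare_for_kickoff() plus kickoff(), and deregister with a NULL callback at disable. A sketch of that sequence in kernel style follows; it will not compile outside the driver tree, and the crtc_*_flow wrapper names are hypothetical, only the sde_encoder_* calls come from the headers above.

/* CRTC-side handler: runs for DONE and ERROR events alike */
static void frame_event_cb(void *data, u32 event)
{
	struct drm_crtc *crtc = data;

	if (event & SDE_ENCODER_FRAME_EVENT_ERROR)
		SDE_ERROR("crtc%d frame failed\n", crtc->base.id);
}

static void crtc_enable_flow(struct drm_crtc *crtc, struct drm_encoder *enc)
{
	/* at enable: subscribe to frame completion events */
	sde_encoder_register_frame_event_callback(enc, frame_event_cb, crtc);
}

static void crtc_commit_flow(struct drm_encoder *enc)
{
	/* per commit: prepare (may wait out a pending tx), then kick off;
	 * frame_event_cb() later fires from the frame-done path, or with
	 * SDE_ENCODER_FRAME_EVENT_ERROR if the watchdog expires first
	 */
	sde_encoder_prepare_for_kickoff(enc);
	sde_encoder_kickoff(enc);
}

static void crtc_disable_flow(struct drm_encoder *enc)
{
	/* at disable: deregister by passing a NULL callback */
	sde_encoder_register_frame_event_callback(enc, NULL, NULL);
}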